refactor SC mapping into a dedicated gbp driver

All SC-related operations (create/delete/update chains) now live
in a separate gbp mapping driver. This driver is intended to run
last in the driver chain (ipd, rmd, then cmd).
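For illustration, with the reference drivers this corresponds to:

policy_drivers = implicit_policy,resource_mapping,chain_mapping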

Partially implements blueprint node-centric-chain-plugin
Change-Id: I1f329101f32640058ed5250e8fe49a53b1f3deee
This commit is contained in:
Ivar Lazzaro 2015-09-24 14:28:18 -07:00
parent 280d00dbdd
commit 90de127ec7
16 changed files with 862 additions and 801 deletions

View File

@ -0,0 +1,12 @@
[chain_mapping]
# Chain owner username. If set, will be used in place of the Neutron service
# admin for retrieving tenant owner information through Keystone.
# chain_owner_user = <username>
# Chain owner password.
# chain_owner_password = <secret>
# Name of the Tenant that will own the service chain instances for this driver.
# Leave empty for provider owned chains.
# chain_owner_tenant_name = <tenant_name>
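An illustrative, filled-in section (all values hypothetical):

[chain_mapping]
chain_owner_user = chain_admin
chain_owner_password = chain_secret
chain_owner_tenant_name = chain_services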

View File

@ -2,14 +2,3 @@
# DNS nameservers to be configured in the PTG subnets by this driver.
# dns_nameservers = 8.8.8.7, 8.8.8.8
# Chain owner username. If set, will be used in place of the Neutron service
# admin for retrieving tenant owner information through Keystone.
# chain_owner_user = <username>
# Chain owner password.
# chain_owner_password = <secret>
# Name of the Tenant that will own the service chain instances for this driver.
# Leave empty for provider owned chains.
# chain_owner_tenant_name = <tenant_name>

View File

@ -2,7 +2,7 @@
# (ListOpt) An ordered list of policy driver entrypoints to be loaded
# from the gbp.neutron.group_policy.policy_drivers namespace.
# policy_drivers =
# Example: policy_drivers = implicit_policy,resource_mapping
# Example: policy_drivers = implicit_policy,resource_mapping,chain_mapping
# (ListOpt) An ordered list of extension driver entrypoints to be
# loaded from the gbp.neutron.group_policy.extension_drivers

View File

@ -90,4 +90,4 @@ def get_keystone_creds():
def set_difference(iterable_1, iterable_2):
set1 = set(iterable_1)
set2 = set(iterable_2)
return (set1 - set2), (set2 - set1)
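A minimal usage sketch of this helper (inputs are hypothetical):

from gbpservice.common import utils

# Rule-set IDs before and after a hypothetical PTG update.
orig = ['prs-1', 'prs-2']
curr = ['prs-2', 'prs-3']

removed, added = utils.set_difference(orig, curr)
# removed == {'prs-1'}  -- in orig but not in curr
# added == {'prs-3'}    -- in curr but not in orig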

View File

@ -0,0 +1,657 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as k_client
from neutron.common import log
from neutron.db import model_base
from neutron.db import models_v2
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import sqlalchemy as sa
from gbpservice.common import utils
from gbpservice.network.neutronv2 import local_api
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpdb
from gbpservice.neutron.db import servicechain_db # noqa
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.drivers import nsp_manager
LOG = logging.getLogger(__name__)
SCI_CONSUMER_NOT_AVAILABLE = 'N/A'
chain_mapping_opts = [
cfg.StrOpt('chain_owner_user',
help=_("Chain owner username. If set, will be used in "
"place of the Neutron service admin for retrieving "
"tenant owner information through Keystone."),
default=''),
cfg.StrOpt('chain_owner_password',
help=_("Chain owner password."), default='',
secret=True),
cfg.StrOpt('chain_owner_tenant_name',
help=_("Name of the Tenant that will own the service chain "
"instances for this driver. Leave empty for provider "
"owned chains."), default=''),
]
cfg.CONF.register_opts(chain_mapping_opts, "chain_mapping")
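# These options back the new [chain_mapping] sample configuration section
# added by this commit.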
class PtgServiceChainInstanceMapping(model_base.BASEV2, models_v2.HasTenant):
"""Policy Target Group to ServiceChainInstance mapping DB."""
__tablename__ = 'gpm_ptgs_servicechain_mapping'
provider_ptg_id = sa.Column(sa.String(36),
sa.ForeignKey('gp_policy_target_groups.id',
ondelete='CASCADE'),
nullable=False)
# Consumer PTG could be an External Policy
consumer_ptg_id = sa.Column(sa.String(36), nullable=False)
servicechain_instance_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_instances.id',
ondelete='CASCADE'),
primary_key=True)
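# NOTE: servicechain_instance_id is the sole primary key, so every chain
# instance maps back to exactly one provider/consumer pair.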
class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
nsp_manager.NetworkServicePolicyMappingMixin):
"""Resource Mapping driver for Group Policy plugin.
This driver implements service chain semantics by mapping group
policy resources to various service chain constructs.
"""
@log.log
def initialize(self):
self._cached_agent_notifier = None
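# The chain owner tenant is resolved once at initialization; with
# reraise=True a misconfigured owner aborts driver startup.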
self.chain_owner = ChainMappingDriver.chain_tenant_id(reraise=True)
@staticmethod
def chain_tenant_id(reraise=False):
keystone = ChainMappingDriver.chain_tenant_keystone_client()
if keystone:
tenant = cfg.CONF.chain_mapping.chain_owner_tenant_name
try:
# Can it be retrieved directly, without a further keystone
# call?
tenant = keystone.tenants.find(name=tenant)
return tenant.id
except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=reraise):
LOG.error(_('No tenant with name %s exists.'), tenant)
except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=reraise):
LOG.error(_('Multiple tenant matches found for %s'),
tenant)
@staticmethod
def chain_tenant_keystone_client():
chain_user = cfg.CONF.chain_mapping.chain_owner_user
user, pwd, tenant, auth_url = utils.get_keystone_creds()
user = (chain_user or user)
pwd = (cfg.CONF.chain_mapping.chain_owner_password or
(pwd if not chain_user else ''))
# Tenant must be configured in the chain_mapping section; the
# provider owner will be used otherwise.
tenant = cfg.CONF.chain_mapping.chain_owner_tenant_name
if tenant:
return k_client.Client(username=user, password=pwd,
auth_url=auth_url)
@log.log
def create_policy_target_group_precommit(self, context):
self._validate_ptg_prss(context, context.current)
@log.log
def create_policy_target_group_postcommit(self, context):
self._handle_policy_rule_sets(context)
@log.log
def update_policy_target_group_precommit(self, context):
self._validate_ptg_prss(context, context.current)
self._stash_ptg_modified_chains(context)
@log.log
def update_policy_target_group_postcommit(self, context):
# Update service chain instance when any ruleset is changed
orig = context.original
curr = context.current
new_provided_policy_rule_sets = list(
set(curr['provided_policy_rule_sets']) - set(
orig['provided_policy_rule_sets']))
# Only the ones set in context in precommit operation will be deleted
self._cleanup_redirect_action(context)
# If the spec is changed, then update the chain with new spec
# If redirect is newly added, create the chain
if self._is_redirect_in_policy_rule_sets(
context, new_provided_policy_rule_sets) and not (
context.current.get('proxied_group_id')):
policy_rule_sets = (curr['provided_policy_rule_sets'] +
orig['provided_policy_rule_sets'])
self._handle_redirect_action(context, policy_rule_sets)
@log.log
def delete_policy_target_group_precommit(self, context):
provider_ptg_chain_map = self._get_ptg_servicechain_mapping(
context._plugin_context.session, context.current['id'], None)
consumer_ptg_chain_map = self._get_ptg_servicechain_mapping(
context._plugin_context.session, None, context.current['id'])
context.ptg_chain_map = provider_ptg_chain_map + consumer_ptg_chain_map
@log.log
def update_policy_classifier_postcommit(self, context):
self._handle_classifier_update_notification(context)
@log.log
def create_policy_action_precommit(self, context):
spec_id = context.current['action_value']
if spec_id:
specs = self._get_servicechain_specs(
context._plugin_context, filters={'id': [spec_id]})
for spec in specs:
if not spec.get('shared', False):
self._reject_shared(context.current, 'policy_action')
@log.log
def update_policy_action_postcommit(self, context):
self._handle_redirect_spec_id_update(context)
@log.log
def create_policy_rule_precommit(self, context):
self._reject_multiple_redirects_in_rule(context)
@log.log
def update_policy_rule_precommit(self, context):
self._reject_multiple_redirects_in_rule(context)
old_redirect = self._get_redirect_action(context, context.original)
new_redirect = self._get_redirect_action(context, context.current)
if not old_redirect and new_redirect:
# If redirect action is added, check that there's no contract that
# already has a redirect action
for prs in context._plugin.get_policy_rule_sets(
context._plugin_context,
{'id': context.current['policy_rule_sets']}):
# Make sure the PRS can have a new redirect action
self._validate_new_prs_redirect(context, prs)
@log.log
def update_policy_rule_postcommit(self, context):
old_classifier_id = context.original['policy_classifier_id']
new_classifier_id = context.current['policy_classifier_id']
old_action_set = set(context.original['policy_actions'])
new_action_set = set(context.current['policy_actions'])
if (old_classifier_id != new_classifier_id or
old_action_set != new_action_set):
policy_rule_sets = (
context._plugin._get_policy_rule_policy_rule_sets(
context._plugin_context, context.current['id']))
old_redirect_policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': context.original['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
new_redirect_policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': context.current['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if old_redirect_policy_actions or new_redirect_policy_actions:
self._handle_redirect_action(context, policy_rule_sets)
@log.log
def create_policy_rule_set_precommit(self, context):
self._reject_multiple_redirects_in_prs(context)
@log.log
def create_policy_rule_set_postcommit(self, context):
if context.current['child_policy_rule_sets']:
self._handle_redirect_action(
context, context.current['child_policy_rule_sets'])
@log.log
def update_policy_rule_set_precommit(self, context):
self._reject_multiple_redirects_in_prs(context)
# If a redirect action is added (from 0 to one) we have to validate
# the providing and consuming PTGs. Not needed at creation time since
# no PTG could be possibly providing or consuming it
old_red_count = self._multiple_pr_redirect_action_number(
context._plugin_context.session, context.original['policy_rules'])
new_red_count = self._multiple_pr_redirect_action_number(
context._plugin_context.session, context.current['policy_rules'])
if new_red_count > old_red_count:
self._validate_new_prs_redirect(context, context.current)
@log.log
def update_policy_rule_set_postcommit(self, context):
# Handle any Redirects from the current Policy Rule Set
self._handle_redirect_action(context, [context.current['id']])
# Handle Update/Delete of Redirects for any child Rule Sets
if (set(context.original['child_policy_rule_sets']) !=
set(context.current['child_policy_rule_sets'])):
if context.original['child_policy_rule_sets']:
self._handle_redirect_action(
context, context.original['child_policy_rule_sets'])
if context.current['child_policy_rule_sets']:
self._handle_redirect_action(
context, context.current['child_policy_rule_sets'])
@log.log
def delete_policy_rule_set_postcommit(self, context):
if context.current['child_policy_rule_sets']:
self._handle_redirect_action(
context, context.current['child_policy_rule_sets'])
def _handle_policy_rule_sets(self, context):
consumed_prs = context.current['consumed_policy_rule_sets']
provided_prs = context.current['provided_policy_rule_sets']
if provided_prs and not context.current.get('proxied_group_id'):
policy_rule_sets = consumed_prs + provided_prs
self._handle_redirect_action(context, policy_rule_sets)
def _handle_redirect_spec_id_update(self, context):
if (context.current['action_type'] != gconst.GP_ACTION_REDIRECT
or context.current['action_value'] ==
context.original['action_value']):
return
spec = self._servicechain_plugin._get_servicechain_spec(
context._plugin_context, context.original['action_value'])
for servicechain_instance in spec.instances:
sc_instance_id = servicechain_instance.servicechain_instance_id
sc_instance = self._servicechain_plugin.get_servicechain_instance(
context._plugin_context, sc_instance_id)
old_specs = sc_instance['servicechain_specs']
# Use the parent/child redirect spec as it is. Only replace the
# current one
new_specs = [context.current['action_value'] if
x == context.original['action_value'] else
x for x in old_specs]
self._update_servicechain_instance(
context._plugin_context,
servicechain_instance.servicechain_instance_id,
sc_specs=new_specs)
def _update_servicechain_instance(self, plugin_context, sc_instance_id,
classifier_id=None, sc_specs=None):
sc_instance_update_data = {}
if sc_specs:
sc_instance_update_data.update({'servicechain_specs': sc_specs})
if classifier_id:
sc_instance_update_data.update({'classifier_id': classifier_id})
super(ChainMappingDriver, self)._update_servicechain_instance(
self._get_chain_admin_context(
plugin_context, instance_id=sc_instance_id),
sc_instance_id, sc_instance_update_data)
# This method either updates an existing chain instance, creates a new
# one, or deletes the existing one. In case of updates, the parameters
# that can be changed are the service chain spec and the classifier ID.
def _handle_redirect_action(self, context, policy_rule_set_ids):
policy_rule_sets = context._plugin.get_policy_rule_sets(
context._plugin_context,
filters={'id': policy_rule_set_ids})
for policy_rule_set in policy_rule_sets:
ptgs_providing_prs = policy_rule_set[
'providing_policy_target_groups']
# Create the ServiceChain instance once a providing PTG exists; the
# consumer PTG is no longer required by this driver. If labels are
# available, they have to be applied.
if not ptgs_providing_prs:
continue
ptgs_providing_prs = context._plugin.get_policy_target_groups(
context._plugin_context.elevated(), {'id': ptgs_providing_prs})
parent_classifier_id = None
parent_spec_id = None
if policy_rule_set['parent_id']:
parent = context._plugin.get_policy_rule_set(
context._plugin_context, policy_rule_set['parent_id'])
policy_rules = context._plugin.get_policy_rules(
context._plugin_context,
filters={'id': parent['policy_rules']})
for policy_rule in policy_rules:
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule["policy_actions"],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if policy_actions:
parent_spec_id = policy_actions[0].get("action_value")
parent_classifier_id = policy_rule.get(
"policy_classifier_id")
break # only one redirect action is supported
policy_rules = context._plugin.get_policy_rules(
context._plugin_context,
filters={'id': policy_rule_set['policy_rules']})
for policy_rule in policy_rules:
hierarchial_classifier_mismatch = False
classifier_id = policy_rule.get("policy_classifier_id")
if parent_classifier_id and (parent_classifier_id !=
classifier_id):
hierarchial_classifier_mismatch = True
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule.get("policy_actions"),
'action_type': [gconst.GP_ACTION_REDIRECT]})
# Only one Redirect action per PRS. The chain may belong to
# another PRS in which case the chain should not be deleted
if (self._is_redirect_in_policy_rule_sets(
context, policy_rule_set_ids) and not policy_actions):
continue
spec_id = (policy_actions and policy_actions[0]['action_value']
or None)
for ptg_providing_prs in ptgs_providing_prs:
# REVISIT(Magesh): There may be concurrency issues here
if not ptg_providing_prs.get('proxied_group_id'):
self._create_or_update_chain(
context, ptg_providing_prs['id'],
SCI_CONSUMER_NOT_AVAILABLE, spec_id,
parent_spec_id, classifier_id,
hierarchial_classifier_mismatch,
policy_rule_set)
def _create_or_update_chain(self, context, provider, consumer, spec_id,
parent_spec_id, classifier_id,
hierarchial_classifier_mismatch, prs_id):
ptg_chain_map = self._get_ptg_servicechain_mapping(
context._plugin_context.session, provider)
if ptg_chain_map:
if hierarchial_classifier_mismatch or not spec_id:
ctx = self._get_chain_admin_context(
context._plugin_context,
tenant_id=ptg_chain_map[0].tenant_id)
self._delete_servicechain_instance(
ctx, ptg_chain_map[0].servicechain_instance_id)
else:
sc_specs = [spec_id]
if parent_spec_id:
sc_specs.insert(0, parent_spec_id)
# One Chain between a unique pair of provider and consumer
self._update_servicechain_instance(
context._plugin_context,
ptg_chain_map[0].servicechain_instance_id,
classifier_id=classifier_id,
sc_specs=sc_specs)
elif spec_id and not hierarchial_classifier_mismatch:
self._create_servicechain_instance(
context, spec_id, parent_spec_id, provider,
SCI_CONSUMER_NOT_AVAILABLE, classifier_id, prs_id)
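# NOTE: decision summary for _create_or_update_chain: existing chain +
# (classifier mismatch or no spec) -> delete; existing chain + spec ->
# update in place; no chain + spec and no mismatch -> create.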
def _cleanup_redirect_action(self, context):
for ptg_chain in context.ptg_chain_map:
ctx = self._get_chain_admin_context(context._plugin_context,
tenant_id=ptg_chain.tenant_id)
self._delete_servicechain_instance(
ctx, ptg_chain.servicechain_instance_id)
def _create_servicechain_instance(self, context, servicechain_spec,
parent_servicechain_spec,
provider_ptg_id, consumer_ptg_id,
classifier_id, policy_rule_set):
sc_spec = [servicechain_spec]
if parent_servicechain_spec:
sc_spec.insert(0, parent_servicechain_spec)
config_param_values = {}
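# config_param_values is filled from the provider's Network Service
# Policy below: 'self_subnet' params resolve to the PTG's reserved IP,
# 'nat_pool' params to the list of allocated floating IP IDs.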
provider_ptg = context._plugin.get_policy_target_group(
utils.admin_context(context._plugin_context), provider_ptg_id)
p_ctx = self._get_chain_admin_context(
context._plugin_context,
provider_tenant_id=provider_ptg['tenant_id'])
session = context._plugin_context.session
network_service_policy_id = provider_ptg.get(
"network_service_policy_id")
if network_service_policy_id:
nsp = context._plugin.get_network_service_policy(
p_ctx, network_service_policy_id)
service_params = nsp.get("network_service_params")
for service_parameter in service_params:
param_type = service_parameter.get("type")
param_value = service_parameter.get("value")
if param_type == "ip_single" and param_value == "self_subnet":
key = service_parameter.get("name")
servicepolicy_ptg_ip_map = (
self._get_ptg_policy_ipaddress_mapping(
session, provider_ptg_id))
servicepolicy_ip = servicepolicy_ptg_ip_map.get(
"ipaddress")
config_param_values[key] = servicepolicy_ip
elif param_type == "ip_single" and param_value == "nat_pool":
key = service_parameter.get("name")
fip_maps = (
self._get_ptg_policy_fip_mapping(
context._plugin_context.session,
provider_ptg_id))
servicepolicy_fip_ids = []
for fip_map in fip_maps:
servicepolicy_fip_ids.append(fip_map.floatingip_id)
config_param_values[key] = servicepolicy_fip_ids
name = 'gbp_%s_%s' % (policy_rule_set['name'], provider_ptg['name'])
attrs = {'tenant_id': p_ctx.tenant,
'name': name,
'description': "",
'servicechain_specs': sc_spec,
'provider_ptg_id': provider_ptg_id,
'consumer_ptg_id': SCI_CONSUMER_NOT_AVAILABLE,
'management_ptg_id': None,
'classifier_id': classifier_id,
'config_param_values': jsonutils.dumps(config_param_values)}
sc_instance = super(
ChainMappingDriver, self)._create_servicechain_instance(
p_ctx, attrs)
self._set_ptg_servicechain_instance_mapping(
session, provider_ptg_id, SCI_CONSUMER_NOT_AVAILABLE,
sc_instance['id'], p_ctx.tenant)
return sc_instance
def _set_ptg_servicechain_instance_mapping(self, session, provider_ptg_id,
consumer_ptg_id,
servicechain_instance_id,
provider_tenant_id):
with session.begin(subtransactions=True):
mapping = PtgServiceChainInstanceMapping(
provider_ptg_id=provider_ptg_id,
consumer_ptg_id=consumer_ptg_id,
servicechain_instance_id=servicechain_instance_id,
tenant_id=provider_tenant_id)
session.add(mapping)
def _get_ptg_servicechain_mapping(self, session, provider_ptg_id=None,
consumer_ptg_id=None, tenant_id=None,
servicechain_instance_id=None):
with session.begin(subtransactions=True):
query = session.query(PtgServiceChainInstanceMapping)
if provider_ptg_id:
query = query.filter_by(provider_ptg_id=provider_ptg_id)
if consumer_ptg_id:
query = query.filter_by(consumer_ptg_id=consumer_ptg_id)
if servicechain_instance_id:
query = query.filter_by(
servicechain_instance_id=servicechain_instance_id)
if tenant_id:
query = query.filter_by(tenant_id=tenant_id)
all = query.all()
return [utils.DictClass([('provider_ptg_id', x.provider_ptg_id),
('consumer_ptg_id', x.consumer_ptg_id),
('servicechain_instance_id',
x.servicechain_instance_id),
('tenant_id', x.tenant_id)])
for x in all]
def _get_chain_admin_context(self, plugin_context, tenant_id=None,
provider_tenant_id=None, instance_id=None):
ctx = plugin_context.elevated()
# REVISIT(Ivar): Any particular implication when a provider owned PT
# exists in the consumer PTG? Especially when the consumer PTG belongs
# to another tenant? We may want to consider a strong convention
# for reference plumbers to absolutely avoid this kind of inter tenant
# object creation when the owner is the provider (in which case, the
# context can as well be a normal context without admin capabilities).
ctx.tenant_id = None
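# Owner precedence: tenant of an existing chain mapping (if any), then
# the explicitly requested tenant, then the configured chain owner,
# and finally the provider's tenant.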
if instance_id:
cmap = self._get_ptg_servicechain_mapping(
ctx.session, servicechain_instance_id=instance_id)
if cmap:
ctx.tenant_id = cmap[0].tenant_id
if not ctx.tenant_id:
ctx.tenant_id = tenant_id or self.chain_owner or provider_tenant_id
if self.chain_owner == ctx.tenant_id:
ctx.auth_token = self.chain_tenant_keystone_client().get_token(
self.chain_owner)
return ctx
def _is_redirect_in_policy_rule_sets(self, context, policy_rule_sets):
policy_rule_ids = []
for prs in context._plugin.get_policy_rule_sets(
context._plugin_context, filters={'id': policy_rule_sets}):
policy_rule_ids.extend(prs['policy_rules'])
for rule in context._plugin.get_policy_rules(
context._plugin_context, filters={'id': policy_rule_ids}):
redirect_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': rule["policy_actions"],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if redirect_actions:
return True
return False
def _get_redirect_action(self, context, policy_rule):
for action in context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule['policy_actions']}):
if action['action_type'] == gconst.GP_ACTION_REDIRECT:
return action
def _validate_new_prs_redirect(self, context, prs):
if self._prss_redirect_rules(context._plugin_context.session,
[prs['id']]) > 1:
raise exc.MultipleRedirectActionsNotSupportedForPRS()
for ptg in context._plugin.get_policy_target_groups(
context._plugin_context,
{'id': prs['providing_policy_target_groups']}):
self._validate_ptg_prss(context, ptg)
def _prss_redirect_rules(self, session, prs_ids):
if len(prs_ids) == 0:
# No result will be found in this case
return 0
query = (session.query(gpdb.gpdb.PolicyAction).
join(gpdb.gpdb.PolicyRuleActionAssociation).
join(gpdb.gpdb.PolicyRule).
join(gpdb.gpdb.PRSToPRAssociation).
filter(
gpdb.gpdb.PRSToPRAssociation.policy_rule_set_id.in_(prs_ids)).
filter(gpdb.gpdb.PolicyAction.action_type ==
gconst.GP_ACTION_REDIRECT))
return query.count()
def _multiple_pr_redirect_action_number(self, session, pr_ids):
# Given a set of rules, gives the total number of redirect actions
# found
if len(pr_ids) == 0:
# No result will be found in this case
return 0
return (session.query(gpdb.gpdb.PolicyAction).
join(gpdb.gpdb.PolicyRuleActionAssociation).
filter(
gpdb.gpdb.PolicyRuleActionAssociation.policy_rule_id.in_(
pr_ids)).
filter(gpdb.gpdb.PolicyAction.action_type ==
gconst.GP_ACTION_REDIRECT)).count()
def _reject_shared(self, object, type):
if object.get('shared'):
raise exc.InvalidSharedResource(type=type,
driver='chain_mapping')
def _reject_multiple_redirects_in_rule(self, context):
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': context.current['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if len(policy_actions) > 1:
raise exc.MultipleRedirectActionsNotSupportedForRule()
def _reject_multiple_redirects_in_prs(self, context):
policy_rules = context._plugin.get_policy_rules(
context._plugin_context,
filters={'id': context.current['policy_rules']})
redirect_actions_list = []
for policy_rule in policy_rules:
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
redirect_actions_list.extend(policy_actions)
if len(redirect_actions_list) > 1:
raise exc.MultipleRedirectActionsNotSupportedForPRS()
def _validate_ptg_prss(self, context, ptg):
# If the PTG is providing a redirect PRS, it can't provide any more
# redirect rules
if self._prss_redirect_rules(context._plugin_context.session,
ptg['provided_policy_rule_sets']) > 1:
raise exc.PTGAlreadyProvidingRedirectPRS(ptg_id=ptg['id'])
def _handle_classifier_update_notification(self, context):
# Invoke Service chain update notify hook if protocol or port or
# direction is updated. The SC side will have to reclassify the chain
# and update the traffic steering programming
if (context.original['port_range'] != context.current['port_range'] or
context.original['protocol'] != context.current['protocol'] or
context.original['direction'] != context.current['direction']):
sc_instances = (
self._servicechain_plugin.get_servicechain_instances(
context._plugin_context.elevated(),
filters={'classifier_id': [context.current['id']]}))
for sc_instance in sc_instances:
cmap = self._get_ptg_servicechain_mapping(
context._plugin_context.session,
servicechain_instance_id=sc_instance['id'])
ctx = self._get_chain_admin_context(context._plugin_context,
cmap[0].tenant_id)
self._servicechain_plugin.notify_chain_parameters_updated(
ctx, sc_instance['id'])
def _stash_ptg_modified_chains(self, context):
# Update service chain instance when any ruleset is changed
orig_provided_policy_rule_sets = context.original[
'provided_policy_rule_sets']
curr_provided_policy_rule_sets = context.current[
'provided_policy_rule_sets']
removed_provided_prs = (set(orig_provided_policy_rule_sets) -
set(curr_provided_policy_rule_sets))
added_provided_prs = (set(curr_provided_policy_rule_sets) -
set(orig_provided_policy_rule_sets))
context.ptg_chain_map = []
# If the Redirect is removed, delete the chain. If the spec is
# changed, then update the existing instance with new spec
if (self._is_redirect_in_policy_rule_sets(
context, removed_provided_prs) and not
self._is_redirect_in_policy_rule_sets(
context, added_provided_prs)):
context.ptg_chain_map += self._get_ptg_servicechain_mapping(
context._plugin_context.session, context.current['id'])
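For context, a minimal sketch of how an ordered driver list such as the one in the sample configuration could be loaded from the entrypoint namespace; names here are assumptions for illustration, not part of this commit:

from oslo_config import cfg
from stevedore import named

def load_policy_drivers():
    # e.g. policy_drivers = implicit_policy,resource_mapping,chain_mapping;
    # name_order=True preserves the configured order, letting chain_mapping
    # run last.
    mgr = named.NamedExtensionManager(
        'gbp.neutron.group_policy.policy_drivers',
        cfg.CONF.group_policy.policy_drivers,
        invoke_on_load=True,
        name_order=True)
    for ext in mgr:
        ext.obj.initialize()
    return mgr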

View File

@ -345,7 +345,7 @@ class ApicMappingDriver(api.ResourceMappingDriver):
pass
def create_policy_rule_precommit(self, context):
self._validate_one_action_per_pr(context)
pass
def create_policy_rule_postcommit(self, context, transaction=None):
action = context._plugin.get_policy_action(
@ -389,7 +389,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
if not self.name_mapper._is_apic_reference(context.current):
if context.current['child_policy_rule_sets']:
raise HierarchicalContractsNotSupported()
self._reject_multiple_redirects_in_prs(context)
else:
self.name_mapper.has_valid_name(context.current)
@ -454,9 +453,8 @@ class ApicMappingDriver(api.ResourceMappingDriver):
context._plugin_context, context.current['id'])
db_group.l2_policy_id = proxied['l2_policy_id']
context.current['l2_policy_id'] = proxied['l2_policy_id']
self._validate_ptg_prss(context, context.current)
else:
self.name_mapper.has_valid_name(context.current)
else:
self.name_mapper.has_valid_name(context.current)
def create_policy_target_group_postcommit(self, context):
if not context.current['subnets']:
@ -475,9 +473,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
self.apic_manager.ensure_epg_created(tenant, epg,
bd_owner=bd_owner,
bd_name=l2_policy)
l2p = context._plugin.get_l2_policy(
context._plugin_context, context.current['l2_policy_id'])
self._configure_epg_service_contract(
context, context.current, l2p, epg, transaction=trs)
self._configure_epg_implicit_contract(
@ -488,12 +483,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
if context.current.get('proxied_group_id'):
self._stitch_proxy_ptg_to_l3p(context, l3p)
self._handle_network_service_policy(context)
# Handle redirect action if any
consumed_prs = context.current['consumed_policy_rule_sets']
provided_prs = context.current['provided_policy_rule_sets']
if provided_prs and not context.current.get('proxied_group_id'):
policy_rule_sets = (consumed_prs + provided_prs)
self._handle_redirect_action(context, policy_rule_sets)
self._manage_ptg_policy_rule_sets(
context, context.current['provided_policy_rule_sets'],
@ -709,22 +698,10 @@ class ApicMappingDriver(api.ResourceMappingDriver):
self._cleanup_router(context._plugin_context, router_id)
def update_policy_rule_set_precommit(self, context):
self._reject_apic_name_change(context)
if not self.name_mapper._is_apic_reference(context.current):
self._reject_shared_update(context, 'policy_rule_set')
self._reject_multiple_redirects_in_prs(context)
if context.current['child_policy_rule_sets']:
raise HierarchicalContractsNotSupported()
# If a redirect action is added (from 0 to one) we have to validate
# the providing and consuming PTGs
old_red_count = self._multiple_pr_redirect_action_number(
context._plugin_context.session,
context.original['policy_rules'])
new_red_count = self._multiple_pr_redirect_action_number(
context._plugin_context.session,
context.current['policy_rules'])
if new_red_count > old_red_count:
self._validate_new_prs_redirect(context, context.current)
def update_policy_rule_set_postcommit(self, context):
# Update policy_rule_set rules
@ -738,8 +715,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
to_remove)
self._apply_policy_rule_set_rules(context, context.current, to_add)
self._handle_redirect_action(context, [context.current['id']])
def update_policy_target_precommit(self, context):
if (context.original['policy_target_group_id'] !=
context.current['policy_target_group_id']):
@ -755,24 +730,14 @@ class ApicMappingDriver(api.ResourceMappingDriver):
context.current['port_id'])
def update_policy_rule_precommit(self, context):
self._validate_one_action_per_pr(context)
old_redirect = self._get_redirect_action(context, context.original)
new_redirect = self._get_redirect_action(context, context.current)
if not old_redirect and new_redirect:
# If redirect action is added, check that there's no contract that
# already has a redirect action
for prs in context._plugin.get_policy_rule_sets(
context._plugin_context,
{'id': context.current['policy_rule_sets']}):
# Make sure the PRS can have a new redirect action
self._validate_new_prs_redirect(context, prs)
pass
def update_policy_rule_postcommit(self, context):
self._update_policy_rule_on_apic(context)
super(ApicMappingDriver, self).update_policy_rule_postcommit(context)
def update_policy_action_postcommit(self, context):
self._handle_redirect_spec_id_update(context)
pass
def _update_policy_rule_on_apic(self, context):
self._delete_policy_rule_from_apic(context, transaction=None)
@ -780,20 +745,12 @@ class ApicMappingDriver(api.ResourceMappingDriver):
self.create_policy_rule_postcommit(context, transaction=None)
def update_policy_target_group_precommit(self, context):
self._reject_apic_name_change(context)
if not self.name_mapper._is_apic_reference(context.current):
if set(context.original['subnets']) != set(
        context.current['subnets']):
    raise ExplicitSubnetAssociationNotSupported()
self._reject_shared_update(context, 'policy_target_group')
self._validate_ptg_prss(context, context.current)
self._stash_ptg_modified_chains(context)
def update_policy_target_group_postcommit(self, context):
if not self.name_mapper._is_apic_reference(context.current):
# TODO(ivar): refactor parent to avoid code duplication
@ -820,14 +777,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
curr_consumed_policy_rule_sets))
self._handle_nsp_update_on_ptg(context)
self._cleanup_redirect_action(context)
if self._is_redirect_in_policy_rule_sets(
context, new_provided_policy_rule_sets) and not (
context.current.get('proxied_group_id')):
policy_rule_sets = (curr_consumed_policy_rule_sets +
curr_provided_policy_rule_sets)
self._handle_redirect_action(context, policy_rule_sets)
self._manage_ptg_policy_rule_sets(
context, new_provided_policy_rule_sets,
@ -935,7 +884,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
self._remove_policy_rule_set_rules(
context, prs, [(rule, context.original)])
self._apply_policy_rule_set_rules(context, prs, [rule])
self._handle_classifier_update_notification(context)
def create_external_segment_precommit(self, context):
if context.current['port_address_translation']:
@ -1979,27 +1927,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
tenant, epg_name, contract, provider=True,
contract_owner=contract_owner, transaction=trs)
def _get_redirect_action(self, context, policy_rule):
for action in context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule['policy_actions']}):
if action['action_type'] == g_const.GP_ACTION_REDIRECT:
return action
def _multiple_pr_redirect_action_number(self, session, pr_ids):
# Given a set of rules, gives the total number of redirect actions
# found
if len(pr_ids) == 0:
# No result will be found in this case
return 0
return (session.query(gpdb.gpdb.PolicyAction).
join(gpdb.gpdb.PolicyRuleActionAssociation).
filter(
gpdb.gpdb.PolicyRuleActionAssociation.policy_rule_id.in_(
pr_ids)).
filter(gpdb.gpdb.PolicyAction.action_type ==
g_const.GP_ACTION_REDIRECT)).count()
def _check_es_subnet(self, context, es):
if es['subnet_id']:
subnet = self._get_subnet(context._plugin_context,
@ -2303,11 +2230,6 @@ class ApicMappingDriver(api.ResourceMappingDriver):
for port in ports:
self._notify_port_update(plugin_context, port['id'])
def _validate_one_action_per_pr(self, context):
if ('policy_actions' in context.current and
len(context.current['policy_actions']) != 1):
raise ExactlyOneActionPerRuleIsSupportedOnApicDriver()
def _create_any_contract(self, origin_ptg_id, transaction=None):
tenant = apic_manager.TENANT_COMMON
contract = ANY_PREFIX + origin_ptg_id

View File

@ -0,0 +1,134 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import model_base
import sqlalchemy as sa
class ServicePolicyPTGIpAddressMapping(model_base.BASEV2):
"""Service Policy to IP Address mapping DB."""
__tablename__ = 'gpm_service_policy_ipaddress_mappings'
service_policy_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_network_service_policies.id'),
nullable=False, primary_key=True)
policy_target_group = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_target_groups.id'),
nullable=False, primary_key=True)
ipaddress = sa.Column(sa.String(36))
class ServicePolicyPTGFipMapping(model_base.BASEV2):
"""Service Policy to FIP Address mapping DB."""
__tablename__ = 'gpm_service_policy_fip_mappings'
service_policy_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_network_service_policies.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
policy_target_group_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_target_groups.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
floatingip_id = sa.Column(sa.String(36),
sa.ForeignKey('floatingips.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
class PolicyTargetFloatingIPMapping(model_base.BASEV2):
"""Mapping of PolicyTarget to Floating IP."""
__tablename__ = 'gpm_pt_floatingip_mappings'
policy_target_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_targets.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
floatingip_id = sa.Column(sa.String(36),
sa.ForeignKey('floatingips.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
class NetworkServicePolicyMappingMixin(object):
def _set_policy_ipaddress_mapping(self, session, service_policy_id,
policy_target_group, ipaddress):
with session.begin(subtransactions=True):
mapping = ServicePolicyPTGIpAddressMapping(
service_policy_id=service_policy_id,
policy_target_group=policy_target_group, ipaddress=ipaddress)
session.add(mapping)
def _get_ptg_policy_ipaddress_mapping(self, session, policy_target_group):
with session.begin(subtransactions=True):
return (session.query(ServicePolicyPTGIpAddressMapping).
filter_by(policy_target_group=policy_target_group).first())
def _delete_policy_ipaddress_mapping(self, session, policy_target_group):
with session.begin(subtransactions=True):
ip_mapping = session.query(
ServicePolicyPTGIpAddressMapping).filter_by(
policy_target_group=policy_target_group).first()
if ip_mapping:
session.delete(ip_mapping)
def _set_ptg_policy_fip_mapping(self, session, service_policy_id,
policy_target_group_id, fip_id):
with session.begin(subtransactions=True):
mapping = ServicePolicyPTGFipMapping(
service_policy_id=service_policy_id,
policy_target_group_id=policy_target_group_id,
floatingip_id=fip_id)
session.add(mapping)
def _get_ptg_policy_fip_mapping(self, session, policy_target_group_id):
with session.begin(subtransactions=True):
return (session.query(ServicePolicyPTGFipMapping).
filter_by(policy_target_group_id=policy_target_group_id).
all())
def _delete_ptg_policy_fip_mapping(self, session, policy_target_group_id):
with session.begin(subtransactions=True):
mappings = session.query(
ServicePolicyPTGFipMapping).filter_by(
policy_target_group_id=policy_target_group_id).all()
for mapping in mappings:
session.delete(mapping)
def _set_pt_floating_ips_mapping(self, session, policy_target_id, fip_ids):
with session.begin(subtransactions=True):
for fip_id in fip_ids:
mapping = PolicyTargetFloatingIPMapping(
policy_target_id=policy_target_id, floatingip_id=fip_id)
session.add(mapping)
def _set_pts_floating_ips_mapping(self, session, pt_fip_map):
with session.begin(subtransactions=True):
for policy_target_id in pt_fip_map:
self._set_pt_floating_ips_mapping(
session, policy_target_id,
pt_fip_map[policy_target_id])
def _get_pt_floating_ip_mapping(self, session, policy_target_id):
with session.begin(subtransactions=True):
return (session.query(PolicyTargetFloatingIPMapping).
filter_by(policy_target_id=policy_target_id).all())
def _delete_pt_floating_ip_mapping(self, session, policy_target_id):
with session.begin(subtransactions=True):
fip_mappings = session.query(
PolicyTargetFloatingIPMapping).filter_by(
policy_target_id=policy_target_id).all()
for fip_mapping in fip_mappings:
session.delete(fip_mapping)
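Both mapping drivers in this commit consume these helpers by mixing the class into their definition; schematically (driver name hypothetical):

class SomePolicyDriver(api.PolicyDriver, local_api.LocalAPI,
                       nsp_manager.NetworkServicePolicyMappingMixin):

    def delete_policy_target_group_precommit(self, context):
        # NSP mappings are now queried through the shared mixin.
        context.nsp_cleanup_fips = self._get_ptg_policy_fip_mapping(
            context._plugin_context.session, context.current['id'])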

View File

@ -13,8 +13,6 @@
import netaddr
import operator
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as k_client
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
@ -26,8 +24,6 @@ from neutron.extensions import l3 as ext_l3
from neutron.extensions import securitygroup as ext_sg
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import sqlalchemy as sa
from gbpservice.common import utils
@ -40,32 +36,21 @@ from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.drivers import nsp_manager
LOG = logging.getLogger(__name__)
DEFAULT_SG_PREFIX = 'gbp_%s'
SCI_CONSUMER_NOT_AVAILABLE = 'N/A'
group_policy_opts = [
cfg.StrOpt('chain_owner_user',
help=_("Chain owner username. If set, will be used in "
"place of the Neutron service admin for retrieving "
"tenant owner information through Keystone."),
default=''),
cfg.StrOpt('chain_owner_password',
help=_("Chain owner password."), default='',
secret=True),
cfg.StrOpt('chain_owner_tenant_name',
help=_("Name of the Tenant that will own the service chain "
"instances for this driver. Leave empty for provider "
"owned chains."), default=''),
opts = [
cfg.ListOpt('dns_nameservers',
default=[],
help=_("List of DNS nameservers to be configured for the "
"PTG subnets")),
]
cfg.CONF.register_opts(group_policy_opts, "resource_mapping")
cfg.CONF.register_opts(opts, "resource_mapping")
class OwnedPort(model_base.BASEV2):
@ -118,74 +103,13 @@ class PolicyRuleSetSGsMapping(model_base.BASEV2):
sa.ForeignKey('securitygroups.id'))
class PtgServiceChainInstanceMapping(model_base.BASEV2, models_v2.HasTenant):
"""Policy Target Group to ServiceChainInstance mapping DB."""
__tablename__ = 'gpm_ptgs_servicechain_mapping'
provider_ptg_id = sa.Column(sa.String(36),
sa.ForeignKey('gp_policy_target_groups.id',
ondelete='CASCADE'),
nullable=False)
# Consumer PTG could be an External Policy
consumer_ptg_id = sa.Column(sa.String(36), nullable=False)
servicechain_instance_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_instances.id',
ondelete='CASCADE'),
primary_key=True)
class ServicePolicyPTGIpAddressMapping(model_base.BASEV2):
"""Service Policy to IP Address mapping DB."""
__tablename__ = 'gpm_service_policy_ipaddress_mappings'
service_policy_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_network_service_policies.id'),
nullable=False, primary_key=True)
policy_target_group = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_target_groups.id'),
nullable=False, primary_key=True)
ipaddress = sa.Column(sa.String(36))
class ServicePolicyPTGFipMapping(model_base.BASEV2):
"""Service Policy to FIP Address mapping DB."""
__tablename__ = 'gpm_service_policy_fip_mappings'
service_policy_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_network_service_policies.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
policy_target_group_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_target_groups.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
floatingip_id = sa.Column(sa.String(36),
sa.ForeignKey('floatingips.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
class PolicyTargetFloatingIPMapping(model_base.BASEV2):
"""Mapping of PolicyTarget to Floating IP."""
__tablename__ = 'gpm_pt_floatingip_mappings'
policy_target_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_targets.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
floatingip_id = sa.Column(sa.String(36),
sa.ForeignKey('floatingips.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
# This exception should never escape the driver.
class CidrInUse(exc.GroupPolicyInternalError):
message = _("CIDR %(cidr)s in-use within L3 policy %(l3p_id)s")
class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI,
nsp_manager.NetworkServicePolicyMappingMixin):
"""Resource Mapping driver for Group Policy plugin.
This driver implements group policy semantics by mapping group
@ -195,41 +119,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
@log.log
def initialize(self):
self._cached_agent_notifier = None
self.chain_owner = ResourceMappingDriver.chain_tenant_id(reraise=True)
@staticmethod
def chain_tenant_id(reraise=False):
keystone = ResourceMappingDriver.chain_tenant_keystone_client()
if keystone:
tenant = cfg.CONF.resource_mapping.chain_owner_tenant_name
try:
# Can it be retrieved directly, without a further keystone
# call?
tenant = keystone.tenants.find(name=tenant)
return tenant.id
except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=reraise):
LOG.error(_('No tenant with name %s exists.'), tenant)
except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=reraise):
LOG.error(_('Multiple tenants matches found for %s'),
tenant)
@staticmethod
def chain_tenant_keystone_client():
chain_user = cfg.CONF.resource_mapping.chain_owner_user
user, pwd, tenant, auth_url = utils.get_keystone_creds()
user = (chain_user or user)
pwd = (cfg.CONF.resource_mapping.chain_owner_password or
(pwd if not chain_user else ''))
# Tenant must be configured in the resource_mapping section, provider
# owner will be used otherwise.
tenant = cfg.CONF.resource_mapping.chain_owner_tenant_name
if tenant:
return k_client.Client(username=user, password=pwd,
auth_url=auth_url)
def _reject_shared(self, object, type):
if object.get('shared'):
@ -308,28 +197,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
router_id=router_id,
tenant_id=context.current['tenant_id'])
def _reject_multiple_redirects_in_rule(self, context):
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': context.current['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if len(policy_actions) > 1:
raise exc.MultipleRedirectActionsNotSupportedForRule()
def _reject_multiple_redirects_in_prs(self, context):
policy_rules = context._plugin.get_policy_rules(
context._plugin_context,
filters={'id': context.current['policy_rules']})
redirect_actions_list = []
for policy_rule in policy_rules:
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
redirect_actions_list.extend(policy_actions)
if len(redirect_actions_list) > 1:
raise exc.MultipleRedirectActionsNotSupportedForPRS()
@log.log
def create_policy_target_precommit(self, context):
if not context.current['policy_target_group_id']:
@ -503,9 +370,7 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
self._reject_cross_tenant_ptg_l2p(context)
self._validate_ptg_subnets(context)
self._validate_nat_pool_for_nsp(context)
self._validate_ptg_prss(context, context.current)
self._validate_proxy_ptg(context)
self._validate_ptg_prss(context, context.current)
@log.log
def create_policy_target_group_postcommit(self, context):
@ -696,48 +561,10 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
raise exc.PolicyTargetGroupSubnetRemovalNotSupported()
self._validate_ptg_subnets(context, context.current['subnets'])
self._validate_ptg_prss(context, context.current)
self._reject_cross_tenant_ptg_l2p(context)
if (context.current['network_service_policy_id'] !=
context.original['network_service_policy_id']):
self._validate_nat_pool_for_nsp(context)
self._stash_ptg_modified_chains(context)
def _stash_ptg_modified_chains(self, context):
# Update service chain instance when any ruleset is changed
orig_provided_policy_rule_sets = context.original[
'provided_policy_rule_sets']
curr_provided_policy_rule_sets = context.current[
'provided_policy_rule_sets']
removed_provided_prs = (set(orig_provided_policy_rule_sets) -
set(curr_provided_policy_rule_sets))
added_provided_prs = (set(curr_provided_policy_rule_sets) -
set(orig_provided_policy_rule_sets))
context.ptg_chain_map = []
# If the Redirect is removed, delete the chain. If the spec is
# changed, then update the existing instance with new spec
if (self._is_redirect_in_policy_rule_sets(
context, removed_provided_prs) and not
self._is_redirect_in_policy_rule_sets(
context, added_provided_prs)):
context.ptg_chain_map += self._get_ptg_servicechain_mapping(
context._plugin_context.session, context.current['id'])
def _is_redirect_in_policy_rule_sets(self, context, policy_rule_sets):
policy_rule_ids = []
for prs in context._plugin.get_policy_rule_sets(
context._plugin_context, filters={'id': policy_rule_sets}):
policy_rule_ids.extend(prs['policy_rules'])
for rule in context._plugin.get_policy_rules(
context._plugin_context, filters={'id': policy_rule_ids}):
redirect_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': rule["policy_actions"],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if redirect_actions:
return True
return False
@log.log
def update_policy_target_group_postcommit(self, context):
@ -776,16 +603,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
self._handle_nsp_update_on_ptg(context)
# Only the ones set in context in precommit operation will be deleted
self._cleanup_redirect_action(context)
# If the spec is changed, then update the chain with new spec
# If redirect is newly added, create the chain
if self._is_redirect_in_policy_rule_sets(
context, new_provided_policy_rule_sets):
policy_rule_sets = (curr_consumed_policy_rule_sets +
curr_provided_policy_rule_sets)
self._handle_redirect_action(context, policy_rule_sets)
# if PTG associated policy_rule_sets are updated, we need to update
# the policy rules, then associate SGs to ports
if new_provided_policy_rule_sets or new_consumed_policy_rule_sets:
@ -833,10 +650,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
context._plugin_context.session, context.current['id'])
context.nsp_cleanup_fips = self._get_ptg_policy_fip_mapping(
context._plugin_context.session, context.current['id'])
provider_ptg_chain_map = self._get_ptg_servicechain_mapping(
context._plugin_context.session, context.current['id'])
context.ptg_chain_map = provider_ptg_chain_map
@log.log
def delete_policy_target_group_postcommit(self, context):
@ -844,7 +657,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
context.current,
context.nsp_cleanup_ipaddress,
context.nsp_cleanup_fips)
self._cleanup_redirect_action(context)
# Cleanup SGs
self._unset_sg_rules_for_subnets(
context, context.current['subnets'],
@ -1013,28 +825,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
self._update_policy_rule_sg_rules(context, pr_sets,
policy_rule, context.original, context.current)
self._handle_classifier_update_notification(context)
def _handle_classifier_update_notification(self, context):
# Invoke Service chain update notify hook if protocol or port or
# direction is updated. The SC side will have to reclassify the chain
# and update the traffic steering programming
if (context.original['port_range'] != context.current['port_range'] or
context.original['protocol'] != context.current['protocol'] or
context.original['direction'] != context.current['direction']):
sc_instances = (
self._servicechain_plugin.get_servicechain_instances(
context._plugin_context.elevated(),
filters={'classifier_id': [context.current['id']]}))
for sc_instance in sc_instances:
cmap = self._get_ptg_servicechain_mapping(
context._plugin_context.session,
servicechain_instance_id=sc_instance['id'])
ctx = self._get_chain_admin_context(context._plugin_context,
cmap[0].tenant_id)
self._servicechain_plugin.notify_chain_parameters_updated(
ctx, sc_instance['id'])
@log.log
def delete_policy_classifier_precommit(self, context):
pass
@ -1045,13 +835,7 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
@log.log
def create_policy_action_precommit(self, context):
spec_id = context.current['action_value']
if spec_id:
specs = self._get_servicechain_specs(
context._plugin_context, filters={'id': [spec_id]})
for spec in specs:
if not spec.get('shared', False):
self._reject_shared(context.current, 'policy_action')
pass
@log.log
def create_policy_action_postcommit(self, context):
@ -1063,8 +847,7 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
@log.log
def update_policy_action_postcommit(self, context):
# TODO(ivar): Should affect related SGs
self._handle_redirect_spec_id_update(context)
pass
@log.log
def delete_policy_action_precommit(self, context):
@ -1076,7 +859,7 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
@log.log
def create_policy_rule_precommit(self, context):
self._reject_multiple_redirects_in_rule(context)
pass
@log.log
def create_policy_rule_postcommit(self, context):
@ -1084,14 +867,7 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
@log.log
def update_policy_rule_precommit(self, context):
self._reject_multiple_redirects_in_rule(context)
old_redirect = self._get_redirect_action(context, context.original)
new_redirect = self._get_redirect_action(context, context.current)
if not old_redirect and new_redirect:
for prs in context._plugin.get_policy_rule_sets(
context._plugin_context,
{'id': context.current['policy_rule_sets']}):
self._validate_new_prs_redirect(context, prs)
pass
@log.log
def update_policy_rule_postcommit(self, context):
@ -1111,17 +887,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
self._apply_policy_rule_set_rules(context, prs,
[context.current])
old_redirect_policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': context.original['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
new_redirect_policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': context.current['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if old_redirect_policy_actions or new_redirect_policy_actions:
self._handle_redirect_action(context, policy_rule_sets)
@log.log
def delete_policy_rule_precommit(self, context):
# REVISIT(ivar): This will be removed once navigability issue is
@ -1140,7 +905,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
@log.log
def create_policy_rule_set_precommit(self, context):
self._reject_shared(context.current, 'policy_rule_set')
self._reject_multiple_redirects_in_prs(context)
@log.log
def create_policy_rule_set_postcommit(self, context):
@ -1160,22 +924,10 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
if context.current['child_policy_rule_sets']:
self._recompute_policy_rule_sets(
context, context.current['child_policy_rule_sets'])
self._handle_redirect_action(
context, context.current['child_policy_rule_sets'])
@log.log
def update_policy_rule_set_precommit(self, context):
self._reject_shared(context.current, 'policy_rule_set')
self._reject_multiple_redirects_in_prs(context)
old_red_count = self._multiple_pr_redirect_action_number(
context._plugin_context.session,
context.original['policy_rules'])
new_red_count = self._multiple_pr_redirect_action_number(
context._plugin_context.session,
context.current['policy_rules'])
if new_red_count > old_red_count:
self._validate_new_prs_redirect(context, context.current)
@log.log
def update_policy_rule_set_postcommit(self, context):
@ -1196,17 +948,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
to_recompute = (set(context.original['child_policy_rule_sets']) &
set(context.current['child_policy_rule_sets']))
self._recompute_policy_rule_sets(context, to_recompute)
# Handle any Redirects from the current Policy Rule Set
self._handle_redirect_action(context, [context.current['id']])
# Handle Update/Delete of Redirects for any child Rule Sets
if (set(context.original['child_policy_rule_sets']) !=
set(context.current['child_policy_rule_sets'])):
if context.original['child_policy_rule_sets']:
self._handle_redirect_action(
context, context.original['child_policy_rule_sets'])
if context.current['child_policy_rule_sets']:
self._handle_redirect_action(
context, context.current['child_policy_rule_sets'])
@log.log
def delete_policy_rule_set_precommit(self, context):
@ -1229,9 +970,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
# Delete SGs
for sg in sg_list:
self._delete_sg(context._plugin_context, sg)
if context.current['child_policy_rule_sets']:
self._handle_redirect_action(
context, context.current['child_policy_rule_sets'])
@log.log
def create_network_service_policy_precommit(self, context):
@ -1974,10 +1712,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
'provided_policy_rule_sets']
subnets = context.current['subnets']
ptg_id = context.current['id']
if provided_policy_rule_sets:
policy_rule_sets = (
consumed_policy_rule_sets + provided_policy_rule_sets)
self._handle_redirect_action(context, policy_rule_sets)
self._set_sg_rules_for_subnets(context, subnets,
provided_policy_rule_sets,
consumed_policy_rule_sets)
@ -2008,220 +1742,12 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
context, policy_rule, policy_rule_set_sg_mappings,
cidr_mapping, classifier=new_classifier)
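# The helpers below persist this driver's bookkeeping: the IPs and
# floating IPs reserved for a network service policy, and the
# floating IPs associated with each policy target.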
def _set_policy_ipaddress_mapping(self, session, service_policy_id,
policy_target_group, ipaddress):
with session.begin(subtransactions=True):
mapping = ServicePolicyPTGIpAddressMapping(
service_policy_id=service_policy_id,
policy_target_group=policy_target_group, ipaddress=ipaddress)
session.add(mapping)
def _get_ptg_policy_ipaddress_mapping(self, session, policy_target_group):
with session.begin(subtransactions=True):
return (session.query(ServicePolicyPTGIpAddressMapping).
filter_by(policy_target_group=policy_target_group).first())
def _delete_policy_ipaddress_mapping(self, session, policy_target_group):
with session.begin(subtransactions=True):
ip_mapping = session.query(
ServicePolicyPTGIpAddressMapping).filter_by(
policy_target_group=policy_target_group).first()
if ip_mapping:
session.delete(ip_mapping)
def _set_ptg_policy_fip_mapping(self, session, service_policy_id,
policy_target_group_id, fip_id):
with session.begin(subtransactions=True):
mapping = ServicePolicyPTGFipMapping(
service_policy_id=service_policy_id,
policy_target_group_id=policy_target_group_id,
floatingip_id=fip_id)
session.add(mapping)
def _get_ptg_policy_fip_mapping(self, session, policy_target_group_id):
with session.begin(subtransactions=True):
return (session.query(ServicePolicyPTGFipMapping).
filter_by(policy_target_group_id=policy_target_group_id).
all())
def _delete_ptg_policy_fip_mapping(self, session, policy_target_group_id):
with session.begin(subtransactions=True):
mappings = session.query(
ServicePolicyPTGFipMapping).filter_by(
policy_target_group_id=policy_target_group_id).all()
for mapping in mappings:
session.delete(mapping)
def _set_pt_floating_ips_mapping(self, session, policy_target_id, fip_ids):
with session.begin(subtransactions=True):
for fip_id in fip_ids:
mapping = PolicyTargetFloatingIPMapping(
policy_target_id=policy_target_id, floatingip_id=fip_id)
session.add(mapping)
def _set_pts_floating_ips_mapping(self, session, pt_fip_map):
with session.begin(subtransactions=True):
for policy_target_id in pt_fip_map:
self._set_pt_floating_ips_mapping(
session, policy_target_id,
pt_fip_map[policy_target_id])
def _get_pt_floating_ip_mapping(self, session, policy_target_id):
with session.begin(subtransactions=True):
return (session.query(PolicyTargetFloatingIPMapping).
filter_by(policy_target_id=policy_target_id).all())
def _delete_pt_floating_ip_mapping(self, session, policy_target_id):
with session.begin(subtransactions=True):
fip_mappings = session.query(
PolicyTargetFloatingIPMapping).filter_by(
policy_target_id=policy_target_id).all()
for fip_mapping in fip_mappings:
session.delete(fip_mapping)
def _handle_redirect_spec_id_update(self, context):
if (context.current['action_type'] != gconst.GP_ACTION_REDIRECT
or context.current['action_value'] ==
context.original['action_value']):
return
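# The redirect action now points to a new spec: swap the old spec
# for the new one in every chain instance built from it.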
spec = self._servicechain_plugin._get_servicechain_spec(
context._plugin_context, context.original['action_value'])
for servicechain_instance in spec.instances:
sc_instance_id = servicechain_instance.servicechain_instance_id
sc_instance = self._servicechain_plugin.get_servicechain_instance(
context._plugin_context, sc_instance_id)
old_specs = sc_instance['servicechain_specs']
# Keep any parent/child redirect specs as they are; only replace
# the current one
new_specs = [context.current['action_value'] if
x == context.original['action_value'] else
x for x in old_specs]
self._update_servicechain_instance(
context._plugin_context,
servicechain_instance.servicechain_instance_id,
sc_specs=new_specs)
def _update_servicechain_instance(self, plugin_context, sc_instance_id,
classifier_id=None, sc_specs=None):
sc_instance_update_data = {}
if sc_specs:
sc_instance_update_data.update({'servicechain_specs': sc_specs})
if classifier_id:
sc_instance_update_data.update({'classifier_id': classifier_id})
super(ResourceMappingDriver, self)._update_servicechain_instance(
self._get_chain_admin_context(
plugin_context, instance_id=sc_instance_id),
sc_instance_id, sc_instance_update_data)
def _get_rule_ids_for_actions(self, context, action_id):
policy_rule_qry = context.session.query(
gpdb.PolicyRuleActionAssociation.policy_rule_id)
policy_rule_qry = policy_rule_qry.filter_by(policy_action_id=action_id)
return policy_rule_qry.all()
# This method updates an existing chain instance, creates a new one,
# or deletes the existing instance. For updates, the only parameters
# that can change are the service chain spec and the classifier ID.
def _handle_redirect_action(self, context, policy_rule_set_ids):
policy_rule_sets = context._plugin.get_policy_rule_sets(
context._plugin_context,
filters={'id': policy_rule_set_ids})
for policy_rule_set in policy_rule_sets:
ptgs_providing_prs = policy_rule_set[
'providing_policy_target_groups']
# Create the servicechain instance only when the PRS has at least
# one providing PTG. If labels are available, they have to be
# applied
if not ptgs_providing_prs:
continue
ptgs_providing_prs = context._plugin.get_policy_target_groups(
context._plugin_context.elevated(), {'id': ptgs_providing_prs})
parent_classifier_id = None
parent_spec_id = None
if policy_rule_set['parent_id']:
parent = context._plugin.get_policy_rule_set(
context._plugin_context, policy_rule_set['parent_id'])
policy_rules = context._plugin.get_policy_rules(
context._plugin_context,
filters={'id': parent['policy_rules']})
for policy_rule in policy_rules:
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule["policy_actions"],
'action_type': [gconst.GP_ACTION_REDIRECT]})
if policy_actions:
parent_spec_id = policy_actions[0].get("action_value")
parent_classifier_id = policy_rule.get(
"policy_classifier_id")
break # only one redirect action is supported
policy_rules = context._plugin.get_policy_rules(
context._plugin_context,
filters={'id': policy_rule_set['policy_rules']})
for policy_rule in policy_rules:
hierarchial_classifier_mismatch = False
classifier_id = policy_rule.get("policy_classifier_id")
if parent_classifier_id and (parent_classifier_id !=
classifier_id):
hierarchial_classifier_mismatch = True
policy_actions = context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule.get("policy_actions"),
'action_type': [gconst.GP_ACTION_REDIRECT]})
# Only one redirect action is allowed per PRS. The chain may
# belong to another PRS, in which case it must not be deleted
if (self._is_redirect_in_policy_rule_sets(
context, policy_rule_set_ids) and not policy_actions):
continue
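# spec_id is None when the rule carries no redirect action; in that
# case _create_or_update_chain tears down any existing chain.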
spec_id = (policy_actions and policy_actions[0]['action_value']
or None)
for ptg_providing_prs in ptgs_providing_prs:
# REVISIT(Magesh): There may be concurrency issues here
if not ptg_providing_prs.get('proxied_group_id'):
self._create_or_update_chain(
context, ptg_providing_prs['id'],
SCI_CONSUMER_NOT_AVAILABLE, spec_id,
parent_spec_id, classifier_id,
hierarchial_classifier_mismatch,
policy_rule_set)
def _create_or_update_chain(self, context, provider, consumer, spec_id,
parent_spec_id, classifier_id,
hierarchial_classifier_mismatch, prs_id):
ptg_chain_map = self._get_ptg_servicechain_mapping(
context._plugin_context.session, provider)
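# At most one chain instance exists per provider PTG: delete it on
# classifier mismatch or missing spec, update it otherwise, or
# create a new one if none exists yet.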
if ptg_chain_map:
if hierarchial_classifier_mismatch or not spec_id:
ctx = self._get_chain_admin_context(
context._plugin_context,
tenant_id=ptg_chain_map[0].tenant_id)
self._delete_servicechain_instance(
ctx, ptg_chain_map[0].servicechain_instance_id)
else:
sc_specs = [spec_id]
if parent_spec_id:
sc_specs.insert(0, parent_spec_id)
# Only one chain exists per unique provider/consumer pair
self._update_servicechain_instance(
context._plugin_context,
ptg_chain_map[0].servicechain_instance_id,
classifier_id=classifier_id,
sc_specs=sc_specs)
elif spec_id and not hierarchial_classifier_mismatch:
self._create_servicechain_instance(
context, spec_id, parent_spec_id, provider,
SCI_CONSUMER_NOT_AVAILABLE, classifier_id, prs_id)
def _cleanup_redirect_action(self, context):
for ptg_chain in context.ptg_chain_map:
ctx = self._get_chain_admin_context(context._plugin_context,
tenant_id=ptg_chain.tenant_id)
self._delete_servicechain_instance(
ctx, ptg_chain.servicechain_instance_id)
def _restore_ip_to_allocation_pool(self, context, subnet_id, ip_address):
# TODO(Magesh): Pass subnets and loop over them. Better to add logic
# to merge the pools back together after fragmentation
@@ -2269,67 +1795,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
ip_address = ip_range['last_ip']
return ip_address
def _create_servicechain_instance(self, context, servicechain_spec,
parent_servicechain_spec,
provider_ptg_id, consumer_ptg_id,
classifier_id, policy_rule_set,
config_params=None):
sc_spec = [servicechain_spec]
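# A parent PRS spec, when present, is prepended so that it comes
# first in the chain.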
if parent_servicechain_spec:
sc_spec.insert(0, parent_servicechain_spec)
config_param_values = {}
provider_ptg = context._plugin.get_policy_target_group(
utils.admin_context(context._plugin_context), provider_ptg_id)
p_ctx = self._get_chain_admin_context(
context._plugin_context,
provider_tenant_id=provider_ptg['tenant_id'])
session = context._plugin_context.session
network_service_policy_id = provider_ptg.get(
"network_service_policy_id")
if network_service_policy_id:
nsp = context._plugin.get_network_service_policy(
p_ctx, network_service_policy_id)
service_params = nsp.get("network_service_params")
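# Translate network service params into chain config values: an
# ip_single 'self_subnet' param resolves to the IP reserved for the
# provider PTG, an ip_single 'nat_pool' param to the floating IPs
# allocated for it.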
for service_parameter in service_params:
param_type = service_parameter.get("type")
param_value = service_parameter.get("value")
if param_type == "ip_single" and param_value == "self_subnet":
key = service_parameter.get("name")
servicepolicy_ptg_ip_map = (
self._get_ptg_policy_ipaddress_mapping(
session, provider_ptg_id))
servicepolicy_ip = servicepolicy_ptg_ip_map.get(
"ipaddress")
config_param_values[key] = servicepolicy_ip
elif param_type == "ip_single" and param_value == "nat_pool":
key = service_parameter.get("name")
fip_maps = (
self._get_ptg_policy_fip_mapping(
context._plugin_context.session,
provider_ptg_id))
servicepolicy_fip_ids = []
for fip_map in fip_maps:
servicepolicy_fip_ids.append(fip_map.floatingip_id)
config_param_values[key] = servicepolicy_fip_ids
name = 'gbp_%s_%s' % (policy_rule_set['name'], provider_ptg['name'])
attrs = {'tenant_id': p_ctx.tenant,
'name': name,
'description': "",
'servicechain_specs': sc_spec,
'provider_ptg_id': provider_ptg_id,
'consumer_ptg_id': SCI_CONSUMER_NOT_AVAILABLE,
'management_ptg_id': None,
'classifier_id': classifier_id,
'config_param_values': jsonutils.dumps(config_param_values)}
sc_instance = super(
ResourceMappingDriver, self)._create_servicechain_instance(
p_ctx, attrs)
self._set_ptg_servicechain_instance_mapping(
session, provider_ptg_id, SCI_CONSUMER_NOT_AVAILABLE,
sc_instance['id'], p_ctx.tenant)
return sc_instance
# Do not pass floating_ip_address to this method until after the Kilo release
def _create_floatingip(self, plugin_context, tenant_id, ext_net_id,
internal_port_id=None, floating_ip_address=None,
@@ -2750,14 +2215,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
if sg_id:
self._delete_sg(plugin_context, sg_id)
def _get_ptgs_by_id(self, context, ids):
if ids:
filters = {'id': ids}
return context._plugin.get_policy_target_groups(
context._plugin_context, filters)
else:
return []
def _get_ptg_cidrs(self, context, ptgs, ptg_dicts=None):
cidrs = []
if ptg_dicts:
@@ -2795,40 +2252,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
context, consuming_ptgs) + self._get_ep_cidrs(context,
consuming_eps)}
def _set_ptg_servicechain_instance_mapping(self, session, provider_ptg_id,
consumer_ptg_id,
servicechain_instance_id,
provider_tenant_id):
with session.begin(subtransactions=True):
mapping = PtgServiceChainInstanceMapping(
provider_ptg_id=provider_ptg_id,
consumer_ptg_id=consumer_ptg_id,
servicechain_instance_id=servicechain_instance_id,
tenant_id=provider_tenant_id)
session.add(mapping)
def _get_ptg_servicechain_mapping(self, session, provider_ptg_id=None,
consumer_ptg_id=None, tenant_id=None,
servicechain_instance_id=None):
with session.begin(subtransactions=True):
query = session.query(PtgServiceChainInstanceMapping)
if provider_ptg_id:
query = query.filter_by(provider_ptg_id=provider_ptg_id)
if consumer_ptg_id:
query = query.filter_by(consumer_ptg_id=consumer_ptg_id)
if servicechain_instance_id:
query = query.filter_by(
servicechain_instance_id=servicechain_instance_id)
if tenant_id:
query = query.filter_by(tenant_id=tenant_id)
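# Return plain dict-like copies (detached from the session) rather
# than live ORM rows.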
rows = query.all()
return [utils.DictClass([('provider_ptg_id', x.provider_ptg_id),
('consumer_ptg_id', x.consumer_ptg_id),
('servicechain_instance_id',
x.servicechain_instance_id),
('tenant_id', x.tenant_id)])
for x in rows]
def _get_ep_cidr_list(self, context, ep):
es_list = context._plugin.get_external_segments(
context._plugin_context,
@@ -3033,28 +2456,6 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
port_subnet_id=port_subnet_id,
policy_target_group_id=ptg_id)
def _get_chain_admin_context(self, plugin_context, tenant_id=None,
provider_tenant_id=None, instance_id=None):
ctx = plugin_context.elevated()
# REVISIT(Ivar): Any particular implication when a provider-owned PT
# exists in the consumer PTG? Especially when the consumer PTG belongs
# to another tenant? We may want to consider a strong convention
# for reference plumbers to absolutely avoid this kind of inter-tenant
# object creation when the owner is the provider (in which case, the
# context can as well be a normal context without admin capabilities).
ctx.tenant_id = None
if instance_id:
cmap = self._get_ptg_servicechain_mapping(
ctx.session, servicechain_instance_id=instance_id)
if cmap:
ctx.tenant_id = cmap[0].tenant_id
if not ctx.tenant_id:
ctx.tenant_id = tenant_id or self.chain_owner or provider_tenant_id
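# When a dedicated chain-owner tenant is configured, authenticate
# as that tenant so the chain resources are created under it.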
if self.chain_owner == ctx.tenant_id:
ctx.auth_token = self.chain_tenant_keystone_client().get_token(
self.chain_owner)
return ctx
def _get_ptg_l3p(self, context, ptg):
l3p_id = context._plugin.get_l2_policy(
context._plugin_context, ptg['l2_policy_id'])['l3_policy_id']
@@ -3116,54 +2517,3 @@ class ResourceMappingDriver(api.PolicyDriver, local_api.LocalAPI):
def _unset_proxy_gateway_routes(self, context, pt):
self._update_proxy_gateway_routes(context, pt, unset=True)
def _validate_ptg_prss(self, context, ptg):
# A PTG that already provides a redirect PRS cannot provide any
# additional redirect rules
if self._prss_redirect_rules(context._plugin_context.session,
ptg['provided_policy_rule_sets']) > 1:
raise exc.PTGAlreadyProvidingRedirectPRS(ptg_id=ptg['id'])
def _validate_new_prs_redirect(self, context, prs):
if self._prss_redirect_rules(context._plugin_context.session,
[prs['id']]) > 1:
raise exc.MultipleRedirectActionsNotSupportedForPRS()
for ptg in context._plugin.get_policy_target_groups(
context._plugin_context,
{'id': prs['providing_policy_target_groups']}):
self._validate_ptg_prss(context, ptg)
def _prss_redirect_rules(self, session, prs_ids):
if len(prs_ids) == 0:
# No result will be found in this case
return 0
query = (session.query(gpdb.PolicyAction).
join(gpdb.PolicyRuleActionAssociation).
join(gpdb.PolicyRule).
join(gpdb.PRSToPRAssociation).
filter(
gpdb.PRSToPRAssociation.policy_rule_set_id.in_(prs_ids)).
filter(gpdb.PolicyAction.action_type ==
gconst.GP_ACTION_REDIRECT))
return query.count()
def _multiple_pr_redirect_action_number(self, session, pr_ids):
# Given a set of rules, return the total number of redirect actions
# found
if len(pr_ids) == 0:
# No result will be found in this case
return 0
return (session.query(gpdb.PolicyAction).
join(gpdb.PolicyRuleActionAssociation).
filter(
gpdb.PolicyRuleActionAssociation.policy_rule_id.in_(
pr_ids)).
filter(gpdb.PolicyAction.action_type ==
gconst.GP_ACTION_REDIRECT)).count()
def _get_redirect_action(self, context, policy_rule):
for action in context._plugin.get_policy_actions(
context._plugin_context,
filters={'id': policy_rule['policy_actions']}):
if action['action_type'] == gconst.GP_ACTION_REDIRECT:
return action


@@ -102,7 +102,7 @@ class ApicMappingTestCase(
vm.name = 'someid'
nova_client.return_value = vm
super(ApicMappingTestCase, self).setUp(
policy_drivers=['implicit_policy', 'apic'],
policy_drivers=['implicit_policy', 'apic', 'chain_mapping'],
core_plugin=test_plugin.PLUGIN_NAME,
ml2_options=ml2_opts, sc_plugin=sc_plugin)
engine = db_api.get_engine()
@@ -1388,16 +1388,6 @@ class TestPolicyRule(ApicMappingTestCase):
def test_policy_rule_created_on_apic_shared(self):
self._test_policy_rule_created_on_apic(shared=True)
def test_policy_rule_many_actions_rejected(self):
actions = [self.create_policy_action(
action_type='allow')['policy_action']['id'] for x in range(2)]
cls = self.create_policy_classifier(direction='in', protocol='udp',
port_range=80)['policy_classifier']
self.create_policy_rule(policy_classifier_id=cls['id'],
expected_res_status=400,
policy_actions=actions)
def _test_policy_rule_deleted_on_apic(self, shared=False):
pr = self._create_simple_policy_rule(shared=shared)
req = self.new_delete_request('policy_rules', pr['id'], self.fmt)


@@ -65,7 +65,7 @@ class NuageGBPDriverTestCase(test_gp_plugin.GroupPolicyPluginTestCase):
def setUp(self):
config.cfg.CONF.set_override('policy_drivers',
['implicit_policy', 'resource_mapping',
'nuage_gbp_driver'],
'nuage_gbp_driver', 'chain_mapping'],
group='group_policy')
ml2_opts = {
'mechanism_drivers': ['nuage_gbp'],


@@ -54,7 +54,8 @@ class OneConvergenceGBPDriverTestCase(
test_resource_mapping.ResourceMappingTestCase):
def setUp(self):
policy_drivers = ['implicit_policy', 'oneconvergence_gbp_driver']
policy_drivers = ['implicit_policy', 'oneconvergence_gbp_driver',
'chain_mapping']
with mock.patch.object(
api, 'NVSDServiceApi',
new=MockNVSDApiClient) as self.mockNVSDApi:


@@ -38,6 +38,8 @@ from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.db import servicechain_db
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.services.grouppolicy.drivers import chain_mapping
from gbpservice.neutron.services.grouppolicy.drivers import nsp_manager
from gbpservice.neutron.services.grouppolicy.drivers import resource_mapping
from gbpservice.neutron.services.servicechain.plugins.msc import (
config as sc_cfg)
@@ -69,7 +71,8 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
core_plugin=n_test_plugin.PLUGIN_NAME, ml2_options=None,
sc_plugin=None):
policy_drivers = policy_drivers or ['implicit_policy',
'resource_mapping']
'resource_mapping',
'chain_mapping']
config.cfg.CONF.set_override('policy_drivers',
policy_drivers,
group='group_policy')
@@ -381,7 +384,7 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
ctx = nctx.get_admin_context()
with ctx.session.begin(subtransactions=True):
return (ctx.session.query(
resource_mapping.ServicePolicyPTGFipMapping).
nsp_manager.ServicePolicyPTGFipMapping).
filter_by(policy_target_group_id=ptg_id).
all())
@@ -2080,40 +2083,40 @@ class TestServiceChain(ResourceMappingTestCase):
self.assertEqual('http://127.0.0.1:35357/v2.0/', uri)
def test_chain_tenant_keystone_client(self):
resource_mapping.k_client = mock.Mock()
chain_mapping.k_client = mock.Mock()
self._override_keystone_creds(
'key_user', 'key_password', 'key_tenant_name',
'http://127.0.0.1:35357/v2.0/')
config.cfg.CONF.set_override(
'chain_owner_tenant_name', 'chain_owner',
group='resource_mapping')
resource_mapping.ResourceMappingDriver.chain_tenant_keystone_client()
resource_mapping.k_client.Client.assert_called_once_with(
group='chain_mapping')
chain_mapping.ChainMappingDriver.chain_tenant_keystone_client()
chain_mapping.k_client.Client.assert_called_once_with(
username='key_user', password='key_password',
auth_url='http://127.0.0.1:35357/v2.0/')
# Use chain-specific tenant credentials
resource_mapping.k_client.reset_mock()
chain_mapping.k_client.reset_mock()
config.cfg.CONF.set_override(
'chain_owner_user', 'chain_owner_user', group='resource_mapping')
'chain_owner_user', 'chain_owner_user', group='chain_mapping')
config.cfg.CONF.set_override(
'chain_owner_password', 'chain_owner_p', group='resource_mapping')
resource_mapping.ResourceMappingDriver.chain_tenant_keystone_client()
resource_mapping.k_client.Client.assert_called_once_with(
'chain_owner_password', 'chain_owner_p', group='chain_mapping')
chain_mapping.ChainMappingDriver.chain_tenant_keystone_client()
chain_mapping.k_client.Client.assert_called_once_with(
username='chain_owner_user', password='chain_owner_p',
auth_url='http://127.0.0.1:35357/v2.0/')
# Not called if no tenant name
resource_mapping.k_client.reset_mock()
chain_mapping.k_client.reset_mock()
config.cfg.CONF.set_override(
'chain_owner_tenant_name', '', group='resource_mapping')
resource_mapping.ResourceMappingDriver.chain_tenant_keystone_client()
self.assertFalse(resource_mapping.k_client.Client.called)
'chain_owner_tenant_name', '', group='chain_mapping')
chain_mapping.ChainMappingDriver.chain_tenant_keystone_client()
self.assertFalse(chain_mapping.k_client.Client.called)
def test_chain_tenant_id(self):
keyclient = mock.Mock()
with mock.patch.object(
resource_mapping.ResourceMappingDriver,
chain_mapping.ChainMappingDriver,
'chain_tenant_keystone_client') as key_client:
key_client.return_value = keyclient
@@ -2125,35 +2128,35 @@ class TestServiceChain(ResourceMappingTestCase):
return res
keyclient.tenants.find = ok
res = resource_mapping.ResourceMappingDriver.chain_tenant_id()
res = chain_mapping.ChainMappingDriver.chain_tenant_id()
self.assertEqual(CHAIN_TENANT_ID, res)
# Test NotFound
def not_found(name=''):
raise resource_mapping.k_exceptions.NotFound()
raise chain_mapping.k_exceptions.NotFound()
keyclient.tenants.find = not_found
# Do not re-raise
res = resource_mapping.ResourceMappingDriver.chain_tenant_id()
res = chain_mapping.ChainMappingDriver.chain_tenant_id()
self.assertIsNone(res)
# Re-raise
self.assertRaises(
resource_mapping.k_exceptions.NotFound,
resource_mapping.ResourceMappingDriver.chain_tenant_id, True)
chain_mapping.k_exceptions.NotFound,
chain_mapping.ChainMappingDriver.chain_tenant_id, True)
# Test Duplicated
def duplicated(name=''):
raise resource_mapping.k_exceptions.NoUniqueMatch()
raise chain_mapping.k_exceptions.NoUniqueMatch()
keyclient.tenants.find = duplicated
# Do not re-raise
res = resource_mapping.ResourceMappingDriver.chain_tenant_id()
res = chain_mapping.ChainMappingDriver.chain_tenant_id()
self.assertIsNone(res)
# Re-raise
self.assertRaises(
resource_mapping.k_exceptions.NoUniqueMatch,
resource_mapping.ResourceMappingDriver.chain_tenant_id, True)
chain_mapping.k_exceptions.NoUniqueMatch,
chain_mapping.ChainMappingDriver.chain_tenant_id, True)
def test_update_ptg_with_redirect_prs(self):
scs_id = self._create_servicechain_spec()
@@ -2955,10 +2958,10 @@ class TestServiceChainAdminOwner(TestServiceChain):
def setUp(self, **kwargs):
mock.patch('gbpservice.neutron.services.grouppolicy.drivers.'
'resource_mapping.ResourceMappingDriver.'
'chain_mapping.ChainMappingDriver.'
'chain_tenant_keystone_client').start()
res = mock.patch('gbpservice.neutron.services.grouppolicy.drivers.'
'resource_mapping.ResourceMappingDriver.'
'chain_mapping.ChainMappingDriver.'
'chain_tenant_id').start()
res.return_value = CHAIN_TENANT_ID
super(TestServiceChainAdminOwner, self).setUp(**kwargs)


@@ -127,7 +127,8 @@ class NodeCompositionPluginTestCase(
cfg.CONF.set_override('node_plumber', node_plumber or 'dummy_plumber',
group='node_composition_plugin')
config.cfg.CONF.set_override('policy_drivers',
['implicit_policy', 'resource_mapping'],
['implicit_policy', 'resource_mapping',
'chain_mapping'],
group='group_policy')
super(NodeCompositionPluginTestCase, self).setUp(
core_plugin=core_plugin or CORE_PLUGIN,
@@ -687,7 +688,8 @@ class TestQuotasForServiceChain(test_base.ServiceChainPluginTestCase):
cfg.CONF.set_override('node_plumber', node_plumber or 'dummy_plumber',
group='node_composition_plugin')
config.cfg.CONF.set_override('policy_drivers',
['implicit_policy', 'resource_mapping'],
['implicit_policy', 'resource_mapping',
'chain_mapping'],
group='group_policy')
super(TestQuotasForServiceChain, self).setUp(
core_plugin=core_plugin or CORE_PLUGIN,


@@ -1024,10 +1024,10 @@ class TestApicChainsAdminOwner(TestApicChains):
def setUp(self, **kwargs):
mock.patch('gbpservice.neutron.services.grouppolicy.drivers.'
'resource_mapping.ResourceMappingDriver.'
'chain_mapping.ChainMappingDriver.'
'chain_tenant_keystone_client').start()
res = mock.patch('gbpservice.neutron.services.grouppolicy.drivers.'
'resource_mapping.ResourceMappingDriver.'
'chain_mapping.ChainMappingDriver.'
'chain_tenant_id').start()
res.return_value = test_rmd.CHAIN_TENANT_ID
super(TestApicChainsAdminOwner, self).setUp(**kwargs)


@@ -74,7 +74,7 @@ plugin_dirs=/opt/stack/gbpautomation/gbpautomation/heat
[[post-config|/etc/neutron/neutron.conf]]
[group_policy]
policy_drivers=implicit_policy,resource_mapping
policy_drivers=implicit_policy,resource_mapping,chain_mapping
extension_drivers=proxy_group
[servicechain]


@@ -52,6 +52,7 @@ gbpservice.neutron.group_policy.policy_drivers =
dummy = gbpservice.neutron.services.grouppolicy.drivers.dummy_driver:NoopDriver
implicit_policy = gbpservice.neutron.services.grouppolicy.drivers.implicit_policy:ImplicitPolicyDriver
resource_mapping = gbpservice.neutron.services.grouppolicy.drivers.resource_mapping:ResourceMappingDriver
chain_mapping = gbpservice.neutron.services.grouppolicy.drivers.chain_mapping:ChainMappingDriver
apic = gbpservice.neutron.services.grouppolicy.drivers.cisco.apic.apic_mapping:ApicMappingDriver
odl = gbpservice.neutron.services.grouppolicy.drivers.odl.odl_mapping:OdlMappingDriver
oneconvergence_gbp_driver = gbpservice.neutron.services.grouppolicy.drivers.oneconvergence.nvsd_gbp_driver:NvsdGbpDriver