From 74c36b721d54a3d6d8d7a5da586833bd608f67b2 Mon Sep 17 00:00:00 2001 From: Kent Wu Date: Wed, 30 Aug 2017 15:52:22 -0700 Subject: [PATCH] Security Group support 1. Change 2 binding parameters to False, which will disable iptables FW. This is basically what we do for the legacy GBP. 2. Provide a monkey patch to address those upstream bugs related to SG and SG_rules. 3. Register for those SG and SG_rules events then process them accordingly. This will create/update/delete the corresponding objects in AIM. 4. If a SG_rule's remote_group_id is set, expand the port's fixed_ips into this rule's remote_ips. Also remove that ip when the port is being deleted. This will also happen when SGs are added or removed from the port. 5. Insert some default rules to allow ARP traffic to go thru during the MD initialization. 6. Add the SG info into the EP file. Change-Id: I4b0d9f9da2c15ac95aef6697a74f03194eb74487 --- gbpservice/neutron/extensions/patch.py | 245 ++++++++++++++++++ .../neutron/plugins/ml2plus/driver_api.py | 228 +++++++++++++++- .../neutron/plugins/ml2plus/driver_context.py | 36 +++ .../drivers/apic_aim/mechanism_driver.py | 223 +++++++++++++++- .../neutron/plugins/ml2plus/managers.py | 48 ++++ gbpservice/neutron/plugins/ml2plus/plugin.py | 56 ++++ .../drivers/cisco/apic/aim_mapping_rpc.py | 20 ++ .../unit/plugins/ml2plus/test_apic_aim.py | 181 ++++++++++++- .../tests/unit/plugins/ml2plus/test_plugin.py | 10 +- .../grouppolicy/test_aim_mapping_driver.py | 10 + 10 files changed, 1045 insertions(+), 12 deletions(-) diff --git a/gbpservice/neutron/extensions/patch.py b/gbpservice/neutron/extensions/patch.py index ee7e31da4..6b2813037 100644 --- a/gbpservice/neutron/extensions/patch.py +++ b/gbpservice/neutron/extensions/patch.py @@ -10,19 +10,26 @@ # License for the specific language governing permissions and limitations # under the License. +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources from neutron.db import address_scope_db from neutron.db import api as db_api from neutron.db import common_db_mixin from neutron.db import l3_db +from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.db import securitygroups_db from neutron.extensions import address_scope as ext_address_scope +from neutron.extensions import securitygroup as ext_sg from neutron.objects import subnetpool as subnetpool_obj from neutron.plugins.ml2 import db as ml2_db from neutron_lib.api import validators from neutron_lib import exceptions as n_exc from oslo_log import log +from oslo_utils import uuidutils from sqlalchemy import event +from sqlalchemy.orm import exc LOG = log.getLogger(__name__) @@ -83,6 +90,244 @@ securitygroups_db.SecurityGroupDbMixin._get_security_groups_on_port = ( _get_security_groups_on_port) +# REVISIT(kent): Neutron doesn't pass the remote_group_id while creating the +# ingress rule for the default SG. It also doesn't pass the newly created SG +# for the PRECOMMIT_CREATE event. Note that we should remove this in Pike as +# upstream has fixed the bug there +def create_security_group(self, context, security_group, default_sg=False): + """Create security group. + + If default_sg is true that means we are a default security group for + a given tenant if it does not exist. 
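+
+    This is a copy of the upstream implementation with two fixes (see
+    the REVISIT note above): the ingress rules of the default SG are
+    created with remote_group_id set, and the PRECOMMIT_CREATE
+    notification carries the newly created security group dict.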
+ """ + s = security_group['security_group'] + kwargs = { + 'context': context, + 'security_group': s, + 'is_default': default_sg, + } + + self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE, + exc_cls=ext_sg.SecurityGroupConflict, **kwargs) + + tenant_id = s['tenant_id'] + + if not default_sg: + self._ensure_default_security_group(context, tenant_id) + else: + existing_def_sg_id = self._get_default_sg_id(context, tenant_id) + if existing_def_sg_id is not None: + # default already exists, return it + return self.get_security_group(context, existing_def_sg_id) + + with db_api.autonested_transaction(context.session): + security_group_db = sg_models.SecurityGroup(id=s.get('id') or ( + uuidutils.generate_uuid()), + description=s['description'], + tenant_id=tenant_id, + name=s['name']) + context.session.add(security_group_db) + if default_sg: + context.session.add(sg_models.DefaultSecurityGroup( + security_group=security_group_db, + tenant_id=security_group_db['tenant_id'])) + for ethertype in ext_sg.sg_supported_ethertypes: + if default_sg: + # Allow intercommunication + ingress_rule = sg_models.SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group=security_group_db, + direction='ingress', + ethertype=ethertype, + remote_group_id=security_group_db.id, + source_group=security_group_db) + context.session.add(ingress_rule) + + egress_rule = sg_models.SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group=security_group_db, + direction='egress', + ethertype=ethertype) + context.session.add(egress_rule) + + secgroup_dict = self._make_security_group_dict(security_group_db) + kwargs['security_group'] = secgroup_dict + self._registry_notify(resources.SECURITY_GROUP, + events.PRECOMMIT_CREATE, + exc_cls=ext_sg.SecurityGroupConflict, + **kwargs) + + registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self, + **kwargs) + return secgroup_dict + +securitygroups_db.SecurityGroupDbMixin.create_security_group = ( + create_security_group) + + +# REVISIT(kent): Neutron doesn't pass the updated SG for the PRECOMMIT_UPDATE +# event. Note that we should remove this in Pike as upstream has fixed the bug +# there +def update_security_group(self, context, id, security_group): + s = security_group['security_group'] + + kwargs = { + 'context': context, + 'security_group_id': id, + 'security_group': s, + } + self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_UPDATE, + exc_cls=ext_sg.SecurityGroupConflict, **kwargs) + + with context.session.begin(subtransactions=True): + sg = self._get_security_group(context, id) + if sg['name'] == 'default' and 'name' in s: + raise ext_sg.SecurityGroupCannotUpdateDefault() + sg_dict = self._make_security_group_dict(sg) + kwargs['original_security_group'] = sg_dict + sg.update(s) + sg_dict = self._make_security_group_dict(sg) + kwargs['security_group'] = sg_dict + self._registry_notify( + resources.SECURITY_GROUP, + events.PRECOMMIT_UPDATE, + exc_cls=ext_sg.SecurityGroupConflict, **kwargs) + + registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self, + **kwargs) + return sg_dict + +securitygroups_db.SecurityGroupDbMixin.update_security_group = ( + update_security_group) + + +# REVISIT(kent): Neutron doesn't pass the SG rules for the PRECOMMIT_DELETE +# event. 
Note that we should remove this in Pike as upstream has fixed the bug +# there +def delete_security_group(self, context, id): + filters = {'security_group_id': [id]} + ports = self._get_port_security_group_bindings(context, filters) + if ports: + raise ext_sg.SecurityGroupInUse(id=id) + # confirm security group exists + sg = self._get_security_group(context, id) + + if sg['name'] == 'default' and not context.is_admin: + raise ext_sg.SecurityGroupCannotRemoveDefault() + kwargs = { + 'context': context, + 'security_group_id': id, + 'security_group': sg, + } + self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_DELETE, + exc_cls=ext_sg.SecurityGroupInUse, id=id, + **kwargs) + + with context.session.begin(subtransactions=True): + # pass security_group_rule_ids to ensure + # consistency with deleted rules + kwargs['security_group_rule_ids'] = [r['id'] for r in sg.rules] + kwargs['security_group'] = self._make_security_group_dict(sg) + self._registry_notify(resources.SECURITY_GROUP, + events.PRECOMMIT_DELETE, + exc_cls=ext_sg.SecurityGroupInUse, id=id, + **kwargs) + context.session.delete(sg) + + kwargs.pop('security_group') + registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self, + **kwargs) + +securitygroups_db.SecurityGroupDbMixin.delete_security_group = ( + delete_security_group) + + +# REVISIT(kent): Neutron doesn't pass the newly created SG rule for the +# PRECOMMIT_CREATE event. Note that we should remove this in Pike as upstream +# has fixed the bug there +def _create_security_group_rule(self, context, security_group_rule, + validate=True): + if validate: + self._validate_security_group_rule(context, security_group_rule) + rule_dict = security_group_rule['security_group_rule'] + kwargs = { + 'context': context, + 'security_group_rule': rule_dict + } + self._registry_notify(resources.SECURITY_GROUP_RULE, + events.BEFORE_CREATE, + exc_cls=ext_sg.SecurityGroupConflict, **kwargs) + + with context.session.begin(subtransactions=True): + if validate: + self._check_for_duplicate_rules_in_db(context, + security_group_rule) + db = sg_models.SecurityGroupRule( + id=(rule_dict.get('id') or uuidutils.generate_uuid()), + tenant_id=rule_dict['tenant_id'], + security_group_id=rule_dict['security_group_id'], + direction=rule_dict['direction'], + remote_group_id=rule_dict.get('remote_group_id'), + ethertype=rule_dict['ethertype'], + protocol=rule_dict['protocol'], + port_range_min=rule_dict['port_range_min'], + port_range_max=rule_dict['port_range_max'], + remote_ip_prefix=rule_dict.get('remote_ip_prefix'), + description=rule_dict.get('description') + ) + context.session.add(db) + res_rule_dict = self._make_security_group_rule_dict(db) + kwargs['security_group_rule'] = res_rule_dict + self._registry_notify(resources.SECURITY_GROUP_RULE, + events.PRECOMMIT_CREATE, + exc_cls=ext_sg.SecurityGroupConflict, **kwargs) + registry.notify( + resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self, + **kwargs) + return res_rule_dict + +securitygroups_db.SecurityGroupDbMixin._create_security_group_rule = ( + _create_security_group_rule) + + +# REVISIT(kent): Neutron doesn't pass the SG ID of the rule for the +# PRECOMMIT_DELETE event. 
Note that we should remove this in Pike as upstream +# has fixed the bug there +def delete_security_group_rule(self, context, id): + kwargs = { + 'context': context, + 'security_group_rule_id': id + } + self._registry_notify(resources.SECURITY_GROUP_RULE, + events.BEFORE_DELETE, id=id, + exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs) + + with context.session.begin(subtransactions=True): + query = self._model_query(context, + sg_models.SecurityGroupRule).filter( + sg_models.SecurityGroupRule.id == id) + try: + # As there is a filter on a primary key it is not possible for + # MultipleResultsFound to be raised + sg_rule = query.one() + except exc.NoResultFound: + raise ext_sg.SecurityGroupRuleNotFound(id=id) + + kwargs['security_group_id'] = sg_rule['security_group_id'] + self._registry_notify(resources.SECURITY_GROUP_RULE, + events.PRECOMMIT_DELETE, + exc_cls=ext_sg.SecurityGroupRuleInUse, id=id, + **kwargs) + context.session.delete(sg_rule) + + registry.notify( + resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self, + **kwargs) + +securitygroups_db.SecurityGroupDbMixin.delete_security_group_rule = ( + delete_security_group_rule) + + def get_port_from_device_mac(context, device_mac): LOG.debug("get_port_from_device_mac() called for mac %s", device_mac) qry = context.session.query(models_v2.Port).filter_by( diff --git a/gbpservice/neutron/plugins/ml2plus/driver_api.py b/gbpservice/neutron/plugins/ml2plus/driver_api.py index c309f7e3c..8fc64cd83 100644 --- a/gbpservice/neutron/plugins/ml2plus/driver_api.py +++ b/gbpservice/neutron/plugins/ml2plus/driver_api.py @@ -83,6 +83,70 @@ class AddressScopeContext(object): pass +@six.add_metaclass(abc.ABCMeta) +class SecurityGroupContext(object): + """Context passed to MechanismDrivers for changes to security group + resources. + + A SecurityGroupContext instance wraps a security group + resource. It provides helper methods for accessing other relevant + information. Results from expensive operations are cached so that + other MechanismDrivers can freely access the same information. + """ + + @abc.abstractproperty + def current(self): + """Return the security group in its current configuration. + + Return the security group with all its properties 'current' at + the time the context was established. + """ + pass + + @abc.abstractproperty + def original(self): + """Return the security group in its original configuration. + + Return the security group, with all its properties set to their + original values prior to a call to update_security_group. Method is + only valid within calls to update_security_group_precommit and + update_security_group_postcommit. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class SecurityGroupRuleContext(object): + """Context passed to MechanismDrivers for changes to security group + rule resources. + + A SecurityGroupRuleContext instance wraps a security group rule + resource. It provides helper methods for accessing other relevant + information. Results from expensive operations are cached so that + other MechanismDrivers can freely access the same information. + """ + + @abc.abstractproperty + def current(self): + """Return the security group rule in its current configuration. + + Return the security group rule with all its properties 'current' at + the time the context was established. + """ + pass + + @abc.abstractproperty + def original(self): + """Return the security group rule in its original configuration. 
+ + Return the security group rule, with all its properties set to their + original values prior to a call to update_security_group. Method is + only valid within calls to update_security_group_rule_precommit and + update_security_group_rule_postcommit. + """ + pass + + @six.add_metaclass(abc.ABCMeta) class MechanismDriver(driver_api.MechanismDriver): @@ -289,9 +353,167 @@ class MechanismDriver(driver_api.MechanismDriver): """ pass - # REVISIT(rkukura): Add precommit/postcommit calls for other - # resources implemented in ML2, such as security groups and - # security group rules? + def create_security_group_precommit(self, context): + """Allocate resources for a new security group. + + :param context: SecurityGroupContext instance describing the + new security group. + + Create a new security group, allocating resources as necessary + in the database. Called inside transaction context on + session. Call cannot block. Raising an exception will result + in a rollback of the current transaction. + """ + pass + + def create_security_group_postcommit(self, context): + """Create a security group. + + :param context: SecurityGroupContext instance describing the + new security group. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + This API is not being implemented at this moment. + """ + pass + + def update_security_group_precommit(self, context): + """Update resources of a security group. + + :param context: SecurityGroupContext instance describing the + new state of the security group, as well as the original state + prior to the update_security_group call. + + Update values of an security group, updating the associated + resources in the database. Called inside transaction context + on session. Raising an exception will result in rollback of + the transaction. + + update_security_group_precommit is called for all changes to + the security group state. It is up to the mechanism driver to + ignore state or state changes that it does not know or care + about. + """ + pass + + def update_security_group_postcommit(self, context): + """Update a security group. + + :param context: SecurityGroupContext instance describing the + new state of the security group, as well as the original state + prior to the update_security_group call. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + update_security_group_postcommit is called for all changes to + the security group state. It is up to the mechanism driver to + ignore state or state changes that it does not know or care + about. + + This API is not being implemented at this moment. + """ + pass + + def delete_security_group_precommit(self, context): + """Delete resources for a security group. + + :param context: SecurityGroupContext instance describing the + current state of the security group, prior to the call to + delete it. + + Delete security group resources previously allocated by this + mechanism driver for an security group. Called inside + transaction context on session. Runtime errors are not + expected, but raising an exception will result in rollback of + the transaction. + """ + pass + + def delete_security_group_postcommit(self, context): + """Delete a security group. 
+ + :param context: SecurityGroupContext instance describing the + current state of the security group, prior to the call to + delete it. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + + This API is not being implemented at this moment. + """ + pass + + def create_security_group_rule_precommit(self, context): + """Allocate resources for a new security group. + + :param context: SecurityGroupRuleContext instance describing the + new security group rule. + + Create a new security group rule, allocating resources as necessary + in the database. Called inside transaction context on + session. Call cannot block. Raising an exception will result + in a rollback of the current transaction. + """ + pass + + def create_security_group_rule_postcommit(self, context): + """Create a security group rule. + + :param context: SecurityGroupRuleContext instance describing the + new security group rule. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + This API is not being implemented at this moment. + """ + pass + + # Security group rule updates are not supported by the Neutron API. + + def delete_security_group_rule_precommit(self, context): + """Delete resources for a security group rule. + + :param context: SecurityGroupRuleContext instance describing the + current state of the security group rule, prior to the call to + delete it. + + Delete security group rule resources previously allocated by this + mechanism driver for an security group rule. Called inside + transaction context on session. Runtime errors are not + expected, but raising an exception will result in rollback of + the transaction. + """ + pass + + def delete_security_group_rule_postcommit(self, context): + """Delete a security group rule. + + :param context: SecurityGroupRuleContext instance describing the + current state of the security group rule, prior to the call to + delete it. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + + This API is not being implemented at this moment. 
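+        (Like the other SG and SG-rule postcommit hooks above, this is
+        kept as a stub so drivers only need to implement the precommit
+        counterparts for now.)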
+ """ + pass @six.add_metaclass(abc.ABCMeta) diff --git a/gbpservice/neutron/plugins/ml2plus/driver_context.py b/gbpservice/neutron/plugins/ml2plus/driver_context.py index aa3b315f6..7fd0be667 100644 --- a/gbpservice/neutron/plugins/ml2plus/driver_context.py +++ b/gbpservice/neutron/plugins/ml2plus/driver_context.py @@ -52,3 +52,39 @@ class AddressScopeContext(ml2_context.MechanismDriverContext, @property def original(self): return self._original_address_scope + + +class SecurityGroupContext(ml2_context.MechanismDriverContext, + api.SecurityGroupContext): + + def __init__(self, plugin, plugin_context, security_group, + original_security_group=None): + super(SecurityGroupContext, self).__init__(plugin, plugin_context) + self._security_group = security_group + self._original_security_group = original_security_group + + @property + def current(self): + return self._security_group + + @property + def original(self): + return self._original_security_group + + +class SecurityGroupRuleContext(ml2_context.MechanismDriverContext, + api.SecurityGroupRuleContext): + + def __init__(self, plugin, plugin_context, security_group_rule, + original_security_group_rule=None): + super(SecurityGroupRuleContext, self).__init__(plugin, plugin_context) + self._security_group_rule = security_group_rule + self._original_security_group_rule = original_security_group_rule + + @property + def current(self): + return self._security_group_rule + + @property + def original(self): + return self._original_security_group_rule diff --git a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py index 07b5214fe..bd93fdf88 100644 --- a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py +++ b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py @@ -31,6 +31,7 @@ from neutron.db import api as db_api from neutron.db import l3_db from neutron.db.models import address_scope as as_db from neutron.db.models import allowed_address_pair as n_addr_pair_db +from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.db import rbac_db_models from neutron.db import segments_db @@ -89,6 +90,7 @@ FABRIC_HOST_ID = 'fabric' NO_ADDR_SCOPE = object() DVS_AGENT_KLASS = 'networking_vsphere.common.dvs_agent_rpc_api.DVSClientAPI' +GBP_DEFAULT = 'gbp_default' class KeystoneNotificationEndpoint(object): @@ -199,6 +201,44 @@ class ApicMechanismDriver(api_plus.MechanismDriver, self.enable_keystone_notification_purge = (cfg.CONF.ml2_apic_aim. enable_keystone_notification_purge) local_api.QUEUE_OUT_OF_PROCESS_NOTIFICATIONS = True + self._setup_default_arp_security_group_rules() + + def _setup_default_arp_security_group_rules(self): + session = db_api.get_session() + aim_ctx = aim_context.AimContext(session) + sg = aim_resource.SecurityGroup( + tenant_name=COMMON_TENANT_NAME, name=GBP_DEFAULT) + try: + self.aim.create(aim_ctx, sg, overwrite=True) + except db_exc.DBNonExistentTable as e: + # This is expected in the UT env. but will never + # happen in the real fab. 
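+            # (DBNonExistentTable means the AIM tables have not been
+            # created yet, which only occurs in the unit-test
+            # environment, so just log it and skip the default rules)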
+ LOG.error(e) + return + + sg_subject = aim_resource.SecurityGroupSubject( + tenant_name=COMMON_TENANT_NAME, + security_group_name=GBP_DEFAULT, name='default') + self.aim.create(aim_ctx, sg_subject, overwrite=True) + + arp_egress_rule = aim_resource.SecurityGroupRule( + tenant_name=COMMON_TENANT_NAME, + security_group_name=GBP_DEFAULT, + security_group_subject_name='default', + name='arp_egress', + direction='egress', + ethertype='arp', + conn_track='normal') + arp_ingress_rule = aim_resource.SecurityGroupRule( + tenant_name=COMMON_TENANT_NAME, + security_group_name=GBP_DEFAULT, + security_group_subject_name='default', + name='arp_ingress', + direction='ingress', + ethertype='arp', + conn_track='normal') + self.aim.create(aim_ctx, arp_egress_rule, overwrite=True) + self.aim.create(aim_ctx, arp_ingress_rule, overwrite=True) def _setup_keystone_notification_listeners(self): targets = [oslo_messaging.Target( @@ -1235,6 +1275,61 @@ class ApicMechanismDriver(api_plus.MechanismDriver, # hierarchically if the network-type is OpFlex. self._bind_physical_node(context) + def _update_sg_rule_with_remote_group_set(self, context, port): + security_groups = port['security_groups'] + original_port = context.original + if original_port: + removed_sgs = (set(original_port['security_groups']) - + set(security_groups)) + added_sgs = (set(security_groups) - + set(original_port['security_groups'])) + self._really_update_sg_rule_with_remote_group_set( + context, port, removed_sgs, is_delete=True) + self._really_update_sg_rule_with_remote_group_set( + context, port, added_sgs, is_delete=False) + + def _really_update_sg_rule_with_remote_group_set( + self, context, port, security_groups, is_delete): + if not security_groups: + return + session = context._plugin_context.session + aim_ctx = aim_context.AimContext(session) + sg_rules = (session.query(sg_models.SecurityGroupRule). + filter(sg_models.SecurityGroupRule.remote_group_id. + in_(security_groups)). 
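+                    # i.e. every rule, in any security group, whose
+                    # remote_group_id refers to one of the given SGs;
+                    # their remote_ips must track this port's fixed IPs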
+ all()) + fixed_ips = [x['ip_address'] for x in port['fixed_ips']] + for sg_rule in sg_rules: + tenant_aname = self.name_mapper.project(session, + sg_rule['tenant_id']) + sg_rule_aim = aim_resource.SecurityGroupRule( + tenant_name=tenant_aname, + security_group_name=sg_rule['security_group_id'], + security_group_subject_name='default', + name=sg_rule['id']) + aim_sg_rule = self.aim.get(aim_ctx, sg_rule_aim) + if not aim_sg_rule: + continue + ip_version = 0 + if sg_rule['ethertype'] == 'IPv4': + ip_version = 4 + elif sg_rule['ethertype'] == 'IPv6': + ip_version = 6 + for fixed_ip in fixed_ips: + if is_delete: + if fixed_ip in aim_sg_rule.remote_ips: + aim_sg_rule.remote_ips.remove(fixed_ip) + elif ip_version == netaddr.IPAddress(fixed_ip).version: + if fixed_ip not in aim_sg_rule.remote_ips: + aim_sg_rule.remote_ips.append(fixed_ip) + self.aim.update(aim_ctx, sg_rule_aim, + remote_ips=aim_sg_rule.remote_ips) + + def create_port_precommit(self, context): + port = context.current + self._really_update_sg_rule_with_remote_group_set( + context, port, port['security_groups'], is_delete=False) + def update_port_precommit(self, context): port = context.current if context.original_host and context.original_host != context.host: @@ -1244,7 +1339,6 @@ class ApicMechanismDriver(api_plus.MechanismDriver, self._update_static_path(context, host=context.original_host, segment=context.original_bottom_bound_segment, remove=True) self._release_dynamic_segment(context, use_original=True) - if self._is_port_bound(port): if self._use_static_path(context.bottom_bound_segment): self._associate_domain(context, is_vmm=False) @@ -1253,6 +1347,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, self._is_opflex_type( context.bottom_bound_segment[api.NETWORK_TYPE])): self._associate_domain(context, is_vmm=True) + self._update_sg_rule_with_remote_group_set(context, port) def update_port_postcommit(self, context): port = context.current @@ -1277,6 +1372,128 @@ class ApicMechanismDriver(api_plus.MechanismDriver, self._is_opflex_type( context.bottom_bound_segment[api.NETWORK_TYPE])): self.disassociate_domain(context) + self._really_update_sg_rule_with_remote_group_set( + context, port, port['security_groups'], is_delete=True) + + def create_security_group_precommit(self, context): + session = context._plugin_context.session + aim_ctx = aim_context.AimContext(session) + + sg = context.current + tenant_aname = self.name_mapper.project(session, sg['tenant_id']) + sg_aim = aim_resource.SecurityGroup( + tenant_name=tenant_aname, name=sg['id'], + display_name=aim_utils.sanitize_display_name(sg['name'])) + self.aim.create(aim_ctx, sg_aim) + # Always create this default subject + sg_subject = aim_resource.SecurityGroupSubject( + tenant_name=tenant_aname, + security_group_name=sg['id'], name='default') + self.aim.create(aim_ctx, sg_subject) + + # Create those implicit rules + for sg_rule in sg.get('security_group_rules', []): + sg_rule_aim = aim_resource.SecurityGroupRule( + tenant_name=tenant_aname, + security_group_name=sg['id'], + security_group_subject_name='default', + name=sg_rule['id'], + direction=sg_rule['direction'], + ethertype=sg_rule['ethertype'].lower(), + ip_protocol=(sg_rule['protocol'] if sg_rule['protocol'] + else 'unspecified'), + remote_ips=(sg_rule['remote_ip_prefix'] + if sg_rule['remote_ip_prefix'] else ''), + from_port=(sg_rule['port_range_min'] + if sg_rule['port_range_min'] else 'unspecified'), + to_port=(sg_rule['port_range_max'] + if sg_rule['port_range_max'] else 'unspecified')) + 
self.aim.create(aim_ctx, sg_rule_aim) + + def update_security_group_precommit(self, context): + # Only display_name change makes sense here + sg = context.current + original_sg = context.original + if sg.get('name') == original_sg.get('name'): + return + session = context._plugin_context.session + aim_ctx = aim_context.AimContext(session) + tenant_aname = self.name_mapper.project(session, sg['tenant_id']) + sg_aim = aim_resource.SecurityGroup( + tenant_name=tenant_aname, name=sg['id']) + self.aim.update(aim_ctx, sg_aim, + display_name=aim_utils.sanitize_display_name( + sg['name'])) + + def delete_security_group_precommit(self, context): + session = context._plugin_context.session + aim_ctx = aim_context.AimContext(session) + sg = context.current + tenant_aname = self.name_mapper.project(session, sg['tenant_id']) + for sg_rule in sg.get('security_group_rules'): + sg_rule_aim = aim_resource.SecurityGroupRule( + tenant_name=tenant_aname, + security_group_name=sg['id'], + security_group_subject_name='default', + name=sg_rule['id']) + self.aim.delete(aim_ctx, sg_rule_aim) + sg_subject = aim_resource.SecurityGroupSubject( + tenant_name=tenant_aname, + security_group_name=sg['id'], name='default') + self.aim.delete(aim_ctx, sg_subject) + sg_aim = aim_resource.SecurityGroup(tenant_name=tenant_aname, + name=sg['id']) + self.aim.delete(aim_ctx, sg_aim) + + def create_security_group_rule_precommit(self, context): + session = context._plugin_context.session + aim_ctx = aim_context.AimContext(session) + sg_rule = context.current + tenant_aname = self.name_mapper.project(session, sg_rule['tenant_id']) + if sg_rule.get('remote_group_id'): + remote_ips = [] + sg_ports = (session.query(models_v2.Port). + join(sg_models.SecurityGroupPortBinding, + sg_models.SecurityGroupPortBinding.port_id == + models_v2.Port.id). + filter(sg_models.SecurityGroupPortBinding. + security_group_id == + sg_rule['remote_group_id']). 
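+                        # i.e. every port currently bound to the rule's
+                        # remote SG; their fixed IPs seed this new
+                        # rule's remote_ips below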
+ all()) + for sg_port in sg_ports: + for fixed_ip in sg_port['fixed_ips']: + remote_ips.append(fixed_ip['ip_address']) + else: + remote_ips = ([sg_rule['remote_ip_prefix']] + if sg_rule['remote_ip_prefix'] else '') + + sg_rule_aim = aim_resource.SecurityGroupRule( + tenant_name=tenant_aname, + security_group_name=sg_rule['security_group_id'], + security_group_subject_name='default', + name=sg_rule['id'], + direction=sg_rule['direction'], + ethertype=sg_rule['ethertype'].lower(), + ip_protocol=(sg_rule['protocol'] if sg_rule['protocol'] + else 'unspecified'), + remote_ips=remote_ips, + from_port=(sg_rule['port_range_min'] + if sg_rule['port_range_min'] else 'unspecified'), + to_port=(sg_rule['port_range_max'] + if sg_rule['port_range_max'] else 'unspecified')) + self.aim.create(aim_ctx, sg_rule_aim) + + def delete_security_group_rule_precommit(self, context): + session = context._plugin_context.session + aim_ctx = aim_context.AimContext(session) + sg_rule = context.current + tenant_aname = self.name_mapper.project(session, sg_rule['tenant_id']) + sg_rule_aim = aim_resource.SecurityGroupRule( + tenant_name=tenant_aname, + security_group_name=sg_rule['security_group_id'], + security_group_subject_name='default', + name=sg_rule['id']) + self.aim.delete(aim_ctx, sg_rule_aim) def delete_port_postcommit(self, context): port = context.current @@ -1477,8 +1694,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver, def _complete_binding(self, context, segment): context.set_binding( segment[api.ID], portbindings.VIF_TYPE_OVS, - {portbindings.CAP_PORT_FILTER: self.sg_enabled, - portbindings.OVS_HYBRID_PLUG: self.sg_enabled}) + {portbindings.CAP_PORT_FILTER: False, + portbindings.OVS_HYBRID_PLUG: False}) @property def plugin(self): diff --git a/gbpservice/neutron/plugins/ml2plus/managers.py b/gbpservice/neutron/plugins/ml2plus/managers.py index 7403d9ed2..e58dcfecf 100644 --- a/gbpservice/neutron/plugins/ml2plus/managers.py +++ b/gbpservice/neutron/plugins/ml2plus/managers.py @@ -133,6 +133,54 @@ class MechanismManager(managers.MechanismManager): self._call_on_extended_drivers("delete_address_scope_postcommit", context, continue_on_failure=True) + def create_security_group_precommit(self, context): + self._call_on_extended_drivers("create_security_group_precommit", + context, raise_db_retriable=True) + + def create_security_group_postcommit(self, context): + self._call_on_extended_drivers("create_security_group_postcommit", + context) + + def update_security_group_precommit(self, context): + self._call_on_extended_drivers("update_security_group_precommit", + context, raise_db_retriable=True) + + def update_security_group_postcommit(self, context): + self._call_on_extended_drivers("update_security_group_postcommit", + context, continue_on_failure=True) + + def delete_security_group_precommit(self, context): + self._call_on_extended_drivers("delete_security_group_precommit", + context, raise_db_retriable=True) + + def delete_security_group_postcommit(self, context): + self._call_on_extended_drivers("delete_security_group_postcommit", + context, continue_on_failure=True) + + def create_security_group_rule_precommit(self, context): + self._call_on_extended_drivers("create_security_group_rule_precommit", + context, raise_db_retriable=True) + + def create_security_group_rule_postcommit(self, context): + self._call_on_extended_drivers("create_security_group_rule_postcommit", + context) + + def update_security_group_rule_precommit(self, context): + 
self._call_on_extended_drivers("update_security_group_rule_precommit", + context, raise_db_retriable=True) + + def update_security_group_rule_postcommit(self, context): + self._call_on_extended_drivers("update_security_group_rule_postcommit", + context, continue_on_failure=True) + + def delete_security_group_rule_precommit(self, context): + self._call_on_extended_drivers("delete_security_group_rule_precommit", + context, raise_db_retriable=True) + + def delete_security_group_rule_postcommit(self, context): + self._call_on_extended_drivers("delete_security_group_rule_postcommit", + context, continue_on_failure=True) + class ExtensionManager(managers.ExtensionManager): diff --git a/gbpservice/neutron/plugins/ml2plus/plugin.py b/gbpservice/neutron/plugins/ml2plus/plugin.py index 2f4008e88..e7d2d81cb 100644 --- a/gbpservice/neutron/plugins/ml2plus/plugin.py +++ b/gbpservice/neutron/plugins/ml2plus/plugin.py @@ -166,6 +166,23 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin, events.AFTER_CREATE) registry.subscribe(self._handle_segment_change, resources.SEGMENT, events.AFTER_DELETE) + + # REVISIT(kent): All the postcommit calls for SG and SG rules are not + # currently implemented as they are not needed at this moment. + registry.subscribe(self._handle_security_group_change, + resources.SECURITY_GROUP, events.PRECOMMIT_CREATE) + registry.subscribe(self._handle_security_group_change, + resources.SECURITY_GROUP, events.PRECOMMIT_DELETE) + registry.subscribe(self._handle_security_group_change, + resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE) + + # There is no update event to the security_group_rule + registry.subscribe(self._handle_security_group_rule_change, + resources.SECURITY_GROUP_RULE, + events.PRECOMMIT_CREATE) + registry.subscribe(self._handle_security_group_rule_change, + resources.SECURITY_GROUP_RULE, + events.PRECOMMIT_DELETE) try: registry.subscribe(self._subnet_delete_precommit_handler, resources.SUBNET, events.PRECOMMIT_DELETE) @@ -194,6 +211,45 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin, db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( as_ext.ADDRESS_SCOPES, ['_ml2_md_extend_address_scope_dict']) + def _handle_security_group_change(self, resource, event, trigger, + **kwargs): + context = kwargs.get('context') + security_group = kwargs.get('security_group') + original_security_group = kwargs.get('original_security_group') + mech_context = driver_context.SecurityGroupContext( + self, context, security_group, original_security_group) + if event == events.PRECOMMIT_CREATE: + self._ensure_tenant(context, security_group) + self.mechanism_manager.create_security_group_precommit( + mech_context) + return + if event == events.PRECOMMIT_DELETE: + self.mechanism_manager.delete_security_group_precommit( + mech_context) + return + if event == events.PRECOMMIT_UPDATE: + self.mechanism_manager.update_security_group_precommit( + mech_context) + + def _handle_security_group_rule_change(self, resource, event, trigger, + **kwargs): + context = kwargs.get('context') + if event == events.PRECOMMIT_CREATE: + sg_rule = kwargs.get('security_group_rule') + mech_context = driver_context.SecurityGroupRuleContext( + self, context, sg_rule) + self.mechanism_manager.create_security_group_rule_precommit( + mech_context) + return + if event == events.PRECOMMIT_DELETE: + sg_rule = {'id': kwargs.get('security_group_rule_id'), + 'security_group_id': kwargs.get('security_group_id'), + 'tenant_id': context.tenant} + mech_context = driver_context.SecurityGroupRuleContext( + self, context, sg_rule) + 
self.mechanism_manager.delete_security_group_rule_precommit( + mech_context) + def _ml2_md_extend_network_dict(self, result, netdb): session = patch_neutron.get_current_session() with session.begin(subtransactions=True): diff --git a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py index 853b88c11..b08d1ab44 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py @@ -179,6 +179,9 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin): if mtu: details['interface_mtu'] = mtu + if port.get('security_groups'): + self._add_security_group_details(context, port, details) + # NOTE(ivar): having these methods cleanly separated actually makes # things less efficient by requiring lots of calls duplication. # we could alleviate this by passing down a cache that stores @@ -202,6 +205,23 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin): def _get_owned_addresses(self, plugin_context, port_id): return set(self.ha_ip_handler.get_ha_ipaddresses_for_port(port_id)) + def _add_security_group_details(self, context, port, details): + vif_details = port.get('binding:vif_details') + # For legacy VMs, they are running in this mode which means + # they will use iptables to support SG. Then we don't bother + # to configure any SG for them here. + if (vif_details and vif_details.get('port_filter') and + vif_details.get('ovs_hybrid_plug')): + return + details['security_group'] = [] + for sg_id in port['security_groups']: + details['security_group'].append( + {'policy-space': details['ptg_tenant'], + 'name': sg_id}) + # Always include this SG which has the default arp & dhcp rules + details['security_group'].append({'policy-space': 'common', + 'name': 'gbp_default'}) + # Child class needs to support: # - self._get_subnet_details(context, port, details) def _add_subnet_details(self, context, port, details): diff --git a/gbpservice/neutron/tests/unit/plugins/ml2plus/test_apic_aim.py b/gbpservice/neutron/tests/unit/plugins/ml2plus/test_apic_aim.py index e945eff8b..78d30f831 100644 --- a/gbpservice/neutron/tests/unit/plugins/ml2plus/test_apic_aim.py +++ b/gbpservice/neutron/tests/unit/plugins/ml2plus/test_apic_aim.py @@ -39,6 +39,7 @@ from neutron.tests.unit.api import test_extensions from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.extensions import test_address_scope from neutron.tests.unit.extensions import test_l3 +from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import constants as n_constants from neutron_lib.plugins import directory from opflexagent import constants as ofcst @@ -159,7 +160,8 @@ class ApicAimTestMixin(object): class ApicAimTestCase(test_address_scope.AddressScopeTestCase, - test_l3.L3NatTestCaseMixin, ApicAimTestMixin): + test_l3.L3NatTestCaseMixin, ApicAimTestMixin, + test_securitygroup.SecurityGroupsTestCase): def setUp(self, mechanism_drivers=None, tenant_network_types=None): # Enable the test mechanism driver to ensure that @@ -323,6 +325,39 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase, raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res) + def _get_sg(self, sg_id, tenant_name): + session = db_api.get_session() + aim_ctx = aim_context.AimContext(session) + sg = aim_resource.SecurityGroup(tenant_name=tenant_name, + name=sg_id) + sg = 
self.aim_mgr.get(aim_ctx, sg) + self.assertIsNotNone(sg) + return sg + + def _sg_should_not_exist(self, sg_id): + session = db_api.get_session() + aim_ctx = aim_context.AimContext(session) + sgs = self.aim_mgr.find( + aim_ctx, aim_resource.SecurityGroup, name=sg_id) + self.assertEqual([], sgs) + + def _get_sg_rule(self, sg_id, sg_rule_id, tenant_name): + session = db_api.get_session() + aim_ctx = aim_context.AimContext(session) + sg_rule = aim_resource.SecurityGroupRule( + tenant_name=tenant_name, security_group_name=sg_id, + security_group_subject_name='default', name=sg_rule_id) + sg_rule = self.aim_mgr.get(aim_ctx, sg_rule) + self.assertIsNotNone(sg_rule) + return sg_rule + + def _sg_rule_should_not_exist(self, sg_rule_id): + session = db_api.get_session() + aim_ctx = aim_context.AimContext(session) + sg_rules = self.aim_mgr.find( + aim_ctx, aim_resource.SecurityGroupRule, name=sg_rule_id) + self.assertEqual([], sg_rules) + def port_notif_verifier(self): def verify(plugin_context, port): self.assertFalse(plugin_context.session.is_active) @@ -619,6 +654,46 @@ class TestAimMapping(ApicAimTestCase): aname = self.name_mapper.address_scope(None, scope['id']) self._vrf_should_not_exist(aname) + def _check_sg(self, sg): + tenant_aname = self.name_mapper.project(None, sg['tenant_id']) + self._get_tenant(tenant_aname) + + aim_sg = self._get_sg(sg['id'], tenant_aname) + self.assertEqual(tenant_aname, aim_sg.tenant_name) + self.assertEqual(sg['id'], aim_sg.name) + self.assertEqual(sg['name'], aim_sg.display_name) + + # check those SG rules including the default ones + for sg_rule in sg.get('security_group_rules', []): + self._check_sg_rule(sg['id'], sg_rule) + + def _check_sg_rule(self, sg_id, sg_rule): + tenant_aname = self.name_mapper.project(None, sg_rule['tenant_id']) + self._get_tenant(tenant_aname) + + aim_sg_rule = self._get_sg_rule(sg_id, sg_rule['id'], tenant_aname) + self.assertEqual(tenant_aname, aim_sg_rule.tenant_name) + self.assertEqual(sg_id, aim_sg_rule.security_group_name) + self.assertEqual('default', + aim_sg_rule.security_group_subject_name) + self.assertEqual(sg_rule['id'], aim_sg_rule.name) + self.assertEqual(sg_rule['direction'], aim_sg_rule.direction) + self.assertEqual(sg_rule['ethertype'].lower(), + aim_sg_rule.ethertype) + self.assertEqual('reflexive', aim_sg_rule.conn_track) + self.assertEqual(([sg_rule['remote_ip_prefix']] if + sg_rule['remote_ip_prefix'] else []), + aim_sg_rule.remote_ips) + self.assertEqual((sg_rule['protocol'] if + sg_rule['protocol'] else 'unspecified'), + aim_sg_rule.ip_protocol) + self.assertEqual((str(sg_rule['port_range_min']) if + sg_rule['port_range_min'] else 'unspecified'), + aim_sg_rule.from_port) + self.assertEqual((str(sg_rule['port_range_max']) if + sg_rule['port_range_max'] else 'unspecified'), + aim_sg_rule.to_port) + def _check_router(self, router, expected_gw_ips, scopes=None, unscoped_project=None): dns = copy.copy(router.get(DN)) @@ -730,6 +805,44 @@ class TestAimMapping(ApicAimTestCase): self._delete('networks', net_id) self._check_network_deleted(net) + def test_security_group_lifecycle(self): + # Test create + sg = self._make_security_group(self.fmt, + 'sg1', 'test')['security_group'] + sg_id = sg['id'] + self._check_sg(sg) + + # Test show. + sg = self._show('security-groups', sg_id)['security_group'] + self._check_sg(sg) + + # Test update. 
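+        # (only the name/display_name is expected to change on the
+        # AIM side)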
+ data = {'security_group': {'name': 'new_sg_name'}} + sg = self._update('security-groups', sg_id, data)['security_group'] + self._check_sg(sg) + + # Test adding rules + rule1 = self._build_security_group_rule( + sg_id, 'ingress', n_constants.PROTO_NAME_TCP, '22', '23', + remote_ip_prefix='1.1.1.1/0', remote_group_id=None, + ethertype=n_constants.IPv4) + rules = {'security_group_rules': [rule1['security_group_rule']]} + sg_rule = self._make_security_group_rule( + self.fmt, rules)['security_group_rules'][0] + self._check_sg_rule(sg_id, sg_rule) + sg = self._show('security-groups', sg_id)['security_group'] + self._check_sg(sg) + + # Test show rule + sg_rule = self._show('security-group-rules', + sg_rule['id'])['security_group_rule'] + self._check_sg_rule(sg_id, sg_rule) + + # Test delete which will delete all the rules too + self._delete('security-groups', sg_id) + self._sg_should_not_exist(sg_id) + self._sg_rule_should_not_exist(sg_rule['id']) + def test_subnet_lifecycle(self): # Create network. net_resp = self._make_network(self.fmt, 'net1', True) @@ -3177,7 +3290,7 @@ class TestPortBinding(ApicAimTestCase): port_id = port['id'] port = self._bind_port_to_host(port_id, 'host1')['port'] self.assertEqual('ovs', port['binding:vif_type']) - self.assertEqual({'port_filter': True, 'ovs_hybrid_plug': True}, + self.assertEqual({'port_filter': False, 'ovs_hybrid_plug': False}, port['binding:vif_details']) def test_bind_unsupported_vnic_type(self): @@ -5202,6 +5315,70 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork): self.assertEqual(set(['ph1', 'ph2']), set(epg1.physical_domain_names)) + def test_update_sg_rule_with_remote_group_set(self): + # Create network. + net_resp = self._make_network(self.fmt, 'net1', True) + net = net_resp['network'] + + # Create subnet + subnet = self._make_subnet(self.fmt, net_resp, '10.0.1.1', + '10.0.1.0/24')['subnet'] + subnet_id = subnet['id'] + + # Create port on subnet + fixed_ips = [{'subnet_id': subnet_id, 'ip_address': '10.0.1.100'}] + port = self._make_port(self.fmt, net['id'], + fixed_ips=fixed_ips)['port'] + default_sg_id = port['security_groups'][0] + default_sg = self._show('security-groups', + default_sg_id)['security_group'] + for sg_rule in default_sg['security_group_rules']: + if sg_rule['remote_group_id'] and sg_rule['ethertype'] == 'IPv4': + break + tenant_aname = self.name_mapper.project(None, default_sg['tenant_id']) + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, ['10.0.1.100']) + + # add another rule with remote_group_id set + rule1 = self._build_security_group_rule( + default_sg_id, 'ingress', n_constants.PROTO_NAME_TCP, '22', '23', + remote_group_id=default_sg_id, ethertype=n_constants.IPv4) + rules = {'security_group_rules': [rule1['security_group_rule']]} + sg_rule1 = self._make_security_group_rule( + self.fmt, rules)['security_group_rules'][0] + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule1['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, ['10.0.1.100']) + + # delete SG from port + data = {'port': {'security_groups': []}} + port = self._update('ports', port['id'], data)['port'] + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, []) + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule1['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, []) + + # add SG to port + data = {'port': {'security_groups': [default_sg_id]}} + port = self._update('ports', 
port['id'], data)['port'] + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, ['10.0.1.100']) + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule1['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, ['10.0.1.100']) + + self._delete('ports', port['id']) + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, []) + aim_sg_rule = self._get_sg_rule(default_sg_id, sg_rule1['id'], + tenant_aname) + self.assertEqual(aim_sg_rule.remote_ips, []) + def test_mixed_ports_on_network_with_specific_domains(self): aim_ctx = aim_context.AimContext(self.db_session) hd_mapping = aim_infra.HostDomainMapping(host_name='opflex-1', diff --git a/gbpservice/neutron/tests/unit/plugins/ml2plus/test_plugin.py b/gbpservice/neutron/tests/unit/plugins/ml2plus/test_plugin.py index e78dbceb1..69f578123 100644 --- a/gbpservice/neutron/tests/unit/plugins/ml2plus/test_plugin.py +++ b/gbpservice/neutron/tests/unit/plugins/ml2plus/test_plugin.py @@ -68,7 +68,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase): with mock.patch.object(mech_logger.LoggerPlusMechanismDriver, 'ensure_tenant') as et: self._make_network(self.fmt, 'net', True, tenant_id='t1') - et.assert_called_once_with(mock.ANY, 't1') + et.assert_has_calls([mock.call(mock.ANY, 't1')]) + self.assertEqual(2, et.call_count) def test_network_bulk(self): with mock.patch.object(mech_logger.LoggerPlusMechanismDriver, @@ -84,7 +85,7 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase): et.assert_has_calls([mock.call(mock.ANY, 't1'), mock.call(mock.ANY, 't2')], any_order=True) - self.assertEqual(2, et.call_count) + self.assertEqual(4, et.call_count) def test_subnet(self): net = self._make_network(self.fmt, 'net', True) @@ -129,7 +130,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase): with mock.patch.object(mech_logger.LoggerPlusMechanismDriver, 'ensure_tenant') as et: self._make_port(self.fmt, net['network']['id'], tenant_id='t1') - et.assert_called_once_with(mock.ANY, 't1') + et.assert_has_calls([mock.call(mock.ANY, 't1')]) + self.assertEqual(2, et.call_count) def test_port_bulk(self): net = self._make_network(self.fmt, 'net', True) @@ -151,7 +153,7 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase): et.assert_has_calls([mock.call(mock.ANY, 't1'), mock.call(mock.ANY, 't2')], any_order=True) - self.assertEqual(2, et.call_count) + self.assertEqual(4, et.call_count) def test_subnetpool(self): with mock.patch.object(mech_logger.LoggerPlusMechanismDriver, diff --git a/gbpservice/neutron/tests/unit/services/grouppolicy/test_aim_mapping_driver.py b/gbpservice/neutron/tests/unit/services/grouppolicy/test_aim_mapping_driver.py index 0b36f6ced..73130ae87 100644 --- a/gbpservice/neutron/tests/unit/services/grouppolicy/test_aim_mapping_driver.py +++ b/gbpservice/neutron/tests/unit/services/grouppolicy/test_aim_mapping_driver.py @@ -2998,6 +2998,16 @@ class TestPolicyTarget(AIMBaseTestCase): self.assertEqual(1000, mapping['interface_mtu']) self.assertEqual(100, mapping['dhcp_lease_time']) + port = self._plugin.get_port(self._context, pt2['port_id']) + sg_list = [] + for sg_id in port['security_groups']: + sg_list.append( + {'policy-space': mapping['ptg_tenant'], + 'name': sg_id}) + sg_list.append({'policy-space': 'common', + 'name': 'gbp_default'}) + self.assertEqual(sg_list, mapping['security_group']) + def _do_test_gbp_details_no_pt(self, use_as=True, routed=True, pre_vrf=None): # Create port and bind it