Add zed support

Change-Id: I48b96bfd44330f9e75fa27c5b9ecc63ab95f4ffd
This commit is contained in:
Pulkit vajpayee 2023-09-26 06:47:43 +00:00
parent d0d6094bee
commit 1a825ef921
21 changed files with 228 additions and 185 deletions

View File

@ -1,7 +1,7 @@
- project:
name: x/group-based-policy
templates:
- openstack-python3-yoga-jobs
- openstack-python3-zed-jobs
- publish-to-pypi
# REVISIT: In the jobs below, the required-projects clause is needed on
# the master branch to select the correct version of the requirements
@ -12,25 +12,20 @@
check:
jobs:
- openstack-tox-pep8:
nodeset: ubuntu-bionic
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
- openstack-tox-py36:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
override-checkout: stable/zed
- openstack-tox-py38:
nodeset: ubuntu-bionic
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
override-checkout: stable/zed
- openstack-tox-py39:
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
override-checkout: stable/zed
- legacy-group-based-policy-dsvm-functional:
voting: false
- legacy-group-based-policy-dsvm-aim:
@ -40,22 +35,17 @@
gate:
jobs:
- openstack-tox-pep8:
nodeset: ubuntu-bionic
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
- openstack-tox-py36:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
override-checkout: stable/zed
- openstack-tox-py38:
nodeset: ubuntu-bionic
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
override-checkout: stable/zed
- openstack-tox-py39:
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/yoga
override-checkout: stable/zed

View File

@ -43,11 +43,11 @@ if [[ $ENABLE_NFP = True ]]; then
# Make sure that your public interface is not attached to any bridge.
PUBLIC_INTERFACE=
enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/yoga
enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/yoga
enable_plugin neutron https://opendev.org/openstack/neutron.git stable/yoga
enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/yoga
enable_plugin octavia https://opendev.org/openstack/octavia.git stable/yoga
enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/zed
enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/zed
enable_plugin neutron https://opendev.org/openstack/neutron.git stable/zed
enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/zed
enable_plugin octavia https://opendev.org/openstack/octavia.git stable/zed
fi
fi

View File

@ -557,9 +557,9 @@ class VPNTestData(object):
'''
Prepares a simple resource_data dictionary of respective service
'''
if operation is 'delete':
if operation == 'delete':
return self._delete_ipsec_site_conn_obj()
if operation is 'update':
if operation == 'update':
return self._update_ipsec_site_conn_obj()
if operation == 'create' and service_type == 'ipsec':

View File

@ -39,6 +39,7 @@ is_retriable = api.is_retriable
resource_fields = db_utils.resource_fields
retry_db_errors = api.retry_db_errors
retry_if_session_inactive = api.retry_if_session_inactive
is_session_active = api.is_session_active
CONTEXT_READER = api.CONTEXT_READER
CONTEXT_WRITER = api.CONTEXT_WRITER

View File

@ -1109,7 +1109,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
# than size of the ip_pool's subnet.
if netaddr.IPNetwork(pool).version == 4:
ip_pool_prefix_length = netaddr.IPNetwork(pool).prefixlen
if(ip_pool_prefix_length > new_prefix_length):
if (ip_pool_prefix_length > new_prefix_length):
raise gpolicy.SubnetPrefixLengthExceedsIpPool(
ip_pool=pool, subnet_size=new_prefix_length)

View File

@ -44,10 +44,9 @@ from neutron.db import rbac_db_models
from neutron.db import segments_db
from neutron.plugins.ml2 import db as n_db
from neutron.plugins.ml2 import driver_context as ml2_context
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
constants as a_const)
from neutron.plugins.ml2 import models
from neutron.services.trunk import exceptions as trunk_exc
from neutron_lib.plugins.ml2 import ovs_constants as a_const
from neutron_lib.agent import topics as n_topics
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import portbindings
@ -7264,7 +7263,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
bd.display_name = dname
bd.vrf_name = vrf.name
bd.enable_arp_flood = True
bd.enable_routing = len(router_contract_names) is not 0
bd.enable_routing = len(router_contract_names) != 0
bd.limit_ip_learn_to_subnets = True
bd.ep_move_detect_mode = 'garp'
bd.l3out_names = []

View File

@ -180,7 +180,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
def _ml2_md_extend_network_dict(result, netdb):
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(netdb)
if session and session.is_active:
if session and db_api.is_session_active(session):
# REVISIT: Check if transaction begin is still
# required here, and if so, if reader pattern
# can be used instead (will require getting the
@ -201,7 +201,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
netdb = results[0][1] if results else None
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(netdb)
if session and session.is_active:
if session and db_api.is_session_active(session):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_network_dict_bulk(session,
results)
@ -214,7 +214,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
def _ml2_md_extend_port_dict(result, portdb):
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(portdb)
if session and session.is_active:
if session and db_api.is_session_active(session):
# REVISIT: Check if transaction begin is still
# required here, and if so, if reader pattern
# can be used instead (will require getting the
@ -235,7 +235,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
portdb = results[0][1] if results else None
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(portdb)
if session and session.is_active:
if session and db_api.is_session_active(session):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_port_dict_bulk(session,
results)
@ -248,7 +248,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
def _ml2_md_extend_subnet_dict(result, subnetdb):
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(subnetdb)
if session and session.is_active:
if session and db_api.is_session_active(session):
# REVISIT: Check if transaction begin is still
# required here, and if so, if reader pattern
# can be used instead (will require getting the
@ -269,7 +269,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
subnetdb = results[0][1] if results else None
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(subnetdb)
if session and session.is_active:
if session and db_api.is_session_active(session):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_subnet_dict_bulk(session,
results)
@ -282,7 +282,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
def _ml2_md_extend_subnetpool_dict(result, subnetpooldb):
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(subnetpooldb)
if session and session.is_active:
if session and db_api.is_session_active(session):
# REVISIT: Check if transaction begin is still
# required here, and if so, if reader pattern
# can be used instead (will require getting the
@ -303,7 +303,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
subnetpooldb = results[0][1] if results else None
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(subnetpooldb)
if session and session.is_active:
if session and db_api.is_session_active(session):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_subnetpool_dict_bulk(session,
results)
@ -317,7 +317,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
def _ml2_md_extend_address_scope_dict(result, address_scope):
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(address_scope)
if session and session.is_active:
if session and db_api.is_session_active(session):
# REVISIT: Check if transaction begin is still
# required here, and if so, if reader pattern
# can be used instead (will require getting the
@ -338,7 +338,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
address_scope = results[0][1] if results else None
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(address_scope)
if session and session.is_active:
if session and db_api.is_session_active(session):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_address_scope_dict_bulk(
session, results)

View File

@ -1755,93 +1755,102 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
# Infra Services' FilterEntries and attributes
infra_entries = alib.get_service_contract_filter_entries()
# ARP FilterEntry and attributes
arp_entries = alib.get_arp_filter_entry()
contracts = {alib.SERVICE_PREFIX: infra_entries,
alib.IMPLICIT_PREFIX: arp_entries}
# Infra Services' FilterEntries and attributes
infra_entries = alib.get_service_contract_filter_entries()
# ARP FilterEntry and attributes
arp_entries = alib.get_arp_filter_entry()
contracts = {alib.SERVICE_PREFIX: infra_entries,
alib.IMPLICIT_PREFIX: arp_entries}
for contract_name_prefix, entries in six.iteritems(contracts):
contract_name = self.name_mapper.l3_policy(
session, l3p['id'], prefix=contract_name_prefix)
# Create Contract (one per l3_policy)
aim_contract = aim_resource.Contract(
tenant_name=self._aim_tenant_name(
session, l3p['tenant_id'], aim_resource.Contract),
name=contract_name, display_name=contract_name)
if get:
aim_resources = {}
aim_resources[FILTERS] = []
aim_resources[FILTER_ENTRIES] = []
aim_resources[CONTRACT_SUBJECTS] = []
contract_fetched = self.aim.get(aim_ctx, aim_contract)
aim_resources[CONTRACTS] = [contract_fetched]
else:
if create:
self.aim.create(aim_ctx, aim_contract, overwrite=True)
if not delete and epg_dn:
aim_epg = self.aim.get(
aim_ctx, aim_resource.EndpointGroup.from_dn(epg_dn))
# Add Contracts to the default EPG
if contract_name_prefix == alib.IMPLICIT_PREFIX:
# Default EPG provides and consumes ARP Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name],
consumed_contracts=[contract_name])
else:
# Default EPG provides Infra Services' Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name])
continue
filter_names = []
for k, v in six.iteritems(entries):
filter_name = self.name_mapper.l3_policy(
session, l3p['id'],
prefix=''.join([contract_name_prefix, k, '-']))
# Create Filter (one per l3_policy)
aim_filter = aim_resource.Filter(
for contract_name_prefix, entries in six.iteritems(contracts):
contract_name = self.name_mapper.l3_policy(
session, l3p['id'], prefix=contract_name_prefix)
# Create Contract (one per l3_policy)
aim_contract = aim_resource.Contract(
tenant_name=self._aim_tenant_name(
session, l3p['tenant_id'], aim_resource.Filter),
name=filter_name, display_name=filter_name)
session, l3p['tenant_id'], aim_resource.Contract),
name=contract_name, display_name=contract_name)
if get:
filter_fetched = self.aim.get(aim_ctx, aim_filter)
aim_resources[FILTERS].append(filter_fetched)
aim_filter_entry = self._aim_filter_entry(
session, aim_filter, k,
alib.map_to_aim_filter_entry(v))
entry_fetched = self.aim.get(aim_ctx, aim_filter_entry)
aim_resources[FILTER_ENTRIES].append(entry_fetched)
aim_resources = {}
aim_resources[FILTERS] = []
aim_resources[FILTER_ENTRIES] = []
aim_resources[CONTRACT_SUBJECTS] = []
contract_fetched = self.aim.get(aim_ctx, aim_contract)
aim_resources[CONTRACTS] = [contract_fetched]
else:
if create:
self.aim.create(aim_ctx, aim_filter, overwrite=True)
# Create FilterEntries (one per l3_policy) and
# associate with Filter
self._create_aim_filter_entry(
session, aim_ctx, aim_filter, k, v, overwrite=True)
filter_names.append(aim_filter.name)
self.aim.create(aim_ctx, aim_contract, overwrite=True)
if not delete and epg_dn:
aim_epg = self.aim.get(
aim_ctx, aim_resource.EndpointGroup.from_dn(
epg_dn))
# Add Contracts to the default EPG
if contract_name_prefix == alib.IMPLICIT_PREFIX:
# Default EPG provides and consumes ARP Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name],
consumed_contracts=[contract_name])
else:
# Default EPG provides Infra Services' Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name])
continue
filter_names = []
for k, v in six.iteritems(entries):
filter_name = self.name_mapper.l3_policy(
session, l3p['id'],
prefix=''.join([contract_name_prefix, k, '-']))
# Create Filter (one per l3_policy)
aim_filter = aim_resource.Filter(
tenant_name=self._aim_tenant_name(
session, l3p['tenant_id'],
aim_resource.Filter),
name=filter_name, display_name=filter_name)
if get:
filter_fetched = self.aim.get(aim_ctx, aim_filter)
aim_resources[FILTERS].append(filter_fetched)
aim_filter_entry = self._aim_filter_entry(
session, aim_filter, k,
alib.map_to_aim_filter_entry(v))
entry_fetched = self.aim.get(aim_ctx, aim_filter_entry)
aim_resources[FILTER_ENTRIES].append(entry_fetched)
else:
if create:
self.aim.create(aim_ctx, aim_filter,
overwrite=True)
# Create FilterEntries (one per l3_policy) and
# associate with Filter
self._create_aim_filter_entry(
session, aim_ctx, aim_filter, k, v,
overwrite=True)
filter_names.append(
aim_filter.name)
if delete:
self._delete_aim_filter_entries(aim_ctx,
aim_filter)
self.aim.delete(aim_ctx, aim_filter)
if get:
aim_contract_subject = self._aim_contract_subject(
aim_contract)
subject_fetched = self.aim.get(aim_ctx,
aim_contract_subject)
aim_resources[CONTRACT_SUBJECTS].append(subject_fetched)
return aim_resources
else:
if create:
# Create ContractSubject (one per l3_policy) with
# relevant Filters, and associate with Contract
self._populate_aim_contract_subject_by_filters(
context, aim_contract, bi_filters=filter_names)
if delete:
self._delete_aim_filter_entries(aim_ctx, aim_filter)
self.aim.delete(aim_ctx, aim_filter)
if get:
aim_contract_subject = self._aim_contract_subject(aim_contract)
subject_fetched = self.aim.get(aim_ctx, aim_contract_subject)
aim_resources[CONTRACT_SUBJECTS].append(subject_fetched)
return aim_resources
else:
if create:
# Create ContractSubject (one per l3_policy) with relevant
# Filters, and associate with Contract
self._populate_aim_contract_subject_by_filters(
context, aim_contract, bi_filters=filter_names)
if delete:
self._delete_aim_contract_subject(aim_ctx, aim_contract)
self.aim.delete(aim_ctx, aim_contract)
self._delete_aim_contract_subject(aim_ctx,
aim_contract)
self.aim.delete(aim_ctx, aim_contract)
def _add_implicit_svc_contracts_to_epg(self, context, l2p, aim_epg):
session = context._plugin_context.session

View File

@ -306,8 +306,8 @@ class ValidationManager(object):
expected_instances[key] = instance
def query_db_instances(self, entities, args, filters):
assert(1 == len(entities))
assert(0 == len(args))
assert 1 == len(entities)
assert 0 == len(args)
instance_class = entities[0]
expected_instances = self._expected_db_instances[instance_class]
primary_keys = self._db_instance_primary_keys[instance_class]
@ -571,9 +571,9 @@ class ValidationAimStore(aim_store.AimStore):
def query(self, db_obj_type, resource_class, in_=None, notin_=None,
order_by=None, lock_update=False, **filters):
assert(in_ is None)
assert(notin_ is None)
assert(order_by is None)
assert in_ is None
assert notin_ is None
assert order_by is None
if filters:
if (set(filters.keys()) ==
set(resource_class.identity_attributes.keys())):
@ -590,18 +590,18 @@ class ValidationAimStore(aim_store.AimStore):
def count(self, db_obj_type, resource_class, in_=None, notin_=None,
**filters):
assert(False)
assert False
def delete_all(self, db_obj_type, resource_class, in_=None, notin_=None,
**filters):
assert(False)
assert False
def from_attr(self, db_obj, resource_class, attribute_dict):
for k, v in list(attribute_dict.items()):
setattr(db_obj, k, v)
def to_attr(self, resource_class, db_obj):
assert(False)
assert False
def make_resource(self, cls, db_obj, include_aim_id=False):
return copy.deepcopy(db_obj)

View File

@ -2772,7 +2772,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
context._plugin_context, ptg_id)
provided_policy_rule_sets = ptg['provided_policy_rule_sets']
consumed_policy_rule_sets = ptg['consumed_policy_rule_sets']
return(self._generate_list_sg_from_policy_rule_set_list(
return (self._generate_list_sg_from_policy_rule_set_list(
context, provided_policy_rule_sets, consumed_policy_rule_sets))
def _generate_list_sg_from_policy_rule_set_list(self, context,

View File

@ -12,7 +12,9 @@
# limitations under the License.
import copy
from importlib import util as imp_util
import os
import sys
from unittest import mock
@ -25,6 +27,7 @@ from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron_lib import context
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import uuidutils
import six
@ -40,6 +43,8 @@ from networking_sfc.extensions import flowclassifier
from networking_sfc.extensions import sfc
LOG = logging.getLogger(__name__)
JSON_FORMAT = 'json'
_uuid = uuidutils.generate_uuid
TESTDIR = os.path.dirname(os.path.abspath(gbpservice.neutron.tests.__file__))
@ -382,6 +387,39 @@ class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
plugins.get('CORE').__dict__['_aliases'].remove(
'dhcp_agent_scheduler')
def _load_all_extensions_from_path(self, path):
# Sorting the extension list makes the order in which they
# are loaded predictable across a cluster of load-balanced
# Neutron Servers
for f in sorted(os.listdir(path)):
try:
LOG.debug('Loading extension file: %s', f)
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_path = os.path.join(path, f)
if file_ext.lower() == '.py' and not mod_name.startswith('_'):
spec = imp_util.spec_from_file_location(mod_name, ext_path)
mod = imp_util.module_from_spec(spec)
if (mod_name == 'flowclassifier' or mod_name == 'sfc'):
sys.modules[mod_name] = mod
spec.loader.exec_module(mod)
ext_name = mod_name.capitalize()
new_ext_class = getattr(mod, ext_name, None)
if not new_ext_class:
LOG.warning('Did not find expected name '
'"%(ext_name)s" in %(file)s',
{'ext_name': ext_name,
'file': ext_path})
continue
new_ext = new_ext_class()
self.add_extension(new_ext)
except Exception as exception:
LOG.warning("Extension file %(f)s wasn't loaded due to "
"%(exception)s",
{'f': f, 'exception': exception})
extensions.ExtensionManager._load_all_extensions_from_path = (
_load_all_extensions_from_path)
def tearDown(self):
self._unset_notification_mocks()
registry.clear()

View File

@ -100,7 +100,7 @@ class CommonLibraryTest(unittest2.TestCase):
return MockResponse()
def _uget(self, path):
return(200, "")
return (200, "")
def _post(self, path, body, method_type):
return (200, "")

View File

@ -47,25 +47,25 @@ class TestExtensionDriver(TestExtensionDriverBase):
self.address_scope_extension = 'Test_AddressScope_Extension'
def _check_create(self, session, data, result):
assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
assert(isinstance(data, dict))
assert('id' not in data)
assert(isinstance(result, dict))
assert(result['id'] is not None)
assert isinstance(session, oslo_db.sqlalchemy.session.Session)
assert isinstance(data, dict)
assert 'id' not in data
assert isinstance(result, dict)
assert result['id'] is not None
def _check_update(self, session, data, result):
assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
assert(isinstance(data, dict))
assert(isinstance(result, dict))
assert(result['id'] is not None)
assert isinstance(session, oslo_db.sqlalchemy.session.Session)
assert isinstance(data, dict)
assert isinstance(result, dict)
assert result['id'] is not None
def _check_extend(self, session, result, db_entry,
expected_db_entry_class):
assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
assert(isinstance(result, dict))
assert(result['id'] is not None)
assert(isinstance(db_entry, expected_db_entry_class))
assert(db_entry.id == result['id'])
assert isinstance(session, oslo_db.sqlalchemy.session.Session)
assert isinstance(result, dict)
assert result['id'] is not None
assert isinstance(db_entry, expected_db_entry_class)
assert db_entry.id == result['id']
def process_create_subnetpool(self, plugin_context, data, result):
session = plugin_context.session

View File

@ -597,7 +597,7 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
def port_notif_verifier(self):
def verify(plugin_context, port):
self.assertFalse(plugin_context.session.is_active)
self.assertFalse(db_api.is_session_active(plugin_context.session))
return mock.DEFAULT
return verify
@ -2455,32 +2455,32 @@ class TestAimMapping(ApicAimTestCase):
FakeProjectManager.set('test-tenant-update',
'new-tenant', 'bad\"\'descr')
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
assert(self.driver.aim.update.call_args_list[0] == mock.call(
mock.ANY, tenant, display_name='new-tenant', descr='bad__descr'))
assert self.driver.aim.update.call_args_list[0] == mock.call(
mock.ANY, tenant, display_name='new-tenant', descr='bad__descr')
# Test project.updated event. Update only the project name.
FakeProjectManager.set('test-tenant-update', 'name123', 'new-descr')
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
assert(self.driver.aim.update.call_args_list[1] == mock.call(
mock.ANY, tenant, display_name='name123', descr='new-descr'))
assert self.driver.aim.update.call_args_list[1] == mock.call(
mock.ANY, tenant, display_name='name123', descr='new-descr')
# Test project.updated event. Update only the project description.
FakeProjectManager.set('test-tenant-update', 'name123', 'descr123')
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
assert(self.driver.aim.update.call_args_list[2] == mock.call(
mock.ANY, tenant, display_name='name123', descr='descr123'))
assert self.driver.aim.update.call_args_list[2] == mock.call(
mock.ANY, tenant, display_name='name123', descr='descr123')
# Test project.updated event. Clear the project description.
FakeProjectManager.set('test-tenant-update', 'name123', '')
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
assert(self.driver.aim.update.call_args_list[3] == mock.call(
mock.ANY, tenant, display_name='name123', descr=''))
assert self.driver.aim.update.call_args_list[3] == mock.call(
mock.ANY, tenant, display_name='name123', descr='')
# Test project.updated event. Update project name and description.
FakeProjectManager.set('test-tenant-update', 'prj1', 'prj2')
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
assert(self.driver.aim.update.call_args_list[4] == mock.call(
mock.ANY, tenant, display_name='prj1', descr='prj2'))
assert self.driver.aim.update.call_args_list[4] == mock.call(
mock.ANY, tenant, display_name='prj1', descr='prj2')
# Test project.updated event. Add new tenant.
FakeProjectManager.set('test-tenant-new', 'add-tenant', 'add-descr')
@ -2490,13 +2490,13 @@ class TestAimMapping(ApicAimTestCase):
tenant = aim_resource.Tenant(name=tenant_name)
payload['resource_info'] = 'test-tenant-new'
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
assert(self.driver.aim.update.call_args_list[5] == mock.call(
mock.ANY, tenant, display_name='add-tenant', descr='add-descr'))
assert self.driver.aim.update.call_args_list[5] == mock.call(
mock.ANY, tenant, display_name='add-tenant', descr='add-descr')
# Test project.updated event. No change in name or description.
payload['resource_info'] = 'test-tenant-new'
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
assert(len(self.driver.aim.update.call_args_list) == 6)
assert len(self.driver.aim.update.call_args_list) == 6
# Test with project.deleted event.
payload['resource_info'] = 'test-tenant'
@ -9367,7 +9367,7 @@ class TestExternalConnectivityBase(object):
self._make_ext_network('net1',
dn=self.dn_t1_l1_n1,
cidrs=['20.10.0.0/16', '4.4.4.0/24'])
if self.nat_type is not 'distributed' and self.nat_type is not 'edge':
if self.nat_type != 'distributed' and self.nat_type != 'edge':
vmm_domains = []
self.mock_ns.create_l3outside.assert_called_once_with(
mock.ANY,

View File

@ -1911,9 +1911,10 @@ class TestL2PolicyWithAutoPTG(TestL2PolicyBase):
self._test_multiple_l2p_post_create()
def _test_epg_policy_enforcement_attr(self, ptg):
aim_epg_name = self.driver.apic_epg_name_for_policy_target_group(
db_api.get_writer_session(), ptg['id'],
context=self._neutron_context)
session = db_api.get_writer_session()
with session.begin():
aim_epg_name = self.driver.apic_epg_name_for_policy_target_group(
session, ptg['id'], context=self._neutron_context)
aim_epg = self.aim_mgr.find(
self._aim_context, aim_resource.EndpointGroup,
name=aim_epg_name)[0]
@ -2610,7 +2611,7 @@ class TestPolicyTargetGroupIpv6(TestPolicyTargetGroupIpv4):
'subnetpools': []}}
def _family_specific_subnet_validation(self, subnet):
if subnet['ip_version'] is 6:
if subnet['ip_version'] == 6:
self.assertEqual(subnet['ipv6_ra_mode'], 'slaac')
self.assertEqual(subnet['ipv6_address_mode'], 'slaac')

View File

@ -38,7 +38,6 @@ class CommonNeutronBaseTestCase(test_plugin.GroupPolicyPluginTestBase):
config.cfg.CONF.set_override('policy_drivers',
policy_drivers,
group='group_policy')
config.cfg.CONF.set_override('allow_overlapping_ips', True)
super(CommonNeutronBaseTestCase, self).setUp(core_plugin=core_plugin,
l3_plugin=l3_plugin,
ml2_options=ml2_options,

View File

@ -77,7 +77,6 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
config.cfg.CONF.set_override('policy_drivers',
policy_drivers,
group='group_policy')
config.cfg.CONF.set_override('allow_overlapping_ips', True)
ml2_opts = ml2_options or {
'mechanism_drivers': ['openvswitch'],

View File

@ -77,7 +77,7 @@ class TestServerEp(manager.NetworkScenarioTest):
elif success_msg in output and self.deleted_rule is True:
LOG.debug("Waiting for rule to be deleted in fabric")
retry -= 1
if retry is 0:
if retry == 0:
LOG.error("Error - Still Pinging even after "
"deleting the security group rule")
break
@ -87,13 +87,13 @@ class TestServerEp(manager.NetworkScenarioTest):
self.deleted_rule = self._delete_security_group_rule(
self.servers)
retry -= 1
if retry is 0:
if retry == 0:
LOG.error("Security rule was not deleted in time")
break
else:
LOG.debug("Waiting for Server to get Active")
retry -= 1
if retry is 0:
if retry == 0:
LOG.error("Error - %s", (output,))
break

View File

@ -14,8 +14,8 @@ classifier =
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
[files]
packages =

View File

@ -1,25 +1,27 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=1.1.0,<1.2.0 # Apache-2.0
hacking>=6.0.1 # Apache-2.0
# Since version numbers for these are specified in
# https://releases.openstack.org/constraints/upper/yoga, they cannot be
# https://releases.openstack.org/constraints/upper/zed, they cannot be
# referenced as GIT URLs.
neutron
python-heatclient
python-keystoneclient
-e git+https://opendev.org/openstack/networking-sfc.git@stable/yoga#egg=networking-sfc
-e git+https://opendev.org/openstack/networking-sfc.git@stable/zed#egg=networking-sfc
-e git+https://github.com/noironetworks/apicapi.git@master#egg=apicapi
-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/yoga#egg=neutron-opflex-agent
-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/zed#egg=neutron-opflex-agent
-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/yoga#egg=python-group-based-policy-client
-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/zed#egg=python-group-based-policy-client
coverage!=4.4,>=4.0 # Apache-2.0
flake8-import-order==0.12 # LGPLv3
flake8>=5.0.1
pyflakes>=2.5.0
sphinx!=1.6.6,>=1.6.2 # BSD
oslosphinx>=4.7.0 # Apache-2.0
testtools>=2.2.0 # MIT

19
tox.ini
View File

@ -1,11 +1,11 @@
[tox]
envlist = py36,py37,pep8,py38
minversion = 3.2.0
envlist = py38,py39,pep8
minversion = 3.18.0
skipsdist = False
ignore_basepython_conflict = True
[testenv]
basepython = python3
basepython = {env:TOX_PYTHON:python3}
setenv = VIRTUAL_ENV={envdir}
OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true}
OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true}
@ -24,7 +24,7 @@ usedevelop = True
install_command =
pip install {opts} {packages}
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/yoga}
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/zed}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = sh
@ -37,7 +37,7 @@ commands = stestr run {posargs}
setenv = VIRTUAL_ENV={envdir}
[testenv:functional]
basepython = python3
basepython = {env:TOX_PYTHON:python3}
setenv = OS_TEST_PATH=./gbpservice/tests/functional
OS_SUDO_TESTING=1
@ -53,7 +53,7 @@ sitepackages=True
sitepackages = True
[testenv:pep8]
basepython = python3
basepython = {env:TOX_PYTHON:python3}
commands =
flake8
gbp-db-manage check_migration
@ -83,16 +83,21 @@ commands = python setup.py build_sphinx
# E402 module level import not at top of file - REVISIT
# E731 do not assign a lambda expression, use a def - REVISIT
# E741 ambiguous variable name - REVISIT
# F601 dictionary key name repeated with different values - REVISIT
# F811 redefinition of unused variable - REVISIT
# F812 list comprehension redefines name from line - REVISIT
# H214: Use assertIn/NotIn(A, B) rather than assertTrue/False(A in/not in B) when checking collection contents - REVISIT
# H216 The unittest.mock module should be used rather than the third party mock package unless actually needed
# H237 module is removed in Python 3 - REVISIT
# H301: one import per line - REVISIT
# H306: imports not in alphabetical order (time, os) - REVISIT
# H401 docstring should not start with a space - REVISIT
# H404 multi line docstring should start with a summary - REVISIT
# H405 multi line docstring summary not separated with an empty line
# N530 direct neutron imports not allowed
# W504 line break after binary operator - REVISIT
# W605 invalid escape sequence - REVISIT
ignore = E125,E126,E128,E129,E402,E731,E741,F811,F812,H237,H401,H404,H405,N530,W504,W605
ignore = E125,E126,E128,E129,E402,E731,E741,F601,F811,F812,H214,H216,H237,H301,H306,H401,H404,H405,N530,W504,W605
# H106: Dont put vim configuration in source files
# H203: Use assertIs(Not)None to check for None
# H204: Use assert(Not)Equal to check for equality