[apic_aim] Map neutron resources to AIM, part 5

Complete basic east/west routing. Routing is now enabled on the
bridge domains (BDs) of routed networks, and each such BD is
associated with the address scope's VRF when one applies, or else
with a per-tenant default routed VRF. The selected VRF is exposed via
the extended attributes of the Neutron network and router resources.

Validation of routing topologies will be implemented in a follow-on
patch.

Change-Id: Ic7396e5ebbc466ea5be0028931b31bdbab9833e6
Robert Kukura 2016-09-20 22:51:35 -04:00
parent d5a7488e5f
commit 18a0972197
3 changed files with 477 additions and 119 deletions

@@ -22,6 +22,7 @@ ALIAS = 'cisco-apic-l3'
CONTRACT = 'Contract'
CONTRACT_SUBJECT = 'ContractSubject'
VRF = 'VRF'
EXTENDED_ATTRIBUTES_2_0 = {
l3.ROUTERS: cisco_apic.APIC_ATTRIBUTES

@@ -23,6 +23,7 @@ from neutron._i18n import _LW
from neutron.agent.linux import dhcp
from neutron.common import constants as n_constants
from neutron.common import rpc as n_rpc
from neutron.db import address_scope_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import portbindings
@@ -41,12 +42,18 @@ from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import cache
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import model
LOG = log.getLogger(__name__)
# REVISIT(rkukura): Consider making these APIC name constants
# configurable, although changing them would break an existing
# deployment.
AP_NAME = 'NeutronAP'
ANY_FILTER_NAME = 'AnyFilter'
ANY_FILTER_ENTRY_NAME = 'AnyFilterEntry'
DEFAULT_VRF_NAME = 'DefaultVRF'
UNROUTED_VRF_NAME = 'UnroutedVRF'
COMMON_TENANT_NAME = 'common'
ROUTER_SUBJECT_NAME = 'route'
AGENT_TYPE_DVS = 'DVS agent'
VIF_TYPE_DVS = 'dvs'
PROMISCUOUS_TYPES = [n_constants.DEVICE_OWNER_DHCP,
@@ -54,6 +61,8 @@ PROMISCUOUS_TYPES = [n_constants.DEVICE_OWNER_DHCP,
class ApicMechanismDriver(api_plus.MechanismDriver):
# TODO(rkukura): Derivations of tenant_aname throughout need to
# take sharing into account.
def __init__(self):
LOG.info(_LI("APIC AIM MD __init__"))
@@ -221,33 +230,89 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
self.name_mapper.delete_apic_name(session, id)
def extend_network_dict(self, session, base_model, result):
def extend_network_dict(self, session, network_db, result):
LOG.debug("APIC AIM MD extending dict for network: %s", result)
tenant_id = result['tenant_id']
# REVISIT(rkukura): Consider optimizing this method by
# persisting the network->VRF relationship.
sync_state = cisco_apic.SYNC_SYNCED
dist_names = {}
aim_ctx = aim_context.AimContext(session)
tenant_id = network_db.tenant_id
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = result['id']
name = result['name']
id = network_db.id
name = network_db.name
aname = self.name_mapper.network(session, id, name)
LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
{'id': id, 'name': name, 'aname': aname})
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
name=aname)
aim_bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
name=aname)
dist_names[cisco_apic.BD] = aim_bd.dn
sync_state = self._merge_status(aim_ctx, sync_state, aim_bd)
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
app_profile_name=AP_NAME,
name=aname)
aim_epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
app_profile_name=AP_NAME,
name=aname)
dist_names[cisco_apic.EPG] = aim_epg.dn
sync_state = self._merge_status(aim_ctx, sync_state, aim_epg)
aim_ctx = aim_context.AimContext(session)
sync_state = cisco_apic.SYNC_SYNCED
sync_state = self._merge_status(aim_ctx, sync_state, bd)
sync_state = self._merge_status(aim_ctx, sync_state, epg)
result[cisco_apic.DIST_NAMES] = {cisco_apic.BD: bd.dn,
cisco_apic.EPG: epg.dn}
# See if this network is interfaced to any routers.
rp = (session.query(l3_db.RouterPort).
join(models_v2.Port).
filter(models_v2.Port.network_id == network_db.id,
l3_db.RouterPort.port_type ==
n_constants.DEVICE_OWNER_ROUTER_INTF).first())
if rp:
# A network is constrained to only one subnetpool per
# address family. To support both single and dual stack,
# use the IPv4 address scope's VRF if it exists, and
# otherwise use the IPv6 address scope's VRF. For dual
# stack, the plan is for identity NAT to move IPv6 traffic
# from the IPv4 address scope's VRF to the IPv6 address
# scope's VRF.
#
# REVISIT(rkukura): Ignore subnets that are not attached
# to any router, or maybe just do a query joining
# RouterPorts, Ports, Subnets, SubnetPools and
# AddressScopes.
pool_dbs = {subnet.subnetpool for subnet in network_db.subnets
if subnet.subnetpool}
scope_id = None
for pool_db in pool_dbs:
if pool_db.ip_version == 4:
scope_id = pool_db.address_scope_id
break
elif pool_db.ip_version == 6:
scope_id = pool_db.address_scope_id
if scope_id:
scope_db = self._scope_by_id(session, scope_id)
scope_tenant_id = scope_db.tenant_id
vrf_tenant_aname = self.name_mapper.tenant(session,
scope_tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': scope_tenant_id, 'aname': vrf_tenant_aname})
vrf_aname = self.name_mapper.address_scope(session, scope_id)
LOG.debug("Mapped address_scope_id %(id)s to %(aname)s",
{'id': scope_id, 'aname': vrf_aname})
else:
vrf_tenant_aname = tenant_aname # REVISIT(rkukura)
vrf_aname = DEFAULT_VRF_NAME
else:
vrf_tenant_aname = COMMON_TENANT_NAME
vrf_aname = UNROUTED_VRF_NAME
aim_vrf = aim_resource.VRF(tenant_name=vrf_tenant_aname,
name=vrf_aname)
dist_names[cisco_apic.VRF] = aim_vrf.dn
sync_state = self._merge_status(aim_ctx, sync_state, aim_vrf)
result[cisco_apic.DIST_NAMES] = dist_names
result[cisco_apic.SYNC_STATE] = sync_state
def create_subnet_precommit(self, context):
@@ -262,10 +327,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
session = context._plugin_context.session
network_id = context.current['network_id']
network = self.plugin.get_network(context._plugin_context,
network_id)
network_db = self.plugin._get_network(context._plugin_context,
network_id)
network_tenant_id = network['tenant_id']
network_tenant_id = network_db.tenant_id
network_tenant_aname = self.name_mapper.tenant(session,
network_tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
@@ -298,15 +363,16 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
# Neutron subnets are unmapped from AIM Subnets as they are
# removed from routers.
def extend_subnet_dict(self, session, base_model, result):
def extend_subnet_dict(self, session, subnet_db, result):
LOG.debug("APIC AIM MD extending dict for subnet: %s", result)
sync_state = cisco_apic.SYNC_SYNCED
dist_names = {}
prefix_len = result['cidr'].split('/')[1]
aim_ctx = aim_context.AimContext(session)
network_id = result['network_id']
prefix_len = subnet_db.cidr.split('/')[1]
network_id = subnet_db.network_id
network_db = (session.query(models_v2.Network).
filter_by(id=network_id).
one())
@@ -321,7 +387,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
LOG.debug("Mapped network_id %(id)s to %(aname)s",
{'id': network_id, 'aname': network_aname})
subnet_id = result['id']
subnet_id = subnet_db.id
for intf in self._subnet_router_interfaces(session, subnet_id):
gw_ip = intf.ip_address
gw_ip_mask = gw_ip + '/' + prefix_len
@@ -334,6 +400,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
result[cisco_apic.DIST_NAMES] = dist_names
result[cisco_apic.SYNC_STATE] = sync_state
# TODO(rkukura): Implement update_subnetpool_precommit to handle
# changing subnetpool's address_scope_id.
def create_address_scope_precommit(self, context):
LOG.debug("APIC AIM MD creating address scope: %s", context.current)
@@ -416,28 +485,31 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
self.name_mapper.delete_apic_name(session, id)
def extend_address_scope_dict(self, session, base_model, result):
def extend_address_scope_dict(self, session, scope_db, result):
LOG.debug("APIC AIM MD extending dict for address scope: %s", result)
tenant_id = result['tenant_id']
sync_state = cisco_apic.SYNC_SYNCED
dist_names = {}
aim_ctx = aim_context.AimContext(session)
tenant_id = scope_db.tenant_id
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = result['id']
name = result['name']
id = scope_db.id
name = scope_db.name
aname = self.name_mapper.address_scope(session, id, name)
LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': id, 'name': name, 'aname': aname})
vrf = aim_resource.VRF(tenant_name=tenant_aname,
name=aname)
aim_vrf = aim_resource.VRF(tenant_name=tenant_aname,
name=aname)
dist_names[cisco_apic.VRF] = aim_vrf.dn
sync_state = self._merge_status(aim_ctx, sync_state, aim_vrf)
aim_ctx = aim_context.AimContext(session)
sync_state = cisco_apic.SYNC_SYNCED
sync_state = self._merge_status(aim_ctx, sync_state, vrf)
result[cisco_apic.DIST_NAMES] = {cisco_apic.VRF: vrf.dn}
result[cisco_apic.DIST_NAMES] = dist_names
result[cisco_apic.SYNC_STATE] = sync_state
def create_router(self, context, current):
@@ -544,37 +616,91 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
self.name_mapper.delete_apic_name(session, id)
def extend_router_dict(self, session, base_model, result):
def extend_router_dict(self, session, router_db, result):
LOG.debug("APIC AIM MD extending dict for router: %s", result)
tenant_id = result['tenant_id']
# REVISIT(rkukura): Consider optimizing this method by
# persisting the router->VRF relationship.
sync_state = cisco_apic.SYNC_SYNCED
dist_names = {}
aim_ctx = aim_context.AimContext(session)
tenant_id = router_db.tenant_id
tenant_aname = self.name_mapper.tenant(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': tenant_id, 'aname': tenant_aname})
id = result['id']
name = result['name']
id = router_db.id
name = router_db.name
aname = self.name_mapper.router(session, id, name)
LOG.debug("Mapped router_id %(id)s with name %(name)s to "
"%(aname)s",
{'id': id, 'name': name, 'aname': aname})
contract = aim_resource.Contract(tenant_name=tenant_aname,
name=aname)
aim_contract = aim_resource.Contract(tenant_name=tenant_aname,
name=aname)
dist_names[cisco_apic_l3.CONTRACT] = aim_contract.dn
sync_state = self._merge_status(aim_ctx, sync_state, aim_contract)
subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
contract_name=aname,
name=ROUTER_SUBJECT_NAME)
aim_subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
contract_name=aname,
name=ROUTER_SUBJECT_NAME)
dist_names[cisco_apic_l3.CONTRACT_SUBJECT] = aim_subject.dn
sync_state = self._merge_status(aim_ctx, sync_state, aim_subject)
# TODO(rkukura): Also include VRF and interfaced Subnets.
# See if this router has any attached interfaces.
if (session.query(l3_db.RouterPort).
filter(l3_db.RouterPort.router_id == id,
l3_db.RouterPort.port_type ==
n_constants.DEVICE_OWNER_ROUTER_INTF).
count()):
# Find this router's IPv4 address scope if it has one, or
# else its IPv6 address scope.
scope_id = None
for pool_db in (session.query(models_v2.SubnetPool).
join(models_v2.Subnet,
models_v2.Subnet.subnetpool_id ==
models_v2.SubnetPool.id).
join(models_v2.IPAllocation).
join(models_v2.Port).
join(l3_db.RouterPort).
filter(l3_db.RouterPort.router_id == id,
l3_db.RouterPort.port_type ==
n_constants.DEVICE_OWNER_ROUTER_INTF).
distinct()):
LOG.debug("got pool_db: %s", pool_db)
if pool_db.ip_version == 4:
scope_id = pool_db.address_scope_id
break
elif pool_db.ip_version == 6:
scope_id = pool_db.address_scope_id
if scope_id:
scope_db = self._scope_by_id(session, scope_id)
scope_tenant_id = scope_db.tenant_id
vrf_tenant_aname = self.name_mapper.tenant(session,
scope_tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
{'id': scope_tenant_id, 'aname': vrf_tenant_aname})
aim_ctx = aim_context.AimContext(session)
sync_state = cisco_apic.SYNC_SYNCED
sync_state = self._merge_status(aim_ctx, sync_state, contract)
sync_state = self._merge_status(aim_ctx, sync_state, subject)
result[cisco_apic.DIST_NAMES] = {cisco_apic_l3.CONTRACT: contract.dn,
cisco_apic_l3.CONTRACT_SUBJECT:
subject.dn}
vrf_aname = self.name_mapper.address_scope(session, scope_id)
LOG.debug("Mapped address_scope_id %(id)s to %(aname)s",
{'id': scope_id, 'aname': vrf_aname})
else:
vrf_tenant_aname = tenant_aname # REVISIT(rkukura)
vrf_aname = DEFAULT_VRF_NAME
aim_vrf = aim_resource.VRF(tenant_name=vrf_tenant_aname,
name=vrf_aname)
dist_names[cisco_apic_l3.VRF] = aim_vrf.dn
sync_state = self._merge_status(aim_ctx, sync_state, aim_vrf)
# TODO(rkukura): Also include interfaced Subnets. This
# probably means splitting the above SubnetPool query to first
# query for subnets, add the corresponding AIM subnet, then
# check the subnet's subnetpool's address scope.
result[cisco_apic.DIST_NAMES] = dist_names
result[cisco_apic.SYNC_STATE] = sync_state
def add_router_interface(self, context, router, port, subnets):
@@ -585,9 +711,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
session = context.session
network_id = port['network_id']
network = self.plugin.get_network(context, network_id)
network_db = self.plugin._get_network(context, network_id)
network_tenant_id = network['tenant_id']
network_tenant_id = network_db.tenant_id
network_tenant_aname = self.name_mapper.tenant(session,
network_tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
@@ -641,8 +767,42 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
aim_epg = self.aim.update(aim_ctx, aim_epg,
provided_contract_names=contracts)
# TODO(rkukura): Implement selecting/setting VRF and
# validating topology.
# Find routers with interfaces to this network. The current
# interface is not included, because the RouterPort has not
# yet been added to the DB session.
router_ids = [r[0] for r in
session.query(l3_db.RouterPort.router_id).
join(models_v2.Port).
filter(models_v2.Port.network_id == network_id,
l3_db.RouterPort.port_type ==
n_constants.DEVICE_OWNER_ROUTER_INTF).distinct()]
# TODO(rkukura): Validate topology.
if not router_ids:
# Enable routing for BD and set VRF.
subnetpool_id = subnets[0]['subnetpool_id']
if subnetpool_id:
subnetpool_db = self.plugin._get_subnetpool(context,
subnetpool_id)
address_scope_id = subnetpool_db.address_scope_id
if address_scope_id:
vrf_aname = self.name_mapper.address_scope(
session, address_scope_id)
LOG.debug("Mapped address_scope_id %(id)s to %(aname)s",
{'id': address_scope_id, 'aname': vrf_aname})
else:
vrf_aname = self._get_default_vrf(
aim_ctx, router_tenant_aname).name
else:
vrf_aname = self._get_default_vrf(
aim_ctx, router_tenant_aname).name
aim_bd = aim_resource.BridgeDomain(
tenant_name=network_tenant_aname, name=network_aname)
aim_bd = self.aim.update(aim_ctx, aim_bd, enable_routing=True,
vrf_name=vrf_aname)
def remove_router_interface(self, context, router_id, port_db, subnets):
LOG.debug("APIC AIM MD removing subnets %(subnets)s from router "
@@ -652,9 +812,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
session = context.session
network_id = port_db.network_id
network = self.plugin.get_network(context, network_id)
network_db = self.plugin._get_network(context, network_id)
network_tenant_id = network['tenant_id']
network_tenant_id = network_db.tenant_id
network_tenant_aname = self.name_mapper.tenant(session,
network_tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
@@ -718,8 +878,15 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
aim_epg = self.aim.update(aim_ctx, aim_epg,
provided_contract_names=contracts)
# TODO(rkukura): If network no longer connected to any router,
# make the network's BD unrouted.
# If network is no longer connected to any router, make the
# network's BD unrouted.
if not router_ids:
vrf = self._get_unrouted_vrf(aim_ctx)
aim_bd = aim_resource.BridgeDomain(
tenant_name=network_tenant_aname, name=network_aname)
aim_bd = self.aim.update(aim_ctx, aim_bd, enable_routing=False,
vrf_name=vrf.name)
def bind_port(self, context):
LOG.debug("Attempting to bind port %(port)s on network %(net)s",
@@ -974,6 +1141,11 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
n_constants.DEVICE_OWNER_ROUTER_INTF
))
def _scope_by_id(self, session, scope_id):
return (session.query(address_scope_db.AddressScope).
filter_by(id=scope_id).
one())
def _get_common_tenant(self, aim_ctx):
attrs = aim_resource.Tenant(name=COMMON_TENANT_NAME,
display_name='Common Tenant')
@@ -987,9 +1159,19 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
tenant = self._get_common_tenant(aim_ctx)
attrs = aim_resource.VRF(tenant_name=tenant.name,
name=UNROUTED_VRF_NAME,
display_name='Common Unrouted Context')
display_name='Common Unrouted VRF')
vrf = self.aim.get(aim_ctx, attrs)
if not vrf:
LOG.info(_LI("Creating common unrouted VRF"))
vrf = self.aim.create(aim_ctx, attrs)
return vrf
def _get_default_vrf(self, aim_ctx, tenant_aname):
attrs = aim_resource.VRF(tenant_name=tenant_aname,
name=DEFAULT_VRF_NAME,
display_name='Default Routed VRF')
vrf = self.aim.get(aim_ctx, attrs)
if not vrf:
LOG.info(_LI("Creating default VRF for %s"), tenant_aname)
vrf = self.aim.create(aim_ctx, attrs)
return vrf

@@ -119,7 +119,6 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
self._app_profile_name = 'NeutronAP'
self._tenant_name = self._map_name({'id': 'test-tenant',
'name': 'TestTenantName'})
self._unrouted_vrf_name = 'UnroutedVRF'
def tearDown(self):
engine = db_api.get_engine()
@@ -262,18 +261,7 @@ class TestAimMapping(ApicAimTestCase):
self.assertIsInstance(dist_names, dict)
self.assertNotIn(key, dist_names)
def _check_unrouted_vrf(self):
aim_tenant = self._get_tenant('common')
self.assertEqual('common', aim_tenant.name)
self.assertEqual("Common Tenant", aim_tenant.display_name)
aim_vrf = self._get_vrf(self._unrouted_vrf_name, 'common')
self.assertEqual('common', aim_vrf.tenant_name)
self.assertEqual(self._unrouted_vrf_name, aim_vrf.name)
self.assertEqual('Common Unrouted Context', aim_vrf.display_name)
self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)
def _check_network(self, net, orig_net=None, routers=None):
def _check_network(self, net, orig_net=None, routers=None, scope=None):
orig_net = orig_net or net
# REVISIT(rkukura): Check AIM Tenant here?
@@ -283,14 +271,34 @@ class TestAimMapping(ApicAimTestCase):
router_anames = [self._map_name(router) for router in routers or []]
if routers:
if scope:
vrf_aname = self._map_name(scope)
vrf_dname = scope['name']
vrf_tenant_aname = self._tenant_name
vrf_tenant_dname = None
else:
vrf_aname = 'DefaultVRF'
vrf_dname = 'Default Routed VRF'
vrf_tenant_aname = self._tenant_name
vrf_tenant_dname = None
else:
vrf_aname = 'UnroutedVRF'
vrf_dname = 'Common Unrouted VRF'
vrf_tenant_aname = 'common'
vrf_tenant_dname = 'Common Tenant'
aim_bd = self._get_bd(aname,
self._tenant_name)
self.assertEqual(self._tenant_name, aim_bd.tenant_name)
self.assertEqual(aname, aim_bd.name)
self.assertEqual(net['name'], aim_bd.display_name)
self.assertEqual(self._unrouted_vrf_name, aim_bd.vrf_name)
self.assertEqual(vrf_aname, aim_bd.vrf_name)
self.assertTrue(aim_bd.enable_arp_flood)
self.assertFalse(aim_bd.enable_routing) # TODO(rkukura)
if routers:
self.assertTrue(aim_bd.enable_routing)
else:
self.assertFalse(aim_bd.enable_routing)
self.assertTrue(aim_bd.limit_ip_learn_to_subnets)
self.assertEqual('proxy', aim_bd.l2_unknown_unicast_mode)
self.assertEqual('garp', aim_bd.ep_move_detect_mode)
@@ -310,8 +318,17 @@ class TestAimMapping(ApicAimTestCase):
# physical_domain_names?
self._check_dn(net, aim_epg, 'EndpointGroup')
if not routers:
self._check_unrouted_vrf()
aim_tenant = self._get_tenant(vrf_tenant_aname)
self.assertEqual(vrf_tenant_aname, aim_tenant.name)
self.assertEqual(vrf_tenant_dname, aim_tenant.display_name)
aim_vrf = self._get_vrf(vrf_aname,
vrf_tenant_aname)
self.assertEqual(vrf_tenant_aname, aim_vrf.tenant_name)
self.assertEqual(vrf_aname, aim_vrf.name)
self.assertEqual(vrf_dname, aim_vrf.display_name)
self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)
self._check_dn(net, aim_vrf, 'VRF')
def _check_network_deleted(self, net):
aname = self._map_name(net)
@@ -359,30 +376,31 @@ class TestAimMapping(ApicAimTestCase):
# are in this subnet.
pass
def _check_address_scope(self, a_s, orig_a_s=None):
orig_a_s = orig_a_s or a_s
def _check_address_scope(self, scope, orig_scope=None):
orig_scope = orig_scope or scope
# REVISIT(rkukura): Check AIM Tenant here?
self.assertEqual('test-tenant', a_s['tenant_id'])
self.assertEqual('test-tenant', scope['tenant_id'])
aname = self._map_name(orig_a_s)
aname = self._map_name(orig_scope)
aim_vrf = self._get_vrf(aname,
self._tenant_name)
self.assertEqual(self._tenant_name, aim_vrf.tenant_name)
self.assertEqual(aname, aim_vrf.name)
self.assertEqual(a_s['name'], aim_vrf.display_name)
self.assertEqual(scope['name'], aim_vrf.display_name)
self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)
self._check_dn(a_s, aim_vrf, 'VRF')
self._check_dn(scope, aim_vrf, 'VRF')
def _check_address_scope_deleted(self, a_s):
aname = self._map_name(a_s)
def _check_address_scope_deleted(self, scope):
aname = self._map_name(scope)
self._get_vrf(aname,
self._tenant_name,
should_exist=False)
def _check_router(self, router, orig_router=None):
def _check_router(self, router, orig_router=None, active=False,
scope=None):
orig_router = orig_router or router
# REVISIT(rkukura): Check AIM Tenant here?
@@ -408,7 +426,34 @@ class TestAimMapping(ApicAimTestCase):
self._check_dn(router, aim_subject, 'ContractSubject')
self._check_any_filter()
# REVISIT(rkukura): Anything else to check?
# TODO(rkukura): Once AIM Subnets are exposed on router, pass
# in expected_gw_ips and use instead of this active flag.
if active:
if scope:
vrf_aname = self._map_name(scope)
vrf_dname = scope['name']
vrf_tenant_aname = self._tenant_name
vrf_tenant_dname = None
else:
vrf_aname = 'DefaultVRF'
vrf_dname = 'Default Routed VRF'
vrf_tenant_aname = self._tenant_name
vrf_tenant_dname = None
aim_tenant = self._get_tenant(vrf_tenant_aname)
self.assertEqual(vrf_tenant_aname, aim_tenant.name)
self.assertEqual(vrf_tenant_dname, aim_tenant.display_name)
aim_vrf = self._get_vrf(vrf_aname,
vrf_tenant_aname)
self.assertEqual(vrf_tenant_aname, aim_vrf.tenant_name)
self.assertEqual(vrf_aname, aim_vrf.name)
self.assertEqual(vrf_dname, aim_vrf.display_name)
self.assertEqual('enforced', aim_vrf.policy_enforcement_pref)
self._check_dn(router, aim_vrf, 'VRF')
else:
self._check_no_dn(router, 'VRF')
def _check_router_deleted(self, router):
aname = self._map_name(router)
@@ -418,8 +463,6 @@ class TestAimMapping(ApicAimTestCase):
self._get_subject('route', aname, self._tenant_name,
should_exist=False)
# REVISIT(rkukura): Anything else to check?
def _check_any_filter(self):
aim_filter = self._get_filter('AnyFilter', self._tenant_name)
self.assertEqual(self._tenant_name, aim_filter.tenant_name)
@@ -491,23 +534,23 @@ class TestAimMapping(ApicAimTestCase):
def test_address_scope_lifecycle(self):
# Test create.
orig_a_s = self._make_address_scope(
orig_scope = self._make_address_scope(
self.fmt, 4, name='as1')['address_scope']
a_s_id = orig_a_s['id']
self._check_address_scope(orig_a_s)
scope_id = orig_scope['id']
self._check_address_scope(orig_scope)
# Test show.
a_s = self._show('address-scopes', a_s_id)['address_scope']
self._check_address_scope(a_s)
scope = self._show('address-scopes', scope_id)['address_scope']
self._check_address_scope(scope)
# Test update.
data = {'address_scope': {'name': 'newnameforaddressscope'}}
a_s = self._update('address-scopes', a_s_id, data)['address_scope']
self._check_address_scope(a_s, orig_a_s)
scope = self._update('address-scopes', scope_id, data)['address_scope']
self._check_address_scope(scope, orig_scope)
# Test delete.
self._delete('address-scopes', a_s_id)
self._check_address_scope_deleted(orig_a_s)
self._delete('address-scopes', scope_id)
self._check_address_scope_deleted(orig_scope)
def test_router_lifecycle(self):
# Test create.
@@ -560,7 +603,10 @@ class TestAimMapping(ApicAimTestCase):
info = self.l3_plugin.add_router_interface(
context.get_admin_context(), router_id, {'subnet_id': subnet1_id})
self.assertIn(subnet1_id, info['subnet_ids'])
# REVISIT(rkukura): Get and check router?
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router, active=True)
# Check network.
net = self._show('networks', net_id)['network']
@@ -586,7 +632,10 @@ class TestAimMapping(ApicAimTestCase):
info = self.l3_plugin.add_router_interface(
context.get_admin_context(), router_id, {'port_id': port2_id})
self.assertIn(subnet2_id, info['subnet_ids'])
# REVISIT(rkukura): Get and check router?
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router, active=True)
# Check network.
net = self._show('networks', net_id)['network']
@@ -604,7 +653,10 @@ class TestAimMapping(ApicAimTestCase):
info = self.l3_plugin.remove_router_interface(
context.get_admin_context(), router_id, {'subnet_id': subnet1_id})
self.assertIn(subnet1_id, info['subnet_ids'])
# REVISIT(rkukura): Get and check router?
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router, active=True)
# Check network.
net = self._show('networks', net_id)['network']
@@ -622,7 +674,10 @@ class TestAimMapping(ApicAimTestCase):
info = self.l3_plugin.remove_router_interface(
context.get_admin_context(), router_id, {'port_id': port2_id})
self.assertIn(subnet2_id, info['subnet_ids'])
# REVISIT(rkukura): Get and check router?
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router)
# Check network.
net = self._show('networks', net_id)['network']
@@ -636,28 +691,148 @@ class TestAimMapping(ApicAimTestCase):
subnet = self._show('subnets', subnet2_id)['subnet']
self._check_subnet(subnet, net, [], [gw2_ip])
# def test_create_subnet_with_address_scope(self):
# net_resp = self._make_network(self.fmt, 'net1', True)
# name = self._map_name(net_resp['network'])
# self._check(name, vrf_name='UnroutedVRF')
def test_router_interface_with_address_scope(self):
# REVISIT(rkukura): Currently follows same workflow as above,
# but might be sufficient to test with a single subnet with
# its CIDR allocated from the subnet pool.
# a_s = self._make_address_scope(self.fmt, 4, name='as1')
# a_s_id = a_s['address_scope']['id']
# # vrf_name = self._map_name(a_s['address_scope'])
# Create address scope.
scope = self._make_address_scope(
self.fmt, 4, name='as1')['address_scope']
scope_id = scope['id']
self._check_address_scope(scope)
# sp = self._make_subnetpool(self.fmt, ['10.0.0.0/8'], name='sp1',
# tenant_id='test-tenant', # REVISIT
# address_scope_id=a_s_id,
# default_prefixlen=24)
# sp_id = sp['subnetpool']['id']
# Create subnet pool.
pool = self._make_subnetpool(self.fmt, ['10.0.0.0/8'], name='sp1',
tenant_id='test-tenant', # REVISIT
address_scope_id=scope_id,
default_prefixlen=24)['subnetpool']
pool_id = pool['id']
# self._make_subnet(self.fmt, net_resp, None, None,
# subnetpool_id=sp_id)
# # REVISIT(rkukura): Should the address_scopes VRF be used
# # immediately, or not until connected to a router?
# #
# # self._check(name, vrf_name=vrf_name)
# self._check(name, vrf_name='UnroutedVRF')
# Create router.
router = self._make_router(
self.fmt, 'test-tenant', 'router1')['router']
router_id = router['id']
self._check_router(router, scope=scope)
# Create network.
net_resp = self._make_network(self.fmt, 'net1', True)
net = net_resp['network']
net_id = net['id']
self._check_network(net)
# Create subnet1.
gw1_ip = '10.0.1.1'
subnet = self._make_subnet(
self.fmt, net_resp, gw1_ip, '10.0.1.0/24',
subnetpool_id=pool_id)['subnet']
subnet1_id = subnet['id']
self._check_subnet(subnet, net, [], [gw1_ip])
# Create subnet2.
gw2_ip = '10.0.2.1'
subnet = self._make_subnet(
self.fmt, net_resp, gw2_ip, '10.0.2.0/24',
subnetpool_id=pool_id)['subnet']
subnet2_id = subnet['id']
self._check_subnet(subnet, net, [], [gw2_ip])
# Add subnet1 to router by subnet.
info = self.l3_plugin.add_router_interface(
context.get_admin_context(), router_id, {'subnet_id': subnet1_id})
self.assertIn(subnet1_id, info['subnet_ids'])
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router, active=True, scope=scope)
# Check network.
net = self._show('networks', net_id)['network']
self._check_network(net, routers=[router], scope=scope)
# Check subnet1.
subnet = self._show('subnets', subnet1_id)['subnet']
self._check_subnet(subnet, net, [gw1_ip], [])
# Check subnet2.
subnet = self._show('subnets', subnet2_id)['subnet']
self._check_subnet(subnet, net, [], [gw2_ip])
# Test subnet update.
data = {'subnet': {'name': 'newnameforsubnet'}}
subnet = self._update('subnets', subnet1_id, data)['subnet']
self._check_subnet(subnet, net, [gw1_ip], [])
# Add subnet2 to router by port.
fixed_ips = [{'subnet_id': subnet2_id, 'ip_address': gw2_ip}]
port = self._make_port(self.fmt, net_id, fixed_ips=fixed_ips)['port']
port2_id = port['id']
info = self.l3_plugin.add_router_interface(
context.get_admin_context(), router_id, {'port_id': port2_id})
self.assertIn(subnet2_id, info['subnet_ids'])
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router, active=True, scope=scope)
# Check network.
net = self._show('networks', net_id)['network']
self._check_network(net, routers=[router], scope=scope)
# Check subnet1.
subnet = self._show('subnets', subnet1_id)['subnet']
self._check_subnet(subnet, net, [gw1_ip], [])
# Check subnet2.
subnet = self._show('subnets', subnet2_id)['subnet']
self._check_subnet(subnet, net, [gw2_ip], [])
# Remove subnet1 from router by subnet.
info = self.l3_plugin.remove_router_interface(
context.get_admin_context(), router_id, {'subnet_id': subnet1_id})
self.assertIn(subnet1_id, info['subnet_ids'])
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router, active=True, scope=scope)
# Check network.
net = self._show('networks', net_id)['network']
self._check_network(net, routers=[router], scope=scope)
# Check subnet1.
subnet = self._show('subnets', subnet1_id)['subnet']
self._check_subnet(subnet, net, [], [gw1_ip])
# Check subnet2.
subnet = self._show('subnets', subnet2_id)['subnet']
self._check_subnet(subnet, net, [gw2_ip], [])
# Remove subnet2 from router by port.
info = self.l3_plugin.remove_router_interface(
context.get_admin_context(), router_id, {'port_id': port2_id})
self.assertIn(subnet2_id, info['subnet_ids'])
# Check router.
router = self._show('routers', router_id)['router']
self._check_router(router, scope=scope)
# Check network.
net = self._show('networks', net_id)['network']
self._check_network(net)
# Check subnet1.
subnet = self._show('subnets', subnet1_id)['subnet']
self._check_subnet(subnet, net, [], [gw1_ip])
# Check subnet2.
subnet = self._show('subnets', subnet2_id)['subnet']
self._check_subnet(subnet, net, [], [gw2_ip])
# TODO(rkukura): Test IPv6 and dual stack router interfaces.
# TODO(rkukura): Test synchronization_state attribute for each AIM
# resource.
class TestPortBinding(ApicAimTestCase):