Bulk extension support for subnets
Add support for bulk extension handling for subnets in AIM: changes to the extension driver and the mechanism driver enable bulk extension processing for subnets. Change-Id: I3b2ba7ba87499f7fed49d5d0532c9ca8a089d19c
This commit is contained in:
parent
474d056175
commit
dfdf67a51b
|
@ -206,6 +206,21 @@ class ApicExtensionDriver(api_plus.ExtensionDriver,
|
|||
else:
|
||||
LOG.exception("APIC AIM extend_subnet_dict failed")
|
||||
|
||||
def extend_subnet_dict_bulk(self, session, results):
    """Extend a batch of subnet dicts with APIC extension attributes.

    Delegates the AIM mapping work to the mechanism driver's bulk
    extension, then annotates each result dict with its SNAT
    host-pool flag from the extension DB. Retriable DB exceptions
    are logged at debug level and re-raised; anything else is
    logged as a failure and re-raised.
    """
    try:
        self._md.extend_subnet_dict_bulk(session, results)
        for res, db_obj in results:
            ext = self.get_subnet_extn_db(session, db_obj['id'])
            snat = ext.get(cisco_apic.SNAT_HOST_POOL, False)
            res[cisco_apic.SNAT_HOST_POOL] = snat
    except Exception as e:
        with excutils.save_and_reraise_exception():
            if db_api.is_retriable(e):
                LOG.debug("APIC AIM extend_subnet_dict_bulk got retriable "
                          "exception: %s", type(e))
            else:
                LOG.exception("APIC AIM extend_subnet_dict_bulk failed")
||||
def process_create_subnet(self, plugin_context, data, result):
|
||||
res_dict = {cisco_apic.SNAT_HOST_POOL:
|
||||
data.get(cisco_apic.SNAT_HOST_POOL, False)}
|
||||
|
|
|
@ -1104,47 +1104,71 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
|
|||
# Non-external neutron subnets are unmapped from AIM Subnets as
|
||||
# they are removed from routers.
|
||||
|
||||
def extend_subnet_dict_bulk(self, session, results):
    """Extend many subnet dicts with AIM mapping info in one pass.

    ``results`` is a list of (api_dict, subnet_db) pairs. Every dict
    gets DIST_NAMES and SYNC_STATE populated. All referenced networks
    are loaded with a single query, and the AIM statuses of all
    collected resources are fetched with a single get_statuses call.
    """
    LOG.debug("APIC AIM MD Bulk extending dict for subnet: %s", results)

    aim_ctx = aim_context.AimContext(session)
    pending_aim_resources = []
    dict_by_dn = {}

    # Load every network referenced by the batch in one query.
    # TODO(sridar): Baked query - evaluate across branches,
    # with in_
    net_ids = list({res[0]['network_id'] for res in results})
    networks_db = (session.query(models_v2.Network).
                   filter(models_v2.Network.id.in_(net_ids)).all())
    net_map = {net['id']: net for net in networks_db}

    for res_dict, subnet_db in results:
        res_dict[cisco_apic.SYNC_STATE] = cisco_apic.SYNC_NOT_APPLICABLE
        dist_names = {}
        res_dict[cisco_apic.DIST_NAMES] = dist_names

        network_db = net_map.get(res_dict['network_id'])
        # TODO(sridar): Not sure if this can happen - validate.
        if not network_db:
            LOG.warning("Network not found in extend_subnet_dict_bulk "
                        "for %s", subnet_db)
            continue

        if network_db.external is not None:
            # External network: the subnet maps to an AIM Subnet on
            # the NAT strategy's L3Out, if one exists.
            l3out, ext_net, ns = self._get_aim_nat_strategy_db(
                session, network_db)
            if ext_net:
                sub = ns.get_subnet(
                    aim_ctx, l3out,
                    self._subnet_to_gw_ip_mask(subnet_db))
                if sub:
                    dist_names[cisco_apic.SUBNET] = sub.dn
                    dict_by_dn[sub.dn] = res_dict
                    pending_aim_resources.append(sub)
        elif network_db.aim_mapping and network_db.aim_mapping.bd_name:
            # Mapped network: one AIM Subnet per router gateway IP
            # on the network's BD.
            bd = self._get_network_bd(network_db.aim_mapping)
            for gw_ip, router_id in self._subnet_router_ips(
                    session, subnet_db.id):
                sn = self._map_subnet(subnet_db, gw_ip, bd)
                dist_names[gw_ip] = sn.dn
                dict_by_dn[sn.dn] = res_dict
                pending_aim_resources.append(sn)

    # Merge each resource's AIM status into its owning subnet's
    # sync state.
    for status in self.aim.get_statuses(aim_ctx, pending_aim_resources):
        res_dict = dict_by_dn.get(status.resource_dn, {})
        res_dict[cisco_apic.SYNC_STATE] = self._merge_status(
            aim_ctx,
            res_dict.get(cisco_apic.SYNC_STATE,
                         cisco_apic.SYNC_NOT_APPLICABLE),
            None, status=status)
||||
def extend_subnet_dict(self, session, subnet_db, result):
|
||||
if result.get(api_plus.BULK_EXTENDED):
|
||||
return
|
||||
|
||||
LOG.debug("APIC AIM MD extending dict for subnet: %s", result)
|
||||
|
||||
sync_state = cisco_apic.SYNC_NOT_APPLICABLE
|
||||
dist_names = {}
|
||||
aim_ctx = aim_context.AimContext(session)
|
||||
|
||||
query = BAKERY(lambda s: s.query(
|
||||
models_v2.Network))
|
||||
query += lambda q: q.filter_by(
|
||||
id=sa.bindparam('network_id'))
|
||||
network_db = query(session).params(
|
||||
network_id=subnet_db.network_id).one_or_none()
|
||||
|
||||
if not network_db:
|
||||
LOG.warning("Network not found in extend_subnet_dict for %s",
|
||||
result)
|
||||
return
|
||||
|
||||
if network_db.external is not None:
|
||||
l3out, ext_net, ns = self._get_aim_nat_strategy_db(session,
|
||||
network_db)
|
||||
if ext_net:
|
||||
sub = ns.get_subnet(aim_ctx, l3out,
|
||||
self._subnet_to_gw_ip_mask(subnet_db))
|
||||
if sub:
|
||||
dist_names[cisco_apic.SUBNET] = sub.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, sub)
|
||||
elif network_db.aim_mapping and network_db.aim_mapping.bd_name:
|
||||
bd = self._get_network_bd(network_db.aim_mapping)
|
||||
|
||||
for gw_ip, router_id in self._subnet_router_ips(session,
|
||||
subnet_db.id):
|
||||
sn = self._map_subnet(subnet_db, gw_ip, bd)
|
||||
dist_names[gw_ip] = sn.dn
|
||||
sync_state = self._merge_status(aim_ctx, sync_state, sn)
|
||||
|
||||
result[cisco_apic.DIST_NAMES] = dist_names
|
||||
result[cisco_apic.SYNC_STATE] = sync_state
|
||||
self.extend_subnet_dict_bulk(session, [(result, subnet_db)])
|
||||
|
||||
def update_subnetpool_precommit(self, context):
|
||||
current = context.current
|
||||
|
|
|
@ -604,3 +604,74 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
|||
self.type_manager.extend_networks_dict_provider(context, net_data)
|
||||
nets = self._filter_nets_provider(context, net_data, filters)
|
||||
return [db_utils.resource_fields(net, fields) for net in nets]
|
||||
|
||||
def _make_subnets_dict(self, subnets_db, fields=None, context=None):
    """Build API dicts for a batch of subnet DB objects.

    Runs the registered bulk extension functions once over the whole
    batch, then the per-resource extension functions with the
    BULK_EXTENDED marker set so bulk-aware extenders can skip
    re-doing their work. ``fields`` is accepted for signature
    compatibility; field filtering is left to the caller.
    """
    pairs = []
    for db_obj in subnets_db:
        res = {
            'id': db_obj['id'],
            'name': db_obj['name'],
            'tenant_id': db_obj['tenant_id'],
            'network_id': db_obj['network_id'],
            'ip_version': db_obj['ip_version'],
            'subnetpool_id': db_obj['subnetpool_id'],
            'enable_dhcp': db_obj['enable_dhcp'],
            'ipv6_ra_mode': db_obj['ipv6_ra_mode'],
            'ipv6_address_mode': db_obj['ipv6_address_mode'],
        }
        gw = db_obj['gateway_ip']
        res['gateway_ip'] = str(gw) if gw else None
        res['cidr'] = db_obj['cidr']
        res['allocation_pools'] = [
            {'start': pool['first_ip'], 'end': pool['last_ip']}
            for pool in db_obj['allocation_pools']]
        res['host_routes'] = [
            {'destination': route['destination'],
             'nexthop': route['nexthop']}
            for route in db_obj['routes']]
        res['dns_nameservers'] = [
            dns['address'] for dns in db_obj['dns_nameservers']]

        # The shared attribute for a subnet is the same as its
        # parent network's.
        res['shared'] = self._is_network_shared(context,
                                                db_obj.rbac_entries)
        pairs.append((res, db_obj))

    # Bulk extension functions see the whole batch at once.
    resource_extend.apply_funcs(subnet_def.COLLECTION_NAME + '_BULK',
                                pairs, None)

    result = []
    for res, db_obj in pairs:
        # Mark so bulk-aware per-resource extenders can no-op.
        res[api_plus.BULK_EXTENDED] = True
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME,
                                    res, db_obj)
        res.pop(api_plus.BULK_EXTENDED, None)
        result.append(db_utils.resource_fields(res, []))

    return result
||||
@db_api.retry_if_session_inactive()
def get_subnets(self, context, filters=None, fields=None,
                sorts=None, limit=None, marker=None,
                page_reverse=False):
    """List subnets, extending them via the bulk dict-building path.

    Fetches raw DB rows (dict_func=None) and converts them with
    _make_subnets_dict so extension functions run once per batch.
    """
    with db_api.context_manager.writer.using(context):
        marker_obj = self._get_marker_obj(context, 'subnet', limit,
                                          marker)

        # REVISIT(sridar): We need to rethink if we want to support
        # OVO. For now we put our head in the sand but this needs a
        # revisit. Also, older branches are a slight variation, in
        # line with upstream code.
        subnets_db = self._get_collection(context, models_v2.Subnet,
                                          filters=filters,
                                          dict_func=None,
                                          sorts=sorts,
                                          limit=limit,
                                          marker_obj=marker_obj,
                                          page_reverse=page_reverse)

        subnets = self._make_subnets_dict(subnets_db, fields, context)
        return [self._fields(subnet, fields) for subnet in subnets]
|
|
@ -3449,6 +3449,51 @@ class TestSyncState(ApicAimTestCase):
|
|||
with mock.patch('aim.aim_manager.AimManager.get_status', get_status):
|
||||
self._test_router_interface_subnet('error')
|
||||
|
||||
def _test_router_interface_multiple_subnets(self, expected_state,
                                            subnet2_attach=True):
    """Check per-subnet sync state when a router has 1 or 2 subnets.

    Attaches subnet1 (and optionally subnet2) to a router, then
    verifies the router's sync state and each subnet's expected
    state: an unattached subnet must report 'N/A'.
    """
    net_resp = self._make_network(self.fmt, 'net1', True)

    subnet1 = self._make_subnet(
        self.fmt, net_resp, '10.0.0.1', '10.0.0.0/24')['subnet']
    subnet2 = self._make_subnet(
        self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24')['subnet']

    expected_by_id = {
        subnet1['id']: expected_state,
        subnet2['id']: expected_state if subnet2_attach else 'N/A',
    }

    router = self._make_router(
        self.fmt, 'test-tenant', 'router1')['router']

    admin_ctx = n_context.get_admin_context()
    self.l3_plugin.add_router_interface(
        admin_ctx, router['id'], {'subnet_id': subnet1['id']})
    if subnet2_attach:
        self.l3_plugin.add_router_interface(
            admin_ctx, router['id'], {'subnet_id': subnet2['id']})

    router = self._show('routers', router['id'])['router']
    self.assertEqual(expected_state,
                     router['apic:synchronization_state'])

    for subnet in self._list('subnets')['subnets']:
        self.assertEqual(expected_by_id[subnet['id']],
                         subnet['apic:synchronization_state'])
||||
def test_router_interface_multiple_subnets_synced(self):
    # With every AIM status synced, both attached subnets report
    # 'synced'.
    patched = mock.patch('aim.aim_manager.AimManager.get_status',
                         TestSyncState._get_synced_status)
    with patched:
        self._test_router_interface_multiple_subnets('synced')
||||
def test_router_interface_multiple_subnets_one_attached(self):
    # Only subnet1 is attached; the detached subnet must show 'N/A'
    # while the attached one shows 'synced'.
    patched = mock.patch('aim.aim_manager.AimManager.get_status',
                         TestSyncState._get_synced_status)
    with patched:
        self._test_router_interface_multiple_subnets(
            'synced', subnet2_attach=False)
|
||||
def _test_external_network(self, expected_state, dn=None, msg=None):
|
||||
net = self._make_ext_network('net1', dn=dn)
|
||||
self.assertEqual(expected_state, net['apic:synchronization_state'],
|
||||
|
@ -3516,8 +3561,10 @@ class TestSyncState(ApicAimTestCase):
|
|||
|
||||
with mock.patch('aim.aim_manager.AimManager.get_status',
|
||||
TestSyncState._get_synced_status):
|
||||
self._test_external_subnet('synced',
|
||||
dn=self.dn_t1_l1_n1)
|
||||
with mock.patch('aim.aim_manager.AimManager.get_statuses',
|
||||
TestSyncState._mocked_get_statuses):
|
||||
self._test_external_subnet('synced',
|
||||
dn=self.dn_t1_l1_n1)
|
||||
|
||||
for expected_status, status_func in [
|
||||
('build', TestSyncState._get_pending_status_for_type),
|
||||
|
@ -3526,8 +3573,10 @@ class TestSyncState(ApicAimTestCase):
|
|||
return status_func(context, resource, aim_resource.Subnet)
|
||||
with mock.patch('aim.aim_manager.AimManager.get_status',
|
||||
get_status):
|
||||
self._test_external_subnet(expected_status,
|
||||
dn=self.dn_t1_l1_n1)
|
||||
with mock.patch('aim.aim_manager.AimManager.get_statuses',
|
||||
TestSyncState._mocked_get_statuses):
|
||||
self._test_external_subnet(expected_status,
|
||||
dn=self.dn_t1_l1_n1)
|
||||
|
||||
def test_unmanaged_external_subnet(self):
|
||||
self._test_external_subnet('N/A')
|
||||
|
|
Loading…
Reference in New Issue