Use raw SQL statements to speed up the get_gbp_details() call

1. Also added UTs to cover the Neutron workflow, as the previous
UTs only covered the GBP workflow.
2. My measurement on fab6 shows that the whole get_gbp_details()
call now takes only 27 milliseconds for 1 EP with a FIP association,
not counting the time the nova API takes to fetch the VM name. It's
a huge improvement over before, when the call took roughly 2-4
seconds even without the nova API time.
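
For reference, the gist of the change as a minimal sketch (illustrative
only, not the actual driver code; it assumes a SQLAlchemy session and
the stock Neutron ipallocations table): instead of loading ORM objects
and resolving their relationships, the hot RPC path issues plain SELECT
statements and rebuilds the few dicts it needs by hand.

    def get_fixed_ips_raw(session, port_id):
        # One plain SELECT instead of the Port ORM object plus its
        # lazily loaded fixed_ips relationship (mirrors the string
        # concatenation style used throughout this change).
        rows = session.execute(
            "SELECT ip_address, subnet_id FROM ipallocations "
            "WHERE port_id = '" + port_id + "'")
        return [{'ip_address': r['ip_address'],
                 'subnet_id': r['subnet_id']} for r in rows]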

Change-Id: Ie7e43fd711291a05426697743f85572285fdbcc5
Kent Wu 2018-11-27 18:06:32 -08:00
parent 8342c6cdf7
commit 2210d27f48
5 changed files with 958 additions and 133 deletions

@ -53,6 +53,10 @@ apic_opts = [
"plugin, formatted as a dictionary mapping Neutron external "
"network IDs (UUIDs) to ACI external network distinguished "
"names."),
cfg.BoolOpt('enable_raw_sql_for_device_rpc',
default=False,
help=("This will use those raw SQL statements to speed "
"up the calculation of the EP file.")),
]
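
A minimal way to turn the new option on (illustrative; it assumes the
option is registered under the ml2_apic_aim group, which is where the
driver below reads it from cfg.CONF):

    # In tests or programmatically, via oslo.config:
    from oslo_config import cfg
    cfg.CONF.set_override('enable_raw_sql_for_device_rpc', True,
                          group='ml2_apic_aim')

    # Or in the ML2 plugin configuration file:
    # [ml2_apic_aim]
    # enable_raw_sql_for_device_rpc = True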

@ -256,6 +256,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self.enable_iptables_firewall = (cfg.CONF.ml2_apic_aim.
enable_iptables_firewall)
self.l3_domain_dn = cfg.CONF.ml2_apic_aim.l3_domain_dn
self.enable_raw_sql_for_device_rpc = (cfg.CONF.ml2_apic_aim.
enable_raw_sql_for_device_rpc)
local_api.QUEUE_OUT_OF_PROCESS_NOTIFICATIONS = True
self._ensure_static_resources()
trunk_driver.register()
@ -2560,8 +2562,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
return vrfs.values()
# Used by policy driver.
def _get_address_scope_ids_for_vrf(self, session, vrf):
mappings = self._get_address_scope_mappings_for_vrf(session, vrf)
def _get_address_scope_ids_for_vrf(self, session, vrf, mappings=None):
mappings = mappings or self._get_address_scope_mappings_for_vrf(
session, vrf)
return [mapping.scope_id for mapping in mappings]
def _get_network_ids_for_vrf(self, session, vrf):
@ -3407,34 +3410,60 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
'prefixlen': <prefix_length_of_subnet>}
"""
session = plugin_context.session
query = BAKERY(lambda s: s.query(
models_v2.Port))
query += lambda q: q.filter(
models_v2.Port.network_id == sa.bindparam('network_id'),
models_v2.Port.device_id == sa.bindparam('device_id'),
models_v2.Port.device_owner == DEVICE_OWNER_SNAT_PORT)
snat_port = query(session).params(
network_id=ext_network['id'],
device_id=host_or_vrf).first()
snat_ip = None
if not snat_port or snat_port.fixed_ips is None:
# allocate SNAT port
extn_db_sn = extension_db.SubnetExtensionDb
if self.enable_raw_sql_for_device_rpc:
snat_port_query = ("SELECT id FROM ports "
"WHERE network_id = '" + ext_network['id'] + "' "
"AND device_id = '" + host_or_vrf + "' AND "
"device_owner = '" + DEVICE_OWNER_SNAT_PORT + "'")
snat_port = session.execute(snat_port_query).first()
if snat_port:
snat_port = dict(snat_port)
ip_query = ("SELECT ip_address, subnet_id FROM "
"ipallocations WHERE "
"port_id = '" + snat_port['id'] + "'")
ip_result = session.execute(ip_query)
snat_port['fixed_ips'] = []
for ip in ip_result:
snat_port['fixed_ips'].append(
{'ip_address': ip['ip_address'],
'subnet_id': ip['subnet_id']})
else:
query = BAKERY(lambda s: s.query(
models_v2.Subnet))
query += lambda q: q.join(
extn_db_sn,
extn_db_sn.subnet_id == models_v2.Subnet.id)
models_v2.Port))
query += lambda q: q.filter(
models_v2.Subnet.network_id == sa.bindparam('network_id'))
query += lambda q: q.filter(
extn_db_sn.snat_host_pool.is_(True))
snat_subnets = query(session).params(
network_id=ext_network['id']).all()
models_v2.Port.network_id == sa.bindparam('network_id'),
models_v2.Port.device_id == sa.bindparam('device_id'),
models_v2.Port.device_owner == DEVICE_OWNER_SNAT_PORT)
snat_port = query(session).params(
network_id=ext_network['id'],
device_id=host_or_vrf).first()
snat_ip = None
if not snat_port or snat_port['fixed_ips'] is None:
# allocate SNAT port
if self.enable_raw_sql_for_device_rpc:
snat_subnet_query = ("SELECT id, cidr, gateway_ip FROM "
"subnets JOIN "
"apic_aim_subnet_extensions AS "
"subnet_ext_1 ON "
"id = subnet_ext_1.subnet_id "
"WHERE network_id = '" +
ext_network['id'] + "' AND "
"subnet_ext_1.snat_host_pool = 1")
snat_subnets = session.execute(snat_subnet_query)
snat_subnets = list(snat_subnets)
else:
extn_db_sn = extension_db.SubnetExtensionDb
query = BAKERY(lambda s: s.query(
models_v2.Subnet))
query += lambda q: q.join(
extn_db_sn,
extn_db_sn.subnet_id == models_v2.Subnet.id)
query += lambda q: q.filter(
models_v2.Subnet.network_id == sa.bindparam('network_id'))
query += lambda q: q.filter(
extn_db_sn.snat_host_pool.is_(True))
snat_subnets = query(session).params(
network_id=ext_network['id']).all()
if not snat_subnets:
LOG.info('No subnet in external network %s is marked as '
'SNAT-pool',
@ -3460,15 +3489,20 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
'for SNAT IP allocation',
snat_subnet['id'])
else:
snat_ip = snat_port.fixed_ips[0].ip_address
query = BAKERY(lambda s: s.query(
models_v2.Subnet))
query += lambda q: q.filter(
models_v2.Subnet.id == sa.bindparam('subnet_id'))
snat_subnet = query(session).params(
subnet_id=snat_port.fixed_ips[0].subnet_id).one()
snat_ip = snat_port['fixed_ips'][0]['ip_address']
if self.enable_raw_sql_for_device_rpc:
snat_subnet_query = ("SELECT cidr, gateway_ip FROM subnets "
"WHERE id = '" +
snat_port['fixed_ips'][0]['subnet_id'] +
"'")
snat_subnet = session.execute(snat_subnet_query).first()
else:
query = BAKERY(lambda s: s.query(
models_v2.Subnet))
query += lambda q: q.filter(
models_v2.Subnet.id == sa.bindparam('subnet_id'))
snat_subnet = query(session).params(
subnet_id=snat_port.fixed_ips[0].subnet_id).one()
if snat_ip:
return {'host_snat_ip': snat_ip,
'gateway_ip': snat_subnet['gateway_ip'],

@ -1806,8 +1806,11 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
session = context._plugin_context.session
return aim_context.AimContext(session)
def _is_port_promiscuous(self, plugin_context, port):
pt = self._port_id_to_pt(plugin_context, port['id'])
def _is_port_promiscuous(self, plugin_context, port, is_gbp=True):
if is_gbp:
pt = self._port_id_to_pt(plugin_context, port['id'])
else:
pt = None
if (pt and pt.get('cluster_id') and
pt.get('cluster_id') != pt['id']):
master = self._get_policy_target(plugin_context, pt['cluster_id'])
@ -1848,25 +1851,34 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
"port %s", port['id'])
return epg
def _get_subnet_details(self, plugin_context, port, details):
def _get_subnet_details(self, plugin_context, port, details, is_gbp=True):
# L2P might not exist for a pure Neutron port
l2p = self._network_id_to_l2p(plugin_context, port['network_id'])
if is_gbp:
l2p = self._network_id_to_l2p(plugin_context, port['network_id'])
else:
l2p = None
# TODO(ivar): support shadow network
# if not l2p and self._ptg_needs_shadow_network(context, ptg):
# l2p = self._get_l2_policy(context._plugin_context,
# ptg['l2_policy_id'])
subnets = self._get_subnets(
plugin_context,
filters={'id': [ip['subnet_id'] for ip in port['fixed_ips']]})
if 'subnets' in details['_cache']:
subnets = details['_cache']['subnets']
else:
subnets = self._get_subnets(
plugin_context,
filters={'id': [ip['subnet_id'] for ip in port['fixed_ips']]})
for subnet in subnets:
dhcp_ports = {}
subnet_dhcp_ips = set()
for dhcp_port in self._get_ports(
if 'dhcp_ports' in details['_cache']:
dhcp_ports_list = details['_cache']['dhcp_ports']
else:
dhcp_ports_list = self._get_ports(
plugin_context,
filters={
'network_id': [subnet['network_id']],
'device_owner': [n_constants.DEVICE_OWNER_DHCP]}):
filters={'network_id': [subnet['network_id']],
'device_owner': [n_constants.DEVICE_OWNER_DHCP]})
for dhcp_port in dhcp_ports_list:
dhcp_ips = set([x['ip_address'] for x in dhcp_port['fixed_ips']
if x['subnet_id'] == subnet['id']])
dhcp_ports.setdefault(dhcp_port['mac_address'], list(dhcp_ips))
@ -1921,8 +1933,11 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
def _get_aap_details(self, plugin_context, port, details):
aaps = port['allowed_address_pairs']
# Set the correct address ownership for this port
owned_addresses = self._get_owned_addresses(
plugin_context, port['id'])
if 'owned_addresses' in details['_cache']:
owned_addresses = details['_cache']['owned_addresses']
else:
owned_addresses = self._get_owned_addresses(
plugin_context, port['id'])
for allowed in aaps:
if allowed['ip_address'] in owned_addresses:
# Signal the agent that this particular address is active
@ -1940,6 +1955,10 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
details):
session = plugin_context.session
result = []
if 'address_scope' in details['_cache']:
mappings = details['_cache']['address_scope']
else:
mappings = None
# get all subnets of the specified VRF
with session.begin(subtransactions=True):
# Find VRF's address_scope first
@ -1947,14 +1966,19 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
self.aim_mech_driver._get_address_scope_ids_for_vrf(
session,
aim_resource.VRF(tenant_name=vrf_tenant_name,
name=vrf_name)))
name=vrf_name),
mappings))
if address_scope_ids:
for address_scope_id in address_scope_ids:
if 'subnetpools' in details['_cache']:
subnetpools = details['_cache']['subnetpools']
else:
subnetpools = self._get_subnetpools(
plugin_context,
filters={'address_scope_id': [address_scope_id]})
for pool in subnetpools:
result.extend(pool['prefixes'])
filters={'address_scope_id': address_scope_ids})
for pool in subnetpools:
result.extend(pool['prefixes'])
elif 'vrf_subnets' in details['_cache']:
result = details['_cache']['vrf_subnets']
else:
aim_ctx = aim_context.AimContext(db_session=session)
if vrf_tenant_name != md.COMMON_TENANT_NAME:
@ -1974,26 +1998,30 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
# already
bds = [x for x in bds if x.tenant_name in valid_tenants]
# Retrieve subnets from BDs
net_ids = []
for bd in bds:
try:
net_ids.append(self.name_mapper.reverse_network(
session, bd.name))
except md_exc.InternalError:
# Check if BD maps to an external network
ext_ids = self.aim_mech_driver.get_network_ids_for_bd(
session, bd)
net_ids.extend(ext_ids)
# If no external network is found, we ignore reverse
# mapping failures because there may be APIC BDs in the
# concerned VRF that Neutron is unaware of. This is
# especially true for VRFs in the common tenant.
net_ids = self._get_net_ids_from_bds(session, bds)
if net_ids:
subnets = self._get_subnets(plugin_context,
{'network_id': net_ids})
result = [x['cidr'] for x in subnets]
return result
def _get_net_ids_from_bds(self, session, bds):
net_ids = []
for bd in bds:
try:
net_ids.append(self.name_mapper.reverse_network(
session, bd.name))
except md_exc.InternalError:
# Check if BD maps to an external network
ext_ids = self.aim_mech_driver.get_network_ids_for_bd(
session, bd)
net_ids.extend(ext_ids)
# If no external network is found, we ignore reverse
# mapping failures because there may be APIC BDs in the
# concerned VRF that Neutron is unaware of. This is
# especially true for VRFs in the common tenant.
return net_ids
def _get_segmentation_labels(self, plugin_context, port, details):
pt = self._port_id_to_pt(plugin_context, port['id'])
if self.apic_segmentation_label_driver and pt and (
@ -2011,49 +2039,62 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
# Handle them depending on whether there is a FIP on that
# network.
ext_nets = []
port_sn = set([x['subnet_id'] for x in port['fixed_ips']])
router_intf_ports = self._get_ports(
plugin_context,
filters={'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF],
'fixed_ips': {'subnet_id': port_sn}})
if router_intf_ports:
routers = self._get_routers(
if 'ext_nets' in details['_cache']:
ext_nets = details['_cache']['ext_nets']
else:
port_sn = set([x['subnet_id'] for x in port['fixed_ips']])
router_intf_ports = self._get_ports(
plugin_context,
filters={'id': [x['device_id']
for x in router_intf_ports]})
ext_nets = self._get_networks(
plugin_context,
filters={'id': [r['external_gateway_info']['network_id']
for r in routers
if r.get('external_gateway_info')]})
filters={'device_owner':
[n_constants.DEVICE_OWNER_ROUTER_INTF],
'fixed_ips': {'subnet_id': port_sn}})
if router_intf_ports:
routers = self._get_routers(
plugin_context,
filters={'id': [x['device_id']
for x in router_intf_ports]})
ext_nets = self._get_networks(
plugin_context,
filters={'id': [r['external_gateway_info']['network_id']
for r in routers
if r.get('external_gateway_info')]})
if not ext_nets:
return fips, ipms, host_snat_ips
# Handle FIPs of owned addresses - find other ports in the
# network whose address is owned by this port.
# If those ports have FIPs, then steal them.
fips_filter = [port['id']]
active_addrs = [a['ip_address']
for a in details['allowed_address_pairs']
if a.get('active')]
if active_addrs:
others = self._get_ports(
plugin_context,
filters={'network_id': [port['network_id']],
'fixed_ips': {'ip_address': active_addrs}})
fips_filter.extend([p['id'] for p in others])
fips = self._get_fips(plugin_context,
filters={'port_id': fips_filter})
if 'fips' in details['_cache']:
fips = details['_cache']['fips']
else:
fips_filter = [port['id']]
active_addrs = [a['ip_address']
for a in details['allowed_address_pairs']
if a.get('active')]
if active_addrs:
others = self._get_ports(
plugin_context,
filters={'network_id': [port['network_id']],
'fixed_ips': {'ip_address': active_addrs}})
fips_filter.extend([p['id'] for p in others])
fips = self._get_fips(plugin_context,
filters={'port_id': fips_filter})
for ext_net in ext_nets:
dn = ext_net.get(cisco_apic.DIST_NAMES, {}).get(
cisco_apic.EXTERNAL_NETWORK)
ext_net_epg_dn = ext_net.get(cisco_apic.DIST_NAMES, {}).get(
cisco_apic.EPG)
if 'ext_nets' in details['_cache']:
dn = ext_net.external_network_dn
ext_net_epg_dn = self.aim_mech_driver._get_network_epg(
ext_net).dn
nat_type = ext_net.nat_type
else:
dn = ext_net.get(cisco_apic.DIST_NAMES, {}).get(
cisco_apic.EXTERNAL_NETWORK)
ext_net_epg_dn = ext_net.get(cisco_apic.DIST_NAMES, {}).get(
cisco_apic.EPG)
nat_type = ext_net.get(cisco_apic.NAT_TYPE)
if not dn or not ext_net_epg_dn:
continue
if 'distributed' != ext_net.get(cisco_apic.NAT_TYPE):
if 'distributed' != nat_type:
continue
# TODO(amitbose) Handle per-tenant NAT EPG
@ -2437,7 +2478,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
query(context._plugin_context.session).params(
pr_ids=pr_ids).all())]
def _get_port_mtu(self, context, port):
def _get_port_mtu(self, context, port, details):
if self.advertise_mtu:
for dhcp_opt in port.get('extra_dhcp_opts'):
if (dhcp_opt.get('opt_name') == 'interface-mtu' or
@ -2447,7 +2488,10 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
return int(dhcp_opt['opt_value'])
except ValueError:
continue
network = self._get_network(context, port['network_id'])
if 'network' in details['_cache']:
network = details['_cache']['network']
else:
network = self._get_network(context, port['network_id'])
return network.get('mtu')
return None
@ -2455,8 +2499,11 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
network = self._get_network(context, port['network_id'])
return network.get('dns_domain')
def _get_nested_domain(self, context, port):
network = self._get_network(context, port['network_id'])
def _get_nested_domain(self, context, port, details):
if 'network' in details['_cache']:
network = details['_cache']['network']
else:
network = self._get_network(context, port['network_id'])
return (network.get('apic:nested_domain_name'),
network.get('apic:nested_domain_type'),
network.get('apic:nested_domain_infra_vlan'),

@ -16,7 +16,6 @@ from sqlalchemy.ext import baked
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import api as db_api
from neutron.db import db_base_plugin_common
from neutron.db.models import securitygroup as sg_models
from neutron.objects import base as objects_base
@ -26,6 +25,8 @@ from neutron_lib.api.definitions import portbindings
from opflexagent import rpc as o_rpc
from oslo_log import log
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
mechanism_driver as md)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
nova_client as nclient)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
@ -112,13 +113,17 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
def _request_endpoint_details(self, context, **kwargs):
request = kwargs.get('request')
host = kwargs.get('host')
gbp_details = self._get_gbp_details(context, request, host)
if hasattr(context, 'neutron_details'):
neutron_details = context.neutron_details
else:
neutron_details = ml2_rpc.RpcCallbacks(None,
None).get_device_details(context, **request)
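# NOTE: when the raw-SQL path is enabled, _get_gbp_details_new()
# below computes neutron_details itself and stashes it on the
# context, which is what lets the extra get_device_details()
# call above be skipped.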
result = {'device': request['device'],
'timestamp': request['timestamp'],
'request_id': request['request_id'],
'gbp_details': self._get_gbp_details(context, request,
host),
'neutron_details': ml2_rpc.RpcCallbacks(
None, None).get_device_details(context, **request),
'gbp_details': gbp_details,
'neutron_details': neutron_details,
'trunk_details': self._get_trunk_details(context,
request, host)}
return result
@ -181,6 +186,12 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
# - self._get_dns_domain(context, port)
@db_api.retry_if_session_inactive()
def _get_gbp_details(self, context, request, host):
if self.aim_mech_driver.enable_raw_sql_for_device_rpc:
return self._get_gbp_details_new(context, request, host)
else:
return self._get_gbp_details_old(context, request, host)
def _get_gbp_details_old(self, context, request, host):
with context.session.begin(subtransactions=True):
device = request.get('device')
@ -228,20 +239,20 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
'compute:') and port['device_id']:
vm = nclient.NovaClient().get_server(port['device_id'])
details['vm-name'] = vm.name if vm else port['device_id']
mtu = self._get_port_mtu(context, port)
details['_cache'] = {}
mtu = self._get_port_mtu(context, port, details)
if mtu:
details['interface_mtu'] = mtu
details['dns_domain'] = self._get_dns_domain(context, port)
if port.get('security_groups'):
self._add_security_group_details(context, port, details)
# NOTE(ivar): having these methods cleanly separated actually makes
# things less efficient by requiring lots of call duplication.
# We could alleviate this by passing down a cache that stores
# commonly requested objects (like EPGs). 'details' itself could
# be used for such caching.
details['_cache'] = {}
if port.get('security_groups'):
self._add_security_group_details(context, port, details)
vrf = self._get_port_vrf(context, port, details)
details['l3_policy_id'] = '%s %s' % (vrf.tenant_name, vrf.name)
self._add_subnet_details(context, port, details)
@ -251,8 +262,438 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
self._add_extra_details(context, port, details)
self._add_segmentation_label_details(context, port, details)
self._set_dhcp_lease_time(details)
details.pop('_cache', None)
self._add_nested_domain_details(context, port, details)
details.pop('_cache', None)
LOG.debug("Details for port %s : %s", port['id'], details)
return details
def _compose_in_filter_str(self, obj_list):
in_str = str(tuple(obj_list))
# Remove the ',' at the end otherwise MySQL will complain
if in_str[-1] == ')' and in_str[-2] == ',':
in_str = in_str[0:-2] + in_str[-1]
return in_str
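# Illustration (not part of the driver): what the helper returns.
#   _compose_in_filter_str(['a'])      -> "('a')"
#       (str(tuple(...)) yields "('a',)"; the trailing comma is
#       stripped so MySQL accepts the IN clause)
#   _compose_in_filter_str(['a', 'b']) -> "('a', 'b')"
# Callers then append it directly: "SELECT ... WHERE id in " + in_str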
def _build_up_details_cache(self, session, details, port, network):
ha_addr_query = ("SELECT ha_ip_address FROM "
"apic_ml2_ha_ipaddress_to_port_owner WHERE "
"apic_ml2_ha_ipaddress_to_port_owner.port_id = '"
+ port['id'] + "'")
ha_addr_result = session.execute(ha_addr_query)
owned_addresses = sorted([x[0] for x in ha_addr_result])
details['_cache']['owned_addresses'] = owned_addresses
if port.get('security_groups'):
# Cast to plain str to drop the unicode (u'') prefix from the
# tuple repr, otherwise MySQL will complain
sg_list = [str(r) for r in port['security_groups']]
in_str = self._compose_in_filter_str(sg_list)
sg_query = ("SELECT id, project_id FROM securitygroups WHERE "
"id in " + in_str)
sg_result = session.execute(sg_query)
details['_cache']['security_groups'] = sg_result
# Get the subnet info
subnets = []
subnet_ids = [str(ip['subnet_id']) for ip in port['fixed_ips']]
if subnet_ids:
subnet_in_str = self._compose_in_filter_str(subnet_ids)
subnet_query = ("SELECT * FROM subnets WHERE "
"id in " + subnet_in_str)
subnet_result = session.execute(subnet_query)
# Build up the ORM relationship manually
for subnet in subnet_result:
subnet_dict = dict(subnet)
dns_query = ("SELECT address FROM dnsnameservers WHERE "
"subnet_id = '" + subnet['id'] + "'")
dns_result = session.execute(dns_query)
subnet_dict['dns_nameservers'] = []
for dns in dns_result:
subnet_dict['dns_nameservers'].append(dns['address'])
route_query = ("SELECT destination, nexthop FROM "
"subnetroutes WHERE "
"subnet_id = '" + subnet['id'] + "'")
route_result = session.execute(route_query)
subnet_dict['host_routes'] = []
for route in route_result:
subnet_dict['host_routes'].append(
{'destination': route['destination'],
'nexthop': route['nexthop']})
subnets.append(subnet_dict)
else:
LOG.error("subnet_ids is empty. fixed_ips: %s, "
"port: %s", port['fixed_ips'], port['id'])
details['_cache']['subnets'] = subnets
# Get DHCP ports
dhcp_query = ("SELECT id, mac_address FROM ports WHERE "
"ports.network_id = '" + network['id'] + "'" + " AND "
"ports.device_owner = 'network:dhcp'")
dhcp_result = session.execute(dhcp_query)
# Build up the ORM relationship manually
dhcp_ports = []
for dhcp_port in dhcp_result:
dhcp_port_dict = dict(dhcp_port)
ip_query = ("SELECT ip_address, subnet_id FROM "
"ipallocations WHERE "
"port_id = '" + dhcp_port['id'] + "'")
ip_result = session.execute(ip_query)
dhcp_port_dict['fixed_ips'] = []
for ip in ip_result:
dhcp_port_dict['fixed_ips'].append(
{'ip_address': ip['ip_address'],
'subnet_id': ip['subnet_id']})
dhcp_ports.append(dhcp_port_dict)
details['_cache']['dhcp_ports'] = dhcp_ports
# Get address_scope, subnetpools and vrf_subnets
address_scope_query = (
"SELECT scope_id FROM apic_aim_address_scope_mappings WHERE "
"vrf_name = '" + network['vrf_name'] + "'" + " AND "
"vrf_tenant_name = '" + network['vrf_tenant_name'] + "'")
as_result = session.execute(address_scope_query)
subnetpools = []
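# NOTE: sqlite (used in the UTs) doesn't implement rowcount and
# always reports -1, so -1 is treated as "may have rows" in the
# checks below.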
if as_result.rowcount > 0 or as_result.rowcount == -1:
subnetpools_query = (
"SELECT subnetpools.id as id FROM subnetpools JOIN "
"address_scopes AS address_scopes_1 ON "
"address_scopes_1.id = subnetpools.address_scope_id JOIN "
"apic_aim_address_scope_mappings AS aim_as_mappings_1 ON "
"aim_as_mappings_1.scope_id = address_scopes_1.id WHERE "
"vrf_name = '" + network['vrf_name'] + "'" + " AND "
"vrf_tenant_name = '" + network['vrf_tenant_name'] +
"'")
subnetpools_res = session.execute(subnetpools_query)
# Build up the ORM relationship manually
for subnetpool in subnetpools_res:
subnetpool_dict = dict(subnetpool)
prefix_query = (
"SELECT cidr FROM subnetpoolprefixes WHERE "
"subnetpool_id = '" + subnetpool['id'] + "'")
prefix_result = session.execute(prefix_query)
subnetpool_dict['prefixes'] = []
for prefix in prefix_result:
subnetpool_dict['prefixes'].append(prefix['cidr'])
subnetpools.append(subnetpool_dict)
# Unfortunately, there is no relationship in the ORM between
# a VRF and a BridgeDomain -- the BDs reference the VRF by name,
# which doesn't include the ACI tenant. When the VRF lives in the
# common tenant, the only way we can deduce the BDs belonging to
# it is by eliminating all the BDs that are not in the common
# tenant, and have a VRF with the same name in their tenant.
vrf_subnets = []
if as_result.rowcount == 0 or as_result.rowcount == -1:
if network['vrf_tenant_name'] == md.COMMON_TENANT_NAME:
all_vrfs_bds_query = (
"SELECT name, tenant_name FROM aim_bridge_domains "
"WHERE vrf_name = '" + network['vrf_name'] + "'")
all_vrfs_bds_result = session.execute(all_vrfs_bds_query)
all_vrfs_query = (
"SELECT tenant_name FROM aim_vrfs WHERE "
"name = '" + network['vrf_name'] + "'")
all_vrfs_result = session.execute(all_vrfs_query)
bd_tenants = set(
[x.tenant_name for x in all_vrfs_bds_result])
vrf_tenants = set(
[x.tenant_name for x in all_vrfs_result
if x.tenant_name != md.COMMON_TENANT_NAME])
valid_tenants = bd_tenants - vrf_tenants
aim_bd_result = [x for x in all_vrfs_bds_result
if x.tenant_name in valid_tenants]
else:
aim_bd_query = (
"SELECT name, tenant_name FROM aim_bridge_domains "
"WHERE vrf_name = '" + network['vrf_name'] + "'" +
" AND tenant_name = '" +
network['vrf_tenant_name'] + "'")
aim_bd_result = session.execute(aim_bd_query)
net_ids = self._get_net_ids_from_bds(session,
aim_bd_result)
if net_ids:
net_id_list = [str(r) for r in net_ids]
in_str = self._compose_in_filter_str(net_id_list)
vrf_subnet_query = ("SELECT cidr FROM subnets WHERE "
"network_id in " + in_str)
vrf_subnet_result = session.execute(vrf_subnet_query)
vrf_subnets = [x['cidr'] for x in vrf_subnet_result]
details['_cache']['address_scope'] = as_result
details['_cache']['subnetpools'] = subnetpools
details['_cache']['vrf_subnets'] = vrf_subnets
# Get all the router interface ports that are on the same
# subnets as the fixed IPs for the port resource. Then
# use the router IDs from those ports to look for the
# external networks connected to those routers
if subnet_ids:
router_ports_query = (
"SELECT device_id FROM ports JOIN "
"ipallocations AS ipallocations_1 ON "
"ipallocations_1.port_id = ports.id WHERE "
"device_owner = 'network:router_interface' AND "
"ipallocations_1.subnet_id in " + subnet_in_str)
router_ports_result = session.execute(router_ports_query)
routers = [str(p.device_id) for p in router_ports_result]
else:
routers = []
ext_nets = []
if routers:
in_str = self._compose_in_filter_str(routers)
ext_net_query = (
"SELECT externalnetworks.network_id as id, "
"networks_1.project_id as tenant_id,"
"net_map_1.epg_name, net_map_1.epg_tenant_name, "
"net_map_1.epg_app_profile_name, net_ext_1.nat_type, "
"net_ext_1.external_network_dn FROM "
"externalnetworks JOIN networks AS networks_1 ON "
"networks_1.id = externalnetworks.network_id JOIN "
"apic_aim_network_mappings AS net_map_1 ON "
"net_map_1.network_id = externalnetworks.network_id JOIN "
"apic_aim_network_extensions AS net_ext_1 ON "
"net_ext_1.network_id = externalnetworks.network_id JOIN "
"ports AS ports_1 ON "
"ports_1.network_id = externalnetworks.network_id JOIN "
"routerports AS routerports_1 ON "
"routerports_1.port_id = ports_1.id WHERE "
"routerports_1.router_id in " + in_str)
ext_nets = session.execute(ext_net_query)
ext_nets = list(ext_nets)
details['_cache']['ext_nets'] = ext_nets
# For nested domain
nested_allowed_vlans_query = (
"SELECT vlan FROM "
"apic_aim_network_nested_domain_allowed_vlans WHERE "
"network_id = '" + network['id'] + "'")
nested_allowed_vlans_result = session.execute(
nested_allowed_vlans_query)
network['apic:nested_domain_allowed_vlans'] = []
for allowed_vlan in nested_allowed_vlans_result:
network['apic:nested_domain_allowed_vlans'].append(
allowed_vlan.vlan)
details['_cache']['network'] = network
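# For reference, the cache assembled above has roughly this shape
# (illustrative; keys as set in this method, 'security_groups' only
# when the port has any):
#   details['_cache'] = {
#       'owned_addresses': [...],    # HA IPs owned by this port
#       'security_groups': rows,     # (id, project_id) result rows
#       'subnets': [{...}],          # with dns_nameservers/host_routes
#       'dhcp_ports': [{...}],       # with fixed_ips rebuilt by hand
#       'address_scope': rows,       # scope_id rows for the VRF
#       'subnetpools': [{...}],      # with their prefixes
#       'vrf_subnets': [...],        # CIDRs when the VRF has no scope
#       'ext_nets': [{...}],         # external nets reached via routers
#       'network': {...},            # the network row itself
#   }
# The _get_*_details() helpers check these entries before falling
# back to ORM queries.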
def _get_gbp_details_new(self, context, request, host):
with context.session.begin(subtransactions=True):
device = request.get('device')
core_plugin = self._core_plugin
port_id = core_plugin._device_to_port_id(context, device)
port_query = ("SELECT project_id, id, name, network_id, "
"mac_address, admin_state_up, device_id, "
"device_owner, port_security_enabled, host, "
"vif_type, vif_details FROM "
"ports JOIN portsecuritybindings AS "
"portsecuritybindings_1 ON "
"ports.id = portsecuritybindings_1.port_id JOIN "
"ml2_port_bindings AS ml2_port_bindings_1 ON "
"ports.id = ml2_port_bindings_1.port_id "
"WHERE ports.id = '" + port_id + "'")
port_result = context.session.execute(port_query)
# In the UT env., sqlite doesn't implement rowcount, so the
# value is always -1
if port_result.rowcount != 1 and port_result.rowcount != -1:
LOG.warning("Can't find the matching port DB record for "
"this port ID: %(port_id)s",
{'port_id': port_id})
return {'device': request.get('device')}
port = port_result.first()
# Build up the ORM relationship manually
port = dict(port)
binding_level_query = ("SELECT segment_id FROM "
"ml2_port_binding_levels WHERE "
"port_id = '" + port_id + "' AND "
"host = '" + port['host'] + "'")
binding_levels = context.session.execute(binding_level_query)
port['binding_levels'] = []
for binding_level in binding_levels:
port['binding_levels'].append(
{'segment_id': binding_level['segment_id']})
ip_query = ("SELECT ip_address, subnet_id FROM "
"ipallocations WHERE "
"port_id = '" + port_id + "'")
ip_result = context.session.execute(ip_query)
port['fixed_ips'] = []
for ip in ip_result:
port['fixed_ips'].append(
{'ip_address': ip['ip_address'],
'subnet_id': ip['subnet_id']})
if not port['fixed_ips']:
LOG.error("fixed_ips is empty "
"for port: %s", port_id)
sg_query = ("SELECT security_group_id FROM "
"securitygroupportbindings WHERE "
"port_id = '" + port_id + "'")
sg_result = context.session.execute(sg_query)
port['security_groups'] = []
for sg in sg_result:
port['security_groups'].append(sg.security_group_id)
aap_query = ("SELECT mac_address, ip_address FROM "
"allowedaddresspairs WHERE "
"port_id = '" + port_id + "'")
aap_result = context.session.execute(aap_query)
port['allowed_address_pairs'] = []
for aap in aap_result:
port['allowed_address_pairs'].append(
{'ip_address': aap['ip_address'],
'mac_address': aap['mac_address']})
dhcp_opt_query = ("SELECT opt_name, opt_value FROM "
"extradhcpopts WHERE "
"port_id = '" + port_id + "'")
dhcp_opt_result = context.session.execute(dhcp_opt_query)
port['extra_dhcp_opts'] = []
for opt in dhcp_opt_result:
port['extra_dhcp_opts'].append(
{'opt_name': opt['opt_name'],
'opt_value': opt['opt_value']})
net_id = port['network_id']
net_query = ("SELECT id, epg_name, epg_app_profile_name, "
"epg_tenant_name, vrf_name, vrf_tenant_name, mtu, "
"nested_domain_name as 'apic:nested_domain_name', "
"nested_domain_type as 'apic:nested_domain_type', "
"nested_domain_infra_vlan as "
"'apic:nested_domain_infra_vlan', "
"nested_domain_service_vlan as "
"'apic:nested_domain_service_vlan', "
"nested_domain_node_network_vlan as "
"'apic:nested_domain_node_network_vlan', "
"dns_domain, port_security_enabled FROM "
"apic_aim_network_mappings JOIN "
"networks AS net_1 ON net_1.id = "
"apic_aim_network_mappings.network_id JOIN "
"apic_aim_network_extensions AS net_ext_1 ON "
"net_ext_1.network_id = "
"apic_aim_network_mappings.network_id "
"LEFT OUTER JOIN networksecuritybindings AS "
"networksecuritybindings_1 ON net_ext_1.network_id "
"= networksecuritybindings_1.network_id "
"LEFT OUTER JOIN networkdnsdomains AS "
"networkdnsdomains_1 ON net_ext_1.network_id = "
"networkdnsdomains_1.network_id WHERE "
"apic_aim_network_mappings.network_id = '"
+ net_id + "'")
net_result = context.session.execute(net_query)
if net_result.rowcount != 1 and net_result.rowcount != -1:
LOG.warning("Can't find the matching network DB record for "
"this network ID: %(net_id)s",
{'net_id': net_id})
return {'device': request.get('device')}
net_record = net_result.first()
network = dict(net_record)
# NOTE(ivar): removed the PROXY_PORT_PREFIX hack.
# This was needed to support network services without hotplug.
details = {'device': request.get('device'),
'enable_dhcp_optimization': self._is_dhcp_optimized(
context, port),
'enable_metadata_optimization': (
self._is_metadata_optimized(context, port)),
'port_id': port_id,
'mac_address': port['mac_address'],
'app_profile_name': network['epg_app_profile_name'],
'tenant_id': port['project_id'],
'host': port['host'],
# TODO(ivar): scope names, possibly through AIM or the
# name mapper
'ptg_tenant': network['epg_tenant_name'],
'endpoint_group_name': network['epg_name'],
# TODO(kentwu): make this support the GBP workflow as well
'promiscuous_mode': self._is_port_promiscuous(
context, port, is_gbp=False),
'extra_ips': [],
'floating_ip': [],
'ip_mapping': [],
# Put per mac-address extra info
'extra_details': {}}
# Set VM name if needed.
if port['device_owner'].startswith(
'compute:') and port['device_id']:
vm = nclient.NovaClient().get_server(port['device_id'])
details['vm-name'] = vm.name if vm else port['device_id']
details['_cache'] = {}
self._build_up_details_cache(
context.session, details, port, network)
mtu = self._get_port_mtu(context, port, details)
if mtu:
details['interface_mtu'] = mtu
details['dns_domain'] = network['dns_domain']
if port.get('security_groups'):
self._add_security_group_details(context, port, details)
# TODO(kentwu): make this support the GBP workflow if needed
self._add_subnet_details(context, port, details, is_gbp=False)
self._add_allowed_address_pairs_details(context, port, details)
details['l3_policy_id'] = '%s %s' % (
network['vrf_tenant_name'], network['vrf_name'])
self._add_vrf_details(context, details['l3_policy_id'], details)
# Handle FIPs of owned addresses - find other ports in the
# network whose address is owned by this port.
# If those ports have FIPs, then steal them.
fips_filter = [str(port_id)]
active_addrs = [str(a['ip_address'])
for a in details['allowed_address_pairs']
if a.get('active')]
if active_addrs:
in_str = self._compose_in_filter_str(active_addrs)
ports_query = (
"SELECT DISTINCT id FROM ports JOIN "
"ipallocations AS ipallocations_1 ON "
"ipallocations_1.port_id = ports.id WHERE "
"ports.network_id = '" + net_id + "' AND "
"ipallocations_1.ip_address in " + in_str)
ports_result = context.session.execute(ports_query)
fips_filter.extend([str(p['id']) for p in ports_result])
in_str = self._compose_in_filter_str(fips_filter)
fips_query = (
"SELECT id, project_id, fixed_ip_address, "
"floating_ip_address, floating_network_id, "
"fixed_port_id as port_id FROM floatingips WHERE "
"floatingips.fixed_port_id in " + in_str)
fips_result = context.session.execute(fips_query)
fips = []
for fip in fips_result:
fip_dict = dict(fip)
fips.append(fip_dict)
details['_cache']['fips'] = fips
self._add_nat_details(context, port, host, details)
self._add_extra_details(context, port, details)
# TODO(kentwu): make this support the GBP workflow as well
self._add_segmentation_label_details(context, port, details,
is_gbp=False)
self._set_dhcp_lease_time(details)
self._add_nested_domain_details(context, port, details)
details.pop('_cache', None)
# Get the neutron_details
segments_query = (
"SELECT id, network_type, physical_network FROM "
"networksegments WHERE "
"network_id = '" + net_id + "'")
segments = context.session.execute(segments_query)
bottom_segment = {}
if port['binding_levels']:
for segment in segments:
bottom_segment = dict(segment)
if (segment['id'] ==
port['binding_levels'][-1]['segment_id']):
break
neutron_details = {'admin_state_up': port['admin_state_up'],
'device_owner': port['device_owner'],
'fixed_ips': port['fixed_ips'],
'network_id': net_id,
'port_id': port_id,
'network_type':
bottom_segment.get('network_type'),
'physical_network':
bottom_segment.get('physical_network')}
context.neutron_details = neutron_details
LOG.debug("Details for port %s : %s", port['id'], details)
return details
@ -270,7 +711,9 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
return
details['security_group'] = []
if port['security_groups']:
if 'security_groups' in details['_cache']:
port_sgs = details['_cache']['security_groups']
else:
query = BAKERY(lambda s: s.query(
sg_models.SecurityGroup.id,
sg_models.SecurityGroup.tenant_id))
@ -279,14 +722,21 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
sa.bindparam('sg_ids', expanding=True)))
port_sgs = query(context.session).params(
sg_ids=port['security_groups']).all()
for sg_id, tenant_id in port_sgs:
tenant_aname = self.aim_mech_driver.name_mapper.project(
context.session, tenant_id)
details['security_group'].append(
{'policy-space': tenant_aname,
'name': sg_id})
previous_sg_id = None
previous_tenant_id = None
for sg_id, tenant_id in port_sgs:
# This works around a UT sqlite bug where duplicate SG
# entries are somehow returned if we query with a SELECT
# statement directly
if sg_id == previous_sg_id and tenant_id == previous_tenant_id:
continue
tenant_aname = self.aim_mech_driver.name_mapper.project(
context.session, tenant_id)
details['security_group'].append(
{'policy-space': tenant_aname,
'name': sg_id})
previous_sg_id = sg_id
previous_tenant_id = tenant_id
# Always include this SG which has the default arp & dhcp rules
details['security_group'].append(
{'policy-space': 'common',
@ -294,11 +744,12 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
# Child class needs to support:
# - self._get_subnet_details(context, port, details)
def _add_subnet_details(self, context, port, details):
def _add_subnet_details(self, context, port, details, is_gbp=True):
# This method needs to define requirements for this Mixin's child
# classes in order to fill the following result parameters:
# - subnets;
details['subnets'] = self._get_subnet_details(context, port, details)
details['subnets'] = self._get_subnet_details(context, port, details,
is_gbp)
def _add_nat_details(self, context, port, host, details):
# This method needs to define requirements for this Mixin's child
@ -354,18 +805,20 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
details['nested_domain_node_network_vlan'],
details['nested_domain_allowed_vlans'],
details['nested_host_vlan']) = (
self._get_nested_domain(context, port))
self._get_nested_domain(context, port, details))
# Child class needs to support:
# - self._get_segmentation_labels(context, port, details)
def _add_segmentation_label_details(self, context, port, details):
def _add_segmentation_label_details(self, context, port, details,
is_gbp=True):
# This method needs to define requirements for this Mixin's child
# classes in order to fill the following result parameters:
# - segmentation_labels
# apic_segmentation_label is a GBP driver extension configured
# for the aim_mapping driver
details['segmentation_labels'] = self._get_segmentation_labels(
context, port, details)
if is_gbp:
details['segmentation_labels'] = self._get_segmentation_labels(
context, port, details)
def _add_extra_details(self, context, port, details):
# TODO(ivar): Extra details depend on HA and SC implementation

@ -2622,6 +2622,288 @@ class TestPolicyTargetGroupRollback(AIMBaseTestCase):
self.dummy.delete_l3_policy_precommit = orig_func
class TestGbpDetailsForML2(AIMBaseTestCase,
test_securitygroup.SecurityGroupsTestCase):
def setUp(self, *args, **kwargs):
super(TestGbpDetailsForML2, self).setUp(*args, **kwargs)
cfg.CONF.set_override('path_mtu', 1000, group='ml2')
cfg.CONF.set_override('global_physnet_mtu', 1000, None)
cfg.CONF.set_override('advertise_mtu', True, group='aim_mapping')
def _verify_gbp_details_assertions(self, mapping, req_mapping, port_id,
expected_epg_name, expected_epg_tenant,
subnets, dhcp_port, default_route=None,
map_tenant_name=True):
self.assertEqual(mapping, req_mapping['gbp_details'])
self.assertEqual(port_id, mapping['port_id'])
self.assertEqual(expected_epg_name, mapping['endpoint_group_name'])
exp_tenant = (self.name_mapper.project(None, expected_epg_tenant)
if map_tenant_name else expected_epg_tenant)
self.assertEqual(exp_tenant, mapping['ptg_tenant'])
self.assertEqual('someid', mapping['vm-name'])
self.assertTrue(mapping['enable_dhcp_optimization'])
self.assertFalse(mapping['enable_metadata_optimization'])
self.assertEqual(1, len(mapping['subnets']))
mapping_cidrs = [subnet['cidr'] for subnet in mapping['subnets']]
for subnet in subnets:
self.assertIn(subnet['subnet']['cidr'], mapping_cidrs)
dhcp_server_ports = mapping['subnets'][0]['dhcp_server_ports']
self.assertIn(dhcp_port['mac_address'],
dhcp_server_ports.keys())
dhcp_server_port = dhcp_server_ports[dhcp_port['mac_address']]
self.assertEqual(dhcp_server_port[0],
dhcp_port['fixed_ips'][0]['ip_address'])
if default_route:
self.assertTrue(
{'destination': '0.0.0.0/0', 'nexthop': default_route} in
mapping['subnets'][0]['host_routes'],
"Default route missing in %s" % mapping['subnets'][0])
# Verify Neutron details
self.assertEqual(port_id, req_mapping['neutron_details']['port_id'])
def _verify_vrf_details_assertions(self, vrf_mapping, expected_vrf_name,
expected_l3p_id, expected_subnets,
expected_vrf_tenant):
self.assertEqual(expected_vrf_name, vrf_mapping['vrf_name'])
self.assertEqual(expected_vrf_tenant, vrf_mapping['vrf_tenant'])
self.assertEqual(set(expected_subnets),
set(vrf_mapping['vrf_subnets']))
self.assertEqual(expected_l3p_id,
vrf_mapping['l3_policy_id'])
def _verify_fip_details(self, mapping, fip, ext_epg_tenant,
ext_epg_name, ext_epg_app_profile='OpenStack'):
self.assertEqual(1, len(mapping['floating_ip']))
fip = copy.deepcopy(fip)
fip['nat_epg_name'] = ext_epg_name
fip['nat_epg_tenant'] = ext_epg_tenant
fip['nat_epg_app_profile'] = ext_epg_app_profile
fip_mapping = mapping['floating_ip'][0]
self.assertEqual(fip['id'], fip_mapping['id'])
self.assertEqual(fip['port_id'], fip_mapping['port_id'])
self.assertEqual(fip['project_id'], fip_mapping['project_id'])
self.assertEqual(fip['fixed_ip_address'],
fip_mapping['fixed_ip_address'])
self.assertEqual(fip['floating_ip_address'],
fip_mapping['floating_ip_address'])
self.assertEqual(fip['floating_network_id'],
fip_mapping['floating_network_id'])
def _verify_ip_mapping_details(self, mapping, ext_segment_name,
ext_epg_tenant, ext_epg_name,
ext_epg_app_profile='OpenStack'):
self.assertTrue({'external_segment_name': ext_segment_name,
'nat_epg_name': ext_epg_name,
'nat_epg_app_profile': ext_epg_app_profile,
'nat_epg_tenant': ext_epg_tenant}
in mapping['ip_mapping'])
def _verify_host_snat_ip_details(self, mapping, ext_segment_name,
snat_ip, subnet_cidr):
gw, prefix = subnet_cidr.split('/')
self._check_ip_in_cidr(snat_ip, subnet_cidr)
mapping['host_snat_ips'][0].pop('host_snat_ip', None)
self.assertEqual({'external_segment_name': ext_segment_name,
'gateway_ip': gw,
'prefixlen': int(prefix)},
mapping['host_snat_ips'][0])
def _do_test_get_gbp_details(self, pre_vrf=None,
enable_raw_sql=False):
self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = (
enable_raw_sql)
self.driver.aim_mech_driver.apic_optimized_dhcp_lease_time = 100
ext_net1, rtr1, ext_net1_sub = self._setup_external_network(
'es1', dn='uni/tn-t1/out-l1/instP-n1')
ext_net2, rtr2, ext_net2_sub1 = self._setup_external_network(
'es2', dn='uni/tn-t1/out-l2/instP-n2')
ext_net2_sub2 = self._make_subnet(
self.fmt, {'network': {'id': ext_net2_sub1['network_id'],
'tenant_id': ext_net2_sub1['tenant_id']}},
'200.200.0.1', '200.200.0.0/16')['subnet']
self._update('subnets', ext_net2_sub2['id'],
{'subnet': {SNAT_HOST_POOL: True}})
if pre_vrf:
as_id = self._make_address_scope_for_vrf(
pre_vrf.dn, name='as1')['address_scope']['id']
else:
scope = self._make_address_scope(
self.fmt, 4, name='as1')['address_scope']
as_id = scope['id']
pool = self._make_subnetpool(self.fmt, ['10.0.0.0/8'], name='sp1',
tenant_id=self._tenant_id,
address_scope_id=as_id,
default_prefixlen=24)['subnetpool']
pool_id = pool['id']
network = self._make_network(self.fmt, 'net1', True)
net1 = network['network']
# Make a subnet on each of two networks
gw1_ip = '10.0.1.1'
subnet1 = self._make_subnet(
self.fmt, network, gw1_ip, cidr='10.0.1.0/24',
subnetpool_id=pool_id)['subnet']
network = self._make_network(self.fmt, 'net2', True)
net2 = network['network']
gw2_ip = '10.0.2.1'
subnet2 = self._make_subnet(
self.fmt, network, gw2_ip, cidr='10.0.2.0/24',
subnetpool_id=pool_id)['subnet']
# Make a DHCP port on each subnet
dhcp_subnet1 = [{'subnet_id': subnet1['id']}]
dhcp_p1 = self._make_port(self.fmt, net1['id'],
device_owner='dhcp:',
fixed_ips=dhcp_subnet1)['port']
self._bind_other_port_to_host(dhcp_p1['id'], 'h1')
dhcp_subnet2 = [{'subnet_id': subnet2['id']}]
dhcp_p2 = self._make_port(self.fmt, net2['id'],
device_owner='dhcp:',
fixed_ips=dhcp_subnet2)['port']
self._bind_other_port_to_host(dhcp_p2['id'], 'h2')
# Make a VM port
p1 = self._make_port(self.fmt, net1['id'],
device_owner='compute:')['port']
self._bind_port_to_host(p1['id'], 'h1')
self._router_interface_action('add', rtr1['id'], subnet1['id'], None)
self._router_interface_action('add', rtr2['id'], subnet2['id'], None)
fip = self._make_floatingip(self.fmt, ext_net1_sub['network_id'],
port_id=p1['id'])['floatingip']
mapping = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='h1')
req_mapping = self.driver.request_endpoint_details(
nctx.get_admin_context(),
request={'device': 'tap%s' % p1['id'],
'timestamp': 0, 'request_id': 'request_id'},
host='h1')
epg_name = self.name_mapper.network(self._neutron_context.session,
net1['id'])
epg_tenant = net1['tenant_id']
net_db = self._get_object('networks', net1['id'], self.api)['network']
subnets = [self._get_object('subnets', subnet_id, self.api)
for subnet_id in net_db['subnets']
if subnet_id == p1['fixed_ips'][0]['subnet_id']]
self._verify_gbp_details_assertions(
mapping, req_mapping, p1['id'], epg_name,
epg_tenant, subnets, dhcp_p1)
if pre_vrf:
vrf_name = pre_vrf.name
vrf_tenant = pre_vrf.tenant_name
else:
vrf_name = self.name_mapper.address_scope(
None, as_id)
vrf_tenant = self.name_mapper.project(None,
self._tenant_id)
vrf_id = '%s %s' % (vrf_tenant, vrf_name)
subpools = self._get_object('subnetpools', pool_id,
self.api)['subnetpool']
prefixlist = [prefix.strip() for prefix in subpools['prefixes']]
self._verify_vrf_details_assertions(
mapping, vrf_name, vrf_id, prefixlist, vrf_tenant)
self._verify_fip_details(mapping, fip, 't1', 'EXT-l1')
# Create an event on a second host to verify that the SNAT
# port gets created for this second host
p2 = self._make_port(self.fmt, net2['id'],
device_owner='compute:')['port']
self._bind_port_to_host(p2['id'], 'h2')
# As admin, create a SG in a different tenant, then associate
# it with the same port
sg = self._make_security_group(
self.fmt, 'sg_1', 'test',
tenant_id='test-tenant-2')['security_group']
port = self._plugin.get_port(self._context, p2['id'])
port['security_groups'].append(sg['id'])
port = self._plugin.update_port(
self._context, port['id'], {'port': port})
# Set a bad MTU through extra_dhcp_opts; it should fall back
# to the network MTU
data = {'port': {'extra_dhcp_opts': [{'opt_name': '26',
'opt_value': 'garbage'}]}}
port = self._update('ports', port['id'], data)['port']
mapping = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p2['id'],
host='h2')
req_mapping = self.driver.request_endpoint_details(
nctx.get_admin_context(),
request={'device': 'tap%s' % p2['id'],
'timestamp': 0, 'request_id': 'request_id'},
host='h2')
epg_name = self.name_mapper.network(self._neutron_context.session,
net2['id'])
epg_tenant = net2['tenant_id']
net_db = self._get_object('networks', net2['id'], self.api)['network']
subnets = [self._get_object('subnets', subnet_id, self.api)
for subnet_id in net_db['subnets']
if subnet_id == p2['fixed_ips'][0]['subnet_id']]
self._verify_gbp_details_assertions(
mapping, req_mapping, p2['id'], epg_name,
epg_tenant, subnets, dhcp_p2)
self.assertEqual(p2['id'], mapping['port_id'])
self._verify_ip_mapping_details(mapping,
'uni:tn-t1:out-l2:instP-n2', 't1', 'EXT-l2')
self._verify_host_snat_ip_details(mapping,
'uni:tn-t1:out-l2:instP-n2', '200.200.0.3', '200.200.0.1/16')
self.assertEqual(1000, mapping['interface_mtu'])
self.assertEqual(100, mapping['dhcp_lease_time'])
sg_list = []
ctx = nctx.get_admin_context()
port_sgs = (ctx.session.query(sg_models.SecurityGroup.id,
sg_models.SecurityGroup.tenant_id).
filter(sg_models.SecurityGroup.id.
in_(port['security_groups'])).
all())
for sg_id, tenant_id in port_sgs:
sg_tenant = self.name_mapper.project(None, tenant_id)
sg_list.append(
{'policy-space': sg_tenant,
'name': sg_id})
sg_list.append({'policy-space': 'common',
'name': self.driver.aim_mech_driver.apic_system_id +
'_DefaultSecurityGroup'})
self.assertEqual(sg_list, mapping['security_group'])
# Set the right MTU through extra_dhcp_opts
data = {'port': {'extra_dhcp_opts': [{'opt_name': 'interface-mtu',
'opt_value': '2000'}]}}
port = self._update('ports', port['id'], data)['port']
mapping = self.driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p2['id'],
host='h2')
self.assertEqual(2000, mapping['interface_mtu'])
def test_get_gbp_details(self):
self._do_test_get_gbp_details()
def test_get_gbp_details_with_raw_sql(self):
self._do_test_get_gbp_details(enable_raw_sql=True)
def test_get_gbp_details_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_get_gbp_details(pre_vrf=vrf)
def test_get_gbp_details_pre_existing_vrf_with_raw_sql(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_get_gbp_details(pre_vrf=vrf,
enable_raw_sql=True)
class TestPolicyTarget(AIMBaseTestCase,
test_securitygroup.SecurityGroupsTestCase):
@ -3037,8 +3319,6 @@ class TestPolicyTarget(AIMBaseTestCase,
'prefixlen': int(prefix)},
mapping['host_snat_ips'][0])
# TODO(Kent): we should also add the ML2-related UTs for
# get_gbp_details(). They're missing completely.
def _do_test_get_gbp_details(self, pre_vrf=None):
self.driver.aim_mech_driver.apic_optimized_dhcp_lease_time = 100
es1, es1_sub = self._setup_external_segment(
@ -5340,6 +5620,13 @@ class TestNestedDomain(AIMBaseTestCase):
self.assertIsNone(details['nested_host_vlan'])
class TestNestedDomainWithRawSql(TestNestedDomain):
def setUp(self, **kwargs):
super(TestNestedDomainWithRawSql, self).setUp(**kwargs)
self.driver.aim_mech_driver.enable_raw_sql_for_device_rpc = True
class TestNeutronPortOperation(AIMBaseTestCase):
def setUp(self, **kwargs):