Add support for baremetal vnic_type

Bind ports of vnic_type baremetal to networks that have
opflex type static segments, using Hierarchical Port Binding
(HPB) with dynamically created VLAN type segments, as well as
to networks with static or dynamic VLAN type segments. This
includes static port configuration on EPGs or L3 Out policies
in ACI, using untagged mode with native VLANs. Support for
baremetal vnic_types on trunk ports must be addressed in a
separate patch.

Requirements for baremetal vnic_type port binding are:
1) binding:profile must have a local_link_information element
2) local_link_information must contain a switch_info element
3) switch_info must be a string containing comma-delimited
   key/value pairs, with each key separated from its value
   by a colon. Required key/value pairs are:
      o  apic_dn:<dn for static path>
      o  physical_network:<physnet for interface>
   The PhysDom for the interface can optionally be
   specified using the key/value pair:
      o  physical_domain:<name of the PhysDom for the interface>
   If physical_domain is populated, it is used to associate
   the named PhysDom with the EPG. If no physical_domain is
   provided, the existing domain association behavior is used
   (i.e. the HostDomainMappingV2 entries in AIM are searched
   for applicable entries).

The local_link_information may also contain port_id and
switch_id elements, which identify the individual port and
switch that the baremetal VNIC connects to. This information
is currently unused. An example binding:profile is shown
below.
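For illustration, a binding:profile that satisfies these
requirements could look like the following sketch (the DN,
physnet, PhysDom, and interface values are hypothetical,
modeled on this patch's unit tests):

    {'local_link_information': [
        {'port_id': 'Eth1/1',
         'switch_id': '00:c0:4a:21:23:24',
         'switch_info': 'apic_dn:topology/pod-1/paths-501/'
                        'pathep-[eth1/1],'
                        'physical_network:physnet2,'
                        'physical_domain:pdom_physnet1'}]}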

Once a port with a VNIC type of baremetal is bound, the static
path information from the binding:profile is stored in AIM,
either in an EPG or, in the case of SVI networks, in the
Interface Profile of an L3 Out policy.
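Each stored static path pairs the DN from the binding:profile
with the VLAN encap allocated at binding time. Based on this
patch's unit tests, an EPG entry takes roughly this form (the
path and VLAN values are hypothetical):

    {'path': 'topology/pod-1/paths-102/pathep-[eth1/1]',
     'encap': 'vlan-1000',
     'mode': 'untagged'}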

Change-Id: I43b1305de59f68d9114c5856f76a98cc72ceb18e
(cherry picked from commit 26f4d72d49)

@@ -115,6 +115,7 @@ SYNC_STATE_TMP = 'synchronization_state_tmp'
AIM_RESOURCES_CNT = 'aim_resources_cnt'
SUPPORTED_VNIC_TYPES = [portbindings.VNIC_NORMAL,
portbindings.VNIC_BAREMETAL,
portbindings.VNIC_DIRECT]
AGENT_TYPE_DVS = 'DVS agent'
@@ -131,6 +132,7 @@ LEGACY_SNAT_NET_NAME_PREFIX = 'host-snat-network-for-internal-use-'
LEGACY_SNAT_SUBNET_NAME = 'host-snat-pool-for-internal-use'
LEGACY_SNAT_PORT_NAME = 'host-snat-pool-port-for-internal-use'
LEGACY_SNAT_PORT_DEVICE_OWNER = 'host-snat-pool-port-device-owner-internal-use'
LL_INFO = 'local_link_information'
# TODO(kentwu): Move this to AIM utils maybe to avoid adding too much
# APIC logic to the mechanism driver
@@ -2233,11 +2235,15 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self._opflex_bind_port):
return
if self._is_baremetal_vnic_type(context.current):
self._bind_baremetal_vnic(context)
return
# If we reached here, it means that either there is no active opflex
# agent running on the host, or the agent on the host is not
# configured for this physical network. Treat the host as a physical
# node (i.e. has no OpFlex agent running) and try binding
# hierarchically if the network-type is OpFlex.
# agent running on the host, this was not a baremetal VNIC, or the
# agent on the host is not configured for this physical network.
# Treat the host as a physical node (i.e. has no OpFlex agent running)
# and try binding hierarchically if the network-type is OpFlex.
self._bind_physical_node(context)
def _update_sg_rule_with_remote_group_set(self, context, port):
@@ -2769,12 +2775,61 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
VIF_TYPE_DVS, vif_details)
return True
def _is_baremetal_vnic_type(self, port):
return port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL
def _bind_baremetal_vnic(self, context):
"""Bind ports with VNIC type of baremtal.
:param context : Port context instance
Support binding baremetal VNIC types to networks that have
an opflex type static segment, or networks with vlan segments.
For vlan type segments, these can be static segments or dynamically
created segments from HPB. The topology information for the port
should contain the physical_network that the baremetal VNIC is
connected to. This is used to match the segment when binding
the port to vlan type segments, and as the physical_network
when creating dynamic vlan type segments for HPB.
"""
if not self._is_baremetal_port_bindable(context.current):
return False
_, _, physnet, _ = self._get_baremetal_topology(context.current)
# First attempt binding vlan type segments, in order to avoid
# dynamically allocating vlan segments if they're not needed.
for seg in context.segments_to_bind:
net_type = seg[api.NETWORK_TYPE]
physical_network = seg[api.PHYSICAL_NETWORK]
if (physical_network == physnet and
self._is_supported_non_opflex_type(net_type)):
# VLAN segments already have a segmentation ID, so we can
# complete the binding.
context.set_binding(seg[api.ID],
portbindings.VIF_TYPE_OTHER, {},
status=n_constants.PORT_STATUS_ACTIVE)
return True
# Baremetal vnics can only be connected to opflex networks
# using VLANs. We use Hierarchical Port Binding (HPB) for
# networks that have an opflex type static segment in order
# to allocate a VLAN for use with the static path that enables
# connectivity for the baremetal vnic.
for seg in context.segments_to_bind:
net_type = seg[api.NETWORK_TYPE]
physical_network = seg[api.PHYSICAL_NETWORK]
if self._is_opflex_type(net_type):
seg_args = {api.NETWORK_TYPE: pconst.TYPE_VLAN,
api.PHYSICAL_NETWORK: physnet}
dyn_seg = context.allocate_dynamic_segment(seg_args)
LOG.info('Allocated dynamic-segment %(s)s for port %(p)s',
{'s': dyn_seg, 'p': context.current['id']})
context.continue_binding(seg[api.ID], [dyn_seg])
return True
return False
def _bind_physical_node(self, context):
# Bind physical nodes hierarchically by creating a dynamic segment.
for segment in context.segments_to_bind:
net_type = segment[api.NETWORK_TYPE]
# TODO(amitbose) For ports on baremetal (Ironic) hosts, use
# binding:profile to decide if dynamic segment should be created.
if self._is_opflex_type(net_type):
# TODO(amitbose) Consider providing configuration options
# for picking network-type and physical-network name
@@ -4088,6 +4143,11 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
nodes = []
node_paths = []
match = self.port_desc_re.match(path)
# The L3 Out model requires that all the nodes supporting an
# L3 Out be configured under the L3 Out's node profile. In
# the case where the static path is a VPC, both nodes used in
# the VPC must be added, so their IDs must be extracted from
# the static path.
if match:
pod_id, switch, module, port = match.group(1, 2, 3, 4)
nodes.append(switch)
@@ -4262,29 +4322,179 @@
epg, epg.static_paths)
self.aim.update(aim_ctx, epg, static_paths=epg.static_paths)
def _get_static_ports(self, plugin_context, host, segment):
"""Get StaticPort objects for a host and segment.
def _get_static_ports(self, plugin_context, host, segment, port=None):
"""Get StaticPort objects for ACI.
:param plugin_context : plugin context
:param host : host ID for the static port
:param segment : bound segment of this host
:param port : port instance
:returns: List of zero or more static port objects
This method should be called when a neutron port requires
static port configuration state for an interface profile in
an L3 Out policy or for an Endpoint Group. This is retrieved
from the HostLink entries in the AIM database. The information is
only available on bound ports, as the encapsulation information
must also be available. The method should only be called by code
that has an active DB transaction, as it makes use of the session
from the plugin_context.
an L3 Out policy or for an Endpoint Group. There are two
sources of this information:
o from binding:profile when the port has a vnic_type
of "baremetal"
o from the HostLink entries in the AIM database
The information is only available on bound ports, as the
encapsulation information must also be available. The method
should only be called by code that has an active DB transaction,
as it makes use of the session from the plugin_context.
"""
encap = self._segment_to_vlan_encap(segment)
if not encap:
return []
# Return qualifying entries from the host links table in AIM.
session = plugin_context.session
aim_ctx = aim_context.AimContext(db_session=session)
host_links = self.aim.find(aim_ctx,
aim_infra.HostLink, host_name=host)
return [StaticPort(host_link, encap, 'regular') for host_link in
self._filter_host_links_by_segment(session,
segment, host_links)]
if port and self._is_baremetal_vnic_type(port) and (
self._is_baremetal_port_bindable(port)):
# The local_link_information should be populated, and
# will have the static path.
sp, ifs, pn, pd = self._get_baremetal_topology(port)
hlink = aim_infra.HostLink(host_name='',
interface_name='', path=sp)
return [StaticPort(hlink, encap, 'untagged')]
else:
# If it's not baremetal, return qualifying entries from the
# host links table in AIM, filtered by segment.
session = plugin_context.session
aim_ctx = aim_context.AimContext(db_session=session)
host_links = self.aim.find(aim_ctx,
aim_infra.HostLink, host_name=host)
return [StaticPort(host_link, encap, 'regular') for host_link in
self._filter_host_links_by_segment(session,
segment, host_links)]
def _is_baremetal_port_bindable(self, port):
"""Verify that a port is a valid baremetal port.
:param port : Port instance
:returns: True if bindable baremetal vnic, False otherwise
The binding is valid for a baremetal port which has valid topology
information in the local_link_information list contained inside the
binding:profile.
"""
if self._is_baremetal_vnic_type(port):
if any(self._get_baremetal_topology(port)):
return True
return False
def _get_baremetal_topology(self, port):
"""Return topology information for a port of vnic_type baremetal.
:param port : Port instance
:returns: Tuple of topology information
Get the topology information relevant to a port that has a
vnic_type of baremetal. Topology is stored in the binding:profile
member of the port object, using the local_link_information list.
If there is more than one entry in the local_link_information list,
then the port corresponds to a VPC. In this case, properties that
should be the same for both entries are checked (e.g. static_path).
Some properties, such as port_id and switch_id, are allowed to be
different (and in the case of VPC should be different).
Currently returns a tuple with the format:
(static_path, interfaces, physical_network, physical_domain)
where:
static_path: DN to use for static path
interfaces: list of tuples, where each tuple has a string
for the leaf interface name and a string for the
leaf interface's MAC address
physical_network: physical network that the interfaces belong to
physical_domain: physical domain that the interfaces belong to
If the topology information is invalid, a tuple of None values
is returned instead.
"""
# REVISIT: Add checks for trunk parent and sub-ports.
interfaces = []
static_path = None
physical_domain = None
physical_network = None
lli_list = port.get(portbindings.PROFILE, {}).get(LL_INFO, [])
for lli_idx in range(len(lli_list)):
# Two entries indicate a VPC; one entry is a single link.
# Anything else is invalid.
if lli_idx > 1:
LOG.error("Invalid topology: port %(port)s has more than "
"two elements in the binding:profile's "
"local_link_information array.",
{'port': port['id']})
return (None, None, None, None)
lli = lli_list[lli_idx]
mac = lli.get('switch_id')
interface = lli.get('port_id')
switch_info = lli.get('switch_info', '')
# switch_info must be a string of comma-separated
# key-value pairs in order to be valid.
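# An example of a valid (hypothetical) switch_info string:
#   'apic_dn:topology/pod-1/paths-501/pathep-[eth1/1],'
#   'physical_network:physnet2,physical_domain:ph1'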
info_dict = {}
for kv_pair in switch_info.split(","):
if ":" in kv_pair:
key, value = kv_pair.split(':', 1)
info_dict[key] = value
if info_dict.get('apic_dn'):
dn = info_dict['apic_dn']
# If it's a VPC, the static paths should match.
if static_path and dn != static_path:
LOG.error("Invalid topology: port %(port)s has "
"inconsistently configured switch_info inside "
"the binding:profile's link_local_information "
"elements [apic_dn's: %(dn1)s:%(dn2)s]. The "
"switch_info field must be identical for all "
"ports used within a portgroup for a baremetal "
"VNIC.", {'port': port['id'],
'dn1': static_path, 'dn2': dn})
return (None, None, None, None)
static_path = dn
if mac or interface:
interfaces.append((interface, mac))
if info_dict.get(api.PHYSICAL_NETWORK):
physnet = info_dict[api.PHYSICAL_NETWORK]
# If it's a VPC, physical_networks should match.
if physical_network and physnet != physical_network:
LOG.error("Invalid topology: port %(port)s has "
"inconsistently configured switch_info inside "
"the binding:profile's link_local_information "
"elements [physical_network: %(pn1)s:%(pn2)s]. "
"The switch_info field must be identical for "
"all ports used within a portgroup for a "
"baremetal VNIC.",
{'port': port['id'], 'pn1': physical_network,
'pn2': physnet})
return (None, None, None, None)
physical_network = physnet
if info_dict.get('physical_domain'):
pd = info_dict['physical_domain']
# If it's a VPC, physical_domains should match.
if physical_domain and pd != physical_domain:
LOG.error("Invalid topology: port %(port)s has "
"inconsistently configured switch_info inside "
"the binding:profile's link_local_information "
"elements [physical_domain: %(pd1)s:%(pd2)s]. "
"The switch_info field must be identical for "
"all ports used within a portgroup for a "
"baremetal VNIC.",
{'port': port['id'], 'pd1': physical_domain,
'pd2': pd})
return (None, None, None, None)
physical_domain = pd
# We at least need the static path and physical_network
if not static_path or not physical_network:
LOG.warning("Invalid topology: port %(port)s does not contain "
"required topology information in the "
"binding:profile's local_link_information array.",
{'port': port['id']})
return (None, None, None, None)
return (static_path, interfaces, physical_network, physical_domain)
def _update_static_path(self, port_context, host=None, segment=None,
remove=False):
@@ -4314,7 +4524,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
return
static_ports = self._get_static_ports(port_context._plugin_context,
host, segment)
host, segment,
port=port_context.current)
for static_port in static_ports:
if self._is_svi(port_context.network.current):
l3out, _, _ = self._get_aim_external_objects(
@@ -4429,6 +4640,17 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
vmms = aim_epg.vmm_domains
self.aim.update(aim_ctx, epg, vmm_domains=vmms)
else:
# For baremetal VNIC types, there may be additional topology
# information in the binding:profile from Ironic. This may
# include the PhysDom name in ACI, which can be used to
# disambiguate interfaces on the same host through matching
# by PhysDom.
if self._is_baremetal_vnic_type(port):
_, _, _, physdom = self._get_baremetal_topology(port)
if physdom:
aim_hd_mappings = [aim_infra.HostDomainMappingV2(
domain_type='PhysDom', domain_name=physdom,
host_name=host_id)]
# Get all the Physical domains. We either get domains
# from a lookup of the HostDomainMappingV2
# table, or we get all the applicable Physical
@@ -4475,6 +4697,17 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
aim_hd_mappings = self.aim.find(aim_ctx,
aim_infra.HostDomainMappingV2,
host_name=host_id)
# For baremetal VNIC types, there may be additional topology
# information in the binding:profile from Ironic. This may
# include the PhysDom name in ACI, which can be used to
# disambiguate interfaces on the same host through matching
# by PhysDom.
if self._is_baremetal_vnic_type(port):
_, _, _, physdom = self._get_baremetal_topology(port)
if physdom:
aim_hd_mappings = [aim_infra.HostDomainMappingV2(
domain_type='PhysDom', domain_name=physdom,
host_name=host_id)]
if not aim_hd_mappings:
return

@@ -18,6 +18,7 @@ import datetime
import fixtures
import mock
import netaddr
import re
import six
from sqlalchemy.orm import exc as sql_exc
import testtools
@@ -409,10 +410,18 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
def _check_ip_in_cidr(self, ip_addr, cidr):
self.assertTrue(netaddr.IPAddress(ip_addr) in netaddr.IPNetwork(cidr))
def _bind_port_to_host(self, port_id, host):
def _make_baremetal_port(self, project_id, net_id):
data = {'port': {'network_id': net_id,
portbindings.VNIC_TYPE: 'baremetal',
'project_id': project_id}}
req = self.new_create_request('ports', data, self.fmt)
return self.deserialize(self.fmt, req.get_response(self.api))
def _bind_port_to_host(self, port_id, host, **kwargs):
data = {'port': {'binding:host_id': host,
'device_owner': 'compute:',
'device_id': 'someid'}}
data['port'].update(kwargs)
req = self.new_update_request('ports', data, port_id,
self.fmt)
return self.deserialize(self.fmt, req.get_response(self.api))
@@ -4846,6 +4855,127 @@ class TestPortBinding(ApicAimTestCase):
('vhu' + port_id)[:14])},
port['binding:vif_details'])
def test_bind_baremetal_opflex(self):
self._test_bind_baremetal()
def test_bind_baremetal_vlan(self):
self._test_bind_baremetal(network_type=u'vlan', physnet=u'physnet2')
def test_bind_baremetal_vlan_svi(self):
self._test_bind_baremetal(network_type=u'vlan',
is_svi=True, physnet=u'physnet2')
def _test_bind_baremetal(self, network_type=u'opflex', is_svi=False,
physnet=u'physnet1'):
# Do positive and negative port binding testing, using the
# different information in the binding profile.
def validate_binding(port):
self.assertEqual('baremetal', port[portbindings.VNIC_TYPE])
self.assertEqual(kwargs[portbindings.PROFILE],
port[portbindings.PROFILE])
self.assertEqual({}, port[portbindings.VIF_DETAILS])
self.assertEqual('other', port[portbindings.VIF_TYPE])
kwargs = {'provider:network_type': network_type,
'provider:physical_network': physnet}
if is_svi:
kwargs.update({'apic:svi': 'True'})
arg_list = self.extension_attributes + ('provider:physical_network',)
net = self._make_network(self.fmt, 'net1', True,
arg_list=arg_list, **kwargs)
self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')
port = self._make_baremetal_port(net['network']['tenant_id'],
net['network']['id'])['port']
port_id = port['id']
# Negative test case: No binding:profile.
kwargs = {}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
self.assertEqual(port[portbindings.VIF_TYPE],
portbindings.VIF_TYPE_BINDING_FAILED)
# Negative test case: Empty binding:profile.
kwargs = {portbindings.PROFILE: {}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
self.assertEqual(port[portbindings.VIF_TYPE],
portbindings.VIF_TYPE_BINDING_FAILED)
# Negative test case: Empty local_link_information.
kwargs = {portbindings.PROFILE: {
'local_link_information': []}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
self.assertEqual(port[portbindings.VIF_TYPE],
portbindings.VIF_TYPE_BINDING_FAILED)
# Negative test case: APIC DN but no physnet.
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info":
"apic_dn:topology/pod-1/paths-501/pathep-[eth1/1]"}]}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
self.assertEqual(port[portbindings.VIF_TYPE],
portbindings.VIF_TYPE_BINDING_FAILED)
# Negative test case: APIC DN with port and switch IDs, no physnet.
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info":
"apic_dn:topology/pod-1/paths-501/pathep-[eth1/1],",
"port_id": "Eth1/1", "switch_id": "00:c0:4a:21:23:24"}
]}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
self.assertEqual(port[portbindings.VIF_TYPE],
portbindings.VIF_TYPE_BINDING_FAILED)
# Negative test case: all info, but not key/value pairs
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info":
"topology/pod-1/paths-501/pathep-[eth1/1],"
"physnet1,pdom_physnet1",
"port_id": "Eth1/1", "switch_id": "00:c0:4a:21:23:24"}
]}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
self.assertEqual(port[portbindings.VIF_TYPE],
portbindings.VIF_TYPE_BINDING_FAILED)
# Positive test case: missing physdom
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info":
"apic_dn:topology/pod-1/paths-501/pathep-[eth1/1],"
"physical_network:physnet2",
"port_id": "Eth1/1", "switch_id": "00:c0:4a:21:23:24"}
]}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
validate_binding(port)
# Positive test case: interface information absent
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info":
"apic_dn:topology/pod-1/paths-501/pathep-[eth1/1],"
"physical_network:physnet2,physdom:pdom_physnet1"}
]}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
validate_binding(port)
# Positive test case all parameters
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info":
"apic_dn:topology/pod-1/paths-501/pathep-[eth1/1],"
"physical_network:physnet2,physdom:pdom_physnet1",
"port_id": "Eth1/1", "switch_id": "00:c0:4a:21:23:24"}
]}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
validate_binding(port)
# opflex: Positive test case: all info, but wrong physnet
# others: Negative test case: all info, but wrong physnet
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info":
"apic_dn:topology/pod-1/paths-501/pathep-[eth1/1],"
"physical_network:physnet3",
"port_id": "Eth1/1", "switch_id": "00:c0:4a:21:23:24"}
]}}
port = self._bind_port_to_host(port_id, 'host1', **kwargs)['port']
if network_type == u'opflex':
validate_binding(port)
else:
self.assertEqual(port[portbindings.VIF_TYPE],
portbindings.VIF_TYPE_BINDING_FAILED)
# TODO(rkukura): Add tests for opflex, local and unsupported
# network_type values.
@@ -7066,7 +7196,8 @@ class TestPortVlanNetwork(ApicAimTestCase):
name=self.name_mapper.network(None, network['id']))
return epg
def _check_binding(self, port_id, expected_binding_info=None):
def _check_binding(self, port_id, expected_binding_info=None,
top_bound_physnet=None, bottom_bound_physnet=None):
port_context = self.plugin.get_bound_port_context(
n_context.get_admin_context(), port_id)
self.assertIsNotNone(port_context)
@@ -7075,8 +7206,17 @@
for bl in port_context.binding_levels]
self.assertEqual(expected_binding_info or self.expected_binding_info,
binding_info)
self.assertEqual(port_context.top_bound_segment['physical_network'],
port_context.bottom_bound_segment['physical_network'])
if top_bound_physnet:
self.assertEqual(top_bound_physnet, port_context.
top_bound_segment['physical_network'])
if bottom_bound_physnet:
self.assertEqual(bottom_bound_physnet, port_context.
bottom_bound_segment['physical_network'])
if not (top_bound_physnet and bottom_bound_physnet):
self.assertEqual(port_context.
top_bound_segment['physical_network'],
port_context.
bottom_bound_segment['physical_network'])
return port_context.bottom_bound_segment['segmentation_id']
def _check_no_dynamic_segment(self, network_id):
@ -8672,6 +8812,77 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork):
set(self._doms(epg1.physical_domains,
with_type=False)))
def test_baremetal_vnic_2_nics_single_host(self):
# Verify that the domain association and static
# path information is correctly used in the case
# where a single host has two NICs, which are
# handled as two separate bindings.
expected_binding_info = [('apic_aim', 'opflex'),
('apic_aim', 'vlan')]
aim_ctx = aim_context.AimContext(self.db_session)
self._register_agent('opflex-1', AGENT_CONF_OPFLEX)
net1 = self._make_network(
self.fmt, 'net1', True,
arg_list=('provider:physical_network', 'provider:network_type'),
**{'provider:physical_network': 'physnet3',
'provider:network_type': 'opflex'})['network']
epg1 = self._net_2_epg(net1)
bm_path1 = 'topology/pod-1/paths-102/pathep-[eth1/1]'
with self.subnet(network={'network': net1}):
p1 = self._make_baremetal_port(net1['tenant_id'],
net1['id'])['port']
switch_info = ("apic_dn:" + bm_path1 +
',physical_network:physnet3' +
',physical_domain:ph1')
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info": switch_info,
"port_id": 'bm1',
"switch_id": "00:c0:4a:21:23:24"}
]}}
p1 = self._bind_port_to_host(p1['id'], 'h1', **kwargs)['port']
vlan_p1 = self._check_binding(p1['id'],
expected_binding_info=expected_binding_info)
static_path_1 = {'path': bm_path1, 'encap': 'vlan-%s' % vlan_p1,
'mode': 'untagged'}
epg1 = self.aim_mgr.get(aim_ctx, epg1)
self.assertEqual([static_path_1],
epg1.static_paths)
self.assertEqual(set([]),
set(self._doms(epg1.vmm_domains)))
self.assertEqual(set(['ph1']),
set(self._doms(epg1.physical_domains,
with_type=False)))
p2 = self._make_baremetal_port(net1['tenant_id'],
net1['id'])['port']
bm_path2 = 'topology/pod-1/paths-101/pathep-[eth1/2]'
switch_info = ("apic_dn:" + bm_path2 +
',physical_network:physnet2' +
',physical_domain:ph2')
kwargs = {portbindings.PROFILE: {
'local_link_information': [
{"switch_info": switch_info,
"port_id": 'bm2',
"switch_id": "00:c0:4a:21:23:24"}
]}}
p2 = self._bind_port_to_host(p2['id'], 'h1', **kwargs)['port']
vlan_p2 = self._check_binding(p2['id'],
expected_binding_info=expected_binding_info,
top_bound_physnet='physnet3',
bottom_bound_physnet='physnet2')
static_path_2 = {'path': bm_path2, 'encap': 'vlan-%s' % vlan_p2,
'mode': 'untagged'}
epg1 = self.aim_mgr.get(aim_ctx, epg1)
self.assertIn(static_path_1, epg1.static_paths)
self.assertIn(static_path_2, epg1.static_paths)
self.assertEqual(set([]),
set(self._doms(epg1.vmm_domains)))
self.assertEqual(set(['ph1', 'ph2']),
set(self._doms(epg1.physical_domains,
with_type=False)))
def test_no_host_domain_mappings(self):
aim_ctx = aim_context.AimContext(self.db_session)
self.aim_mgr.create(aim_ctx,
@@ -8959,6 +9170,228 @@ class TestPortOnPhysicalNodeSingleDriver(TestPortOnPhysicalNode):
self.expected_binding_info = [('apic_aim', 'opflex'),
('apic_aim', 'vlan')]
def test_bind_baremetal_with_default_domain_mapping(self,
net_type='opflex',
is_svi=False):
apic_dn_1 = 'topology/pod-1/paths-101/pathep-[eth1/34]'
apic_dn_2 = 'topology/pod-1/paths-102/pathep-[eth1/34]'
info = [{'local_link_information': [
{'port_id': 'Eth1/34',
'switch_id': '00:c0:4a:21:23:24',
'switch_info': 'apic_dn:' + apic_dn_1 +
',physical_network:physnet3'}]},
{'local_link_information': [
{'port_id': 'Eth1/34',
'switch_id': '00:c0:4a:21:23:25',
'switch_info': 'apic_dn:' + apic_dn_2 +
',physical_network:physnet3'}]}]
self._test_bind_baremetal_with_default_domain_mapping(info,
net_type=net_type, is_svi=is_svi)
def test_bind_baremetal_vlan_with_default_domain_mapping(self):
self.test_bind_baremetal_with_default_domain_mapping(net_type='vlan',
is_svi=False)
def test_bind_baremetal_svi_with_default_domain_mapping(self):
self.test_bind_baremetal_with_default_domain_mapping(net_type='vlan',
is_svi=True)
def test_bind_baremetal_vpc_with_default_domain_mapping(self,
net_type='opflex',
is_svi=False):
port_group_1 = '[po-101-1-34-and-102-1-34]'
apic_dn_1 = 'topology/pod-1/protpaths-101-102/pathep-%s' % port_group_1
port_group_2 = '[po-101-1-35-and-102-1-35]'
apic_dn_2 = 'topology/pod-1/protpaths-101-102/pathep-%s' % port_group_2
# VPCs require port groups in Ironic, which creates
# two entries in the local_link_information array
# (one for each Ironic port in the port group),
# as well as the local_group_information from
# the port group.
info = [{'local_link_information': [
{'port_id': 'Eth1/34',
'switch_id': '00:c0:4a:21:23:24',
'switch_info': 'apic_dn:' + apic_dn_1 +
',physical_network:physnet3'},
{'port_id': 'Eth1/34',
'switch_id': "00:c0:4a:21:23:25",
'switch_info': 'apic_dn:' + apic_dn_1 +
',physical_network:physnet3'}],
'local_group_information': {
'id': 'a4b43644-0cff-4194-9104-ca1aa5d4393e',
'name': 'pg-foo',
'bond_mode': '802.3ad',
'bond_properties': {
'foo': 'bar',
'bee': 'bop'}}},
{'local_link_information': [
{'port_id': 'Eth1/35',
'switch_id': '00:c0:4a:21:23:24',
'switch_info': 'apic_dn:' + apic_dn_2 +
',physical_network:physnet3'},
{'port_id': 'Eth1/35',
'switch_id': "00:c0:4a:21:23:25",
'switch_info': 'apic_dn:' + apic_dn_2 +
',physical_network:physnet3'}],
'local_group_information': {
'id': 'abb75c0e-a116-4e9f-80b1-3e0962d34601',
'name': 'pg-foo',
'bond_mode': '802.3ad',
'bond_properties': {
'foo': 'bar',
'bee': 'bop'}}}]
self._test_bind_baremetal_with_default_domain_mapping(info,
net_type=net_type, is_svi=is_svi)
def test_bind_baremetal_vlan_vpc_with_default_domain_mapping(self):
self.test_bind_baremetal_vpc_with_default_domain_mapping(
net_type='vlan', is_svi=False)
def test_bind_baremetal_svi_vpc_with_default_domain_mapping(self):
self.test_bind_baremetal_vpc_with_default_domain_mapping(
net_type='vlan', is_svi=True)
def _test_bind_baremetal_with_default_domain_mapping(self, info,
net_type='opflex',
is_svi=False):
# Verify that the EPG gets associated with the default physdom
# from the domain mapping table, and that the static path gets
# configured in the EPG. Also ensure that things get cleaned
# up once ports are unbound.
def parse_switch_info(binding_profile):
# In the case of VPCs, the switch info will be the same between
# both entries, so we can just use the first.
lli = binding_profile['local_link_information'][0]
switch_info = lli['switch_info']
kv_dict = {}
for k_v in switch_info.split(',', 1):
k, v = k_v.split(':', 1)
kv_dict[k] = v
# Extract the nodes from the path, so we can construct the node
# path needed to look them up in AIM.
node_paths = []
node_re = re.compile(md.ACI_PORT_DESCR_FORMATS)
vpc_node_re = re.compile(md.ACI_VPCPORT_DESCR_FORMAT)
match = node_re.match(kv_dict['apic_dn'])
vpc_match = vpc_node_re.match(kv_dict['apic_dn'])
if match:
pod_id, switch = match.group(1, 2)
node_paths.append(md.ACI_CHASSIS_DESCR_STRING % (pod_id,
switch))
elif vpc_match:
pod_id, switch1, switch2 = vpc_match.group(1, 2, 3)
for switch in (switch1, switch2):
node_paths.append(md.ACI_CHASSIS_DESCR_STRING % (pod_id,
switch))
kv_dict['node_paths'] = node_paths
return kv_dict
def validate_static_path_and_doms(aim_ctx, is_svi, net, kv_dict,
physical_domain, vlan, delete=False):
static_path = {'path': kv_dict['apic_dn'],
'encap': 'vlan-%s' % vlan, 'mode': 'untagged'}
if is_svi:
ext_net = aim_resource.ExternalNetwork.from_dn(
net[DN]['ExternalNetwork'])
for node_path in kv_dict['node_paths']:
l3out_node = aim_resource.L3OutNode(
tenant_name=ext_net.tenant_name,
l3out_name=ext_net.l3out_name,
node_profile_name=md.L3OUT_NODE_PROFILE_NAME,
node_path=node_path)
l3out_node = self.aim_mgr.get(aim_ctx, l3out_node)
self.assertIsNotNone(l3out_node)
l3out_if = aim_resource.L3OutInterface(
tenant_name=ext_net.tenant_name,
l3out_name=ext_net.l3out_name,
node_profile_name=md.L3OUT_NODE_PROFILE_NAME,
interface_profile_name=md.L3OUT_IF_PROFILE_NAME,
interface_path=static_path['path'])
l3out_if = self.aim_mgr.get(aim_ctx, l3out_if)
if delete:
self.assertIsNone(l3out_if)
else:
self.assertEqual(l3out_if.encap, 'vlan-%s' % vlan)
self.assertEqual(l3out_if.secondary_addr_a_list,
[{'addr': '10.0.0.1/24'}])
else:
epg = self._net_2_epg(net)
epg = self.aim_mgr.get(aim_ctx, epg)
if delete:
doms = []
else:
doms = [physical_domain]
self.assertEqual(set(doms),
set(self._doms(epg.physical_domains,
with_type=False)))
self.assertEqual([static_path], epg.static_paths)
if net_type == 'vlan':
expected_binding_info = [('apic_aim', 'vlan')]
else:
expected_binding_info = None
physical_domain = 'phys1'
physical_network = 'physnet3'
aim_ctx = aim_context.AimContext(self.db_session)
hd_mapping = aim_infra.HostDomainMappingV2(host_name='*',
domain_name=physical_domain,
domain_type='PhysDom')
self.aim_mgr.create(aim_ctx, hd_mapping)
net_arg_list = ('provider:physical_network', 'provider:network_type')
net_kwargs = {'provider:physical_network': physical_network,
'provider:network_type': net_type}
if is_svi:
net_arg_list += (SVI,)
net_kwargs.update({SVI: 'True'})
net1 = self._make_network(
self.fmt, 'net1', True,
arg_list=net_arg_list, **net_kwargs)['network']
# Bind the port using a single interface or VPC on one physnet.
with self.subnet(network={'network': net1}):
p1 = self._make_baremetal_port(net1['tenant_id'],
net1['id'])['port']
kwargs = {portbindings.PROFILE: info[0]}
kv_dict_1 = parse_switch_info(info[0])
# bind p1 to host h1
p1 = self._bind_port_to_host(p1['id'], 'h1', **kwargs)['port']
vlan_p1 = self._check_binding(p1['id'],
expected_binding_info=expected_binding_info)
validate_static_path_and_doms(aim_ctx, is_svi, net1, kv_dict_1,
physical_domain, vlan_p1)
net2 = self._make_network(
self.fmt, 'net2', True,
arg_list=net_arg_list, **net_kwargs)['network']
# Bind the port using a single interface or VPC on the same physnet.
with self.subnet(network={'network': net2}):
p2 = self._make_baremetal_port(net2['tenant_id'],
net2['id'])['port']
kwargs = {portbindings.PROFILE: info[1]}
kv_dict_2 = parse_switch_info(info[1])
p2 = self._bind_port_to_host(p2['id'], 'h1', **kwargs)['port']
vlan_p2 = self._check_binding(p2['id'],
expected_binding_info=expected_binding_info)
self.assertNotEqual(vlan_p1, vlan_p2)
validate_static_path_and_doms(aim_ctx, is_svi, net2, kv_dict_2,
physical_domain, vlan_p2)
self._delete('ports', p2['id'])
self._check_no_dynamic_segment(net2['id'])
validate_static_path_and_doms(aim_ctx, is_svi, net1, kv_dict_1,
physical_domain, vlan_p1)
# REVISIT: It looks like dissociating PhysDoms when using
# the default mapping fails (likely not specific to baremetal VNICs)
#validate_static_path_and_doms(aim_ctx, is_svi, net2, kv_dict_2,
# physical_domain, vlan_p2, delete=True)
self._delete('ports', p1['id'])
self._check_no_dynamic_segment(net1['id'])
# REVISIT: It looks like dissociating PhysDoms when using
# the default mapping fails (likely not specific to baremetal VNICs)
#validate_static_path_and_doms(aim_ctx, is_svi, net1, kv_dict_1,
# physical_domain, vlan_p1, delete=True)
class TestOpflexRpc(ApicAimTestCase):
def setUp(self, *args, **kwargs):