Add support for yoga

Change-Id: I010a02cc9e4128c92f4bfed1b62844c57961df08
pulkitvajpayee07 2022-05-17 23:27:57 +05:30 committed by pulkit vajpayee
parent e9f13c36c9
commit 549f0f3688
10 changed files with 100 additions and 52 deletions


@@ -1,7 +1,7 @@
- project:
name: x/group-based-policy
templates:
- openstack-python3-xena-jobs
- openstack-python3-yoga-jobs
- publish-to-pypi
# REVISIT: In the jobs below, the required-projects clause is needed on
# the master branch to select the correct version of the requirements
@@ -15,22 +15,22 @@
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga
- openstack-tox-py36:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga
- openstack-tox-py37:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga
- openstack-tox-py38:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga
- legacy-group-based-policy-dsvm-functional:
voting: false
- legacy-group-based-policy-dsvm-aim:
@@ -43,19 +43,19 @@
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga
- openstack-tox-py36:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga
- openstack-tox-py37:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga
- openstack-tox-py38:
nodeset: ubuntu-bionic
required-projects:
- name: openstack/requirements
override-checkout: stable/xena
override-checkout: stable/yoga


@@ -43,11 +43,11 @@ if [[ $ENABLE_NFP = True ]]; then
# Make sure that your public interface is not attached to any bridge.
PUBLIC_INTERFACE=
enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/xena
enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/xena
enable_plugin neutron https://opendev.org/openstack/neutron.git stable/xena
enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/xena
enable_plugin octavia https://opendev.org/openstack/octavia.git stable/xena
enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/yoga
enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/yoga
enable_plugin neutron https://opendev.org/openstack/neutron.git stable/yoga
enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/yoga
enable_plugin octavia https://opendev.org/openstack/octavia.git stable/yoga
fi
fi


@@ -310,6 +310,11 @@ class Servicechain(extensions.ExtensionDescriptor):
return ServiceChainPluginBase
def update_attributes_map(self, attributes):
# REVISIT: temporary solution until the services
# are removed fully.
if 'service_profiles' in attributes:
attributes['service_profiles'].pop('parent')
attributes['service_profiles'].pop('parameters')
super(Servicechain, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
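Note on the hunk above: it prunes the 'parent' and 'parameters' keys from the service_profiles attribute map before delegating to the base class. A bare dict.pop() raises KeyError if a key is already absent; a more defensive variant (purely illustrative, not what this commit ships) would pop with a default:

    # Illustrative sketch: tolerate keys that are already missing.
    attributes = {'service_profiles': {'parent': {}, 'parameters': {}, 'name': {}}}

    profile_attrs = attributes.get('service_profiles', {})
    for key in ('parent', 'parameters'):
        profile_attrs.pop(key, None)

    assert 'parent' not in profile_attrs
    assert 'parameters' not in profile_attrs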


@@ -264,7 +264,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self.enable_iptables_firewall = (cfg.CONF.ml2_apic_aim.
enable_iptables_firewall)
self.vif_details = {portbindings.VIF_DETAILS_CONNECTIVITY:
portbindings.CONNECTIVITY_L2}
self.connectivity}
self.vif_details.update(self._update_binding_sg())
self.l3_domain_dn = cfg.CONF.ml2_apic_aim.l3_domain_dn
self.apic_nova_vm_name_cache_update_interval = (cfg.CONF.ml2_apic_aim.
@@ -288,6 +288,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self.apic_router_id_subnet = netaddr.IPSet([self.apic_router_id_pool])
self.qos_driver = qos_driver.register(self)
@property
def connectivity(self):
return portbindings.CONNECTIVITY_L2
def start_rpc_listeners(self):
LOG.info("APIC AIM MD starting RPC listeners")
return self._start_rpc_listeners()
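For context, the two hunks above stop hard-coding portbindings.CONNECTIVITY_L2 inside vif_details and instead expose it through a connectivity property, the pattern newer ML2 mechanism drivers follow. A minimal sketch of that pattern, assuming only the neutron-lib portbindings constants the hunk itself references; the class name is illustrative, not part of this repository:

    from neutron_lib.api.definitions import portbindings


    class ExampleMechanismDriver(object):
        """Illustrative driver exposing its connectivity level as a property."""

        def __init__(self):
            # vif_details derives its connectivity value from the property,
            # so subclasses can override connectivity without touching __init__.
            self.vif_details = {
                portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity}

        @property
        def connectivity(self):
            return portbindings.CONNECTIVITY_L2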
@@ -3182,8 +3186,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self._update_sg_rule_with_remote_group_set(context, port)
self._check_allowed_address_pairs(context, port)
self._insert_provisioning_block(context)
registry.notify(aim_cst.GBP_PORT, events.PRECOMMIT_UPDATE,
self, driver_context=context)
registry.publish(aim_cst.GBP_PORT, events.PRECOMMIT_UPDATE,
self,
payload=events.DBEventPayload(context))
# No need to handle router gateway port update, as we don't
# care about its fixed_ips.
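This hunk and the ones that follow in this file replace the deprecated registry.notify() with registry.publish(), which carries a single events.DBEventPayload object instead of free-form keyword arguments. A minimal end-to-end sketch of that pattern, assuming neutron-lib is available; the resource name and handler below are made up for illustration:

    from neutron_lib.callbacks import events
    from neutron_lib.callbacks import registry

    EXAMPLE_RESOURCE = 'example_resource'  # illustrative resource name


    def _on_update(resource, event, trigger, payload):
        # With publish(), receivers get one payload object; the driver
        # context travels in payload.context rather than in a kwarg.
        print('update seen for', resource, 'with context', payload.context)


    registry.subscribe(_on_update, EXAMPLE_RESOURCE, events.PRECOMMIT_UPDATE)

    # Publisher side: wrap the context in a DBEventPayload.
    registry.publish(EXAMPLE_RESOURCE, events.PRECOMMIT_UPDATE, None,
                     payload=events.DBEventPayload('fake-context'))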
@@ -3566,10 +3571,13 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
aim_ctx = aim_context.AimContext(db_session=session)
hlinks = self.aim.find(aim_ctx, aim_infra.HostLink, host_name=host)
nets_segs = self._get_non_opflex_segments_on_host(context, host)
registry.notify(aim_cst.GBP_NETWORK_LINK,
events.PRECOMMIT_UPDATE, self, context=context,
networks_map=nets_segs, host_links=hlinks,
host=host)
registry.publish(aim_cst.GBP_NETWORK_LINK, events.PRECOMMIT_UPDATE,
self,
payload=events.DBEventPayload(
context,
metadata={
'networks_map': nets_segs,
'host_links': hlinks, 'host': host}))
for network, segment in nets_segs:
self._rebuild_host_path_for_network(
context, network, segment, host, hlinks)
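Extra keyword arguments that notify() used to carry (networks_map, host_links, host) now travel in the payload's metadata dict, as the hunk above shows. A small sketch of packing and unpacking that metadata, assuming neutron-lib; the values are dummies:

    from neutron_lib.callbacks import events

    # Publisher side: pack former notify() kwargs into metadata.
    payload = events.DBEventPayload(
        context=None,
        metadata={'networks_map': [], 'host_links': [], 'host': 'host1'})

    # Receiver side: unpack them from payload.metadata.
    networks_map = payload.metadata['networks_map']
    host_links = payload.metadata['host_links']
    host = payload.metadata['host']
    assert host == 'host1'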
@@ -6402,22 +6410,32 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
vrf):
with db_api.CONTEXT_WRITER.using(context) as session:
self._add_network_mapping(session, network_id, bd, epg, vrf)
registry.notify(aim_cst.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self, context=context, network_id=network_id)
registry.publish(aim_cst.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self,
payload=events.DBEventPayload(
context,
metadata={
'network_id': network_id}))
def _set_network_epg_and_notify(self, context, mapping, epg):
with db_api.CONTEXT_WRITER.using(context):
self._set_network_epg(mapping, epg)
registry.notify(aim_cst.GBP_NETWORK_EPG, events.PRECOMMIT_UPDATE,
self, context=context,
network_id=mapping.network_id)
registry.publish(aim_cst.GBP_NETWORK_EPG, events.PRECOMMIT_UPDATE,
self,
payload=events.DBEventPayload(
context,
metadata={
'network_id': mapping.network_id}))
def _set_network_vrf_and_notify(self, context, mapping, vrf):
with db_api.CONTEXT_WRITER.using(context):
self._set_network_vrf(mapping, vrf)
registry.notify(aim_cst.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self, context=context,
network_id=mapping.network_id)
registry.publish(aim_cst.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self,
payload=events.DBEventPayload(
context,
metadata={
'network_id': mapping.network_id}))
def validate_aim_mapping(self, mgr):
mgr.register_aim_resource_class(aim_infra.HostDomainMappingV2)


@@ -66,17 +66,26 @@ class FlowclassifierAIMDriver(FlowclassifierAIMDriverBase):
def create_flow_classifier_precommit(self, context):
self._validate_flow_classifier(context)
registry.notify(constants.GBP_FLOW_CLASSIFIER, events.PRECOMMIT_CREATE,
self, driver_context=context)
registry.publish(constants.GBP_FLOW_CLASSIFIER,
events.PRECOMMIT_CREATE,
self,
payload=events.DBEventPayload(
context))
def update_flow_classifier_precommit(self, context):
self._validate_flow_classifier(context)
registry.notify(constants.GBP_FLOW_CLASSIFIER, events.PRECOMMIT_UPDATE,
self, driver_context=context)
registry.publish(constants.GBP_FLOW_CLASSIFIER,
events.PRECOMMIT_UPDATE,
self,
payload=events.DBEventPayload(
context))
def delete_flow_classifier_precommit(self, context):
registry.notify(constants.GBP_FLOW_CLASSIFIER, events.PRECOMMIT_DELETE,
self, driver_context=context)
registry.publish(constants.GBP_FLOW_CLASSIFIER,
events.PRECOMMIT_DELETE,
self,
payload=events.DBEventPayload(
context))
def _validate_flow_classifier(self, context):
fc = context.current


@@ -863,8 +863,8 @@ class SfcAIMDriver(SfcAIMDriverBase):
# layer (can't delete a flowclassifier if in use).
@registry.receives(constants.GBP_FLOW_CLASSIFIER,
[events.PRECOMMIT_CREATE, events.PRECOMMIT_UPDATE])
def _handle_flow_classifier(self, rtype, event, trigger, driver_context,
**kwargs):
def _handle_flow_classifier(self, rtype, event, trigger, payload):
driver_context = payload.context
if event == events.PRECOMMIT_UPDATE:
if self._should_regenerate_flowc(driver_context):
for chain in self._get_chains_by_classifier_id(
@@ -876,8 +876,8 @@ class SfcAIMDriver(SfcAIMDriverBase):
self.update_port_chain_precommit(c_ctx, remap=True)
@registry.receives(constants.GBP_PORT, [events.PRECOMMIT_UPDATE])
def _handle_port_bound(self, rtype, event, trigger, driver_context,
**kwargs):
def _handle_port_bound(self, rtype, event, trigger, payload):
driver_context = payload.context
if event == events.PRECOMMIT_UPDATE:
context = driver_context
p_ctx = driver_context._plugin_context
@@ -913,8 +913,9 @@ class SfcAIMDriver(SfcAIMDriverBase):
@registry.receives(constants.GBP_NETWORK_EPG, [events.PRECOMMIT_UPDATE])
@registry.receives(constants.GBP_NETWORK_VRF, [events.PRECOMMIT_UPDATE])
def _handle_net_gbp_change(self, rtype, event, trigger, context,
network_id, **kwargs):
def _handle_net_gbp_change(self, rtype, event, trigger, payload):
context = payload.context
network_id = payload.metadata['network_id']
chains = {}
ppg_ids = self._get_group_ids_by_network_ids(context, [network_id])
flowc_ids = self.aim_flowc._get_classifiers_by_network_id(
@@ -933,8 +934,11 @@ class SfcAIMDriver(SfcAIMDriverBase):
self._validate_port_chain(context, chain, flowcs, ppgs)
@registry.receives(constants.GBP_NETWORK_LINK, [events.PRECOMMIT_UPDATE])
def _handle_net_link_change(self, rtype, event, trigger, context,
networks_map, host_links, host, **kwargs):
def _handle_net_link_change(self, rtype, event, trigger, payload):
context = payload.context
networks_map = payload.metadata['networks_map']
host_links = payload.metadata['host_links']
host = payload.metadata['host']
aim_ctx = aim_context.AimContext(db_session=context.session)
cdis = self.aim.find(aim_ctx, aim_sg.ConcreteDeviceInterface,
host=host)
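Because the receivers above now take a single payload argument, they can be driven directly in tests by constructing a DBEventPayload by hand. A hedged sketch with a dummy handler, assuming neutron-lib and not taken from the project's test suite:

    from neutron_lib.callbacks import events


    def _handle_net_change(rtype, event, trigger, payload):
        # Mirrors the new receiver signature: everything arrives via payload.
        return payload.context, payload.metadata['network_id']


    payload = events.DBEventPayload('fake-context',
                                    metadata={'network_id': 'net-1'})
    ctx, net_id = _handle_net_change('gbp_network_vrf',
                                     events.PRECOMMIT_UPDATE, None, payload)
    assert (ctx, net_id) == ('fake-context', 'net-1')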


@@ -1975,12 +1975,14 @@ class TestAimMapping(ApicAimTestCase):
port = self._make_port(self.fmt, net_id, fixed_ips=fixed_ips)['port']
port = self._bind_port_to_host(port['id'], 'host1')['port']
port['dns_name'] = ''
port = self._show('ports', port['id'])['port']
port_calls = [mock.call(mock.ANY, port)]
fixed_ips = [{'subnet_id': subnet1_id, 'ip_address': '10.0.1.101'}]
port = self._make_port(self.fmt, net_id, fixed_ips=fixed_ips)['port']
port = self._bind_port_to_host(port['id'], 'host2')['port']
port['dns_name'] = ''
port = self._show('ports', port['id'])['port']
port_calls.append(mock.call(mock.ANY, port))
# The update to host_routes should trigger the port updates
@@ -1995,6 +1997,7 @@ class TestAimMapping(ApicAimTestCase):
data = {'subnet': {'dns_nameservers': ['9.8.7.6']}}
subnet = self._update('subnets', subnet1_id, data)['subnet']
self._check_subnet(subnet, net, [], [gw1_ip])
port = self._show('ports', port['id'])['port']
mock_notif.assert_has_calls(port_calls, any_order=True)
# Create subnet2.
@@ -2011,6 +2014,7 @@ class TestAimMapping(ApicAimTestCase):
fixed_ips=fixed_ips)['port']
port = self._bind_port_to_host(port['id'], 'host1')['port']
port['dns_name'] = ''
port = self._show('ports', port['id'])['port']
port_calls.append(mock.call(mock.ANY, port))
# Add subnet1 to router by subnet.
@@ -2262,6 +2266,7 @@ class TestAimMapping(ApicAimTestCase):
port = self._bind_port_to_host(port['id'], 'host1')['port']
port['dns_name'] = ""
port['project_id'] = port['tenant_id']
port = self._show('ports', port['id'])['port']
port_calls = [mock.call(mock.ANY, port)]
# Create subnet2.
@@ -2279,6 +2284,7 @@ class TestAimMapping(ApicAimTestCase):
port = self._bind_port_to_host(port['id'], 'host1')['port']
port['dns_name'] = ''
port['project_id'] = port['tenant_id']
port = self._show('ports', port['id'])['port']
port_calls.append(mock.call(mock.ANY, port))
# Add subnet1 to router by subnet.
@@ -3456,6 +3462,7 @@ class TestAimMapping(ApicAimTestCase):
# Add subnet 1 to router A, which should create tenant 1's
# default VRF.
add_interface(rA, net1, sn1, gw1A, t1)
p1 = self._show('ports', p1['id'])['port']
check_port_notify([p1])
check_net(net1, sn1, [rA], [(gw1A, rA)], [], t1)
check_net(net2, sn2, [], [], [gw2A, gw2B], t1)
@@ -3467,6 +3474,7 @@ class TestAimMapping(ApicAimTestCase):
# Add subnet 2 to router A.
add_interface(rA, net2, sn2, gw2A, t1)
p2 = self._show('ports', p2['id'])['port']
check_port_notify([p2])
check_net(net1, sn1, [rA], [(gw1A, rA)], [], t1)
check_net(net2, sn2, [rA], [(gw2A, rA)], [gw2B], t1)
@@ -3489,6 +3497,7 @@ class TestAimMapping(ApicAimTestCase):
# Add subnet 3 to router B.
add_interface(rB, net3, sn3, gw3B, t1)
p3 = self._show('ports', p3['id'])['port']
check_port_notify([p3])
check_net(net1, sn1, [rA], [(gw1A, rA)], [], t1)
check_net(net2, sn2, [rA, rB], [(gw2A, rA), (gw2B, rB)], [], t1)
@@ -6174,7 +6183,6 @@ class TestPortBinding(ApicAimTestCase):
subport = self._show('ports', subport_net1_port['id'])['port']
self.assertEqual(kwargs['binding:profile'],
subport['binding:profile'])
self.assertEqual({}, subport['binding:vif_details'])
self.assertEqual('other', subport['binding:vif_type'])
self.assertEqual('host1', subport['binding:host_id'])
# Verify the other port (not yet a subport) isn't bound.
@@ -6194,7 +6202,6 @@ class TestPortBinding(ApicAimTestCase):
subport = self._show('ports', subport_net2_port['id'])['port']
self.assertEqual(kwargs['binding:profile'],
subport['binding:profile'])
self.assertEqual({}, subport['binding:vif_details'])
self.assertEqual('other', subport['binding:vif_type'])
self.assertEqual('host1', subport['binding:host_id'])
epg = self._net_2_epg(sb_net2['network'])
@@ -6379,7 +6386,7 @@ class TestPortBinding(ApicAimTestCase):
# allocated for that physical_network. Even though the
# segmentation_ids overlap, this succeeds because they are
# from different physical_networks.
subports = [{'port_id': other_net1_port['id'],
subports = [{'port_id': subport_net2_port['id'],
'segmentation_id': 137,
'segmentation_type': 'vlan'}]
self._update_trunk(net1['network']['tenant_id'],
@@ -6421,6 +6428,7 @@ class TestPortBinding(ApicAimTestCase):
net = self._make_network(self.fmt, 'net1', True)['network']
resp = self._create_port(self.fmt, net['id'])
port = self.deserialize(self.fmt, resp)
port = self._show('ports', port['port']['id'])
port = self._bind_port_to_host(port['port']['id'], 'host1')['port']
self.assertEqual(port['binding:vif_type'], 'ovs')
@@ -8544,6 +8552,7 @@ class TestExternalConnectivityBase(object):
with self.port(subnet=sub) as port:
port = self._bind_port_to_host(port['port']['id'], 'host1')
port['port']['dns_name'] = ''
port = self._show('ports', port['port']['id'])
p.append(port['port'])
mock_notif = mock.Mock(side_effect=self.port_notif_verifier())
@@ -8623,6 +8632,7 @@ class TestExternalConnectivityBase(object):
with self.port(subnet={'subnet': sub}) as p:
p = self._bind_port_to_host(p['port']['id'], 'host1')['port']
p['dns_name'] = ''
p = self._show('ports', p['id'])['port']
port_calls.append(mock.call(mock.ANY, p))
router = self._make_router(
@@ -8669,6 +8679,7 @@ class TestExternalConnectivityBase(object):
p = self._bind_port_to_host(p['port']['id'], 'host1')['port']
p['dns_name'] = ''
subnets.append(sub['subnet'])
p = self._show('ports', p['id'])['port']
port_calls.append(mock.call(mock.ANY, p))
# add router - expect notifications
@@ -9366,6 +9377,7 @@ class TestPortVlanNetwork(ApicAimTestCase):
'host': 'h1'}],
epg.static_paths)
self._validate()
p1 = self._show('ports', p1['port']['id'])
# The update to host_routes should trigger the port updates
port_calls = [mock.call(mock.ANY, p1['port'])]


@@ -89,7 +89,7 @@ class TestCiscoApicAimL3Plugin(test_aim_mapping_driver.AIMBaseTestCase):
'name': ROUTER}}
router = self.plugin.create_router(self.context, attr)
with mock.patch('neutron_lib.callbacks.registry.notify'):
with mock.patch('neutron_lib.callbacks.registry.publish'):
info = self.plugin.add_router_interface(self.context,
router['id'],
interface_info)
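The test above only needed its patch target renamed from registry.notify to registry.publish. For reference, stubbing publish in a unit test generally looks like the sketch below; the emitting function is a stand-in, not project code:

    from unittest import mock

    from neutron_lib.callbacks import events
    from neutron_lib.callbacks import registry


    def emit_example_event():
        # Stand-in for plugin code that publishes a callback event.
        registry.publish('example_resource', events.PRECOMMIT_UPDATE, None,
                         payload=events.DBEventPayload(None))


    with mock.patch('neutron_lib.callbacks.registry.publish') as mock_publish:
        emit_example_event()
        mock_publish.assert_called_once()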


@@ -4,19 +4,19 @@
hacking>=1.1.0,<1.2.0 # Apache-2.0
# Since version numbers for these are specified in
# https://releases.openstack.org/constraints/upper/xena, they cannot be
# https://releases.openstack.org/constraints/upper/yoga, they cannot be
# referenced as GIT URLs.
neutron
python-heatclient
python-keystoneclient
-e git+https://opendev.org/openstack/networking-sfc.git@stable/xena#egg=networking-sfc
-e git+https://opendev.org/openstack/networking-sfc.git@stable/yoga#egg=networking-sfc
-e git+https://github.com/noironetworks/apicapi.git@master#egg=apicapi
-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/xena#egg=neutron-opflex-agent
-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/yoga#egg=neutron-opflex-agent
-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/xena#egg=python-group-based-policy-client
-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/yoga#egg=python-group-based-policy-client
coverage!=4.4,>=4.0 # Apache-2.0
flake8-import-order==0.12 # LGPLv3


@@ -16,7 +16,7 @@ usedevelop = True
install_command =
pip install {opts} {packages}
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/xena}
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/yoga}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = sh