diff --git a/doc/source/admin/config-qos-min-bw.rst b/doc/source/admin/config-qos-min-bw.rst
index fe0507a75a7..be3682f59cf 100644
--- a/doc/source/admin/config-qos-min-bw.rst
+++ b/doc/source/admin/config-qos-min-bw.rst
@@ -40,9 +40,6 @@ Limitations
   technical reasons (in this case the port is created too late for Neutron
   to affect scheduling).

-* Bandwidth guarantees for ports can only be requested on networks
-  backed by a physical network (physnet).
-
 * In Stein there is no support for networks with multiple physnets.
   However some simpler multi-segment networks are still supported:

@@ -185,6 +182,13 @@ supported:
   by a ``direct-physical`` port.


+Since 2023.1 (Antelope), Open vSwitch and OVN mechanism drivers can specify
+the available bandwidth for tunnelled networks (SR-IOV does not support these
+network types yet). The key "rp_tunnelled" is used to model those networks
+that are not backed by a physical network. This bandwidth models the limits
+of the VTEP/TEP interface used to send the tunnelled traffic (VXLAN, Geneve).
+
+
 neutron-server config
 ~~~~~~~~~~~~~~~~~~~~~

@@ -260,9 +264,20 @@ Valid values are all the

    [ovs]
    bridge_mappings = physnet0:br-physnet0,...
-   resource_provider_bandwidths = br-physnet0:10000000:10000000,...
+   resource_provider_bandwidths = br-physnet0:10000000:10000000,rp_tunnelled:20000000:20000000,...
    #resource_provider_inventory_defaults = step_size:1000,...

+
+.. note::
+
+   ``rp_tunnelled`` is neither a bridge nor an interface present on the host.
+   The ML2/OVS agent reads the host-local ``resource_provider_bandwidths``
+   and, by default, assigns the ``rp_tunnelled`` resource provider to the
+   local host where the agent is running. In other words, there is no need
+   to populate ``resource_provider_hypervisors`` with the host assigned to
+   this specific resource provider.
+
+
 neutron-sriov-agent config
 ~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -296,9 +311,9 @@ SR-IOV and OVS agents. This is how the values are registered:

    $ root@dev20:~# ovs-vsctl list Open_vSwitch
    ...
    external_ids        : {hostname=dev20.fistro.com, \
-                          ovn-cms-options="resource_provider_bandwidths=br-ex:1001:2000;br-ex2:3000:4000, \
+                          ovn-cms-options="resource_provider_bandwidths=br-ex:1001:2000;br-ex2:3000:4000;rp_tunnelled:5000:6000, \
                           resource_provider_inventory_defaults=allocation_ratio:1.0;min_unit:10, \
-                          resource_provider_hypervisors=br-ex:dev20.fistro.com;br-ex2:dev20.fistro.com", \
+                          resource_provider_hypervisors=br-ex:dev20.fistro.com;br-ex2:dev20.fistro.com;rp_tunnelled:dev20.fistro.com", \
                           rundir="/var/run/openvswitch", \
                           system-id="029e7d3d-d2ab-4f2c-bc92-ec58c94a8fc1"}
    ...

@@ -354,7 +369,9 @@ queue periodically.

    $ openstack network agent show -f value -c configuration 5e57b85f-b017-419a-8745-9c406e149f9e
    {'bridge_mappings': {'physnet0': 'br-physnet0'},
     'resource_provider_bandwidths': {'br-physnet0': {'egress': 10000000,
-                                                     'ingress': 10000000}},
+                                                     'ingress': 10000000},
+                                     'rp_tunnelled': {'egress': 20000000,
+                                                      'ingress': 20000000}},
     'resource_provider_inventory_defaults': {'allocation_ratio': 1.0,
                                              'min_unit': 1,
                                              'reserved': 0,
@@ -578,6 +595,7 @@ Please find an example in section `Propagation of resource information`_.
 | 1c7e83f0-108d-5c35-ada7-7ebebbe43aad | devstack0:NIC Switch agent:ens5          | 2          | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 4a8a819d-61f9-5822-8c5c-3e9c7cb942d6 |
 | 89ca1421-5117-5348-acab-6d0e2054239c | devstack0:Open vSwitch agent             | 0          | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd |
 | f9c9ce07-679d-5d72-ac5f-31720811629a | devstack0:Open vSwitch agent:br-physnet0 | 2          | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 89ca1421-5117-5348-acab-6d0e2054239c |
+| 521f53a6-c8c0-583c-98da-7a47f39ff887 | devstack0:Open vSwitch agent:rp_tunnelled| 2          | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 89ca1421-5117-5348-acab-6d0e2054239c |
 +--------------------------------------+------------------------------------------+------------+--------------------------------------+--------------------------------------+

 * Does Placement have the expected traits?

@@ -587,6 +605,7 @@ Please find an example in section `Propagation of resource information`_.
    # as admin
    $ openstack --os-placement-api-version 1.17 trait list | awk '/CUSTOM_/ { print $2 }' | sort
+   CUSTOM_NETWORK_TUNNEL_PROVIDER
    CUSTOM_PHYSNET_PHYSNET0
    CUSTOM_VNIC_TYPE_DIRECT
    CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL
    CUSTOM_VNIC_TYPE_MACVTAP
diff --git a/neutron/agent/common/placement_report.py b/neutron/agent/common/placement_report.py
index 30d4b812af8..103a23b949b 100644
--- a/neutron/agent/common/placement_report.py
+++ b/neutron/agent/common/placement_report.py
@@ -15,8 +15,12 @@
 from neutron_lib import constants as nlib_const
 from neutron_lib.placement import utils as place_utils
 import os_resource_classes as orc
+from oslo_config import cfg
 from oslo_log import log as logging

+from neutron.common import _constants as n_const
+
+
 LOG = logging.getLogger(__name__)
@@ -99,6 +103,7 @@ class PlacementState(object):
         self._device_mappings = device_mappings
         self._supported_vnic_types = supported_vnic_types
         self._client = client
+        self._rp_tun_name = cfg.CONF.ml2.tunnelled_network_rp_name

     def _deferred_update_physnet_traits(self):
         traits = []
@@ -111,6 +116,10 @@
                     name=place_utils.physnet_trait(physnet)))
         return traits

+    def _deferred_update_tunnelled_traits(self):
+        return [DeferredCall(self._client.update_trait,
+                             name=n_const.TRAIT_NETWORK_TUNNEL)]
+
     def _deferred_update_vnic_type_traits(self):
         traits = []
         for vnic_type in self._supported_vnic_types:
@@ -123,6 +132,7 @@
     def deferred_update_traits(self):
         traits = []
         traits += self._deferred_update_physnet_traits()
+        traits += self._deferred_update_tunnelled_traits()
         traits += self._deferred_update_vnic_type_traits()
         return traits

@@ -196,7 +206,8 @@

     def deferred_update_resource_provider_traits(self):
         rp_traits = []
-
+        tunnelled_trait_mappings = {
+            self._rp_tun_name: n_const.TRAIT_NETWORK_TUNNEL}
         physnet_trait_mappings = {}
         for physnet, devices in self._device_mappings.items():
             for device in devices:
@@ -210,8 +221,8 @@
                     self._driver_uuid_namespace,
                     self._hypervisor_rps[device]['name'],
                     device)
-                traits = []
-                traits.append(physnet_trait_mappings[device])
+                traits = [physnet_trait_mappings.get(device) or
+                          tunnelled_trait_mappings[device]]
                 traits.extend(vnic_type_traits)
                 rp_traits.append(
                     DeferredCall(
diff --git a/neutron/agent/common/utils.py b/neutron/agent/common/utils.py
index f2cc9fb32f4..d2181d245e4 100644
--- a/neutron/agent/common/utils.py
+++ b/neutron/agent/common/utils.py
@@ -135,7 +135,8 @@ def get_hypervisor_hostname():

 # TODO(bence romsics): rehome this to neutron_lib.placement.utils
 def default_rp_hypervisors(hypervisors, device_mappings,
-                           default_hypervisor=None):
+                           default_hypervisor=None,
+                           tunnelled_network_rp_name=None):
     """Fill config option 'resource_provider_hypervisors' with defaults.

     :param hypervisors: Config option 'resource_provider_hypervisors'
@@ -145,14 +146,13 @@
         format.
     :param default_hypervisor: Default hypervisor hostname. If not set,
         it tries to default to fully qualified domain name (fqdn)
+    :param tunnelled_network_rp_name: the resource provider name for tunnelled
+        networks; if present, it will be added to the devices list.
     """
     _default_hypervisor = default_hypervisor or get_hypervisor_hostname()
-
-    rv = {}
-    for _physnet, devices in device_mappings.items():
-        for device in devices:
-            if device in hypervisors:
-                rv[device] = hypervisors[device]
-            else:
-                rv[device] = _default_hypervisor
-    return rv
+    # device_mappings = {'physnet1': ['br-phy1'], 'physnet2': ['br-phy2'], ...}
+    devices = {dev for devs in device_mappings.values() for dev in devs}
+    if tunnelled_network_rp_name:
+        devices.add(tunnelled_network_rp_name)
+    return {device: hypervisors.get(device) or _default_hypervisor
+            for device in devices}
diff --git a/neutron/common/_constants.py b/neutron/common/_constants.py
index 5b8c1a46be9..97aea68a12f 100644
--- a/neutron/common/_constants.py
+++ b/neutron/common/_constants.py
@@ -78,3 +78,8 @@ IDPOOL_SELECT_SIZE = 100
 AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP,
                            constants.DEVICE_OWNER_DISTRIBUTED,
                            constants.DEVICE_OWNER_AGENT_GW]
+
+# TODO(ralonsoh): move this constant to neutron_lib.placement.constants
+# Tunnelled networks resource provider default name.
+RP_TUNNELLED = 'rp_tunnelled'
+TRAIT_NETWORK_TUNNEL = 'CUSTOM_NETWORK_TUNNEL_PROVIDER'
diff --git a/neutron/common/utils.py b/neutron/common/utils.py
index 9bd6809fb96..dfb9fd922a0 100644
--- a/neutron/common/utils.py
+++ b/neutron/common/utils.py
@@ -883,7 +883,8 @@ def port_ip_changed(new_port, original_port):
     return False


-def validate_rp_bandwidth(rp_bandwidths, device_names):
+def validate_rp_bandwidth(rp_bandwidths, device_names,
+                          tunnelled_network_rp_name=None):
     """Validate resource provider bandwidths against device names.

     :param rp_bandwidths: Dict containing resource provider bandwidths,
@@ -892,10 +893,14 @@
     :param device_names: A set of the device names given in bridge_mappings
                          in case of ovs-agent or in physical_device_mappings
                          in case of sriov-agent
+    :param tunnelled_network_rp_name: the resource provider name for tunnelled
+                                      networks; if present, it will be added
+                                      to the devices list.
     :raises ValueError: In case of the devices (keys) in the rp_bandwidths
                         dict are not in the device_names set.
     """
-
+    if tunnelled_network_rp_name:
+        device_names.add(tunnelled_network_rp_name)
     for dev_name in rp_bandwidths:
         if dev_name not in device_names:
             raise ValueError(_(
diff --git a/neutron/conf/plugins/ml2/config.py b/neutron/conf/plugins/ml2/config.py
index b82083acf29..78a4feb58d2 100644
--- a/neutron/conf/plugins/ml2/config.py
+++ b/neutron/conf/plugins/ml2/config.py
@@ -16,6 +16,8 @@ from oslo_config import cfg

 from neutron._i18n import _

+from neutron.common import _constants as common_const
+
 ml2_opts = [
     cfg.ListOpt('type_drivers',
@@ -65,7 +67,16 @@ ml2_opts = [
     cfg.IntOpt('overlay_ip_version',
                default=4,
                help=_("IP version of all overlay (tunnel) network endpoints. "
" - "Use a value of 4 for IPv4 or 6 for IPv6.")) + "Use a value of 4 for IPv4 or 6 for IPv6.")), + cfg.StrOpt('tunnelled_network_rp_name', + default=common_const.RP_TUNNELLED, + help=_("Resource provider name for the host with tunnelled " + "networks. This resource provider represents the " + "available bandwidth for all tunnelled networks in a " + "compute node. NOTE: this parameter is used both by the " + "Neutron server and the mechanism driver agents; it is " + "recommended not to change it once any resource " + "provider register has been created.")), ] diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py index a8155d70ef0..b3645293cdb 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py @@ -432,6 +432,11 @@ class QosOVSAgentDriver(qos.QosLinuxAgentDriver, 'vif_port was not found. It seems that port is already ' 'deleted', port.get('port_id')) return + elif not port.get('physical_network'): + LOG.debug('update_minimum_bandwidth was received for port %s but ' + 'has no physical network associated', + port.get('port_id')) + return self.ports[port['port_id']][(qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH, rule.direction)] = port diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index 65ea3218337..4c14454cd83 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -62,6 +62,7 @@ from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config from neutron.common import utils as n_utils from neutron.conf.agent import common as agent_config +from neutron.conf.plugins.ml2 import config as ml2_config from neutron.conf import service as service_conf from neutron.plugins.ml2.drivers.agent import capabilities from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc @@ -233,8 +234,9 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self._validate_rp_pkt_processing_cfg() br_set = set(self.bridge_mappings.values()) - n_utils.validate_rp_bandwidth(self.rp_bandwidths, - br_set) + n_utils.validate_rp_bandwidth( + self.rp_bandwidths, br_set, + tunnelled_network_rp_name=self.conf.ml2.tunnelled_network_rp_name) self.rp_inventory_defaults = place_utils.parse_rp_inventory_defaults( ovs_conf.resource_provider_inventory_defaults) # At the moment the format of @@ -250,7 +252,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self.rp_hypervisors = utils.default_rp_hypervisors( ovs_conf.resource_provider_hypervisors, {k: [v] for k, v in self.bridge_mappings.items()}, - ovs_conf.resource_provider_default_hypervisor + default_hypervisor=ovs_conf.resource_provider_default_hypervisor, + tunnelled_network_rp_name=self.conf.ml2.tunnelled_network_rp_name, ) self.phys_brs = {} @@ -2930,6 +2933,7 @@ def main(bridge_classes): l2_agent_extensions_manager.register_opts(cfg.CONF) agent_config.setup_privsep() service_conf.register_service_opts(service_conf.RPC_EXTRA_OPTS, cfg.CONF) + ml2_config.register_ml2_plugin_opts(cfg=cfg.CONF) ext_mgr = l2_agent_extensions_manager.L2AgentExtensionsManager(cfg.CONF) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py 
index ddf48b4808b..4b835a45de5 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py
@@ -21,6 +21,7 @@ from neutron_lib.placement import utils as placement_utils
 from neutron_lib.plugins import constants as plugins_constants
 from neutron_lib.plugins import directory
 from neutron_lib.utils import helpers
+from oslo_config import cfg
 from oslo_log import log as logging
 from ovsdbapp.backend.ovs_idl import event as row_event

@@ -174,6 +175,7 @@
         self._plugin = None
         self.uuid_ns = ovn_const.OVN_RP_UUID
         self.supported_vnic_types = ovn_const.OVN_SUPPORTED_VNIC_TYPES
+        self._rp_tun_name = cfg.CONF.ml2.tunnelled_network_rp_name

     @property
     def placement_plugin(self):
@@ -247,6 +249,13 @@
         LOG.debug('Building placement options for chassis %s: %s',
                   chassis.name, cms_options)
         hypervisor_rps = {}
+
+        # ML2/OVN can also track tunnelled networks bandwidth. The key
+        # RP_TUNNELLED must be defined in "resource_provider_bandwidths" and
+        # "resource_provider_hypervisors". E.g.:
+        #   ovn-cms-options =
+        #     resource_provider_bandwidths=br-ex:100:200;rp_tunnelled:300:400
+        #     resource_provider_hypervisors=br-ex:host1;rp_tunnelled:host1
         for device, hyperv in cms_options[ovn_const.RP_HYPERVISORS].items():
             try:
                 hypervisor_rps[device] = {'name': hyperv,
@@ -254,7 +263,12 @@
             except (KeyError, AttributeError):
                 continue

-        bridges = set(itertools.chain(*bridge_mappings.values()))
+        rp_devices = set(itertools.chain(*bridge_mappings.values()))
+        # If "ml2.tunnelled_network_rp_name" is present in the configured
+        # resource providers, this ML2/OVN host will track the available
+        # bandwidth of tunnelled networks.
+        if self._rp_tun_name in hypervisor_rps:
+            rp_devices.add(self._rp_tun_name)
         # Remove "cms_options[RP_BANDWIDTHS]" not present in "hypervisor_rps"
         # and "bridge_mappings". If we don't have a way to match the RP bridge
         # with a host ("hypervisor_rps") or a way to match the RP bridge with
@@ -262,8 +276,8 @@
         rp_bw = cms_options[n_const.RP_BANDWIDTHS]
         if rp_bw:
             cms_options[n_const.RP_BANDWIDTHS] = {
-                device: bw for device, bw in rp_bw.items() if
-                device in hypervisor_rps and device in bridges}
+                rp_device: bw for rp_device, bw in rp_bw.items() if
+                rp_device in hypervisor_rps and rp_device in rp_devices}

         # NOTE(ralonsoh): OVN only reports min BW RPs; packet processing RPs
         # will be added in a future implementation. If no RP_BANDWIDTHS values
diff --git a/neutron/services/qos/drivers/openvswitch/driver.py b/neutron/services/qos/drivers/openvswitch/driver.py
index 18d2db835c4..b3214b9fdaa 100644
--- a/neutron/services/qos/drivers/openvswitch/driver.py
+++ b/neutron/services/qos/drivers/openvswitch/driver.py
@@ -20,8 +20,6 @@ from neutron_lib.services.qos import base
 from neutron_lib.services.qos import constants as qos_consts
 from oslo_log import log as logging

-from neutron.objects import network as network_object
-

 LOG = logging.getLogger(__name__)

@@ -76,17 +74,6 @@ class OVSDriver(base.DriverBase):
     def validate_rule_for_port(self, context, rule, port):
         return self.validate_rule_for_network(context, rule, port.network_id)

-    def validate_rule_for_network(self, context, rule, network_id):
-        # Minimum-bandwidth rule is only supported on networks whose
-        # first segment is backed by a physnet.
-        if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
-            net = network_object.Network.get_object(
-                context, id=network_id)
-            physnet = net.segments[0].physical_network
-            if physnet is None:
-                return False
-        return True
-

 def register():
     """Register the driver."""
diff --git a/neutron/services/qos/qos_plugin.py b/neutron/services/qos/qos_plugin.py
index 28331ee4f07..776dae6da23 100644
--- a/neutron/services/qos/qos_plugin.py
+++ b/neutron/services/qos/qos_plugin.py
@@ -49,6 +49,7 @@ from oslo_config import cfg
 from oslo_log import log as logging

 from neutron._i18n import _
+from neutron.common import _constants as n_const
 from neutron.db import db_base_plugin_common
 from neutron.exceptions import qos as neutron_qos_exc
 from neutron.extensions import qos
@@ -254,17 +255,22 @@
         # support will be available. See Placement spec:
         # https://review.opendev.org/565730
         first_segment = segments[0]
-        if not first_segment or not first_segment.physical_network:
+        if not first_segment:
             return []
-
-        physnet_trait = pl_utils.physnet_trait(
-            first_segment.physical_network)
+        elif not first_segment.physical_network:
+            # If there is no physical network, this is an overlay (tunnelled)
+            # network.
+            net_trait = n_const.TRAIT_NETWORK_TUNNEL
+        else:
+            net_trait = pl_utils.physnet_trait(first_segment.physical_network)
+
         # NOTE(ralonsoh): we should not rely on the current execution order of
         # the port extending functions. Although here we have
         # port_res[VNIC_TYPE], we should retrieve this value from the port DB
         # object instead.
         vnic_trait = pl_utils.vnic_type_trait(vnic_type)

-        return [physnet_trait, vnic_trait]
+        return [net_trait, vnic_trait]

     @staticmethod
     @resource_extend.extends([port_def.COLLECTION_NAME_BULK])
diff --git a/neutron/tests/fullstack/test_qos.py b/neutron/tests/fullstack/test_qos.py
index 0a09f605a46..52fc0fb4e30 100644
--- a/neutron/tests/fullstack/test_qos.py
+++ b/neutron/tests/fullstack/test_qos.py
@@ -825,30 +825,42 @@ class TestMinBwQoSOvs(_TestMinBwQoS, base.BaseFullStackTestCase):
             qoses, queues = self._qos_info(vm.bridge)
             self.fail(queuenum + qoses + queues)

-    def test_min_bw_qos_create_network_vxlan_not_supported(self):
+    def test_min_bw_qos_create_network_vxlan_supported(self):
         qos_policy = self._create_qos_policy()
         qos_policy_id = qos_policy['id']
         self.safe_client.create_minimum_bandwidth_rule(
             self.tenant_id, qos_policy_id, MIN_BANDWIDTH, self.direction)
         network_args = {'network_type': 'vxlan',
                         'qos_policy_id': qos_policy_id}
-        self.assertRaises(
-            exceptions.Conflict,
-            self.safe_client.create_network,
+        net = self.safe_client.create_network(
             self.tenant_id, name='network-test', **network_args)
+        self.assertEqual(qos_policy_id, net['qos_policy_id'])

-    def test_min_bw_qos_update_network_vxlan_not_supported(self):
-        network_args = {'network_type': 'vxlan'}
-        network = self.safe_client.create_network(
-            self.tenant_id, name='network-test', **network_args)
+    def test_min_bw_qos_create_and_update_network_vxlan_supported(self):
         qos_policy = self._create_qos_policy()
         qos_policy_id = qos_policy['id']
         self.safe_client.create_minimum_bandwidth_rule(
             self.tenant_id, qos_policy_id, MIN_BANDWIDTH, self.direction)
-        self.assertRaises(
-            exceptions.Conflict,
-            self.client.update_network, network['id'],
-            body={'network': {'qos_policy_id': qos_policy_id}})
+        network_args = {'network_type': 'vxlan',
+                        'qos_policy_id': qos_policy_id}
+        network = self.safe_client.create_network(
+            self.tenant_id, name='network-test', **network_args)
+        self.assertEqual(qos_policy_id, network['qos_policy_id'])
+
+        qos_policy2 = self._create_qos_policy()
+        qos_policy2_id = qos_policy2['id']
+        self.client.update_network(
+            network['id'], body={'network': {'qos_policy_id': qos_policy2_id}})
+        _net = self.client.show_network(network['id'])
+        self.assertEqual(qos_policy2_id, _net['network']['qos_policy_id'])
+
+        # This action will remove the QoS policy from the network. It is also
+        # needed before the cleanUp call, which deletes the QoS policy before
+        # the network.
+        self.client.update_network(
+            network['id'], body={'network': {'qos_policy_id': None}})
+        _net = self.client.show_network(network['id'])
+        self.assertIsNone(_net['network']['qos_policy_id'])

     def test_min_bw_qos_port_removed(self):
         """Test if min BW limit config is properly removed when port removed.
diff --git a/neutron/tests/functional/agent/l2/base.py b/neutron/tests/functional/agent/l2/base.py
index a17c9bb167e..d62ce22e972 100644
--- a/neutron/tests/functional/agent/l2/base.py
+++ b/neutron/tests/functional/agent/l2/base.py
@@ -33,6 +33,7 @@ from neutron.common import utils
 from neutron.conf.agent import common as agent_config
 from neutron.conf.agent import ovs_conf as ovs_agent_config
 from neutron.conf import common as common_config
+from neutron.conf.plugins.ml2 import config as ml2_config
 from neutron.conf.plugins.ml2.drivers import agent
 from neutron.conf.plugins.ml2.drivers import ovs_conf
 from neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers \
@@ -140,6 +141,7 @@ class OVSAgentTestFramework(base.BaseOVSLinuxTestCase, OVSOFControllerHelper):
         agent_config.register_agent_state_opts_helper(config)
         ovs_agent_config.register_ovs_agent_opts(config)
         ext_manager.register_opts(config)
+        ml2_config.register_ml2_plugin_opts(cfg=config)
         return config

     def _configure_agent(self):
diff --git a/neutron/tests/unit/agent/common/test_placement_report.py b/neutron/tests/unit/agent/common/test_placement_report.py
index 858a68759dc..26c41de20b5 100644
--- a/neutron/tests/unit/agent/common/test_placement_report.py
+++ b/neutron/tests/unit/agent/common/test_placement_report.py
@@ -16,6 +16,8 @@ from unittest import mock
 import uuid

 from neutron.agent.common import placement_report
+from neutron.common import _constants as n_const
+from neutron.conf.plugins.ml2 import config as ml2_config
 from neutron.tests import base

@@ -43,6 +45,7 @@ class DeferredCallTestCase(base.BaseTestCase):
 class PlacementStateTestCase(base.BaseTestCase):

     def setUp(self):
+        ml2_config.register_ml2_plugin_opts()
         super(PlacementStateTestCase, self).setUp()
         self.client_mock = mock.Mock()
         self.driver_uuid_namespace = uuid.UUID(
@@ -61,6 +64,10 @@ class PlacementStateTestCase(base.BaseTestCase):
             'hypervisor_rps': {
                 'eth0': {'name': 'fakehost', 'uuid': self.hypervisor1_rp_uuid},
                 'eth1': {'name': 'fakehost', 'uuid': self.hypervisor1_rp_uuid},
+                # NOTE(ralonsoh): use the 'rp_tunnelled' n-lib constant once
+                # merged.
+                'rp_tunnelled': {'name': 'fakehost',
+                                 'uuid': self.hypervisor1_rp_uuid},
             },
             'device_mappings': {},
             'supported_vnic_types': [],
@@ -195,6 +202,7 @@
             },
             'rp_bandwidths': {
                 'eth0': {'egress': 1, 'ingress': 1},
+                'rp_tunnelled': {'egress': 2, 'ingress': 3},
             },
             'supported_vnic_types': ['normal'],
         })
@@ -211,6 +219,13 @@
                     '1ea6f823-bcf2-5dc5-9bee-4ee6177a6451'),
                 traits=mock.ANY),

+            # uuid -v5 '00000000-0000-0000-0000-000000000001' \
+            #     'fakehost:rp_tunnelled'
+            mock.call(
+                resource_provider_uuid=uuid.UUID(
+                    '357001cb-88b4-5e1d-ae6e-85b238a7a83e'),
+                traits=mock.ANY),
+
             # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost'
             mock.call(
                 resource_provider_uuid=uuid.UUID(
@@ -223,8 +238,9 @@
         actual_traits = [set(args[1]['traits']) for args
            in self.client_mock.update_resource_provider_traits.call_args_list]
         self.assertEqual(
-            [set(['CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL']),
-             set(['CUSTOM_VNIC_TYPE_NORMAL'])],
+            [{'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'},
+             {n_const.TRAIT_NETWORK_TUNNEL, 'CUSTOM_VNIC_TYPE_NORMAL'},
+             {'CUSTOM_VNIC_TYPE_NORMAL'}],
             actual_traits)

     def test_deferred_update_resource_provider_inventories_bw(self):
diff --git a/neutron/tests/unit/agent/common/test_utils.py b/neutron/tests/unit/agent/common/test_utils.py
index dc6db966f11..51dd5a9d785 100644
--- a/neutron/tests/unit/agent/common/test_utils.py
+++ b/neutron/tests/unit/agent/common/test_utils.py
@@ -16,9 +16,12 @@
 import socket
 from unittest import mock

+from oslo_config import cfg
+
 from neutron.agent.common import utils
 from neutron.agent.linux import interface
 from neutron.conf.agent import common as config
+from neutron.conf.plugins.ml2 import config as ml2_config
 from neutron.tests import base
 from neutron.tests.unit import testlib_api

@@ -158,6 +161,10 @@ class TestGetHypervisorHostname(base.BaseTestCase):
 # TODO(bence romsics): rehome this to neutron_lib
 class TestDefaultRpHypervisors(base.BaseTestCase):

+    def setUp(self):
+        super().setUp()
+        ml2_config.register_ml2_plugin_opts()
+
     @mock.patch.object(utils, 'get_hypervisor_hostname',
                        return_value='thishost')
     def test_defaults(self, hostname_mock):
@@ -197,3 +204,26 @@
                 default_hypervisor='defaulthost',
             )
         )
+
+        rp_tunnelled = cfg.CONF.ml2.tunnelled_network_rp_name
+        self.assertEqual(
+            {'eth0': 'thathost', 'eth1': 'defaulthost',
+             rp_tunnelled: 'defaulthost'},
+            utils.default_rp_hypervisors(
+                hypervisors={'eth0': 'thathost'},
+                device_mappings={'physnet0': ['eth0', 'eth1']},
+                default_hypervisor='defaulthost',
+                tunnelled_network_rp_name=rp_tunnelled
+            )
+        )
+
+        self.assertEqual(
+            {'eth0': 'thathost', 'eth1': 'defaulthost',
+             rp_tunnelled: 'thathost'},
+            utils.default_rp_hypervisors(
+                hypervisors={'eth0': 'thathost', rp_tunnelled: 'thathost'},
+                device_mappings={'physnet0': ['eth0', 'eth1']},
+                default_hypervisor='defaulthost',
+                tunnelled_network_rp_name=rp_tunnelled
+            )
+        )
diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py
index e5a1a8cbfd0..dca207633da 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py
@@ -386,19 +386,39 @@ class QosOVSAgentDriverTestCase(ovs_test_base.OVSAgentConfigTestBase):
         self.qos_driver.delete_minimum_bandwidth({'port_id': 'p_id'})
         mock_delete_minimum_bandwidth_queue.assert_called_once_with('p_id')

-    def test_update_minimum_bandwidth_no_vif_port(self):
+    @mock.patch.object(qos_driver, 'LOG')
+    def test_update_minimum_bandwidth_no_vif_port(self, mock_log):
         with mock.patch.object(self.qos_driver.br_int,
                                'update_minimum_bandwidth_queue') \
                 as mock_delete_minimum_bandwidth_queue:
-            self.qos_driver.update_minimum_bandwidth({}, mock.ANY)
+            self.qos_driver.update_minimum_bandwidth(
+                {'port_id': 'portid'}, mock.ANY)
             mock_delete_minimum_bandwidth_queue.assert_not_called()
+            mock_log.debug.assert_called_once_with(
+                'update_minimum_bandwidth was received for port %s but '
+                'vif_port was not found. It seems that port is already '
+                'deleted', 'portid')
+
+    @mock.patch.object(qos_driver, 'LOG')
+    def test_update_minimum_bandwidth_no_physical_network(self, mock_log):
+        with mock.patch.object(self.qos_driver.br_int,
+                               'update_minimum_bandwidth_queue') \
+                as mock_delete_minimum_bandwidth_queue:
+            port = {'vif_port': mock.ANY, 'port_id': 'portid',
+                    'physical_network': None}
+            self.qos_driver.update_minimum_bandwidth(port, mock.ANY)
+            mock_delete_minimum_bandwidth_queue.assert_not_called()
+            mock_log.debug.assert_called_once_with(
+                'update_minimum_bandwidth was received for port %s but '
+                'has no physical network associated', 'portid')

     def test_update_minimum_bandwidth_no_phy_brs(self):
         vif_port = mock.Mock()
         vif_port.ofport = 'ofport'
         rule = mock.Mock()
         rule.min_kbps = 1500
-        port = {'port_id': 'port_id', 'vif_port': vif_port}
+        port = {'port_id': 'port_id', 'vif_port': vif_port,
+                'physical_network': mock.ANY}
         with mock.patch.object(self.qos_driver.br_int,
                                'update_minimum_bandwidth_queue') \
                 as mock_delete_minimum_bandwidth_queue, \
@@ -413,7 +433,8 @@ class QosOVSAgentDriverTestCase(ovs_test_base.OVSAgentConfigTestBase):
         vif_port.ofport = 'ofport'
         rule = mock.Mock()
         rule.min_kbps = 1500
-        port = {'port_id': 'port_id', 'vif_port': vif_port}
+        port = {'port_id': 'port_id', 'vif_port': vif_port,
+                'physical_network': mock.ANY}
         with mock.patch.object(self.qos_driver.br_int,
                                'update_minimum_bandwidth_queue') \
                 as mock_delete_minimum_bandwidth_queue, \
diff --git a/neutron/tests/unit/services/qos/drivers/openvswitch/test_driver.py b/neutron/tests/unit/services/qos/drivers/openvswitch/test_driver.py
index 6345cb82608..09ac0a060fa 100644
--- a/neutron/tests/unit/services/qos/drivers/openvswitch/test_driver.py
+++ b/neutron/tests/unit/services/qos/drivers/openvswitch/test_driver.py
@@ -27,12 +27,14 @@ class TestOVSDriver(base.BaseQosTestCase):
         super(TestOVSDriver, self).setUp()
         self.driver = driver.OVSDriver.create()

-    def test_validate_min_bw_rule_vs_physnet_non_physnet(self):
-        scenarios = [
-            ({'physical_network': 'fake physnet'}, self.assertTrue),
-            ({}, self.assertFalse),
-        ]
-        for segment_kwargs, test_method in scenarios:
+    def test_validate_min_bw_rule(self):
+        # Minimum bandwidth rules are now allowed for tunnelled networks since
+        # LP#1991965. The ML2/OVS backend cannot enforce them but Placement can
+        # schedule a VM using this information.
+        scenarios = [{'physical_network': 'fake physnet'},
+                     {},
+                     ]
+        for segment_kwargs in scenarios:
             segment = network_object.NetworkSegment(**segment_kwargs)
             net = network_object.Network(mock.Mock(), segments=[segment])
             rule = mock.Mock()
@@ -41,7 +43,7 @@
             with mock.patch(
                 'neutron.objects.network.Network.get_object',
                 return_value=net):
-                test_method(self.driver.validate_rule_for_port(
+                self.assertTrue(self.driver.validate_rule_for_port(
                     mock.Mock(), rule, port))
-                test_method(self.driver.validate_rule_for_network(
+                self.assertTrue(self.driver.validate_rule_for_network(
                     mock.Mock(), rule, network_id=mock.Mock()))
diff --git a/neutron/tests/unit/services/qos/test_qos_plugin.py b/neutron/tests/unit/services/qos/test_qos_plugin.py
index b9ef242e4a5..875712f172c 100644
--- a/neutron/tests/unit/services/qos/test_qos_plugin.py
+++ b/neutron/tests/unit/services/qos/test_qos_plugin.py
@@ -15,6 +15,7 @@ from unittest import mock

 from keystoneauth1 import exceptions as ks_exc
 import netaddr
+from neutron_lib.api.definitions import portbindings
 from neutron_lib.api.definitions import qos
 from neutron_lib.callbacks import events
 from neutron_lib import constants as lib_constants
@@ -23,6 +24,7 @@ from neutron_lib import exceptions as lib_exc
 from neutron_lib.exceptions import placement as pl_exc
 from neutron_lib.exceptions import qos as qos_exc
 from neutron_lib.objects import utils as obj_utils
+from neutron_lib.placement import utils as pl_utils
 from neutron_lib.plugins import constants as plugins_constants
 from neutron_lib.plugins import directory
 from neutron_lib.services.qos import constants as qos_consts
@@ -32,6 +34,7 @@ from oslo_config import cfg
 from oslo_utils import uuidutils
 import webob.exc

+from neutron.common import _constants as n_const
 from neutron.exceptions import qos as neutron_qos_exc
 from neutron.extensions import qos_pps_minimum_rule_alias
 from neutron.extensions import qos_rules_alias
@@ -83,6 +86,7 @@
         self.ctxt = context.Context('fake_user', 'fake_tenant')
         self.admin_ctxt = context.get_admin_context()

+        self.default_uuid = 'fake_uuid'
         self.policy_data = {
             'policy': {'id': uuidutils.generate_uuid(),
@@ -129,6 +133,9 @@
         self.min_pps_rule = rule_object.QosMinimumPacketRateRule(
             self.ctxt, **self.rule_data['minimum_packet_rate_rule'])

+        self._rp_tun_name = cfg.CONF.ml2.tunnelled_network_rp_name
+        self._rp_tun_trait = n_const.TRAIT_NETWORK_TUNNEL
+
     def _validate_driver_params(self, method_name, ctxt):
         call_args = self.qos_plugin.driver_manager.call.call_args[0]
         self.assertTrue(self.qos_plugin.driver_manager.call.called)
@@ -172,7 +179,7 @@
                 return_value=min_pps_rules), \
             mock.patch(
                 'uuid.uuid5',
-                return_value='fake_uuid',
+                return_value=self.default_uuid,
                 side_effect=request_groups_uuids):
             return qos_plugin.QoSPlugin._extend_port_resource_request(
                 port_res, self.port)
@@ -333,34 +340,33 @@

         port = self._create_and_extend_port([self.min_bw_rule],
                                             physical_network=None)
-        self.assertIsNone(port.get('resource_request'))
+        expected = {
+            'request_groups': [{'id': self.default_uuid,
+                                'required': [self._rp_tun_trait,
+                                             'CUSTOM_VNIC_TYPE_NORMAL'],
+                                'resources': {
+                                    orc.NET_BW_EGR_KILOBIT_PER_SEC: 10}}],
+            'same_subtree': [self.default_uuid]}
+        self.assertEqual(expected, port['resource_request'])

     def test__extend_port_resource_request_mix_rules_non_provider_net(self):
         self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
-        port = self._create_and_extend_port([self.min_bw_rule],
-                                            [self.min_pps_rule],
-                                            physical_network=None)
-        self.assertEqual(
-            1,
-            len(port['resource_request']['request_groups'])
-        )
-        self.assertEqual(
-            'fake_uuid',
-            port['resource_request']['request_groups'][0]['id']
-        )
-        self.assertEqual(
-            ['CUSTOM_VNIC_TYPE_NORMAL'],
-            port['resource_request']['request_groups'][0]['required']
-        )
-        self.assertEqual(
-            {orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
-            port['resource_request']['request_groups'][0]['resources'],
-        )
-        self.assertEqual(
-            ['fake_uuid'],
-            port['resource_request']['same_subtree'],
-        )
+        port = self._create_and_extend_port(
+            [self.min_bw_rule], [self.min_pps_rule], physical_network=None,
+            request_groups_uuids=['fake_uuid0', 'fake_uuid1'])
+        request_groups = [
+            {'id': 'fake_uuid0',
+             'required': [self._rp_tun_trait,
+                          'CUSTOM_VNIC_TYPE_NORMAL'],
+             'resources': {orc.NET_BW_EGR_KILOBIT_PER_SEC: 10}},
+            {'id': 'fake_uuid1',
+             'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
+             'resources': {orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10}}]
+        expected = {
+            'request_groups': request_groups,
+            'same_subtree': ['fake_uuid0', 'fake_uuid1']}
+        self.assertEqual(expected, port['resource_request'])

     def test__extend_port_resource_request_bulk_min_bw_rule(self):
         self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
@@ -1844,6 +1850,24 @@
             self.qos_plugin.get_rule_type,
             self.ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)

+    def test__get_min_bw_traits(self):
+        vnic_type = portbindings.VNIC_NORMAL
+        segments = [None]
+        ret = self.qos_plugin._get_min_bw_traits(vnic_type, segments)
+        self.assertEqual([], ret)
+
+        segments = [mock.Mock(physical_network=None)]
+        ret = self.qos_plugin._get_min_bw_traits(vnic_type, segments)
+        # NOTE(ralonsoh): once implemented, use the neutron-lib method to
+        # generate the tunnelled networks trait.
+        self.assertEqual([self._rp_tun_trait,
+                          pl_utils.vnic_type_trait(vnic_type)], ret)
+
+        segments = [mock.Mock(physical_network='physnet_1')]
+        ret = self.qos_plugin._get_min_bw_traits(vnic_type, segments)
+        self.assertEqual([pl_utils.physnet_trait('physnet_1'),
+                          pl_utils.vnic_type_trait(vnic_type)], ret)
+

 class QoSRuleAliasTestExtensionManager(object):
diff --git a/releasenotes/notes/qos-minimum-bw-tunnelled-networks-8064d6f21f7d9267.yaml b/releasenotes/notes/qos-minimum-bw-tunnelled-networks-8064d6f21f7d9267.yaml
new file mode 100644
index 00000000000..8aba619c247
--- /dev/null
+++ b/releasenotes/notes/qos-minimum-bw-tunnelled-networks-8064d6f21f7d9267.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    ML2/OVS and ML2/OVN now support modelling tunnelled networks in the
+    Placement API. The "tunnelled_network_rp_name" configuration option
+    defines the resource provider name used to represent all tunnelled
+    networks on a compute node (by default "rp_tunnelled"). If this string
+    is present in the "resource_provider_bandwidths" dictionary, the
+    corresponding mechanism driver will create a resource provider for
+    the overlay traffic.
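
Reviewer note (not part of the patch): the following minimal Python sketch mirrors the reworked default_rp_hypervisors() helper shown above, so the effect of the new tunnelled_network_rp_name argument can be seen in isolation. It deliberately omits the get_hypervisor_hostname() fallback and uses the illustrative host/device names from the new unit test.

# Standalone sketch of the new hypervisor-mapping logic. The synthetic
# 'rp_tunnelled' device is resolved exactly like a bridge from the mappings.
def default_rp_hypervisors(hypervisors, device_mappings,
                           default_hypervisor,
                           tunnelled_network_rp_name=None):
    # Collect every device named in the bridge/device mappings.
    devices = {dev for devs in device_mappings.values() for dev in devs}
    # When tunnelled-network tracking is enabled, treat the resource provider
    # name (by default 'rp_tunnelled') as one more device to resolve.
    if tunnelled_network_rp_name:
        devices.add(tunnelled_network_rp_name)
    # Explicit entries win; everything else falls back to the default host.
    return {device: hypervisors.get(device) or default_hypervisor
            for device in devices}


if __name__ == '__main__':
    result = default_rp_hypervisors(
        hypervisors={'eth0': 'thathost'},
        device_mappings={'physnet0': ['eth0', 'eth1']},
        default_hypervisor='defaulthost',
        tunnelled_network_rp_name='rp_tunnelled')
    # eth0 keeps its explicit mapping; eth1 and rp_tunnelled fall back to the
    # default hypervisor, matching the expectations in the new unit test.
    assert result == {'eth0': 'thathost',
                      'eth1': 'defaulthost',
                      'rp_tunnelled': 'defaulthost'}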