libvirt: Start populating NUMACell.network_metadata field
We're basically reading what's stored in config. Nothing too complicated here, outside of the dynamic option magicness.

Part of blueprint numa-aware-vswitches

Change-Id: Ice45f64546cb05db4c6f2928af0efd484f3a6aaf
This commit is contained in:
parent
9dfac2fda7
commit
bd334f0048
|
@ -2243,6 +2243,10 @@ class BadRequirementEmulatorThreadsPolicy(Invalid):
|
|||
"CPU policy option.")
|
||||
|
||||
|
||||
class InvalidNetworkNUMAAffinity(Invalid):
    """Raised when the configured network NUMA affinity is not valid."""

    msg_fmt = _("Invalid NUMA network affinity configured: %(reason)s")
|
||||
|
||||
|
||||
class PowerVMAPIFailed(NovaException):
    """Raised when a PowerVM API call fails to complete for an instance."""

    msg_fmt = _("PowerVM API failed to complete for instance=%(inst_name)s. "
                "%(reason)s")
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
from oslo_config import fixture as config_fixture
|
||||
from oslo_policy import opts as policy_opts
|
||||
|
||||
from nova.conf import neutron
|
||||
from nova.conf import paths
|
||||
from nova import config
|
||||
from nova import ipv6
|
||||
|
@ -79,5 +80,6 @@ class ConfFixture(config_fixture.Config):
|
|||
config.parse_args([], default_config_files=[], configure_db=False,
|
||||
init_rpc=False)
|
||||
policy_opts.set_defaults(self.conf)
|
||||
neutron.register_dynamic_opts(self.conf)
|
||||
self.addCleanup(utils.cleanup_dns_managers)
|
||||
self.addCleanup(ipv6.api.reset_backend)
|
||||
|
|
|
@ -15157,7 +15157,18 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
if key not in ['phys_function', 'virt_functions', 'label']:
|
||||
self.assertEqual(expectvfs[dev][key], actualvfs[dev][key])
|
||||
|
||||
# TODO(stephenfin): This only has one caller. Flatten it and remove the
|
||||
# 'mempages=False' branches or add the missing test
|
||||
def _test_get_host_numa_topology(self, mempages):
|
||||
self.flags(physnets=['foo', 'bar', 'baz'], group='neutron')
|
||||
# we need to call the below again to ensure the updated 'physnets'
|
||||
# value is read and the new groups created
|
||||
nova.conf.neutron.register_dynamic_opts(CONF)
|
||||
self.flags(numa_nodes=[0, 2], group='neutron_tunnel')
|
||||
self.flags(numa_nodes=[1], group='neutron_physnet_foo')
|
||||
self.flags(numa_nodes=[3], group='neutron_physnet_bar')
|
||||
self.flags(numa_nodes=[1, 2, 3], group='neutron_physnet_baz')
|
||||
|
||||
caps = vconfig.LibvirtConfigCaps()
|
||||
caps.host = vconfig.LibvirtConfigCapsHost()
|
||||
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
||||
|
@ -15203,6 +15214,20 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
|
||||
self.assertEqual([set([3])], got_topo.cells[1].siblings)
|
||||
|
||||
self.assertEqual(set(),
|
||||
got_topo.cells[0].network_metadata.physnets)
|
||||
self.assertEqual(set(['foo', 'baz']),
|
||||
got_topo.cells[1].network_metadata.physnets)
|
||||
self.assertEqual(set(['baz']),
|
||||
got_topo.cells[2].network_metadata.physnets)
|
||||
self.assertEqual(set(['bar', 'baz']),
|
||||
got_topo.cells[3].network_metadata.physnets)
|
||||
|
||||
self.assertTrue(got_topo.cells[0].network_metadata.tunneled)
|
||||
self.assertFalse(got_topo.cells[1].network_metadata.tunneled)
|
||||
self.assertTrue(got_topo.cells[2].network_metadata.tunneled)
|
||||
self.assertFalse(got_topo.cells[3].network_metadata.tunneled)
|
||||
|
||||
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
|
||||
def test_get_host_numa_topology(self, mock_version):
|
||||
self._test_get_host_numa_topology(mempages=True)
|
||||
|
@ -15238,6 +15263,93 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_type.return_value = host.HV_DRIVER_XEN
|
||||
self.assertIsNone(drvr._get_host_numa_topology())
|
||||
|
||||
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
|
||||
def test_get_host_numa_topology_missing_network_metadata(self,
|
||||
mock_version):
|
||||
self.flags(physnets=['bar'], group='neutron')
|
||||
# we need to call the below again to ensure the updated 'physnets'
|
||||
# value is read and the new groups created
|
||||
nova.conf.neutron.register_dynamic_opts(CONF)
|
||||
|
||||
# we explicitly avoid registering a '[neutron_physnets_bar] numa_nodes'
|
||||
# option here
|
||||
|
||||
caps = vconfig.LibvirtConfigCaps()
|
||||
caps.host = vconfig.LibvirtConfigCapsHost()
|
||||
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
||||
caps.host.cpu.arch = fields.Architecture.X86_64
|
||||
caps.host.topology = fakelibvirt.NUMATopology()
|
||||
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
with test.nested(
|
||||
mock.patch.object(host.Host, "get_capabilities",
|
||||
return_value=caps),
|
||||
mock.patch.object(hardware, 'get_vcpu_pin_set',
|
||||
return_value=set([0, 1, 3, 4, 5])),
|
||||
mock.patch.object(host.Host, 'get_online_cpus',
|
||||
return_value=set([0, 1, 2, 3, 6])),
|
||||
):
|
||||
self.assertRaisesRegex(
|
||||
exception.InvalidNetworkNUMAAffinity,
|
||||
"Invalid NUMA network affinity configured: the physnet 'bar' "
|
||||
"was listed in '\[neutron\] physnets' but no corresponding "
|
||||
"'\[neutron_physnet_bar\] numa_nodes' option was defined.",
|
||||
drvr._get_host_numa_topology)
|
||||
|
||||
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
|
||||
def _test_get_host_numa_topology_invalid_network_affinity(self,
|
||||
group_name, mock_version):
|
||||
self.flags(physnets=['foo', 'bar'], group='neutron')
|
||||
# we need to call the below again to ensure the updated 'physnets'
|
||||
# value is read and the new groups created
|
||||
nova.conf.neutron.register_dynamic_opts(CONF)
|
||||
|
||||
# set defaults...
|
||||
for group_ in ['neutron_physnet_foo', 'neutron_physnet_bar',
|
||||
'neutron_tunnel']:
|
||||
self.flags(numa_nodes=[0], group=group_)
|
||||
|
||||
# but override them for the error case
|
||||
self.flags(numa_nodes=[4], group=group_name)
|
||||
|
||||
caps = vconfig.LibvirtConfigCaps()
|
||||
caps.host = vconfig.LibvirtConfigCapsHost()
|
||||
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
||||
caps.host.cpu.arch = fields.Architecture.X86_64
|
||||
caps.host.topology = fakelibvirt.NUMATopology()
|
||||
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
with test.nested(
|
||||
mock.patch.object(host.Host, "get_capabilities",
|
||||
return_value=caps),
|
||||
mock.patch.object(hardware, 'get_vcpu_pin_set',
|
||||
return_value=set([0, 1, 3, 4, 5])),
|
||||
mock.patch.object(host.Host, 'get_online_cpus',
|
||||
return_value=set([0, 1, 2, 3, 6])),
|
||||
):
|
||||
self.assertRaisesRegex(
|
||||
exception.InvalidNetworkNUMAAffinity,
|
||||
r'node 4 for \w+ \w+ is not present',
|
||||
drvr._get_host_numa_topology)
|
||||
|
||||
def test_get_host_numa_topology_invalid_physical_network_affinity(self):
|
||||
"""Ensure errors are raised for non-existent NUMA nodes.
|
||||
|
||||
If a physical network is affined to a non-existent NUMA node, an
|
||||
exception should be raised. Prove this to be the case.
|
||||
"""
|
||||
self._test_get_host_numa_topology_invalid_network_affinity(
|
||||
'neutron_physnet_bar')
|
||||
|
||||
def test_get_host_numa_topology_invalid_tunnel_network_affinity(self):
|
||||
"""Ensure errors are raised for non-existent NUMA nodes.
|
||||
|
||||
If a tunneled network is affined to a non-existent NUMA node, an
|
||||
exception should be raised. Prove this to be the case.
|
||||
"""
|
||||
self._test_get_host_numa_topology_invalid_network_affinity(
|
||||
'neutron_tunnel')
|
||||
|
||||
def test_diagnostic_vcpus_exception(self):
|
||||
xml = """
|
||||
<domain type='kvm'>
|
||||
|
|
|
@ -1638,6 +1638,30 @@ class NUMATopologyTest(test.NoDBTestCase):
|
|||
self.assertEqual(hostusage.cells[1].cpu_usage, 1)
|
||||
self.assertEqual(hostusage.cells[1].memory_usage, 128)
|
||||
|
||||
def test_topo_usage_with_network_metadata(self):
|
||||
"""Validate behavior with network_metadata.
|
||||
|
||||
Ensure we handle ``NUMACell``\s that have ``network_metadata`` set
|
||||
along with those where this is unset.
|
||||
"""
|
||||
|
||||
topo = objects.NUMATopology(cells=[
|
||||
objects.NUMACell(
|
||||
id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0,
|
||||
memory_usage=0, siblings=[set([0, 2]), set([1, 3])],
|
||||
mempages=[], pinned_cpus=set([]),
|
||||
network_metadata=objects.NetworkMetadata(
|
||||
physnets=set(['foo', 'bar']), tunneled=True)),
|
||||
objects.NUMACell(
|
||||
id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0,
|
||||
memory_usage=0, siblings=[set([0, 2]), set([1, 3])],
|
||||
mempages=[], pinned_cpus=set([])),
|
||||
])
|
||||
|
||||
new_topo = hw.numa_usage_from_instances(topo, [])
|
||||
self.assertIn('network_metadata', new_topo.cells[0])
|
||||
self.assertNotIn('network_metadata', new_topo.cells[1])
|
||||
|
||||
def assertNUMACellMatches(self, expected_cell, got_cell):
|
||||
attrs = ('cpuset', 'memory', 'id')
|
||||
if isinstance(expected_cell, objects.NUMATopology):
|
||||
|
|
|
@ -1786,6 +1786,9 @@ def numa_usage_from_instances(host, instances, free=False):
|
|||
cpu_usage=0, memory_usage=0, mempages=hostcell.mempages,
|
||||
pinned_cpus=hostcell.pinned_cpus, siblings=hostcell.siblings)
|
||||
|
||||
if 'network_metadata' in hostcell:
|
||||
newcell.network_metadata = hostcell.network_metadata
|
||||
|
||||
for instance in instances:
|
||||
for cellid, instancecell in enumerate(instance.cells):
|
||||
if instancecell.id != hostcell.id:
|
||||
|
|
|
@ -6275,6 +6275,46 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
cell = self._reserved_hugepages.get(cell_id, {})
|
||||
return cell.get(page_size, 0)
|
||||
|
||||
def _get_physnet_numa_affinity():
|
||||
affinities = {cell.id: set() for cell in topology.cells}
|
||||
for physnet in CONF.neutron.physnets:
|
||||
# This will error out if the group is not registered, which is
|
||||
# exactly what we want as that would be a bug
|
||||
group = getattr(CONF, 'neutron_physnet_%s' % physnet)
|
||||
|
||||
if not group.numa_nodes:
|
||||
msg = ("the physnet '%s' was listed in '[neutron] "
|
||||
"physnets' but no corresponding "
|
||||
"'[neutron_physnet_%s] numa_nodes' option was "
|
||||
"defined." % (physnet, physnet))
|
||||
raise exception.InvalidNetworkNUMAAffinity(reason=msg)
|
||||
|
||||
for node in group.numa_nodes:
|
||||
if node not in affinities:
|
||||
msg = ("node %d for physnet %s is not present in host "
|
||||
"affinity set %r" % (node, physnet, affinities))
|
||||
# The config option referenced an invalid node
|
||||
raise exception.InvalidNetworkNUMAAffinity(reason=msg)
|
||||
affinities[node].add(physnet)
|
||||
|
||||
return affinities
|
||||
|
||||
def _get_tunnel_numa_affinity():
|
||||
affinities = {cell.id: False for cell in topology.cells}
|
||||
|
||||
for node in CONF.neutron_tunnel.numa_nodes:
|
||||
if node not in affinities:
|
||||
msg = ("node %d for tunneled networks is not present "
|
||||
"in host affinity set %r" % (node, affinities))
|
||||
# The config option referenced an invalid node
|
||||
raise exception.InvalidNetworkNUMAAffinity(reason=msg)
|
||||
affinities[node] = True
|
||||
|
||||
return affinities
|
||||
|
||||
physnet_affinities = _get_physnet_numa_affinity()
|
||||
tunnel_affinities = _get_tunnel_numa_affinity()
|
||||
|
||||
for cell in topology.cells:
|
||||
cpuset = set(cpu.id for cpu in cell.cpus)
|
||||
siblings = sorted(map(set,
|
||||
|
@ -6296,12 +6336,17 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
self, cell.id, pages.size))
|
||||
for pages in cell.mempages]
|
||||
|
||||
network_metadata = objects.NetworkMetadata(
|
||||
physnets=physnet_affinities[cell.id],
|
||||
tunneled=tunnel_affinities[cell.id])
|
||||
|
||||
cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
|
||||
memory=cell.memory / units.Ki,
|
||||
cpu_usage=0, memory_usage=0,
|
||||
siblings=siblings,
|
||||
pinned_cpus=set([]),
|
||||
mempages=mempages)
|
||||
mempages=mempages,
|
||||
network_metadata=network_metadata)
|
||||
cells.append(cell)
|
||||
|
||||
return objects.NUMATopology(cells=cells)
|
||||
|
|
Loading…
Reference in New Issue