Fix max CPU topologies with NUMA affinity

Nova has never supported specifying per-NUMA-node CPU topologies.
Logically, the CPU topology of a guest is independent of its NUMA
topology, and there is no way to model different CPU topologies per
NUMA node or implement that in hardware.

The code in nova that allowed these invalid configurations to be
generated has now been removed, as it broke the automatic selection
of CPU topologies based on the hw:cpu_max_[sockets|cores|threads]
flavor and image properties.

This change removes the incorrect code and the related unit tests
that asserted nova could generate invalid topologies.

Closes-Bug: #1910466
Change-Id: Ia81a0fdbd950b51dbcc70c65ba492549a224ce2b
Sean Mooney 2021-01-06 19:49:56 +00:00
parent fe25fa13e0
commit 387823b36d
6 changed files with 59 additions and 306 deletions
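
To make the commit message's point concrete, the two features can be
modelled with simplified stand-ins for nova's objects (illustrative
dataclasses only, not the real classes): a guest carries exactly one
CPU topology tuple, while its NUMA cells carry only CPUs and memory.

from dataclasses import dataclass, field

@dataclass
class VirtCPUTopology:
    # A single (sockets, cores, threads) tuple describes the whole
    # guest; hardware has no notion of a per-NUMA-node variant.
    sockets: int
    cores: int
    threads: int

@dataclass
class InstanceNUMACell:
    # A NUMA cell carries CPUs and memory only; the per-cell
    # 'cpu_topology' field deprecated below is no longer populated.
    id: int
    cpuset: frozenset
    memory_mb: int

@dataclass
class Guest:
    cpu_topology: VirtCPUTopology                   # one per guest
    numa_cells: list = field(default_factory=list)  # independent axis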


@@ -79,6 +79,8 @@ class InstanceNUMACell(base.NovaEphemeralObject,
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True,
default=None),
# TODO(sean-k-mooney): This is no longer used and should be
# removed in v2
'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
nullable=True),
'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True,


@@ -137,22 +137,16 @@ class NUMAServersTest(NUMAServersTestBase):
'hw:cpu_max_sockets': '2',
'hw:cpu_max_cores': '2',
'hw:cpu_max_threads': '8',
'hw:cpu_policy': 'dedicated',
'hw:mem_page_size': 'any'
}
'hw:cpu_policy': 'dedicated'}
flavor_id = self._create_flavor(vcpu=8, extra_spec=extra_spec)
self._run_build_test(flavor_id, end_status='ERROR')
server = self._run_build_test(flavor_id)
# FIXME(sean-k-mooney): The instance should boot but
# it fails due to https://bugs.launchpad.net/nova/+bug/1910466
msg = "IndexError: list index out of range"
self.assertIn(msg, self.stdlog.logger.output)
# ctx = nova_context.get_admin_context()
# inst = objects.Instance.get_by_uuid(ctx, server['id'])
# self.assertEqual(2, len(inst.numa_topology.cells))
# self.assertLessEqual(inst.vcpu_model.topology.sockets, 2)
# self.assertLessEqual(inst.vcpu_model.topology.cores, 2)
# self.assertLessEqual(inst.vcpu_model.topology.threads, 8)
ctx = nova_context.get_admin_context()
inst = objects.Instance.get_by_uuid(ctx, server['id'])
self.assertEqual(2, len(inst.numa_topology.cells))
self.assertLessEqual(inst.vcpu_model.topology.sockets, 2)
self.assertLessEqual(inst.vcpu_model.topology.cores, 2)
self.assertLessEqual(inst.vcpu_model.topology.threads, 8)
def test_create_server_with_numa_fails(self):
"""Create a two NUMA node instance on a host with only one node.
@@ -251,7 +245,7 @@ class NUMAServersTest(NUMAServersTestBase):
inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
self.assertEqual(1, len(inst.numa_topology.cells))
self.assertEqual(5, inst.numa_topology.cells[0].cpu_topology.cores)
self.assertEqual(5, inst.vcpu_model.topology.sockets)
def test_create_server_with_mixed_policy(self):
"""Create a server using the 'hw:cpu_policy=mixed' extra spec.
@@ -301,7 +295,6 @@ class NUMAServersTest(NUMAServersTestBase):
# sanity check the instance topology
inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
self.assertEqual(1, len(inst.numa_topology.cells))
self.assertEqual(4, inst.numa_topology.cells[0].cpu_topology.cores)
self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
self.assertEqual({1, 2, 3}, inst.numa_topology.cells[0].pcpuset)
self.assertEqual(
@@ -510,8 +503,6 @@ class NUMAServersTest(NUMAServersTestBase):
ctx = nova_context.get_admin_context()
inst = objects.Instance.get_by_uuid(ctx, server['id'])
self.assertEqual(1, len(inst.numa_topology.cells))
self.assertEqual(1, inst.numa_topology.cells[0].cpu_topology.cores)
self.assertEqual(2, inst.numa_topology.cells[0].cpu_topology.threads)
def test_create_server_with_pcpu_fails(self):
"""Create a pinned instance on a host with no PCPUs.


@@ -766,119 +766,6 @@ class VCPUTopologyTest(test.NoDBTestCase):
},
"expect": [16, 1, 1]
},
{ # NUMA needs threads, only cores requested by flavor
"allow_threads": True,
"flavor": objects.Flavor(
vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_cores": "2",
}
),
"image": {
"properties": {
"hw_cpu_max_cores": 2,
}
},
"numa_topology": objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=2)),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), pcpuset=set(), memory=1024),
]),
"expect": [1, 2, 2]
},
{ # NUMA needs threads, but more than requested by flavor - the
# least amount of threads wins
"allow_threads": True,
"flavor": objects.Flavor(
vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_threads": "2",
}
),
"image": {
"properties": {}
},
"numa_topology": objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), pcpuset=set(),
memory=2048,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=4)),
]),
"expect": [2, 1, 2]
},
{ # NUMA needs threads, but more than limit in flavor - the
# least amount of threads which divides into the vcpu
# count wins. So with desired 4, max of 3, and
# vcpu count of 4, we should get 2 threads.
"allow_threads": True,
"flavor": objects.Flavor(
vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_max_sockets": "5",
"hw:cpu_max_cores": "2",
"hw:cpu_max_threads": "3",
}
),
"image": {
"properties": {}
},
"numa_topology": objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), pcpuset=set(),
memory=2048,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=4)),
]),
"expect": [2, 1, 2]
},
{ # NUMA needs threads, but thread count does not
# divide into flavor vcpu count, so we must
# reduce thread count to closest divisor
"allow_threads": True,
"flavor": objects.Flavor(
vcpus=6, memory_mb=2048,
extra_specs={}
),
"image": {
"properties": {}
},
"numa_topology": objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), pcpuset=set(),
memory=2048,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=4)),
]),
"expect": [2, 1, 3]
},
{ # NUMA needs different number of threads per cell - the least
# amount of threads wins
"allow_threads": True,
"flavor": objects.Flavor(
vcpus=8, memory_mb=2048,
extra_specs={}
),
"image": {
"properties": {}
},
"numa_topology": objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), pcpuset=set(),
memory=1024,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=2, threads=2)),
objects.InstanceNUMACell(
id=1, cpuset=set([4, 5, 6, 7]), pcpuset=set(),
memory=1024,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=4)),
]),
"expect": [4, 1, 2]
},
]
for topo_test in testdata:
@@ -886,8 +773,7 @@ class VCPUTopologyTest(test.NoDBTestCase):
topology = hw._get_desirable_cpu_topologies(
topo_test["flavor"],
image_meta,
topo_test["allow_threads"],
topo_test.get("numa_topology"))[0]
topo_test["allow_threads"],)[0]
self.assertEqual(topo_test["expect"][0], topology.sockets)
self.assertEqual(topo_test["expect"][1], topology.cores)
@@ -3284,8 +3170,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
got_pinning = {x: x for x in range(0, 3)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -3340,8 +3225,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
got_pinning = {x: x for x in range(0, 4)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -3369,8 +3253,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
got_pinning = {x: x for x in range(0, 4)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -3398,8 +3281,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
got_pinning = {x: x for x in range(0, 8)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -3426,8 +3308,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
got_pinning = {0: 0, 1: 3, 2: 4, 3: 7}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -3454,8 +3335,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
got_pinning = {x: x for x in range(0, 4)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -3482,8 +3362,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
got_pinning = {x: x for x in range(0, 4)}
self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -3561,8 +3440,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
def test_get_pinning_require_policy_fits_w_usage(self):
host_pin = objects.NUMACell(
@@ -3588,8 +3466,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
self.assertNotIn('cpu_topology', inst_pin)
def test_get_pinning_host_siblings_instance_odd_fit(self):
host_pin = objects.NUMACell(
@@ -3612,10 +3489,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
def test_get_pinning_host_siblings_instance_fit_optimize_threads(self):
host_pin = objects.NUMACell(
@@ -3638,10 +3513,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
def test_get_pinning_host_siblings_instance_odd_fit_w_usage(self):
host_pin = objects.NUMACell(
@@ -3664,10 +3537,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
def test_get_pinning_host_siblings_instance_mixed_siblings(self):
host_pin = objects.NUMACell(
@@ -3690,10 +3561,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
def test_get_pinning_host_siblings_instance_odd_fit_orphan_only(self):
host_pin = objects.NUMACell(
@@ -3716,10 +3585,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
def test_get_pinning_host_siblings_large_instance_odd_fit(self):
host_pin = objects.NUMACell(
@@ -3744,11 +3611,9 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
self.assertPinningPreferThreads(inst_pin, host_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
# TODO(stephenfin): Remove when we drop support for vcpu_pin_set
def test_get_pinning_isolate_policy_too_few_fully_free_cores(self):
@@ -3775,7 +3640,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertIsNone(inst_pin)
# TODO(stephenfin): Remove when we drop support for vcpu_pin_set
@@ -3830,10 +3695,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
# TODO(stephenfin): Remove when we drop support for vcpu_pin_set
def test_get_pinning_isolate_policy_fits_ht_host(self):
@@ -3860,10 +3723,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
# TODO(stephenfin): Remove when we drop support for vcpu_pin_set
def test_get_pinning_isolate_policy_fits_w_usage(self):
@@ -3890,10 +3751,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
)
inst_pin = hw._numa_fit_instance_cell(host_pin, inst_pin, limits)
self.assertNotIn('cpu_topology', inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
# TODO(stephenfin): Remove when we drop support for vcpu_pin_set
@mock.patch.object(hw, 'LOG')


@@ -14,7 +14,6 @@
import collections
import itertools
import math
import re
import typing as ty
@@ -504,44 +503,6 @@ def _get_possible_cpu_topologies(vcpus, maxtopology,
return possible
def _filter_for_numa_threads(possible, wantthreads):
"""Filter topologies which closest match to NUMA threads.
Determine which topologies provide the closest match to the number
of threads desired by the NUMA topology of the instance.
The possible topologies may not have any entries which match the
desired thread count. This method will find the topologies which
have the closest matching count. For example, if 'wantthreads' is 4
and the possible topologies have entries with 6, 3, 2 or 1 threads,
the topologies which have 3 threads will be identified as the
closest match not greater than 4 and will be returned.
:param possible: list of objects.VirtCPUTopology instances
:param wantthreads: desired number of threads
:returns: list of objects.VirtCPUTopology instances
"""
# First figure out the largest available thread
# count which is not greater than wantthreads
mostthreads = 0
for topology in possible:
if topology.threads > wantthreads:
continue
if topology.threads > mostthreads:
mostthreads = topology.threads
# Now restrict to just those topologies which
# match the largest thread count
bestthreads = []
for topology in possible:
if topology.threads != mostthreads:
continue
bestthreads.append(topology)
return bestthreads
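
(Editorial aside: the filter above can be exercised standalone with
plain integers standing in for VirtCPUTopology objects; a hypothetical
snippet, not nova code.)

candidates = [6, 3, 2, 1]  # thread counts of the possible topologies
want = 4
# Largest available thread count not greater than wantthreads...
most = max((t for t in candidates if t <= want), default=0)
# ...then keep only the topologies matching that thread count.
best = [t for t in candidates if t == most]
assert best == [3]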
def _sort_possible_cpu_topologies(possible, wanttopology):
"""Sort the topologies in order of preference.
@@ -579,8 +540,7 @@ def _sort_possible_cpu_topologies(possible, wanttopology):
return desired
def _get_desirable_cpu_topologies(flavor, image_meta, allow_threads=True,
numa_topology=None):
def _get_desirable_cpu_topologies(flavor, image_meta, allow_threads=True):
"""Identify desirable CPU topologies based for given constraints.
Look at the properties set in the flavor extra specs and the image
@@ -591,10 +551,6 @@ def _get_desirable_cpu_topologies(flavor, image_meta, allow_threads=True,
:param flavor: objects.Flavor instance to query extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:param allow_threads: if the hypervisor supports CPU threads
:param numa_topology: objects.InstanceNUMATopology instance that
may contain additional topology constraints
(such as threading information) that should
be considered
:returns: sorted list of objects.VirtCPUTopology instances
"""
@@ -612,36 +568,12 @@ def _get_desirable_cpu_topologies(flavor, image_meta, allow_threads=True,
maximum,
allow_threads)
LOG.debug("Possible topologies %s", possible)
if numa_topology:
min_requested_threads = None
cell_topologies = [cell.cpu_topology for cell in numa_topology.cells
if ('cpu_topology' in cell and cell.cpu_topology)]
if cell_topologies:
min_requested_threads = min(
topo.threads for topo in cell_topologies)
if min_requested_threads:
if preferred.threads:
min_requested_threads = min(preferred.threads,
min_requested_threads)
specified_threads = max(1, min_requested_threads)
LOG.debug("Filtering topologies best for %d threads",
specified_threads)
possible = _filter_for_numa_threads(possible,
specified_threads)
LOG.debug("Remaining possible topologies %s",
possible)
desired = _sort_possible_cpu_topologies(possible, preferred)
LOG.debug("Sorted desired topologies %s", desired)
return desired
def get_best_cpu_topology(flavor, image_meta, allow_threads=True,
numa_topology=None):
def get_best_cpu_topology(flavor, image_meta, allow_threads=True):
"""Identify best CPU topology for given constraints.
Look at the properties set in the flavor extra specs and the image
@@ -651,15 +583,11 @@ def get_best_cpu_topology(flavor, image_meta, allow_threads=True,
:param flavor: objects.Flavor instance to query extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:param allow_threads: if the hypervisor supports CPU threads
:param numa_topology: objects.InstanceNUMATopology instance that
may contain additional topology constraints
(such as threading information) that should
be considered
:returns: an objects.VirtCPUTopology instance for best topology
"""
return _get_desirable_cpu_topologies(flavor, image_meta,
allow_threads, numa_topology)[0]
return _get_desirable_cpu_topologies(
flavor, image_meta, allow_threads)[0]
def _numa_cell_supports_pagesize_request(host_cell, inst_cell):
@@ -753,43 +681,6 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
pinning = None
threads_no = 1
def _orphans(instance_cell, threads_per_core):
"""Number of instance CPUs which will not fill up a host core.
Best explained by an example: consider set of free host cores as such:
[(0, 1), (3, 5), (6, 7, 8)]
This would be a case of 2 threads_per_core AKA an entry for 2 in the
sibling_sets structure.
If we attempt to pack a 5 core instance on it - due to the fact that we
iterate the list in order, we will end up with a single core of the
instance pinned to a thread "alone" (with id 6), and we would have one
'orphan' vcpu.
"""
return len(instance_cell) % threads_per_core
def _threads(instance_cell, threads_per_core):
"""Threads to expose to the instance via the VirtCPUTopology.
This is calculated by taking the GCD of the number of threads we are
considering at the moment, and the number of orphans. An example for
instance_cell = 6
threads_per_core = 4
So we can fit the instance as such:
[(0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11)]
x x x x x x
We can't expose 4 threads, as that will not be a valid topology (all
cores exposed to the guest have to have an equal number of threads),
and 1 would be too restrictive, but we want all threads that guest sees
to be on the same physical core, so we take GCD of 4 (max number of
threads) and 2 (number of 'orphan' CPUs) and get 2 as the number of
threads.
"""
return math.gcd(threads_per_core, _orphans(instance_cell,
threads_per_core))
def _get_pinning(threads_no, sibling_set, instance_cores):
"""Determines pCPUs/vCPUs mapping
@@ -988,7 +879,6 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
if (instance_cell.cpu_thread_policy !=
fields.CPUThreadAllocationPolicy.REQUIRE and
not pinning):
threads_no = 1
# we create a fake sibling set by splitting all sibling sets and
# treating each core as if it has no siblings. This is necessary
# because '_get_pinning' will normally only take the same amount of
@@ -1005,23 +895,12 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
threads_no = _threads(instance_cell, threads_no)
if not pinning or (num_cpu_reserved and not cpuset_reserved):
return
LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
host_cell.id)
# TODO(stephenfin): we're using this attribute essentially as a container
# for the thread count used in '_get_desirable_cpu_topologies'; we should
# drop it and either use a non-persistent attribute or add a new
# 'min_threads' field
topology = objects.VirtCPUTopology(
sockets=1,
cores=len(instance_cell) // threads_no,
threads=threads_no)
instance_cell.pin_vcpus(*pinning)
instance_cell.cpu_topology = topology
instance_cell.id = host_cell.id
instance_cell.cpuset_reserved = cpuset_reserved
return instance_cell
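
For reference, the thread-count arithmetic of the removed _orphans and
_threads helpers is small enough to reproduce standalone (a plain-Python
sketch mirroring the removed docstrings, not nova code):

import math

def orphans(instance_cpus: int, threads_per_core: int) -> int:
    # Instance vCPUs that will not fill up a host core.
    return instance_cpus % threads_per_core

def threads(instance_cpus: int, threads_per_core: int) -> int:
    # GCD of the candidate thread count and the orphan count: the
    # largest thread count that divides the instance evenly
    # (gcd(t, 0) == t, so an exact fit keeps all threads).
    return math.gcd(threads_per_core,
                    orphans(instance_cpus, threads_per_core))

# Worked example from the removed docstring: 6 vCPUs on 4-thread cores
# leaves 2 orphan vCPUs, so only 2 threads can be exposed to the guest.
assert orphans(6, 4) == 2
assert threads(6, 4) == 2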


@@ -5119,8 +5119,7 @@ class LibvirtDriver(driver.ComputeDriver):
if cpu is None:
return None
topology = hardware.get_best_cpu_topology(
flavor, image_meta, numa_topology=instance_numa_topology)
topology = hardware.get_best_cpu_topology(flavor, image_meta)
cpu.sockets = topology.sockets
cpu.cores = topology.cores


@@ -0,0 +1,23 @@
---
fixes:
- |
The nova libvirt driver supports two independent features, virtual CPU
topologies and virtual NUMA topologies. Previously, when
``hw:cpu_max_sockets``, ``hw:cpu_max_cores`` and ``hw:cpu_max_threads``
were specified for pinned instances (``hw:cpu_policy=dedicated``)
without explicit ``hw:cpu_sockets``, ``hw:cpu_cores``, ``hw:cpu_threads``
extra specs or their image equivalent, nova failed to generate a valid
virtual CPU topology. This has been fixed and it is now possible to
use max CPU constraints with pinned instances. For example, a
combination of ``hw:numa_nodes=2``, ``hw:cpu_max_sockets=2``,
``hw:cpu_max_cores=2``, ``hw:cpu_max_threads=8`` and
``hw:cpu_policy=dedicated`` can now generate a valid topology using
a flavor with 8 vCPUs (see the sketch below).
upgrade:
- |
As part of the fix for bug 1910466, code that attempted to optimize VM CPU
thread assignment based on the host CPU topology has been removed: it was
determined to be buggy and undocumented, rejected valid virtual CPU
topologies, and produced different behavior when CPU pinning was enabled
vs disabled.
The optimization may be reintroduced in the future with a more generic
implementation that works for both pinned and unpinned VMs.
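
To see why the release note's example now succeeds, the candidate
enumeration can be sketched in a few lines (a simplified mirror of
_get_possible_cpu_topologies above, not the real implementation):

import itertools

def possible_topologies(vcpus, max_sockets, max_cores, max_threads):
    # Keep every (sockets, cores, threads) combination within the
    # maximums whose product is exactly the flavor's vCPU count.
    for s, c, t in itertools.product(
            range(1, min(vcpus, max_sockets) + 1),
            range(1, min(vcpus, max_cores) + 1),
            range(1, min(vcpus, max_threads) + 1)):
        if s * c * t == vcpus:
            yield (s, c, t)

# 8 vCPUs with hw:cpu_max_sockets=2, hw:cpu_max_cores=2 and
# hw:cpu_max_threads=8 admit several valid layouts, including the
# 2 sockets x 2 cores x 2 threads topology implied by the note above.
print(sorted(possible_topologies(8, 2, 2, 8)))
# [(1, 1, 8), (1, 2, 4), (2, 1, 4), (2, 2, 2)]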