virt/hardware: Fix 'isolate' case on non-SMT hosts

The 'isolate' policy is supposed to function on both hosts with an
SMT architecture (e.g. HyperThreading) and those without. The former
works, but the latter is broken due to an underlying implementation
detail in how vCPUs are "packed" onto pCPUs.

The '_pack_instance_onto_cores' function expects to work with a list of
sibling sets. Since non-SMT hosts don't have siblings, the function is
being given a list of all cores as one big sibling set. However, this
conflicts with the idea that, in the 'isolate' case, only one sibling
from each sibling set should be used. Using one sibling from the one
available sibling set means it is not possible to schedule instances
with more than one vCPU.
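
As a simplified, hypothetical sketch (not the actual nova code; the
'sibling_sets' and 'usable_cpus' names are illustrative): 'isolate'
takes at most one CPU from each sibling set, so a single all-core
sibling set yields only one usable pCPU:

    # Illustration only: 'isolate' uses at most one CPU per sibling set
    sibling_sets = [set([0, 1, 2, 3])]  # non-SMT host, pre-fix input
    usable_cpus = [min(s) for s in sibling_sets]
    len(usable_cpus)  # == 1, so at most one vCPU can be pinned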

Resolve this mismatch by instead providing the function with a list of
multiple sibling sets, each containing a single core.
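
Concretely, the change below builds one single-core sibling set per
free pCPU; for free pCPUs {0, 1, 2, 3} the comprehension yields:

    free_cpus = set([0, 1, 2, 3])  # free pCPUs on a non-SMT host
    [set([cpu]) for cpu in free_cpus]  # -> [{0}, {1}, {2}, {3}] (order may vary)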

This also resolves another bug. When booting instances on a non-SMT
host, the resulting CPU topology should not report multiple threads
per core. By
correctly considering the cores on these systems as non-siblings,
the resulting instance topology will contain multiple cores with only
a single thread in each.
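
For example, the updated tests below now expect a four-vCPU instance
on such a host to report:

    got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)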

Change-Id: I2153f25fdb6382ada8e62fddf4215d9a0e3a6aa7
Closes-bug: #1550317
Closes-bug: #1417723
Author: Stephen Finucane
Date:   2016-02-26 13:07:56 +00:00
Commit: 0b2e34f925 (parent: 52b9b1408a)

2 changed files with 19 additions and 3 deletions

nova/tests/unit/virt/test_hardware.py

@@ -2031,7 +2031,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
 
         self.assertInstanceCellPinned(inst_pin)
-        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=3)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
         self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
         got_pinning = {x: x for x in range(0, 3)}
         self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -2057,7 +2057,7 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
 
         self.assertInstanceCellPinned(inst_pin)
-        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)
         self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
         got_pinning = {x: x for x in range(0, 4)}
         self.assertEqual(got_pinning, inst_pin.cpu_pinning)
@@ -2287,6 +2287,21 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         self.assertIsNone(inst_pin)
 
+    def test_get_pinning_isolate_policy_fits(self):
+        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
+                                    memory=4096, memory_usage=0,
+                                    siblings=[],
+                                    mempages=[], pinned_cpus=set([]))
+        inst_pin = objects.InstanceNUMACell(
+            cpuset=set([0, 1]),
+            memory=2048,
+            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
+        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
+        self.assertInstanceCellPinned(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+
     def test_get_pinning_isolate_policy_fits_ht_host(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                     memory=4096, memory_usage=0,
                                     siblings=[set([0, 1]), set([2, 3])],

nova/virt/hardware.py

@@ -831,8 +831,9 @@ def _numa_fit_instance_cell_with_pinning(host_cell, instance_cell):
     else:
         # Straightforward to pin to available cpus when there is no
         # hyperthreading on the host
+        free_cpus = [set([cpu]) for cpu in host_cell.free_cpus]
         return _pack_instance_onto_cores(
-            [host_cell.free_cpus], instance_cell, host_cell.id)
+            free_cpus, instance_cell, host_cell.id)
 
 
 def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None):