Allow linear packing of cores

Given the following single-socket, four-core, HT-enabled CPU topology:

   +---+---+ +---+---+ +---+---+ +---+---+
   | x | x | | x |   | | x |   | |   |   |
   +---+---+ +---+---+ +---+---+ +---+---+
     0   4     1   5     2   6     3   7

Attempting to boot an instance with four cores and no explicit
'cpu_thread_policy' should be successful, with cores 5,6,3,7 used.
However, the current implementation of this implicit policy attempts to
fit the same number of instance cores onto each host CPU. For example,
a four core instance would result in either a 2*2 layout (two instance
cores on each of two host CPUs), or a 1*4 layout (one instance core on
each of four host CPUs). This may be correct behavior *where possible*,
but when no such even fit exists, any and all free cores should be used.

Resolve this issue by adding a fallthrough case, whereby if the
standard fitting policy fails, a linear assignment is used to properly
fit the instance cores.
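
As a purely illustrative aside (not part of the commit itself), the
following standalone sketch shows the effect of that linear fallback on
the topology above; the variable names are invented for the example:

    import itertools

    # Free host threads per physical core for the topology above: host CPUs
    # 0, 1, 2 and 4 are assumed to be pinned already, so only 5, 6, 3 and 7
    # remain. Lists are used so the iteration order is deterministic.
    available_siblings = [[], [5], [6], [3, 7]]
    instance_cpus = {0, 1, 2, 3}  # the instance's four vCPUs

    # Neither a 2*2 nor a 1*4 layout fits, so fall back to a linear
    # assignment: flatten the remaining siblings and pair them with the
    # sorted instance vCPUs.
    free_threads = itertools.chain(*available_siblings)
    pinning = list(zip(sorted(instance_cpus), free_threads))
    print(pinning)  # [(0, 5), (1, 6), (2, 3), (3, 7)]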

Change-Id: I73f7f771b7514060f1f74066e3dea1da8fe74c21
Closes-Bug: #1578155
(cherry picked from commit 8361d8d6c3)
Stephen Finucane, 2016-07-19 14:01:53 -07:00 (committed by Stephen Finucane)
parent 0cb20bb014
commit 2dcf8c22f8
2 changed files with 24 additions and 2 deletions

nova/tests/unit/virt/test_hardware.py

@@ -2397,6 +2397,19 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
         self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
 
+    def test_get_pinning_host_siblings_instance_mixed_siblings(self):
+        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
+                                    memory=4096, memory_usage=0,
+                                    siblings=[set([0, 1]), set([2, 3]),
+                                              set([4, 5]), set([6, 7])],
+                                    mempages=[], pinned_cpus=set([0, 1, 2, 5]))
+        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
+                                            memory=2048)
+        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
+        self.assertInstanceCellPinned(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+
     def test_get_pinning_host_siblings_instance_odd_fit_orphan_only(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                                     memory=4096, memory_usage=0,
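
Side note (not part of the change): the host in this new test has the same
shape as the commit-message example, only with different CPU numbering. A
quick sketch of the free threads per sibling pair shows why no even fit
exists and the fallback is required; the names below are illustrative, not
taken from the test:

    # Values mirror the NUMACell in the test above.
    siblings = [{0, 1}, {2, 3}, {4, 5}, {6, 7}]
    pinned = {0, 1, 2, 5}
    free = [s - pinned for s in siblings]
    print(free)  # [set(), {3}, {4}, {6, 7}]
    # Only three physical cores have free threads, and only one of them has
    # two, so a four-vCPU instance fits neither 1*4 nor 2*2 and needs the
    # linear fallback.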

nova/virt/hardware.py

@@ -752,7 +752,7 @@ def _pack_instance_onto_cores(available_siblings,
                   len(instance_cell))
     elif (instance_cell.cpu_thread_policy ==
             fields.CPUThreadAllocationPolicy.PREFER):
-        LOG.debug("Request 'prefer' thread policy for %d cores",
+        LOG.debug("Requested 'prefer' thread policy for %d cores",
                   len(instance_cell))
     elif (instance_cell.cpu_thread_policy ==
             fields.CPUThreadAllocationPolicy.ISOLATE):
@@ -774,7 +774,7 @@ def _pack_instance_onto_cores(available_siblings,
         pinning = _get_pinning(1,  # we only want to "use" one thread per core
                                sibling_sets[threads_per_core],
                                instance_cell.cpuset)
-    else:
+    else:  # REQUIRE, PREFER (explicit, implicit)
         # NOTE(ndipanov): We iterate over the sibling sets in descending order
         # of cores that can be packed. This is an attempt to evenly distribute
         # instances among physical cores
@@ -795,6 +795,15 @@ def _pack_instance_onto_cores(available_siblings,
             if pinning:
                 break
 
+        # NOTE(sfinucan): If siblings weren't available and we're using PREFER
+        # (implicitly or explicitly), fall back to linear assignment across
+        # cores
+        if (instance_cell.cpu_thread_policy !=
+                fields.CPUThreadAllocationPolicy.REQUIRE and
+                not pinning):
+            pinning = zip(sorted(instance_cell.cpuset),
+                          itertools.chain(*sibling_set))
+
     threads_no = _threads(instance_cell, threads_no)
 
     if not pinning:
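
To make the new guard concrete, here is a small self-contained sketch (not
Nova code; the policy strings and helper name are invented) showing that the
linear fallback applies under an implicit or explicit 'prefer' policy but
never under 'require':

    import itertools

    def linear_fallback(policy, cpuset, sibling_set, pinning=None):
        # Mirrors the shape of the added check: only fall back when the
        # thread policy is not 'require' and no pinning was found.
        if policy != 'require' and not pinning:
            pinning = list(zip(sorted(cpuset), itertools.chain(*sibling_set)))
        return pinning

    sibling_set = [[5], [6], [3, 7]]
    print(linear_fallback('prefer', {0, 1, 2, 3}, sibling_set))
    # [(0, 5), (1, 6), (2, 3), (3, 7)]
    print(linear_fallback('require', {0, 1, 2, 3}, sibling_set))
    # None -- with 'require' the instance must get real thread siblings or fail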