Mark sibling CPUs as 'used' for cpu_thread_policy = 'isolated'
The 'isolated' CPU thread allocation policy guarantees that no vCPUs from other guests can be placed on the cores used by the booted VM (here a core is a set of sibling vCPUs). However, VMs with the 'dedicated' CPU allocation policy could still be booted on those cores. This problem affects hosts without HyperThreading: on such hosts the set of sibling vCPUs is empty for each core, yet the code still treated those cores as HyperThreading cores, which allowed one "isolated" core to be used by several VMs. To fix this, the unpin_cpus_with_siblings() method must only be used when the NUMA cell actually has siblings (i.e. HyperThreading is enabled). For cells without HyperThreading, CPU isolation is already guaranteed by the 'dedicated' CPU allocation policy. Closes-Bug: #1635674 Change-Id: I8f72187153c930cd941b7ee7e835a20ed0c0de03
This commit is contained in:
parent
90f01d5ebb
commit
9f12b592d1
nova
@ -96,6 +96,12 @@ class NUMACell(base.NovaObject):
|
||||
self.pinned_cpus -= cpus
|
||||
|
||||
def pin_cpus_with_siblings(self, cpus):
|
||||
# NOTE(snikitin): Empty siblings list means that HyperThreading is
|
||||
# disabled on the NUMA cell and we must pin CPUs like normal CPUs.
|
||||
if not self.siblings:
|
||||
self.pin_cpus(cpus)
|
||||
return
|
||||
|
||||
pin_siblings = set()
|
||||
for sib in self.siblings:
|
||||
if cpus & sib:
|
||||
@ -103,6 +109,12 @@ class NUMACell(base.NovaObject):
|
||||
self.pin_cpus(pin_siblings)
|
||||
|
||||
def unpin_cpus_with_siblings(self, cpus):
|
||||
# NOTE(snikitin): Empty siblings list means that HyperThreading is
|
||||
# disabled on the NUMA cell and we must unpin CPUs like normal CPUs.
|
||||
if not self.siblings:
|
||||
self.unpin_cpus(cpus)
|
||||
return
|
||||
|
||||
pin_siblings = set()
|
||||
for sib in self.siblings:
|
||||
if cpus & sib:
|
||||
|
@ -124,6 +124,25 @@ class _TestNUMA(object):
|
||||
numacell.unpin_cpus_with_siblings(set([4]))
|
||||
self.assertEqual(set([1, 2, 3, 4]), numacell.free_cpus)
|
||||
|
||||
def test_pinning_with_siblings_with_empty_siblings_list(self):
    # An empty ``siblings`` list means HyperThreading is disabled on
    # the NUMA cell, so pin/unpin "with siblings" must degrade to
    # plain pin/unpin of exactly the requested CPUs.
    cell = objects.NUMACell(id=0, cpuset=set([1, 2, 3, 4]), memory=512,
                            cpu_usage=0, memory_usage=256,
                            pinned_cpus=set([]),
                            siblings=[],
                            mempages=[])

    cell.pin_cpus_with_siblings(set([1, 2]))
    self.assertEqual(set([1, 2]), cell.pinned_cpus)

    cell.unpin_cpus_with_siblings(set([1]))
    self.assertEqual(set([2]), cell.pinned_cpus)

    # Unpinning an already-free CPU and re-pinning an already-pinned
    # CPU must both be rejected, leaving the pin state untouched.
    self.assertRaises(exception.CPUUnpinningInvalid,
                      cell.unpin_cpus_with_siblings,
                      set([1]))
    self.assertRaises(exception.CPUPinningInvalid,
                      cell.pin_cpus_with_siblings,
                      set([2]))
    self.assertEqual(set([2]), cell.pinned_cpus)
|
||||
|
||||
def test_pages_topology_wipe(self):
|
||||
pages_topology = objects.NUMAPagesTopology(
|
||||
size_kb=2048, total=1024, used=512)
|
||||
|
@ -2765,6 +2765,48 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
|
||||
self.assertEqual(set([]), new_cell.cells[0].pinned_cpus)
|
||||
self.assertEqual(new_cell.cells[0].cpu_usage, 0)
|
||||
|
||||
def test_host_usage_from_instances_isolated_without_siblings(self):
    # Host cell without HyperThreading (empty siblings list). An
    # instance using the 'isolate' thread policy must pin exactly its
    # own CPUs rather than whole (empty) sibling sets.
    host_topo = objects.NUMATopology(
        cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                memory=4096, cpu_usage=0,
                                memory_usage=0,
                                siblings=[],
                                mempages=[], pinned_cpus=set([]))])
    inst_topo = objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            cpuset=set([0, 1, 2]), memory=2048, id=0,
            cpu_pinning={0: 0, 1: 1, 2: 2},
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
        )])

    result = hw.numa_usage_from_instances(host_topo, [inst_topo])

    # Exactly the instance's three vCPUs end up pinned on the host.
    self.assertEqual(inst_topo.cells[0].cpuset,
                     result.cells[0].pinned_cpus)
    self.assertEqual(result.cells[0].cpu_usage, 3)
|
||||
|
||||
def test_host_usage_from_instances_isolated_without_siblings_free(self):
    """Freeing an 'isolate'-policy instance on a host cell without
    HyperThreading (empty siblings list) must unpin only the CPUs the
    instance was actually pinned to, not whole sibling sets.
    """
    host_pin = objects.NUMATopology(
        cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                memory=4096, cpu_usage=4,
                                memory_usage=0,
                                siblings=[],
                                mempages=[],
                                pinned_cpus=set([0, 1, 2, 3]))])
    inst_pin = objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            # NOTE: cpuset now matches the cpu_pinning keys; it was
            # set([0, 1, 3]), which was inconsistent with a pinning
            # map covering vCPUs 0-2.
            cpuset=set([0, 1, 2]), memory=2048, id=0,
            cpu_pinning={0: 0, 1: 1, 2: 2},
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
        )])

    new_cell = hw.numa_usage_from_instances(host_pin,
                                            [inst_pin],
                                            free=True)
    # Host CPU 3 was not used by this instance, so it stays pinned;
    # usage drops from 4 to 1 after releasing the three instance CPUs.
    self.assertEqual(set([3]), new_cell.cells[0].pinned_cpus)
    self.assertEqual(new_cell.cells[0].cpu_usage, 1)
|
||||
|
||||
|
||||
class CPURealtimeTestCase(test.NoDBTestCase):
|
||||
def test_success_flavor(self):
|
||||
|
Loading…
x
Reference in New Issue
Block a user