libvirt: set NUMA memory allocation policy for instances

When NUMA placement is in effect, Nova configures KVM guest vCPUs
to be placed on certain host NUMA nodes. It also provides a guest
NUMA topology of vCPUs and RAM. The kernel will normally satisfy
memory allocations from the host NUMA node that the guest vCPU is
running on, but this is only a preference. Nova needs to set a
strict policy to guarantee that guest RAM is always allocated from
the desired host NUMA node. This change ensures that by setting
the following XML:

  <numatune>
    <memory mode='strict' nodeset='0-1'/>
    <memnode cellid='0' mode='strict' nodeset='0'/>
    <memnode cellid='1' mode='strict' nodeset='1'/>
  </numatune>
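
For illustration only (not part of this change's diff), the
<numatune> element above corresponds roughly to the following
assembly of the nova.virt.libvirt.config helper classes that the
driver code below relies on; the two-cell guest-to-host mapping is
a hypothetical example:

  # Sketch: build the <numatune> shown above with the config classes
  # used by the driver change below. The 1:1 cell-to-node mapping is
  # a hypothetical example, not something this commit hardcodes.
  from nova.virt.libvirt import config as vconfig

  numa_tune = vconfig.LibvirtConfigGuestNUMATune()

  numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
  numa_mem.mode = "strict"
  numa_mem.nodeset = [0, 1]             # all host nodes backing the guest

  numa_memnodes = []
  for guest_cell_id, host_node_id in ((0, 0), (1, 1)):
      memnode = vconfig.LibvirtConfigGuestNUMATuneMemNode()
      memnode.cellid = guest_cell_id    # guest NUMA cell
      memnode.nodeset = [host_node_id]  # host node its RAM must come from
      memnode.mode = "strict"
      numa_memnodes.append(memnode)

  numa_tune.memory = numa_mem
  numa_tune.memnodes = numa_memnodes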

Closes-bug: #1385308
Change-Id: I02c301ae648235dc48527559a5d3c3d45de859ac
Author: Daniel P. Berrange
Date:   2014-10-27 11:30:24 +00:00
Commit: d9276355d9 (parent: d9d04933a4)

2 changed files with 40 additions and 9 deletions

@@ -1367,6 +1367,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
             cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
             self.assertIsNone(cfg.cpuset)
             self.assertIsNone(cfg.cputune)
+            self.assertIsNone(cfg.numatune)
             self.assertIsNotNone(cfg.cpu.numa)
             for instance_cell, numa_cfg_cell in zip(
                     instance_topology.cells, cfg.cpu.numa.cells):
@@ -1432,6 +1433,16 @@ class LibvirtConnTestCase(test.NoDBTestCase):
                 self.assertEqual(instance_cell.memory * units.Ki,
                                  numa_cfg_cell.memory)
+
+            allnodes = [cell.id for cell in instance_topology.cells]
+            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
+            self.assertEqual("strict", cfg.numatune.memory.mode)
+
+            for instance_cell, memnode in zip(
+                    instance_topology.cells, cfg.numatune.memnodes):
+                self.assertEqual(instance_cell.id, memnode.cellid)
+                self.assertEqual([instance_cell.id], memnode.nodeset)
+                self.assertEqual("strict", memnode.mode)

     @mock.patch.object(objects.Flavor, 'get_by_id')
     def test_get_guest_config_clock(self, mock_flavor):
         self.flags(virt_type='kvm', group='libvirt')

@@ -3707,12 +3707,12 @@ class LibvirtDriver(driver.ComputeDriver):
         libvirt XML config object representing the NUMA topology selected
         for the guest. Returns a tuple of:

-            (cpu_set, guest_cpu_tune, guest_cpu_numa)
+            (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)

         With the following caveats:

             a) If there is no specified guest NUMA topology, then
-               guest_cpu_tune and guest_cpu_numa shall be None. cpu_set
+               all tuple elements except cpu_set shall be None. cpu_set
                will be populated with the chosen CPUs that the guest
                allowed CPUs fit within, which could be the supplied
                allowed_cpus value if the host doesn't support NUMA
@@ -3725,7 +3725,8 @@ class LibvirtDriver(driver.ComputeDriver):
                will contain a LibvirtConfigGuestCPUTune object representing
                the optimized chosen cells that match the host capabilities
                with the instance's requested topology. If the host does
-               not support NUMA, then guest_cpu_tune will be None.
+               not support NUMA, then guest_cpu_tune and guest_numa_tune
+               will be None.
         """
         topology = self._get_host_numa_topology()
         # We have instance NUMA so translate it to the config class
@@ -3749,29 +3750,46 @@ class LibvirtDriver(driver.ComputeDriver):
                     # TODO(ndipanov): Attempt to spread the instance accross
                     # NUMA nodes and expose the topology to the instance as an
                     # optimisation
-                    return allowed_cpus, None, None
+                    return allowed_cpus, None, None, None
                 else:
                     pin_cpuset = random.choice(viable_cells_cpus)
-                    return pin_cpuset, None, None
+                    return pin_cpuset, None, None, None
             else:
                 # We have no NUMA topology in the host either
-                return allowed_cpus, None, None
+                return allowed_cpus, None, None, None
         else:
             if topology:
                 # Now get the CpuTune configuration from the numa_topology
                 guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
+                guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
+
+                numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
+                numa_memnodes = []
+
                 for host_cell in topology.cells:
                     for guest_cell in guest_cpu_numa.cells:
                         if guest_cell.id == host_cell.id:
+                            node = vconfig.LibvirtConfigGuestNUMATuneMemNode()
+                            node.cellid = guest_cell.id
+                            node.nodeset = [host_cell.id]
+                            node.mode = "strict"
+                            numa_memnodes.append(node)
+
+                            numa_mem.nodeset.append(host_cell.id)
+
                             for cpu in guest_cell.cpus:
                                 pin_cpuset = (
                                     vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
                                 pin_cpuset.id = cpu
                                 pin_cpuset.cpuset = host_cell.cpuset
                                 guest_cpu_tune.vcpupin.append(pin_cpuset)
-                return None, guest_cpu_tune, guest_cpu_numa
+
+                guest_numa_tune.memory = numa_mem
+                guest_numa_tune.memnodes = numa_memnodes
+
+                return None, guest_cpu_tune, guest_cpu_numa, guest_numa_tune
             else:
-                return allowed_cpus, None, guest_cpu_numa
+                return allowed_cpus, None, guest_cpu_numa, None

     def _get_guest_os_type(self, virt_type):
         """Returns the guest OS type based on virt type."""
@@ -3960,10 +3978,12 @@ class LibvirtDriver(driver.ComputeDriver):
         guest.vcpus = flavor.vcpus
         allowed_cpus = hardware.get_vcpu_pin_set()
-        cpuset, cputune, guest_cpu_numa = self._get_guest_numa_config(
+        cpuset, cputune, guest_cpu_numa, guest_numa_tune = \
+            self._get_guest_numa_config(
             context, instance, flavor, allowed_cpus)
         guest.cpuset = cpuset
         guest.cputune = cputune
+        guest.numatune = guest_numa_tune
         guest.metadata.append(self._get_guest_config_meta(context,
                                                           instance,
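
As a usage note that goes beyond this diff: once an instance boots
with the new config, the effective policy can be read back through
the libvirt Python bindings. The sketch below assumes a qemu:///system
connection and a hypothetical domain name; numaParameters() wraps
virDomainGetNumaParameters():

  import libvirt

  conn = libvirt.open("qemu:///system")
  dom = conn.lookupByName("instance-00000001")  # hypothetical name

  # Expected with this change: a mode equivalent to "strict" and a
  # nodeset covering the host NUMA nodes chosen for the guest, e.g. "0-1".
  print(dom.numaParameters())
  conn.close()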