Merge "placement: implement get_inventory() for libvirt"

Jenkins 2017-05-24 17:01:49 +00:00 committed by Gerrit Code Review
commit 3ce504de02
5 changed files with 248 additions and 4 deletions

View File

@ -34,11 +34,13 @@ from nova import exception
from nova.i18n import _, _LI, _LW
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
@ -77,6 +79,51 @@ def _is_trackable_migration(migration):
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
"""Helper function that injects various information from a compute node
object into the inventory dict returned from the virt driver's
get_inventory() method. This function allows us to marry information like
*_allocation_ratio and reserved memory amounts that are in the
compute_nodes DB table and that the virt driver doesn't know about with the
information the virt driver *does* know about.
Note that if the supplied inv_data contains allocation_ratio, reserved or
other fields, we DO NOT override the value with that of the compute node.
This is to ensure that the virt driver is the single source of truth
regarding inventory information. For instance, the Ironic virt driver will
always return a very specific inventory with allocation_ratios pinned to
1.0.
:param inv_data: Dict, keyed by resource class, of inventory information
returned from virt driver's get_inventory() method
:param cn: `objects.ComputeNode` describing the compute node
"""
if fields.ResourceClass.VCPU in inv_data:
cpu_inv = inv_data[fields.ResourceClass.VCPU]
if 'allocation_ratio' not in cpu_inv:
cpu_inv['allocation_ratio'] = cn.cpu_allocation_ratio
if 'reserved' not in cpu_inv:
cpu_inv['reserved'] = CONF.reserved_host_cpus
if fields.ResourceClass.MEMORY_MB in inv_data:
mem_inv = inv_data[fields.ResourceClass.MEMORY_MB]
if 'allocation_ratio' not in mem_inv:
mem_inv['allocation_ratio'] = cn.ram_allocation_ratio
if 'reserved' not in mem_inv:
mem_inv['reserved'] = CONF.reserved_host_memory_mb
if fields.ResourceClass.DISK_GB in inv_data:
disk_inv = inv_data[fields.ResourceClass.DISK_GB]
if 'allocation_ratio' not in disk_inv:
disk_inv['allocation_ratio'] = cn.disk_allocation_ratio
if 'reserved' not in disk_inv:
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
# or start tracking DISK_MB.
reserved_mb = CONF.reserved_host_disk_mb
reserved_gb = report.convert_mb_to_ceil_gb(reserved_mb)
disk_inv['reserved'] = reserved_gb
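
To see what the helper does in practice, here is a minimal illustrative sketch; the ComputeNode values and the partial, libvirt-style inventory are hypothetical, chosen to mirror the unit tests further down:

    from nova import objects
    from nova.objects import fields

    # Hypothetical compute node and a partial inventory as a libvirt-style
    # driver might return it (totals and unit bounds only).
    cn = objects.ComputeNode(cpu_allocation_ratio=16.0,
                             ram_allocation_ratio=1.5,
                             disk_allocation_ratio=1.0)
    inv = {
        fields.ResourceClass.VCPU: {
            'total': 8, 'min_unit': 1, 'max_unit': 8, 'step_size': 1,
        },
    }
    _normalize_inventory_from_cn_obj(inv, cn)
    # The missing keys are filled in from the compute node and nova.conf:
    #   inv[fields.ResourceClass.VCPU]['allocation_ratio'] == 16.0
    #   inv[fields.ResourceClass.VCPU]['reserved'] == CONF.reserved_host_cpus
    # Keys the virt driver already supplied would have been left untouched.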
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
@ -756,6 +803,7 @@ class ResourceTracker(object):
# Persist the stats to the Scheduler
try:
inv_data = self.driver.get_inventory(nodename)
_normalize_inventory_from_cn_obj(inv_data, compute_node)
self.scheduler_client.set_inventory_for_provider(
compute_node.uuid,
compute_node.hypervisor_hostname,
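
The hunk above is cut off by the diff view. As a rough sketch of the call pattern it introduces (the NotImplementedError fallback is an assumption suggested by the tests below, not something shown in this hunk), the update path reads roughly:

    try:
        # New path: ask the virt driver for placement-style inventory, fold
        # in the compute node's allocation ratios and reserved amounts, then
        # push the result to the placement service.
        inv_data = self.driver.get_inventory(nodename)
        _normalize_inventory_from_cn_obj(inv_data, compute_node)
        self.scheduler_client.set_inventory_for_provider(
            compute_node.uuid, compute_node.hypervisor_hostname, inv_data)
    except NotImplementedError:
        # Assumed fallback for drivers that don't implement get_inventory():
        # keep using the pre-existing compute-node update path.
        self.scheduler_client.update_compute_node(compute_node)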

View File

@ -86,7 +86,7 @@ def safe_connect(f):
return wrapper
def _convert_mb_to_ceil_gb(mb_value):
def convert_mb_to_ceil_gb(mb_value):
gb_int = 0
if mb_value:
gb_float = mb_value / 1024.0
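
The body of the renamed helper is truncated by the diff view, but the behaviour it implements, rounding an MB value up to whole GBs, can be sketched standalone (this is an approximation of convert_mb_to_ceil_gb, not its verbatim body):

    import math

    def mb_to_ceil_gb(mb_value):
        # 0/None stays 0; anything else rounds up to the next whole GB.
        if not mb_value:
            return 0
        return int(math.ceil(mb_value / 1024.0))

    assert mb_to_ceil_gb(0) == 0
    assert mb_to_ceil_gb(100) == 1   # reserved_host_disk_mb=100 -> 1 GB
    assert mb_to_ceil_gb(1024) == 1
    assert mb_to_ceil_gb(1025) == 2

This rounding is why the normalization test below expects 'reserved': 1 for DISK_GB when reserved_host_disk_mb is 100.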
@ -126,7 +126,7 @@ def _compute_node_to_inventory_dict(compute_node):
if compute_node.local_gb > 0:
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
# or start tracking DISK_MB.
reserved_disk_gb = _convert_mb_to_ceil_gb(CONF.reserved_host_disk_mb)
reserved_disk_gb = convert_mb_to_ceil_gb(CONF.reserved_host_disk_mb)
result[DISK_GB] = {
'total': compute_node.local_gb,
'reserved': reserved_disk_gb,
@ -149,7 +149,7 @@ def _instance_to_allocations_dict(instance):
instance)
# TODO(johngarbutt) we have to round up swap MB to the next GB.
# It would be better to claim disk in MB, but that is hard now.
swap_in_gb = _convert_mb_to_ceil_gb(instance.flavor.swap)
swap_in_gb = convert_mb_to_ceil_gb(instance.flavor.swap)
disk = ((0 if is_bfv else instance.flavor.root_gb) +
swap_in_gb + instance.flavor.ephemeral_gb)
alloc_dict = {

View File

@ -1157,8 +1157,11 @@ class TestUpdateComputeNode(BaseTestCase):
save_mock.assert_called_once_with()
ucn_mock.assert_called_once_with(new_compute)
@mock.patch('nova.compute.resource_tracker.'
'_normalize_inventory_from_cn_obj')
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_node_get_inventory_implemented(self, save_mock):
def test_existing_node_get_inventory_implemented(self, save_mock,
norm_mock):
"""The get_inventory() virt driver method is only implemented for some
virt drivers. This method returns inventory information for a
node/provider in a way that the placement API better understands, and
@ -1192,6 +1195,129 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertFalse(ucn_mock.called)
class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
def test_normalize_libvirt(self):
self.flags(reserved_host_disk_mb=100,
reserved_host_memory_mb=10,
reserved_host_cpus=1)
vcpus = 24
memory_mb = 1024
disk_gb = 200
cn = objects.ComputeNode(
mock.sentinel.ctx,
ram_allocation_ratio=1.5,
cpu_allocation_ratio=16.0,
disk_allocation_ratio=1.0,
)
# What we get back from the libvirt driver, for instance, doesn't contain
# allocation_ratio or reserved amounts for some resources. Verify that
# the values on the compute node fill in those missing pieces...
inv = {
obj_fields.ResourceClass.VCPU: {
'total': vcpus,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
},
obj_fields.ResourceClass.MEMORY_MB: {
'total': memory_mb,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
},
obj_fields.ResourceClass.DISK_GB: {
'total': disk_gb,
'min_unit': 1,
'max_unit': disk_gb,
'step_size': 1,
},
}
expected = {
obj_fields.ResourceClass.VCPU: {
'total': vcpus,
'reserved': 1,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
'allocation_ratio': 16.0,
},
obj_fields.ResourceClass.MEMORY_MB: {
'total': memory_mb,
'reserved': 10,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
'allocation_ratio': 1.5,
},
obj_fields.ResourceClass.DISK_GB: {
'total': disk_gb,
'reserved': 1, # Rounded up from CONF.reserved_host_disk_mb
'min_unit': 1,
'max_unit': disk_gb,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
self.assertNotEqual(expected, inv)
resource_tracker._normalize_inventory_from_cn_obj(inv, cn)
self.assertEqual(expected, inv)
def test_normalize_ironic(self):
"""Test that when normalizing the return from Ironic virt driver's
get_inventory() method, we don't overwrite the information that the
virt driver gave us.
"""
self.flags(reserved_host_disk_mb=100,
reserved_host_memory_mb=10,
reserved_host_cpus=1)
vcpus = 24
memory_mb = 1024
disk_gb = 200
# We will make sure that these field values do NOT override what the
# Ironic virt driver sets (which is, for example, that allocation
# ratios are all 1.0 for Ironic baremetal nodes)
cn = objects.ComputeNode(
mock.sentinel.ctx,
ram_allocation_ratio=1.5,
cpu_allocation_ratio=16.0,
disk_allocation_ratio=1.0,
)
# What we get back from Ironic driver contains fully-filled-out info
# blocks for VCPU, MEMORY_MB, DISK_GB and the custom resource class
# inventory items
inv = {
obj_fields.ResourceClass.VCPU: {
'total': vcpus,
'reserved': 0,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
'allocation_ratio': 1.0,
},
obj_fields.ResourceClass.MEMORY_MB: {
'total': memory_mb,
'reserved': 0,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
'allocation_ratio': 1.0,
},
obj_fields.ResourceClass.DISK_GB: {
'total': disk_gb,
'reserved': 0,
'min_unit': 1,
'max_unit': disk_gb,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
# We are expecting that NOTHING changes after calling the normalization
# function
expected = copy.deepcopy(inv)
resource_tracker._normalize_inventory_from_cn_obj(inv, cn)
self.assertEqual(expected, inv)
class TestInstanceClaim(BaseTestCase):
def setUp(self):

View File

@ -15535,6 +15535,43 @@ class HostStateTestCase(test.NoDBTestCase):
HostStateTestCase.numa_topology._to_dict()))
class TestGetInventory(test.NoDBTestCase):
def setUp(self):
super(TestGetInventory, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
return_value={'total': 200})
@mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
return_value=1024)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
return_value=24)
def test_get_inventory(self, mock_vcpu, mock_mem, mock_disk):
expected_inv = {
fields.ResourceClass.VCPU: {
'total': 24,
'min_unit': 1,
'max_unit': 24,
'step_size': 1,
},
fields.ResourceClass.MEMORY_MB: {
'total': 1024,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
},
fields.ResourceClass.DISK_GB: {
'total': 200,
'min_unit': 1,
'max_unit': 200,
'step_size': 1,
},
}
inv = self.driver.get_inventory(mock.sentinel.nodename)
self.assertEqual(expected_inv, inv)
class LibvirtDriverTestCase(test.NoDBTestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):

View File

@ -5608,6 +5608,39 @@ class LibvirtDriver(driver.ComputeDriver):
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def get_inventory(self, nodename):
"""Return a dict, keyed by resource class, of inventory information for
the supplied node.
"""
disk_gb = int(self._get_local_gb_info()['total'])
memory_mb = int(self._host.get_memory_mb_total())
vcpus = self._get_vcpu_total()
# NOTE(jaypipes): We leave some fields like allocation_ratio and
# reserved out of the returned dicts here because, for now at least,
# the RT injects those values into the inventory dict based on the
# compute_nodes record values.
result = {
fields.ResourceClass.VCPU: {
'total': vcpus,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
},
fields.ResourceClass.MEMORY_MB: {
'total': memory_mb,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
},
fields.ResourceClass.DISK_GB: {
'total': disk_gb,
'min_unit': 1,
'max_unit': disk_gb,
'step_size': 1,
},
}
return result
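
For context on how placement interprets these fields: min_unit/max_unit bound the size of any single allocation, step_size is the allocation granularity, and usable capacity is derived from total, reserved and allocation_ratio once the resource tracker has filled those in. A back-of-the-envelope check using the MEMORY_MB numbers from the tests above:

    # Illustrative capacity arithmetic (mirrors placement's capacity formula):
    #   capacity = (total - reserved) * allocation_ratio
    total_mb, reserved_mb, ratio = 1024, 10, 1.5
    capacity_mb = int((total_mb - reserved_mb) * ratio)
    assert capacity_mb == 1521  # MB of RAM placement will allow to be allocated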
def get_available_resource(self, nodename):
"""Retrieve resource information.