PowerVM: update_provider_tree() (compatible)

Implement the update_provider_tree ComputeDriver method in the
PowerVMDriver.

In this patch, we *just* implement it to be backward compatible with how
the resource tracker populates the compute node resource provider
inventory based on get_available_resource. Future patches will bring
sanity to such values as allocation_ratio, min_unit, and reserved.
Future patches (as part of larger efforts) will also handle providers
and/or inventories for SR-IOV, devices, etc.
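
For illustration only (not part of this change), a minimal sketch of how
the new hook is expected to be exercised, using just the ProviderTree
calls shown in the tests below; `drv` stands in for an initialized
PowerVMDriver and the UUID is a placeholder:

    from nova.compute import provider_tree

    # Build the (single-provider) tree the resource tracker would pass in.
    ptree = provider_tree.ProviderTree()
    ptree.new_root('compute_host', '00000000-0000-0000-0000-000000000000')

    # Assumes drv's host_wrapper was just refreshed by
    # get_available_resource() earlier in the periodic
    # update_available_resource flow.
    drv.update_provider_tree(ptree, 'compute_host')

    # The provider now carries VCPU, MEMORY_MB and DISK_GB inventory whose
    # allocation_ratio/reserved values mirror the legacy CONF-driven
    # defaults (16.0, 1.5, 1.0 and the reserved_host_* options).
    inv = ptree.data('compute_host').inventory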

Change-Id: I78ee21165e6654c1f0b3725632f9acae2f437888
Eric Fried 2018-08-14 16:42:48 -05:00
parent 9b518d8c99
commit dcc2bfa1c1
2 changed files with 167 additions and 5 deletions

@@ -16,12 +16,13 @@
from __future__ import absolute_import
import collections
import fixtures
import contextlib
import logging
import mock
from oslo_serialization import jsonutils
import fixtures
import mock
from nova import block_device as nova_block_device
from nova.compute import provider_tree
from nova.compute import task_states
from nova import conf as cfg
from nova import exception as exc
@@ -29,10 +30,13 @@ from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as bdmobj
from nova import test
from nova.tests import uuidsentinel as uuids
from nova.virt import block_device as nova_virt_bdm
from nova.virt import driver as virt_driver
from nova.virt import fake
from nova.virt import hardware
from nova.virt.powervm_ext import driver
from oslo_serialization import jsonutils
from pypowervm import adapter as pvm_adp
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
@@ -42,8 +46,6 @@ from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import logical_partition as pvm_lpar
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova.virt.powervm_ext import driver
from nova_powervm.tests.virt import powervm
from nova_powervm.tests.virt.powervm import fixtures as fx
from nova_powervm.virt.powervm import exception as p_exc
@@ -1593,6 +1595,106 @@ class TestPowerVMDriver(test.NoDBTestCase):
            value = stats.get(fld, None)
            self.assertIsNotNone(value)

    @contextlib.contextmanager
    def _update_provider_tree(self):
        """Host resource dict gets converted properly to provider tree inv."""
        with mock.patch('nova_powervm.virt.powervm.host.'
                        'build_host_resource_from_ms') as mock_bhrfm:
            mock_bhrfm.return_value = {
                'vcpus': 8,
                'memory_mb': 2048,
            }
            self.drv.host_wrapper = 'host_wrapper'
            # Validate that this gets converted to int with floor
            self.drv.disk_dvr = mock.Mock(capacity=2091.8)
            exp_inv = {
                'VCPU': {
                    'total': 8,
                    'max_unit': 8,
                    'allocation_ratio': 16.0,
                    'reserved': 0,
                },
                'MEMORY_MB': {
                    'total': 2048,
                    'max_unit': 2048,
                    'allocation_ratio': 1.5,
                    'reserved': 512,
                },
                'DISK_GB': {
                    'total': 2091,
                    'max_unit': 2091,
                    'allocation_ratio': 1.0,
                    'reserved': 0,
                },
            }
            ptree = provider_tree.ProviderTree()
            ptree.new_root('compute_host', uuids.cn)
            # Let the caller muck with these
            yield ptree, exp_inv
            self.drv.update_provider_tree(ptree, 'compute_host')
            self.assertEqual(exp_inv, ptree.data('compute_host').inventory)
            mock_bhrfm.assert_called_once_with('host_wrapper')

    def test_update_provider_tree(self):
        # Basic: no inventory already on the provider, no extra providers, no
        # aggregates or traits.
        with self._update_provider_tree():
            pass

    def test_update_provider_tree_conf_overrides(self):
        # Non-default CONF values for allocation ratios and reserved.
        self.flags(cpu_allocation_ratio=12.3,
                   reserved_host_cpus=4,
                   ram_allocation_ratio=4.5,
                   reserved_host_memory_mb=32,
                   disk_allocation_ratio=6.7,
                   # This gets int(ceil)'d
                   reserved_host_disk_mb=5432.1)
        with self._update_provider_tree() as (_, exp_inv):
            exp_inv['VCPU']['allocation_ratio'] = 12.3
            exp_inv['VCPU']['reserved'] = 4
            exp_inv['MEMORY_MB']['allocation_ratio'] = 4.5
            exp_inv['MEMORY_MB']['reserved'] = 32
            exp_inv['DISK_GB']['allocation_ratio'] = 6.7
            exp_inv['DISK_GB']['reserved'] = 6

    def test_update_provider_tree_complex_ptree(self):
        # Overrides inventory already on the provider; leaves other providers
        # and aggregates/traits alone.
        with self._update_provider_tree() as (ptree, _):
            ptree.update_inventory('compute_host', {
                # these should get blown away
                'VCPU': {
                    'total': 16,
                    'max_unit': 2,
                    'allocation_ratio': 1.0,
                    'reserved': 10,
                },
                'CUSTOM_BOGUS': {
                    'total': 1234,
                }
            })
            ptree.update_aggregates('compute_host',
                                    [uuids.ss_agg, uuids.other_agg])
            ptree.update_traits('compute_host', ['CUSTOM_FOO', 'CUSTOM_BAR'])
            ptree.new_root('ssp', uuids.ssp)
            ptree.update_inventory('ssp', {'sentinel': 'inventory',
                                           'for': 'ssp'})
            ptree.update_aggregates('ssp', [uuids.ss_agg])
            ptree.new_child('sriov', 'compute_host', uuid=uuids.sriov)

        # Make sure the compute's agg and traits were left alone
        cndata = ptree.data('compute_host')
        self.assertEqual(set([uuids.ss_agg, uuids.other_agg]),
                         cndata.aggregates)
        self.assertEqual(set(['CUSTOM_FOO', 'CUSTOM_BAR']), cndata.traits)
        # And the other providers were left alone
        self.assertEqual(set([uuids.cn, uuids.ssp, uuids.sriov]),
                         set(ptree.get_provider_uuids()))
        # ...including the ssp's aggregates
        self.assertEqual(set([uuids.ss_agg]), ptree.data('ssp').aggregates)

    @mock.patch('nova_powervm.virt.powervm.vif.plug_secure_rmc_vif')
    @mock.patch('nova_powervm.virt.powervm.vif.get_secure_rmc_vswitch')
    @mock.patch('nova_powervm.virt.powervm.vif.plug')

@@ -23,6 +23,7 @@ from nova import context as ctx
from nova import exception
from nova import image
from nova import objects
from nova import rc_fields
from nova import utils as n_utils
from nova.virt import configdrive
from nova.virt import driver
@@ -974,6 +975,9 @@ class PowerVMDriver(driver.ComputeDriver):
        """
        # Do this here so it refreshes each time this method is called.
        self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
        return self._get_available_resource()

    def _get_available_resource(self):
        # Get host information
        data = pvm_host.build_host_resource_from_ms(self.host_wrapper)
@@ -983,6 +987,62 @@
        return data

    def update_provider_tree(self, provider_tree, nodename):
        """Update a ProviderTree with current provider and inventory data.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information.
        """
        # Get (legacy) resource information. Same as get_available_resource,
        # but we don't need to refresh self.host_wrapper as it was *just*
        # refreshed by get_available_resource in the resource tracker's
        # update_available_resource flow.
        data = self._get_available_resource()

        # TODO(efried): Fix these to reflect something like reality
        # For now, duplicate the logic the resource tracker uses via
        # update_compute_node when get_inventory/update_provider_tree is not
        # implemented.
        cpu_alloc_ratio = CONF.cpu_allocation_ratio or 16.0
        cpu_reserved = CONF.reserved_host_cpus
        mem_alloc_ratio = CONF.ram_allocation_ratio or 1.5
        mem_reserved = CONF.reserved_host_memory_mb
        disk_alloc_ratio = CONF.disk_allocation_ratio or 1.0
        disk_reserved = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)

        inventory = {
            rc_fields.ResourceClass.VCPU: {
                'total': data['vcpus'],
                'max_unit': data['vcpus'],
                'allocation_ratio': cpu_alloc_ratio,
                'reserved': cpu_reserved,
            },
            rc_fields.ResourceClass.MEMORY_MB: {
                'total': data['memory_mb'],
                'max_unit': data['memory_mb'],
                'allocation_ratio': mem_alloc_ratio,
                'reserved': mem_reserved,
            },
            rc_fields.ResourceClass.DISK_GB: {
                # TODO(efried): Proper DISK_GB sharing when SSP driver in play
                'total': int(data['local_gb']),
                'max_unit': int(data['local_gb']),
                'allocation_ratio': disk_alloc_ratio,
                'reserved': disk_reserved,
            },
        }
        provider_tree.update_inventory(nodename, inventory)

    def get_host_uptime(self):
        """Returns the result of calling "uptime" on the target host."""
        # trivial implementation from libvirt/driver.py for consistency