Move compute_node_to_inventory_dict to test-only code
Since [1], the only thing still using this utility method is some functional report client test code, so this change moves it to the test class that needs it.

[1] Ib62ac0b692eb92a2ed364ec9f486ded05def39ad

Change-Id: I016765112b4d7a811a855da5e503a8cb870afbbe
parent 112999e1dd
commit cea4f391f3
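At a glance, the first two hunks below remove the helper (and its now-unused import) from the compute utils module, the middle hunks add an equivalent helper to the functional report client test class and update its call sites, and the last hunk drops the now-redundant unit tests. A rough before/after sketch of a call site (names taken from the hunks below; self.compute_node is the test fixture's ComputeNode):

    # Before: inventory dict built by the shared compute utils helper.
    self.client.set_inventory_for_provider(
        self.context, self.compute_uuid,
        compute_utils.compute_node_to_inventory_dict(self.compute_node))

    # After: the equivalent helper lives on the test class itself.
    self.client.set_inventory_for_provider(
        self.context, self.compute_uuid,
        self.compute_node_to_inventory_dict())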
@@ -22,7 +22,6 @@ import math
 import traceback
 
 import netifaces
-import os_resource_classes as orc
 from oslo_log import log
 from oslo_serialization import jsonutils
 import six
@@ -1461,49 +1460,3 @@ def notify_about_instance_delete(notifier, context, instance,
             source=source,
             action=delete_type,
             phase=fields.NotificationPhase.END)
-
-
-# TODO(mriedem): We should be able to remove this now that the ResourceTracker
-# requires drivers to implement the update_provider_tree interface.
-def compute_node_to_inventory_dict(compute_node):
-    """Given a supplied `objects.ComputeNode` object, return a dict, keyed
-    by resource class, of various inventory information.
-
-    :param compute_node: `objects.ComputeNode` object to translate
-    """
-    result = {}
-
-    # NOTE(jaypipes): Ironic virt driver will return 0 values for vcpus,
-    # memory_mb and disk_gb if the Ironic node is not available/operable
-    if compute_node.vcpus > 0:
-        result[orc.VCPU] = {
-            'total': compute_node.vcpus,
-            'reserved': CONF.reserved_host_cpus,
-            'min_unit': 1,
-            'max_unit': compute_node.vcpus,
-            'step_size': 1,
-            'allocation_ratio': compute_node.cpu_allocation_ratio,
-        }
-    if compute_node.memory_mb > 0:
-        result[orc.MEMORY_MB] = {
-            'total': compute_node.memory_mb,
-            'reserved': CONF.reserved_host_memory_mb,
-            'min_unit': 1,
-            'max_unit': compute_node.memory_mb,
-            'step_size': 1,
-            'allocation_ratio': compute_node.ram_allocation_ratio,
-        }
-    if compute_node.local_gb > 0:
-        # TODO(johngarbutt) We should either move to reserved_host_disk_gb
-        # or start tracking DISK_MB.
-        reserved_disk_gb = convert_mb_to_ceil_gb(
-            CONF.reserved_host_disk_mb)
-        result[orc.DISK_GB] = {
-            'total': compute_node.local_gb,
-            'reserved': reserved_disk_gb,
-            'min_unit': 1,
-            'max_unit': compute_node.local_gb,
-            'step_size': 1,
-            'allocation_ratio': compute_node.disk_allocation_ratio,
-        }
-    return result
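A note on the disk reservation above: reserved_host_disk_mb is configured in MB while placement tracks DISK_GB, so the helper rounds the reservation up to whole GB. A minimal sketch of that behaviour (not the actual nova convert_mb_to_ceil_gb implementation, just what the name and the removed unit test at the end of this diff imply):

    import math

    def convert_mb_to_ceil_gb(mb_value):
        # Round up so the reserved disk space is never understated.
        return int(math.ceil(float(mb_value) / 1024))

    assert convert_mb_to_ceil_gb(200) == 1    # matches the removed unit test: 200 MB -> 1 GB
    assert convert_mb_to_ceil_gb(2048) == 2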
@@ -177,6 +177,39 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
         # TODO(efried): Rip this out and just use `as client` throughout.
         self.client = client
 
+    def compute_node_to_inventory_dict(self):
+        result = {}
+        if self.compute_node.vcpus > 0:
+            result[orc.VCPU] = {
+                'total': self.compute_node.vcpus,
+                'reserved': CONF.reserved_host_cpus,
+                'min_unit': 1,
+                'max_unit': self.compute_node.vcpus,
+                'step_size': 1,
+                'allocation_ratio': self.compute_node.cpu_allocation_ratio,
+            }
+        if self.compute_node.memory_mb > 0:
+            result[orc.MEMORY_MB] = {
+                'total': self.compute_node.memory_mb,
+                'reserved': CONF.reserved_host_memory_mb,
+                'min_unit': 1,
+                'max_unit': self.compute_node.memory_mb,
+                'step_size': 1,
+                'allocation_ratio': self.compute_node.ram_allocation_ratio,
+            }
+        if self.compute_node.local_gb > 0:
+            reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
+                CONF.reserved_host_disk_mb)
+            result[orc.DISK_GB] = {
+                'total': self.compute_node.local_gb,
+                'reserved': reserved_disk_gb,
+                'min_unit': 1,
+                'max_unit': self.compute_node.local_gb,
+                'step_size': 1,
+                'allocation_ratio': self.compute_node.disk_allocation_ratio,
+            }
+        return result
+
     def test_client_report_smoke(self):
         """Check things go as expected when doing the right things."""
         # TODO(cdent): We should probably also have a test that
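For reference, the dict this helper produces has one entry per resource class. For a node with 2 VCPUs, 1024 MB of RAM and 10 GB of disk, with the reserved_* options set as in the unit test removed at the end of this diff, it would look roughly like:

    expected_inventory = {
        'VCPU': {'total': 2, 'reserved': 1, 'min_unit': 1, 'max_unit': 2,
                 'step_size': 1, 'allocation_ratio': 16.0},
        'MEMORY_MB': {'total': 1024, 'reserved': 1000, 'min_unit': 1,
                      'max_unit': 1024, 'step_size': 1, 'allocation_ratio': 1.5},
        'DISK_GB': {'total': 10, 'reserved': 1, 'min_unit': 1, 'max_unit': 10,
                    'step_size': 1, 'allocation_ratio': 1.0},
    }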
@@ -202,8 +235,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                 self.context, self.compute_uuid, name=self.compute_name)
             self.client.set_inventory_for_provider(
                 self.context, self.compute_uuid,
-                compute_utils.compute_node_to_inventory_dict(
-                    self.compute_node))
+                self.compute_node_to_inventory_dict())
 
             # So now we have a resource provider
             rp = self.client._get_resource_provider(self.context,
@@ -289,8 +321,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
             self.compute_node.local_gb = 0
             self.client.set_inventory_for_provider(
                 self.context, self.compute_uuid,
-                compute_utils.compute_node_to_inventory_dict(
-                    self.compute_node))
+                self.compute_node_to_inventory_dict())
 
             # Check there's no more inventory records
             resp = self.client.get(inventory_url)
@@ -349,8 +380,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                 self.context, self.compute_uuid, name=self.compute_name)
             self.client.set_inventory_for_provider(
                 self.context, self.compute_uuid,
-                compute_utils.compute_node_to_inventory_dict(
-                    self.compute_node))
+                self.compute_node_to_inventory_dict())
             # The compute node is associated with two of the shared storages
             self.client.set_aggregates_for_provider(
                 self.context, self.compute_uuid,
@@ -1486,65 +1486,3 @@ class IsVolumeBackedInstanceTestCase(test.TestCase):
         self.assertFalse(
             compute_utils.is_volume_backed_instance(ctxt, instance, None))
         mock_bdms.assert_called_with(ctxt, instance.uuid)
-
-
-class TestComputeNodeToInventoryDict(test.NoDBTestCase):
-    def test_compute_node_inventory(self):
-        uuid = uuids.compute_node
-        name = 'computehost'
-        compute_node = objects.ComputeNode(uuid=uuid,
-                                           hypervisor_hostname=name,
-                                           vcpus=2,
-                                           cpu_allocation_ratio=16.0,
-                                           memory_mb=1024,
-                                           ram_allocation_ratio=1.5,
-                                           local_gb=10,
-                                           disk_allocation_ratio=1.0)
-
-        self.flags(reserved_host_memory_mb=1000)
-        self.flags(reserved_host_disk_mb=200)
-        self.flags(reserved_host_cpus=1)
-
-        result = compute_utils.compute_node_to_inventory_dict(compute_node)
-
-        expected = {
-            'VCPU': {
-                'total': compute_node.vcpus,
-                'reserved': CONF.reserved_host_cpus,
-                'min_unit': 1,
-                'max_unit': compute_node.vcpus,
-                'step_size': 1,
-                'allocation_ratio': compute_node.cpu_allocation_ratio,
-            },
-            'MEMORY_MB': {
-                'total': compute_node.memory_mb,
-                'reserved': CONF.reserved_host_memory_mb,
-                'min_unit': 1,
-                'max_unit': compute_node.memory_mb,
-                'step_size': 1,
-                'allocation_ratio': compute_node.ram_allocation_ratio,
-            },
-            'DISK_GB': {
-                'total': compute_node.local_gb,
-                'reserved': 1,  # this is ceil(1000/1024)
-                'min_unit': 1,
-                'max_unit': compute_node.local_gb,
-                'step_size': 1,
-                'allocation_ratio': compute_node.disk_allocation_ratio,
-            },
-        }
-        self.assertEqual(expected, result)
-
-    def test_compute_node_inventory_empty(self):
-        uuid = uuids.compute_node
-        name = 'computehost'
-        compute_node = objects.ComputeNode(uuid=uuid,
-                                           hypervisor_hostname=name,
-                                           vcpus=0,
-                                           cpu_allocation_ratio=16.0,
-                                           memory_mb=0,
-                                           ram_allocation_ratio=1.5,
-                                           local_gb=0,
-                                           disk_allocation_ratio=1.0)
-        result = compute_utils.compute_node_to_inventory_dict(compute_node)
-        self.assertEqual({}, result)