Add resource utilities to scheduler utils
This adds resources_from_flavor() and merge_resources() to help us in the following patches.

Co-Authored-By: Jay Pipes <jaypipes@gmail.com>
Change-Id: I34e4d572a5fb0e0be48e75dece49ebc7337ad30c
This commit is contained in:
parent
cd864faa9d
commit
057ca0b7ef
|
@ -28,6 +28,7 @@ from nova.compute import claims
|
|||
from nova.compute import monitors
|
||||
from nova.compute import stats
|
||||
from nova.compute import task_states
|
||||
from nova.compute import utils as compute_utils
|
||||
from nova.compute import vm_states
|
||||
import nova.conf
|
||||
from nova import exception
|
||||
|
@ -40,7 +41,6 @@ from nova.pci import manager as pci_manager
|
|||
from nova.pci import request as pci_request
|
||||
from nova import rpc
|
||||
from nova.scheduler import client as scheduler_client
|
||||
from nova.scheduler.client import report
|
||||
from nova import utils
|
||||
from nova.virt import hardware
|
||||
|
||||
|
@ -120,7 +120,7 @@ def _normalize_inventory_from_cn_obj(inv_data, cn):
|
|||
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
|
||||
# or start tracking DISK_MB.
|
||||
reserved_mb = CONF.reserved_host_disk_mb
|
||||
reserved_gb = report.convert_mb_to_ceil_gb(reserved_mb)
|
||||
reserved_gb = compute_utils.convert_mb_to_ceil_gb(reserved_mb)
|
||||
disk_inv['reserved'] = reserved_gb
|
||||
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@ import contextlib
|
|||
import functools
|
||||
import inspect
|
||||
import itertools
|
||||
import math
|
||||
import string
|
||||
import traceback
|
||||
|
||||
|
@ -218,6 +219,15 @@ def is_volume_backed_instance(context, instance, bdms=None):
|
|||
return not instance['image_ref']
|
||||
|
||||
|
||||
def convert_mb_to_ceil_gb(mb_value):
    """Convert a size in MiB to whole GiB, rounding up.

    A falsy input (0 or None) yields 0.
    """
    if not mb_value:
        return 0
    # Round up so we always reserve/allocate at least enough space.
    return int(math.ceil(mb_value / 1024.0))
|
||||
|
||||
|
||||
def _get_unused_letter(used_letters):
|
||||
doubles = [first + second for second in string.ascii_lowercase
|
||||
for first in string.ascii_lowercase]
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
|
||||
import copy
|
||||
import functools
|
||||
import math
|
||||
import re
|
||||
import time
|
||||
|
||||
|
@ -30,6 +29,7 @@ from nova import exception
|
|||
from nova.i18n import _LE, _LI, _LW
|
||||
from nova import objects
|
||||
from nova.objects import fields
|
||||
from nova.scheduler import utils as scheduler_utils
|
||||
|
||||
CONF = nova.conf.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -87,15 +87,6 @@ def safe_connect(f):
|
|||
return wrapper
|
||||
|
||||
|
||||
def convert_mb_to_ceil_gb(mb_value):
|
||||
gb_int = 0
|
||||
if mb_value:
|
||||
gb_float = mb_value / 1024.0
|
||||
# ensure we reserve/allocate enough space by rounding up to nearest GB
|
||||
gb_int = int(math.ceil(gb_float))
|
||||
return gb_int
|
||||
|
||||
|
||||
def _compute_node_to_inventory_dict(compute_node):
|
||||
"""Given a supplied `objects.ComputeNode` object, return a dict, keyed
|
||||
by resource class, of various inventory information.
|
||||
|
@ -127,7 +118,8 @@ def _compute_node_to_inventory_dict(compute_node):
|
|||
if compute_node.local_gb > 0:
|
||||
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
|
||||
# or start tracking DISK_MB.
|
||||
reserved_disk_gb = convert_mb_to_ceil_gb(CONF.reserved_host_disk_mb)
|
||||
reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
|
||||
CONF.reserved_host_disk_mb)
|
||||
result[DISK_GB] = {
|
||||
'total': compute_node.local_gb,
|
||||
'reserved': reserved_disk_gb,
|
||||
|
@ -145,37 +137,8 @@ def _instance_to_allocations_dict(instance):
|
|||
|
||||
:param instance: `objects.Instance` object to translate
|
||||
"""
|
||||
# NOTE(danms): Boot-from-volume instances consume no local disk
|
||||
is_bfv = compute_utils.is_volume_backed_instance(instance._context,
|
||||
instance)
|
||||
# TODO(johngarbutt) we have to round up swap MB to the next GB.
|
||||
# It would be better to claim disk in MB, but that is hard now.
|
||||
swap_in_gb = convert_mb_to_ceil_gb(instance.flavor.swap)
|
||||
disk = ((0 if is_bfv else instance.flavor.root_gb) +
|
||||
swap_in_gb + instance.flavor.ephemeral_gb)
|
||||
alloc_dict = {
|
||||
MEMORY_MB: instance.flavor.memory_mb,
|
||||
VCPU: instance.flavor.vcpus,
|
||||
DISK_GB: disk,
|
||||
}
|
||||
|
||||
# Pull out any resource overrides, which are in the format
|
||||
# "resources:FOO" and generate a dict of FOO=value candidates
|
||||
# for overriding the resources in the allocation.
|
||||
overrides = {k.split(':', 1)[1]: v for k, v in
|
||||
instance.flavor.extra_specs.items()
|
||||
if k.startswith('resources:')}
|
||||
|
||||
# Any resource overrides which are properly namespaced as custom,
|
||||
# or are standard resource class values override the alloc_dict
|
||||
# already constructed from the base flavor values above. Since
|
||||
# extra_specs are string values and resource counts are always
|
||||
# integers, we convert them here too for any that we find.
|
||||
overrides = {k: int(v) for k, v in overrides.items()
|
||||
if (k.startswith(objects.ResourceClass.CUSTOM_NAMESPACE) or
|
||||
k in fields.ResourceClass.STANDARD)}
|
||||
|
||||
alloc_dict.update(overrides)
|
||||
alloc_dict = scheduler_utils.resources_from_flavor(instance,
|
||||
instance.flavor)
|
||||
|
||||
# Remove any zero allocations.
|
||||
return {key: val for key, val in alloc_dict.items() if val}
|
||||
|
@ -232,13 +195,8 @@ def _move_operation_alloc_request(source_allocs, dest_alloc_req):
|
|||
alloc['resource_provider']['uuid']):
|
||||
# Now sum the current allocation resource amounts with
|
||||
# the new allocation resource amounts.
|
||||
for res_name, res_value in alloc['resources'].items():
|
||||
if res_name in current_alloc['resources']:
|
||||
current_alloc['resources'][res_name] += res_value
|
||||
else:
|
||||
# This is a new resource class in the new
|
||||
# allocation for which we need to account.
|
||||
current_alloc['resources'][res_name] = res_value
|
||||
scheduler_utils.merge_resources(current_alloc['resources'],
|
||||
alloc['resources'])
|
||||
|
||||
LOG.debug("New allocation request containing both source and "
|
||||
"destination hosts in move operation: %s", new_alloc_req)
|
||||
|
|
|
@ -143,6 +143,42 @@ def _process_extra_specs(extra_specs, resources):
|
|||
resources[std_key] = val
|
||||
|
||||
|
||||
def resources_from_flavor(instance, flavor):
    """Convert a flavor into a set of resources for placement, taking into
    account boot-from-volume instances.

    This takes an instance and a flavor and returns a dict of
    resource_class:amount based on the attributes of the flavor, accounting
    for any overrides that are made in extra_specs.
    """
    is_bfv = compute_utils.is_volume_backed_instance(instance._context,
                                                     instance)
    # Boot-from-volume instances get their root disk from the volume, so
    # they consume no local root disk on the host.
    root_gb = 0 if is_bfv else flavor.root_gb
    # Swap is tracked on the flavor in MB but disk is claimed in GB; round
    # up to the next GB so we never under-claim.
    swap_gb = compute_utils.convert_mb_to_ceil_gb(flavor.swap)
    disk_gb = root_gb + swap_gb + flavor.ephemeral_gb

    resources = {
        fields.ResourceClass.VCPU: flavor.vcpus,
        fields.ResourceClass.MEMORY_MB: flavor.memory_mb,
        fields.ResourceClass.DISK_GB: disk_gb,
    }
    # Apply any "resources:FOO" overrides from the flavor's extra specs on
    # top of the base amounts computed above.
    if "extra_specs" in flavor:
        _process_extra_specs(flavor.extra_specs, resources)
    return resources
|
||||
|
||||
|
||||
def merge_resources(original_resources, new_resources, sign=1):
    """Merge a dict of new resources into an existing resources dict.

    Each amount in new_resources is added to (sign=1) or subtracted from
    (sign=-1) the matching entry in original_resources, which is updated
    in place. A key missing from either dict is treated as amount 0.
    """
    for resource_class in set(original_resources) | set(new_resources):
        delta = sign * new_resources.get(resource_class, 0)
        original_resources[resource_class] = (
            original_resources.get(resource_class, 0) + delta)
|
||||
|
||||
|
||||
def resources_from_request_spec(spec_obj):
|
||||
"""Given a RequestSpec object, returns a dict, keyed by resource class
|
||||
name, of requested amounts of those resources.
|
||||
|
|
|
@ -161,3 +161,66 @@ class TestUtils(test.NoDBTestCase):
|
|||
extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
|
||||
fake_spec = objects.RequestSpec(flavor=flavor)
|
||||
utils.resources_from_request_spec(fake_spec)
|
||||
|
||||
@mock.patch('nova.compute.utils.is_volume_backed_instance',
            return_value=False)
def test_resources_from_flavor_no_bfv(self, mock_is_bfv):
    """A local-disk instance claims root + ephemeral + swap as DISK_GB."""
    flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
                            ephemeral_gb=5, swap=1024,
                            extra_specs={})
    instance = objects.Instance()
    # 10G root + 5G ephemeral + 1G swap (1024MB rounded up) = 16G
    self.assertEqual(
        {'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 16},
        utils.resources_from_flavor(instance, flavor))
|
||||
|
||||
@mock.patch('nova.compute.utils.is_volume_backed_instance',
            return_value=True)
def test_resources_from_flavor_bfv(self, mock_is_bfv):
    """A boot-from-volume instance claims no root disk locally."""
    flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
                            ephemeral_gb=5, swap=1024,
                            extra_specs={})
    instance = objects.Instance()
    # No root disk: 5G ephemeral + 1G swap = 6G
    self.assertEqual(
        {'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 6},
        utils.resources_from_flavor(instance, flavor))
|
||||
|
||||
@mock.patch('nova.compute.utils.is_volume_backed_instance',
            return_value=False)
def test_resources_from_flavor_with_override(self, mock_is_bfv):
    """A resources:VCPU extra spec overrides the flavor's vcpus value."""
    flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
                            ephemeral_gb=5, swap=1024,
                            extra_specs={'resources:VCPU': '2'})
    instance = objects.Instance()
    # VCPU comes from the override, not the flavor; disk is 10+5+1=16G.
    self.assertEqual(
        {'VCPU': 2, 'MEMORY_MB': 1024, 'DISK_GB': 16},
        utils.resources_from_flavor(instance, flavor))
|
||||
|
||||
def test_merge_resources(self):
    """merge_resources adds amounts in place; sign=-1 subtracts them."""
    base = {
        'VCPU': 1, 'MEMORY_MB': 1024,
    }
    extra = {
        'VCPU': 2, 'MEMORY_MB': 2048, 'CUSTOM_FOO': 1,
    }
    snapshot = dict(base)
    utils.merge_resources(base, extra)
    # The amounts were combined key-by-key.
    self.assertEqual({'VCPU': 3, 'MEMORY_MB': 3072, 'CUSTOM_FOO': 1},
                     base)
    # Subtracting the original snapshot leaves exactly the extras.
    utils.merge_resources(base, snapshot, -1)
    self.assertEqual(extra, base)
|
||||
|
|
Loading…
Reference in New Issue