From a241fcee4ca65436458cc757fe81a19369451897 Mon Sep 17 00:00:00 2001
From: Chris Dent
Date: Fri, 7 Dec 2018 18:37:20 +0000
Subject: [PATCH] Use os-resource-classes in placement

os-resource-classes is a Python library in which the standardized resource classes are maintained. It is done as a library so that multiple services (e.g., placement and nova) can use the same definitions. It is used and managed here in the same way the os-traits library is used: at system startup we compare the contents of the resource_classes table with the classes in the library and add any that are missing. CUSTOM resource classes are added with a high id (and always were, even before this change).

Because we need to insert standard resource classes with an id of zero, we need to protect against mysql treating 0 in a primary key id as "generate the next one". We don't need a similar thing in os-traits because we don't care about the ids there. And we don't need to guard against postgresql or sqlite at this point because they do not have the same behavior.

The resource_class_cache of id to string and string to id mappings continues to be maintained, but now it looks solely in the database. As part of confirming that code, it was discovered that the reader context manager was being entered twice; this has been fixed.

Locking around every access to the resource class cache is fairly expensive (it changes the perfload job from <2s to >5s). Prior to this change we would only go to the cache if the resource classes in the query were not standards. Now we always look at the cache, so rather than locking around reads and writes we only lock around writes. This should be okay: as long as we do a single get (instead of the previous two separate accesses) on the cache's dict, that operation is safe, and if it misses (because something else destroyed the cache) the fall-through is to refresh the cache, which still holds the lock.

While updating the database fixture to ensure that the resource classes are synced properly, it was discovered that addCleanup was being called twice with the same args. That has been fixed.

In objects/resource_provider.py the ResourceClass field is changed to a StringField. The field definition was in rc_fields and we simply don't need it anymore. This is satisfactory because we don't do any validation on the field internal to the objects (but do elsewhere).

Based on initial feedback, 'os_resource_classes' is imported as 'orc' throughout to avoid unwieldy length.
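For reviewers, a minimal sketch of the sync-at-startup behavior described above. The names (sync_standards, session, rc_table) are illustrative, not this patch's code; the real implementation is _resource_classes_sync in placement/objects/resource_provider.py below:

    import os_resource_classes as orc
    import sqlalchemy as sa

    def sync_standards(session, rc_table):
        # Standard class names already present in the database; CUSTOM_*
        # entries are skipped since only standards are synced.
        existing = {row[0] for row in
                    session.execute(sa.select([rc_table.c.name]))
                    if not orc.is_custom(row[0])}
        # A standard class's position in orc.STANDARDS is its fixed id:
        # VCPU is always 0, MEMORY_MB always 1, and so on.
        missing = [{'id': index, 'name': name}
                   for index, name in enumerate(orc.STANDARDS)
                   if name not in existing]
        if missing:
            session.execute(rc_table.insert(), missing)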
Change-Id: Ib7e8081519c3b310cd526284db28c623c8410fbe --- lower-constraints.txt | 1 + placement/deploy.py | 1 + placement/objects/resource_provider.py | 93 +- placement/rc_fields.py | 71 -- placement/resource_class_cache.py | 93 +- placement/tests/fixtures.py | 9 +- .../db/test_allocation_candidates.py | 1043 ++++++++--------- .../tests/functional/db/test_consumer.py | 20 +- .../db/test_resource_class_cache.py | 64 +- .../functional/db/test_resource_provider.py | 164 ++- .../tests/functional/fixtures/gabbits.py | 14 +- .../unit/objects/test_resource_provider.py | 24 +- requirements.txt | 1 + 13 files changed, 735 insertions(+), 863 deletions(-) delete mode 100644 placement/rc_fields.py diff --git a/lower-constraints.txt b/lower-constraints.txt index 89117347b..72aa25309 100644 --- a/lower-constraints.txt +++ b/lower-constraints.txt @@ -41,6 +41,7 @@ mox3==0.20.0 msgpack-python==0.5.6 netaddr==0.7.18 netifaces==0.10.4 +os-resource-classes==0.1.0 os-client-config==1.29.0 os-service-types==1.2.0 os-traits==0.4.0 diff --git a/placement/deploy.py b/placement/deploy.py index 0ffb6fcdb..40d3e7c8f 100644 --- a/placement/deploy.py +++ b/placement/deploy.py @@ -97,6 +97,7 @@ def update_database(): """ ctx = db_api.DbContext() resource_provider.ensure_trait_sync(ctx) + resource_provider.ensure_resource_classes_sync(ctx) resource_provider.ensure_rc_cache(ctx) diff --git a/placement/objects/resource_provider.py b/placement/objects/resource_provider.py index 2f50875e1..68ee6e8d8 100644 --- a/placement/objects/resource_provider.py +++ b/placement/objects/resource_provider.py @@ -21,6 +21,7 @@ import random # not be registered and there is no need to express VERSIONs nor handle # obj_make_compatible. +import os_resource_classes as orc import os_traits from oslo_concurrency import lockutils from oslo_db import api as oslo_db_api @@ -43,7 +44,6 @@ from placement.i18n import _ from placement.objects import consumer as consumer_obj from placement.objects import project as project_obj from placement.objects import user as user_obj -from placement import rc_fields from placement import resource_class_cache as rc_cache _TRAIT_TBL = models.Trait.__table__ @@ -59,6 +59,8 @@ _PROJECT_TBL = models.Project.__table__ _USER_TBL = models.User.__table__ _CONSUMER_TBL = models.Consumer.__table__ _RC_CACHE = None +_RESOURCE_CLASSES_LOCK = 'resource_classes_sync' +_RESOURCE_CLASSES_SYNCED = False _TRAIT_LOCK = 'trait_sync' _TRAITS_SYNCED = False @@ -82,7 +84,7 @@ def ensure_rc_cache(ctx): @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) # Bug #1760322: If the caller raises an exception, we don't want the trait # sync rolled back; so use an .independent transaction -@db_api.placement_context_manager.writer.independent +@db_api.placement_context_manager.writer def _trait_sync(ctx): """Sync the os_traits symbols to the database. @@ -145,6 +147,47 @@ def ensure_trait_sync(ctx): _TRAITS_SYNCED = True +@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) +@db_api.placement_context_manager.writer +def _resource_classes_sync(ctx): + # Collect the standard resource class names already in the database. + sel = sa.select([_RC_TBL.c.name]) + res = ctx.session.execute(sel).fetchall() + db_classes = [r[0] for r in res if not orc.is_custom(r[0])] + LOG.debug("Found existing resource classes in db: %s", db_classes) + # Determine those resource classes which are in os_resource_classes but not + # currently in the database, and insert them.
+ batch_args = [{'name': six.text_type(name), 'id': index} + for index, name in enumerate(orc.STANDARDS) + if name not in db_classes] + ins = _RC_TBL.insert() + if batch_args: + conn = ctx.session.connection() + if conn.engine.dialect.name == 'mysql': + # We need to do a literal insert of 0 to preserve the order + # of the resource class ids from the previous style of + # managing them. In some mysql settings a 0 is the same as + # "give me a default key". + conn.execute("SET SESSION SQL_MODE='NO_AUTO_VALUE_ON_ZERO'") + try: + ctx.session.execute(ins, batch_args) + LOG.info("Synced resource_classes from os_resource_classes: %s", + batch_args) + except db_exc.DBDuplicateEntry: + pass # some other process sync'd, just ignore + + +def ensure_resource_classes_sync(ctx): + global _RESOURCE_CLASSES_SYNCED + # If another thread is doing this work, wait for it to complete. + # When that thread is done _RESOURCE_CLASSES_SYNCED will be true in this + # thread and we'll simply return. + with lockutils.lock(_RESOURCE_CLASSES_LOCK): + if not _RESOURCE_CLASSES_SYNCED: + _resource_classes_sync(ctx) + _RESOURCE_CLASSES_SYNCED = True + + def _usage_select(rc_ids): usage = sa.select([_ALLOC_TBL.c.resource_provider_id, _ALLOC_TBL.c.resource_class_id, @@ -310,7 +353,7 @@ def _add_inventory(context, rp, inventory): """Add one Inventory that wasn't already on the provider. :raises `exception.ResourceClassNotFound` if inventory.resource_class - cannot be found in either the standard classes or the DB. + cannot be found in the DB. """ rc_id = _RC_CACHE.id_from_string(inventory.resource_class) inv_list = InventoryList(objects=[inventory]) @@ -324,7 +367,7 @@ def _update_inventory(context, rp, inventory): """Update an inventory already on the provider. :raises `exception.ResourceClassNotFound` if inventory.resource_class - cannot be found in either the standard classes or the DB. + cannot be found in the DB. """ rc_id = _RC_CACHE.id_from_string(inventory.resource_class) inv_list = InventoryList(objects=[inventory]) @@ -339,7 +382,7 @@ def _delete_inventory(context, rp, resource_class): """Delete up to one Inventory of the given resource_class string. :raises `exception.ResourceClassNotFound` if resource_class - cannot be found in either the standard classes or the DB. + cannot be found in the DB. """ rc_id = _RC_CACHE.id_from_string(resource_class) if not _delete_inventory_from_provider(context, rp, [rc_id]): @@ -365,8 +408,7 @@ def _set_inventory(context, rp, inv_list): allocations in between the time when this object was originally read and the call to set the inventory. :raises `exception.ResourceClassNotFound` if any resource class in any - inventory in inv_list cannot be found in either the standard - classes or the DB. + inventory in inv_list cannot be found in the DB. :raises `exception.InventoryInUse` if we attempt to delete inventory from a provider that has allocations for that resource class. 
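# A hedged illustration of the MySQL guard above: under MySQL's default
# SQL mode, inserting 0 into an AUTO_INCREMENT primary key means "generate
# the next id", which would silently renumber VCPU (id 0), while
# NO_AUTO_VALUE_ON_ZERO makes the literal 0 stick. PostgreSQL and SQLite
# store the 0 as given, so no guard is needed there. The session and
# rc_table names here are illustrative, not this patch's code.
conn = session.connection()
if conn.engine.dialect.name == 'mysql':
    conn.execute("SET SESSION SQL_MODE='NO_AUTO_VALUE_ON_ZERO'")
session.execute(rc_table.insert(), [{'id': 0, 'name': 'VCPU'}])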
""" @@ -1562,7 +1604,7 @@ class Inventory(base.VersionedObject, base.TimestampedObject): fields = { 'id': fields.IntegerField(read_only=True), 'resource_provider': fields.ObjectField('ResourceProvider'), - 'resource_class': rc_fields.ResourceClassField(read_only=True), + 'resource_class': fields.StringField(read_only=True), 'total': fields.NonNegativeIntegerField(), 'reserved': fields.NonNegativeIntegerField(default=0), 'min_unit': fields.NonNegativeIntegerField(default=1), @@ -1647,7 +1689,7 @@ class Allocation(base.VersionedObject, base.TimestampedObject): 'id': fields.IntegerField(), 'resource_provider': fields.ObjectField('ResourceProvider'), 'consumer': fields.ObjectField('Consumer', nullable=False), - 'resource_class': rc_fields.ResourceClassField(), + 'resource_class': fields.StringField(), 'used': fields.IntegerField(), } @@ -2012,8 +2054,7 @@ class AllocationList(base.ObjectListBase, base.VersionedObject): If there is not we roll back the entire set. :raises `exception.ResourceClassNotFound` if any resource class in any - allocation in allocs cannot be found in either the standard - classes or the DB. + allocation in allocs cannot be found in either the DB. :raises `exception.InvalidAllocationCapacityExceeded` if any inventory would be exhausted by the allocation. :raises `InvalidAllocationConstraintsViolated` if any of the @@ -2229,7 +2270,7 @@ class AllocationList(base.ObjectListBase, base.VersionedObject): class Usage(base.VersionedObject): fields = { - 'resource_class': rc_fields.ResourceClassField(read_only=True), + 'resource_class': fields.StringField(read_only=True), 'usage': fields.NonNegativeIntegerField(), } @@ -2325,7 +2366,7 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject): fields = { 'id': fields.IntegerField(read_only=True), - 'name': rc_fields.ResourceClassField(nullable=False), + 'name': fields.StringField(nullable=False), } @staticmethod @@ -2359,7 +2400,7 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject): """ query = context.session.query(func.max(models.ResourceClass.id)) max_id = query.one()[0] - if not max_id: + if not max_id or max_id < ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID: return ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID else: return max_id + 1 @@ -2371,14 +2412,13 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject): if 'name' not in self: raise exception.ObjectActionError(action='create', reason='name is required') - if self.name in rc_fields.ResourceClass.STANDARD: + if self.name in orc.STANDARDS: raise exception.ResourceClassExists(resource_class=self.name) - if not self.name.startswith(rc_fields.ResourceClass.CUSTOM_NAMESPACE): + if not self.name.startswith(orc.CUSTOM_NAMESPACE): raise exception.ObjectActionError( action='create', - reason='name must start with ' + - rc_fields.ResourceClass.CUSTOM_NAMESPACE) + reason='name must start with ' + orc.CUSTOM_NAMESPACE) updates = self.obj_get_changes() # There is the possibility of a race when adding resource classes, as @@ -2424,9 +2464,8 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject): if 'id' not in self: raise exception.ObjectActionError(action='destroy', reason='ID attribute not found') - # Never delete any standard resource class, since the standard resource - # classes don't even exist in the database table anyway. - if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS): + # Never delete any standard resource class. 
+ if self.id < ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID: raise exception.ResourceClassCannotDeleteStandard( resource_class=self.name) @@ -2453,9 +2492,8 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject): raise exception.ObjectActionError(action='save', reason='ID attribute not found') updates = self.obj_get_changes() - # Never update any standard resource class, since the standard resource - # classes don't even exist in the database table anyway. - if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS): + # Never update any standard resource class. + if self.id < ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID: raise exception.ResourceClassCannotUpdateStandard( resource_class=self.name) self._save(self._context, self.id, self.name, updates) @@ -2483,8 +2521,7 @@ class ResourceClassList(base.ObjectListBase, base.VersionedObject): @staticmethod @db_api.placement_context_manager.reader def _get_all(context): - customs = list(context.session.query(models.ResourceClass).all()) - return _RC_CACHE.STANDARDS + customs + return list(context.session.query(models.ResourceClass).all()) @classmethod def get_all(cls, context): @@ -2635,7 +2672,7 @@ class AllocationRequestResource(base.VersionedObject): fields = { 'resource_provider': fields.ObjectField('ResourceProvider'), - 'resource_class': rc_fields.ResourceClassField(read_only=True), + 'resource_class': fields.StringField(read_only=True), 'amount': fields.NonNegativeIntegerField(), } @@ -2675,7 +2712,7 @@ class AllocationRequest(base.VersionedObject): class ProviderSummaryResource(base.VersionedObject): fields = { - 'resource_class': rc_fields.ResourceClassField(read_only=True), + 'resource_class': fields.StringField(read_only=True), 'capacity': fields.NonNegativeIntegerField(), 'used': fields.NonNegativeIntegerField(), # Internal use only; not included when the object is serialized for diff --git a/placement/rc_fields.py b/placement/rc_fields.py deleted file mode 100644 index ab410860c..000000000 --- a/placement/rc_fields.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Standard Resource Class Fields.""" - -# NOTE(cdent): This is kept as its own independent file as it is used by -# both the placement and nova sides of the placement interaction. On the -# placement side we don't want to import all the nova fields, nor all the -# nova objects (which are automatically loaded and registered if the -# nova.objects package is imported). 
- -import re - -from oslo_versionedobjects import fields - - -class ResourceClass(fields.StringField): - """Classes of resources provided to consumers.""" - - CUSTOM_NAMESPACE = 'CUSTOM_' - """All non-standard resource classes must begin with this string.""" - - VCPU = 'VCPU' - MEMORY_MB = 'MEMORY_MB' - DISK_GB = 'DISK_GB' - PCI_DEVICE = 'PCI_DEVICE' - SRIOV_NET_VF = 'SRIOV_NET_VF' - NUMA_SOCKET = 'NUMA_SOCKET' - NUMA_CORE = 'NUMA_CORE' - NUMA_THREAD = 'NUMA_THREAD' - NUMA_MEMORY_MB = 'NUMA_MEMORY_MB' - IPV4_ADDRESS = 'IPV4_ADDRESS' - VGPU = 'VGPU' - VGPU_DISPLAY_HEAD = 'VGPU_DISPLAY_HEAD' - # Standard resource class for network bandwidth egress measured in - # kilobits per second. - NET_BW_EGR_KILOBIT_PER_SEC = 'NET_BW_EGR_KILOBIT_PER_SEC' - # Standard resource class for network bandwidth ingress measured in - # kilobits per second. - NET_BW_IGR_KILOBIT_PER_SEC = 'NET_BW_IGR_KILOBIT_PER_SEC' - - # The ordering here is relevant. If you must add a value, only - # append. - STANDARD = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF, - NUMA_SOCKET, NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB, - IPV4_ADDRESS, VGPU, VGPU_DISPLAY_HEAD, - NET_BW_EGR_KILOBIT_PER_SEC, NET_BW_IGR_KILOBIT_PER_SEC) - - @classmethod - def normalize_name(cls, rc_name): - if rc_name is None: - return None - # Replace non-alphanumeric characters with underscores - norm_name = re.sub('[^0-9A-Za-z]+', '_', rc_name) - # Bug #1762789: Do .upper after replacing non alphanumerics. - norm_name = norm_name.upper() - norm_name = cls.CUSTOM_NAMESPACE + norm_name - return norm_name - - -class ResourceClassField(fields.AutoTypedField): - AUTO_TYPE = ResourceClass() diff --git a/placement/resource_class_cache.py b/placement/resource_class_cache.py index 7fe1cee96..c5be83e0b 100644 --- a/placement/resource_class_cache.py +++ b/placement/resource_class_cache.py @@ -16,7 +16,6 @@ import sqlalchemy as sa from placement.db.sqlalchemy import models from placement import db_api from placement import exception -from placement import rc_fields as fields _RC_TBL = models.ResourceClass.__table__ _LOCKNAME = 'rc_cache' @@ -24,28 +23,21 @@ _LOCKNAME = 'rc_cache' @db_api.placement_context_manager.reader def _refresh_from_db(ctx, cache): - """Grabs all custom resource classes from the DB table and populates the + """Grabs all resource classes from the DB table and populates the supplied cache object's internal integer and string identifier dicts. :param cache: ResourceClassCache object to refresh. 
""" - with db_api.placement_context_manager.reader.connection.using(ctx) as conn: - sel = sa.select([_RC_TBL.c.id, _RC_TBL.c.name, _RC_TBL.c.updated_at, - _RC_TBL.c.created_at]) - res = conn.execute(sel).fetchall() - cache.id_cache = {r[1]: r[0] for r in res} - cache.str_cache = {r[0]: r[1] for r in res} - cache.all_cache = {r[1]: r for r in res} + sel = sa.select([_RC_TBL.c.id, _RC_TBL.c.name, _RC_TBL.c.updated_at, + _RC_TBL.c.created_at]) + res = ctx.session.execute(sel).fetchall() + cache.id_cache = {r[1]: r[0] for r in res} + cache.str_cache = {r[0]: r[1] for r in res} + cache.all_cache = {r[1]: r for r in res} class ResourceClassCache(object): - """A cache of integer and string lookup values for resource classes.""" - - # List of dict of all standard resource classes, where every list item - # have a form {'id': , 'name': } - STANDARDS = [{'id': fields.ResourceClass.STANDARD.index(s), 'name': s, - 'updated_at': None, 'created_at': None} - for s in fields.ResourceClass.STANDARD] + """A cache of integer and string lookup values for resource classes.""" def __init__(self, ctx): """Initialize the cache of resource class identifiers. @@ -66,29 +58,23 @@ class ResourceClassCache(object): def id_from_string(self, rc_str): """Given a string representation of a resource class -- e.g. "DISK_GB" - or "IRON_SILVER" -- return the integer code for the resource class. For - standard resource classes, this integer code will match the list of - resource classes on the fields.ResourceClass field type. Other custom - resource classes will cause a DB lookup into the resource_classes - table, however the results of these DB lookups are cached since the - lookups are so frequent. + or "CUSTOM_IRON_SILVER" -- return the integer code for the resource + class by doing a DB lookup into the resource_classes table; however, + the results of these DB lookups are cached since the lookups are so + frequent. :param rc_str: The string representation of the resource class to look up a numeric identifier for. - :returns integer identifier for the resource class, or None, if no such - resource class was found in the list of standard resource - classes or the resource_classes database table. + :returns Integer identifier for the resource class. :raises `exception.ResourceClassNotFound` if rc_str cannot be found in - either the standard classes or the DB. + the DB. """ - # First check the standard resource classes - if rc_str in fields.ResourceClass.STANDARD: - return fields.ResourceClass.STANDARD.index(rc_str) + rc_id = self.id_cache.get(rc_str) + if rc_id is not None: + return rc_id + # Otherwise, check the database table with lockutils.lock(_LOCKNAME): - if rc_str in self.id_cache: - return self.id_cache[rc_str] - # Otherwise, check the database table _refresh_from_db(self.ctx, self) if rc_str in self.id_cache: return self.id_cache[rc_str] @@ -101,22 +87,17 @@ class ResourceClassCache(object): :param rc_str: The string representation of the resource class for which to look up a resource_class. :returns: dict representing the resource class fields, if the - resource class was found in the list of standard - resource classes or the resource_classes database table. + resource class was found in the resource_classes database + table. :raises: `exception.ResourceClassNotFound` if rc_str cannot be found in - either the standard classes or the DB. + the DB. 
""" - # First check the standard resource classes - if rc_str in fields.ResourceClass.STANDARD: - return {'id': fields.ResourceClass.STANDARD.index(rc_str), - 'name': rc_str, - 'updated_at': None, - 'created_at': None} + rc_id_str = self.all_cache.get(rc_str) + if rc_id_str is not None: + return rc_id_str + # Otherwise, check the database table with lockutils.lock(_LOCKNAME): - if rc_str in self.all_cache: - return self.all_cache[rc_str] - # Otherwise, check the database table _refresh_from_db(self.ctx, self) if rc_str in self.all_cache: return self.all_cache[rc_str] @@ -125,29 +106,21 @@ class ResourceClassCache(object): def string_from_id(self, rc_id): """The reverse of the id_from_string() method. Given a supplied numeric identifier for a resource class, we look up the corresponding string - representation, either in the list of standard resource classes or via - a DB lookup. The results of these DB lookups are cached since the - lookups are so frequent. + representation, via a DB lookup. The results of these DB lookups are + cached since the lookups are so frequent. :param rc_id: The numeric representation of the resource class to look up a string identifier for. - :returns: string identifier for the resource class, or None, if no such - resource class was found in the list of standard resource - classes or the resource_classes database table. + :returns: String identifier for the resource class. :raises `exception.ResourceClassNotFound` if rc_id cannot be found in - either the standard classes or the DB. + the DB. """ - # First check the fields.ResourceClass.STANDARD values - try: - return fields.ResourceClass.STANDARD[rc_id] - except IndexError: - pass + rc_str = self.str_cache.get(rc_id) + if rc_str is not None: + return rc_str + # Otherwise, check the database table with lockutils.lock(_LOCKNAME): - if rc_id in self.str_cache: - return self.str_cache[rc_id] - - # Otherwise, check the database table _refresh_from_db(self.ctx, self) if rc_id in self.str_cache: return self.str_cache[rc_id] diff --git a/placement/tests/fixtures.py b/placement/tests/fixtures.py index 6f01000ce..c5236439b 100644 --- a/placement/tests/fixtures.py +++ b/placement/tests/fixtures.py @@ -64,12 +64,15 @@ class Database(test_fixtures.GeneratesSchema, test_fixtures.AdHocDbFixture): engine) self.addCleanup(_reset_facade) + # Make sure db flags are correct at both the start and finish + # of the test. self.addCleanup(self.cleanup) - resource_provider._TRAITS_SYNCED = False - resource_provider._RC_CACHE = None + self.cleanup() + + # Sync traits and resource classes. deploy.update_database() - self.addCleanup(self.cleanup) def cleanup(self): resource_provider._TRAITS_SYNCED = False + resource_provider._RESOURCE_CLASSES_SYNCED = False resource_provider._RC_CACHE = None diff --git a/placement/tests/functional/db/test_allocation_candidates.py b/placement/tests/functional/db/test_allocation_candidates.py index 377d71d9c..abedcb55a 100644 --- a/placement/tests/functional/db/test_allocation_candidates.py +++ b/placement/tests/functional/db/test_allocation_candidates.py @@ -9,6 +9,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import os_resource_classes as orc import os_traits from oslo_utils.fixture import uuidsentinel as uuids import six @@ -17,7 +18,6 @@ import sqlalchemy as sa from placement import exception from placement import lib as placement_lib from placement.objects import resource_provider as rp_obj -from placement import rc_fields as fields from placement.tests.functional.db import test_base as tb @@ -32,140 +32,115 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase): # Inventory of adequate CPU and memory, no allocations against it. excl_big_cm_noalloc = self._create_provider('big_cm_noalloc') - tb.add_inventory(excl_big_cm_noalloc, fields.ResourceClass.VCPU, 15) - tb.add_inventory(excl_big_cm_noalloc, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(excl_big_cm_noalloc, orc.VCPU, 15) + tb.add_inventory(excl_big_cm_noalloc, orc.MEMORY_MB, 4096, max_unit=2048) # Inventory of adequate memory and disk, no allocations against it. excl_big_md_noalloc = self._create_provider('big_md_noalloc') - tb.add_inventory(excl_big_md_noalloc, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(excl_big_md_noalloc, orc.MEMORY_MB, 4096, max_unit=2048) - tb.add_inventory(excl_big_md_noalloc, fields.ResourceClass.DISK_GB, - 2000) + tb.add_inventory(excl_big_md_noalloc, orc.DISK_GB, 2000) # Adequate inventory, no allocations against it. incl_biginv_noalloc = self._create_provider('biginv_noalloc') - tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.VCPU, 15) - tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(incl_biginv_noalloc, orc.VCPU, 15) + tb.add_inventory(incl_biginv_noalloc, orc.MEMORY_MB, 4096, max_unit=2048) - tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.DISK_GB, - 2000) + tb.add_inventory(incl_biginv_noalloc, orc.DISK_GB, 2000) # No allocations, but inventory unusable. Try to hit all the possible # reasons for exclusion. 
# VCPU min_unit too high excl_badinv_min_unit = self._create_provider('badinv_min_unit') - tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.VCPU, 12, - min_unit=6) - tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(excl_badinv_min_unit, orc.VCPU, 12, min_unit=6) + tb.add_inventory(excl_badinv_min_unit, orc.MEMORY_MB, 4096, max_unit=2048) - tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.DISK_GB, - 2000) + tb.add_inventory(excl_badinv_min_unit, orc.DISK_GB, 2000) # MEMORY_MB max_unit too low excl_badinv_max_unit = self._create_provider('badinv_max_unit') - tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.VCPU, 15) - tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(excl_badinv_max_unit, orc.VCPU, 15) + tb.add_inventory(excl_badinv_max_unit, orc.MEMORY_MB, 4096, max_unit=512) - tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.DISK_GB, - 2000) + tb.add_inventory(excl_badinv_max_unit, orc.DISK_GB, 2000) # DISK_GB unsuitable step_size excl_badinv_step_size = self._create_provider('badinv_step_size') - tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.VCPU, 15) - tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(excl_badinv_step_size, orc.VCPU, 15) + tb.add_inventory(excl_badinv_step_size, orc.MEMORY_MB, 4096, max_unit=2048) - tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.DISK_GB, - 2000, step_size=7) + tb.add_inventory(excl_badinv_step_size, orc.DISK_GB, 2000, step_size=7) # Not enough total VCPU excl_badinv_total = self._create_provider('badinv_total') - tb.add_inventory(excl_badinv_total, fields.ResourceClass.VCPU, 4) - tb.add_inventory(excl_badinv_total, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(excl_badinv_total, orc.VCPU, 4) + tb.add_inventory(excl_badinv_total, orc.MEMORY_MB, 4096, max_unit=2048) - tb.add_inventory(excl_badinv_total, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(excl_badinv_total, orc.DISK_GB, 2000) # Too much reserved MEMORY_MB excl_badinv_reserved = self._create_provider('badinv_reserved') - tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.VCPU, 15) - tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.MEMORY_MB, + tb.add_inventory(excl_badinv_reserved, orc.VCPU, 15) + tb.add_inventory(excl_badinv_reserved, orc.MEMORY_MB, 4096, max_unit=2048, reserved=3500) - tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.DISK_GB, - 2000) + tb.add_inventory(excl_badinv_reserved, orc.DISK_GB, 2000) # DISK_GB allocation ratio blows it up excl_badinv_alloc_ratio = self._create_provider('badinv_alloc_ratio') - tb.add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.VCPU, - 15) + tb.add_inventory(excl_badinv_alloc_ratio, orc.VCPU, 15) tb.add_inventory(excl_badinv_alloc_ratio, - fields.ResourceClass.MEMORY_MB, 4096, max_unit=2048) - tb.add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.DISK_GB, + orc.MEMORY_MB, 4096, max_unit=2048) + tb.add_inventory(excl_badinv_alloc_ratio, orc.DISK_GB, 2000, allocation_ratio=0.5) # Inventory consumed in one RC, but available in the others excl_1invunavail = self._create_provider('1invunavail') - tb.add_inventory(excl_1invunavail, fields.ResourceClass.VCPU, 10) - self.allocate_from_provider( - excl_1invunavail, fields.ResourceClass.VCPU, 7) - tb.add_inventory(excl_1invunavail, fields.ResourceClass.MEMORY_MB, - 4096) - self.allocate_from_provider(excl_1invunavail, - fields.ResourceClass.MEMORY_MB, 1024) - 
tb.add_inventory(excl_1invunavail, fields.ResourceClass.DISK_GB, 2000) - self.allocate_from_provider(excl_1invunavail, - fields.ResourceClass.DISK_GB, 400) + tb.add_inventory(excl_1invunavail, orc.VCPU, 10) + self.allocate_from_provider(excl_1invunavail, orc.VCPU, 7) + tb.add_inventory(excl_1invunavail, orc.MEMORY_MB, 4096) + self.allocate_from_provider(excl_1invunavail, orc.MEMORY_MB, 1024) + tb.add_inventory(excl_1invunavail, orc.DISK_GB, 2000) + self.allocate_from_provider(excl_1invunavail, orc.DISK_GB, 400) # Inventory all consumed excl_allused = self._create_provider('allused') - tb.add_inventory(excl_allused, fields.ResourceClass.VCPU, 10) - self.allocate_from_provider(excl_allused, fields.ResourceClass.VCPU, 7) - tb.add_inventory(excl_allused, fields.ResourceClass.MEMORY_MB, 4000) - self.allocate_from_provider(excl_allused, - fields.ResourceClass.MEMORY_MB, 1500) - self.allocate_from_provider(excl_allused, - fields.ResourceClass.MEMORY_MB, 2000) - tb.add_inventory(excl_allused, fields.ResourceClass.DISK_GB, 1500) - self.allocate_from_provider(excl_allused, fields.ResourceClass.DISK_GB, - 1) + tb.add_inventory(excl_allused, orc.VCPU, 10) + self.allocate_from_provider(excl_allused, orc.VCPU, 7) + tb.add_inventory(excl_allused, orc.MEMORY_MB, 4000) + self.allocate_from_provider(excl_allused, orc.MEMORY_MB, 1500) + self.allocate_from_provider(excl_allused, orc.MEMORY_MB, 2000) + tb.add_inventory(excl_allused, orc.DISK_GB, 1500) + self.allocate_from_provider(excl_allused, orc.DISK_GB, 1) # Inventory available in requested classes, but unavailable in others incl_extra_full = self._create_provider('extra_full') - tb.add_inventory(incl_extra_full, fields.ResourceClass.VCPU, 20) - self.allocate_from_provider(incl_extra_full, fields.ResourceClass.VCPU, - 15) - tb.add_inventory(incl_extra_full, fields.ResourceClass.MEMORY_MB, 4096) - self.allocate_from_provider(incl_extra_full, - fields.ResourceClass.MEMORY_MB, 1024) - tb.add_inventory(incl_extra_full, fields.ResourceClass.DISK_GB, 2000) - self.allocate_from_provider(incl_extra_full, - fields.ResourceClass.DISK_GB, 400) - tb.add_inventory(incl_extra_full, fields.ResourceClass.PCI_DEVICE, 4) - self.allocate_from_provider(incl_extra_full, - fields.ResourceClass.PCI_DEVICE, 1) - self.allocate_from_provider(incl_extra_full, - fields.ResourceClass.PCI_DEVICE, 3) + tb.add_inventory(incl_extra_full, orc.VCPU, 20) + self.allocate_from_provider(incl_extra_full, orc.VCPU, 15) + tb.add_inventory(incl_extra_full, orc.MEMORY_MB, 4096) + self.allocate_from_provider(incl_extra_full, orc.MEMORY_MB, 1024) + tb.add_inventory(incl_extra_full, orc.DISK_GB, 2000) + self.allocate_from_provider(incl_extra_full, orc.DISK_GB, 400) + tb.add_inventory(incl_extra_full, orc.PCI_DEVICE, 4) + self.allocate_from_provider(incl_extra_full, orc.PCI_DEVICE, 1) + self.allocate_from_provider(incl_extra_full, orc.PCI_DEVICE, 3) # Inventory available in a unrequested classes, not in requested ones excl_extra_avail = self._create_provider('extra_avail') # Incompatible step size - tb.add_inventory(excl_extra_avail, fields.ResourceClass.VCPU, 10, - step_size=3) + tb.add_inventory(excl_extra_avail, orc.VCPU, 10, step_size=3) # Not enough left after reserved + used - tb.add_inventory(excl_extra_avail, fields.ResourceClass.MEMORY_MB, - 4096, max_unit=2048, reserved=2048) - self.allocate_from_provider(excl_extra_avail, - fields.ResourceClass.MEMORY_MB, 1040) + tb.add_inventory(excl_extra_avail, orc.MEMORY_MB, 4096, + max_unit=2048, reserved=2048) + 
self.allocate_from_provider(excl_extra_avail, orc.MEMORY_MB, 1040) # Allocation ratio math - tb.add_inventory(excl_extra_avail, fields.ResourceClass.DISK_GB, 2000, + tb.add_inventory(excl_extra_avail, orc.DISK_GB, 2000, allocation_ratio=0.5) - tb.add_inventory(excl_extra_avail, fields.ResourceClass.IPV4_ADDRESS, - 48) + tb.add_inventory(excl_extra_avail, orc.IPV4_ADDRESS, 48) custom_special = rp_obj.ResourceClass(self.ctx, name='CUSTOM_SPECIAL') custom_special.create() tb.add_inventory(excl_extra_avail, 'CUSTOM_SPECIAL', 100) self.allocate_from_provider(excl_extra_avail, 'CUSTOM_SPECIAL', 99) resources = { - fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 5, - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.MEMORY_MB): 1024, - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.DISK_GB): 1500 + orc.STANDARDS.index(orc.VCPU): 5, + orc.STANDARDS.index(orc.MEMORY_MB): 1024, + orc.STANDARDS.index(orc.DISK_GB): 1500 } # Run it! @@ -210,18 +185,17 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase): def test_get_provider_ids_matching_with_multiple_forbidden(self): rp1 = self._create_provider('rp1', uuids.agg1) - tb.add_inventory(rp1, fields.ResourceClass.VCPU, 64) + tb.add_inventory(rp1, orc.VCPU, 64) rp2 = self._create_provider('rp2', uuids.agg1) trait_two, = tb.set_traits(rp2, 'CUSTOM_TWO') - tb.add_inventory(rp2, fields.ResourceClass.VCPU, 64) + tb.add_inventory(rp2, orc.VCPU, 64) rp3 = self._create_provider('rp3') trait_three, = tb.set_traits(rp3, 'CUSTOM_THREE') - tb.add_inventory(rp3, fields.ResourceClass.VCPU, 64) + tb.add_inventory(rp3, orc.VCPU, 64) - resources = { - fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 4} + resources = {orc.STANDARDS.index(orc.VCPU): 4} res = rp_obj._get_provider_ids_matching( self.ctx, resources, required_traits={}, @@ -294,9 +268,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): def setUp(self): super(AllocationCandidatesTestCase, self).setUp() self.requested_resources = { - fields.ResourceClass.VCPU: 1, - fields.ResourceClass.MEMORY_MB: 64, - fields.ResourceClass.DISK_GB: 1500, + orc.VCPU: 1, + orc.MEMORY_MB: 64, + orc.DISK_GB: 1500, } # For debugging purposes, populated by _create_provider and used by # _validate_allocation_requests to make failure results more readable. @@ -418,29 +392,29 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): allocation_requests and provider_summaries. 
""" cn1 = self._create_provider('cn1') - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(cn1, orc.VCPU, 8) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.DISK_GB, 2000) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 1 + orc.VCPU: 1 } )} ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 1)] + [('cn1', orc.VCPU, 1)] ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 8, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - (fields.ResourceClass.DISK_GB, 2000, 0) + (orc.VCPU, 8, 0), + (orc.MEMORY_MB, 2048, 0), + (orc.DISK_GB, 2000, 0) ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -500,19 +474,19 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 1 + orc.VCPU: 1 } )} ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 1)] + [('cn1', orc.VCPU, 1)] ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 8, 4) + (orc.VCPU, 8, 4) ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -525,29 +499,29 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # following test. ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.VCPU, 8) + tb.add_inventory(ss1, orc.VCPU, 8) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 1 + orc.VCPU: 1 } )} ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 1)], - [('ss1', fields.ResourceClass.VCPU, 1)] + [('cn1', orc.VCPU, 1)], + [('ss1', orc.VCPU, 1)] ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 8, 4) + (orc.VCPU, 8, 4) ]), 'ss1': set([ - (fields.ResourceClass.VCPU, 8, 0) + (orc.VCPU, 8, 0) ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -562,12 +536,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): cn1, cn2, cn3 = (self._create_provider(name) for name in ('cn1', 'cn2', 'cn3')) for cn in (cn1, cn2, cn3): - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768, + tb.add_inventory(cn, orc.MEMORY_MB, 32768, min_unit=64, step_size=64, allocation_ratio=1.5) total_gb = 1000 if cn.name == 'cn3' else 2000 - tb.add_inventory(cn, fields.ResourceClass.DISK_GB, total_gb, + tb.add_inventory(cn, orc.DISK_GB, total_gb, reserved=100, min_unit=10, step_size=10, allocation_ratio=1.0) @@ -581,14 +555,14 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # disk capacity. 
expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0), - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 32768 * 1.5, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0), - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 32768 * 1.5, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -600,12 +574,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # resource amounts in the filter:resources dict passed to # AllocationCandidates.get_by_requests(). expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('cn2', fields.ResourceClass.MEMORY_MB, 64), - ('cn2', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('cn1', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 1), + ('cn2', orc.MEMORY_MB, 64), + ('cn2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) @@ -635,9 +609,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # Only cn2 should be in our allocation requests now since that's the # only one with the required trait expected = [ - [('cn2', fields.ResourceClass.VCPU, 1), - ('cn2', fields.ResourceClass.MEMORY_MB, 64), - ('cn2', fields.ResourceClass.DISK_GB, 1500)], + [('cn2', orc.VCPU, 1), + ('cn2', orc.MEMORY_MB, 64), + ('cn2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) p_sums = alloc_cands.provider_summaries @@ -645,9 +619,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): expected = { 'cn2': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0), - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 32768 * 1.5, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -666,9 +640,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): )}, ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('cn1', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) @@ -681,12 +655,11 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # Create three compute node providers with VCPU, RAM and local disk for name in ('cn1', 'cn2', 'cn3'): cn = self._create_provider(name) - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, - allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) + tb.add_inventory(cn, orc.MEMORY_MB, 32768, min_unit=64, step_size=64, allocation_ratio=1.5) total_gb = 1000 if name == 'cn3' else 2000 - tb.add_inventory(cn, fields.ResourceClass.DISK_GB, total_gb, + tb.add_inventory(cn, orc.DISK_GB, total_gb, reserved=100, min_unit=10, step_size=10, allocation_ratio=1.0) @@ -737,17 +710,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): cn1, cn2 = (self._create_provider(name, uuids.agg) for name in ('cn1', 'cn2')) for cn in (cn1, cn2): - 
tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, - allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) + tb.add_inventory(cn, orc.MEMORY_MB, 1024, min_unit=64, allocation_ratio=1.5) # Create the shared storage pool, asociated with the same aggregate ss = self._create_provider('shared storage', uuids.agg) # Give the shared storage pool some inventory of DISK_GB - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, reserved=100, - min_unit=10) + tb.add_inventory(ss, orc.DISK_GB, 2000, reserved=100, min_unit=10) # Mark the shared storage pool as having inventory shared among any # provider associated via aggregate @@ -763,15 +734,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # nodes and the shared provider. expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'shared storage': set([ - (fields.ResourceClass.DISK_GB, 2000 - 100, 0) + (orc.DISK_GB, 2000 - 100, 0) ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -785,12 +756,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # MEMORY_MB should be the compute nodes while the provider for the # DISK_GB should be the shared storage pool expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('shared storage', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('cn2', fields.ResourceClass.MEMORY_MB, 64), - ('shared storage', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('shared storage', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 1), + ('cn2', orc.MEMORY_MB, 64), + ('shared storage', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) @@ -815,7 +786,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # that's a distant possibility. expected = { 'shared storage': set([ - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -824,7 +795,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # provider because the only thing we're requesting to allocate is # against the provider of DISK_GB, which happens to be the shared # storage provider. - expected = [[('shared storage', fields.ResourceClass.DISK_GB, 10)]] + expected = [[('shared storage', orc.DISK_GB, 10)]] self._validate_allocation_requests(expected, alloc_cands) # Now we're going to add a set of required traits into the request mix. @@ -862,15 +833,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # provider in the summaries. 
expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'shared storage': set([ - (fields.ResourceClass.DISK_GB, 2000 - 100, 0) + (orc.DISK_GB, 2000 - 100, 0) ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -909,16 +880,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): )} ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('shared storage', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('shared storage', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) # Add disk to cn1, forbid sharing, and require the AVX2 trait. # This should result in getting only cn1. - tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2048, - allocation_ratio=1.5) + tb.add_inventory(cn1, orc.DISK_GB, 2048, allocation_ratio=1.5) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( use_same_provider=False, @@ -928,9 +898,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): )} ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('cn1', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) @@ -949,9 +919,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # CUSTOM_MAGIC resources, associated with the aggregate. for name in ('cn1', 'cn2'): cn = self._create_provider(name, agg_uuid) - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, - allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) + tb.add_inventory(cn, orc.MEMORY_MB, 1024, min_unit=64, allocation_ratio=1.5) # Create a custom resource called MAGIC @@ -974,8 +943,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # The resources we will request requested_resources = { - fields.ResourceClass.VCPU: 1, - fields.ResourceClass.MEMORY_MB: 64, + orc.VCPU: 1, + orc.MEMORY_MB: 64, magic_rc.name: 512, } @@ -992,23 +961,23 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # MEMORY_MB should be the compute nodes while the provider for the # MAGIC should be the shared custom resource provider. 
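# Reviewer note: CUSTOM_MAGIC above exercises the create() rules shown
# earlier in resource_provider.py; a hedged illustration, assuming a
# context object ctx like the tests' self.ctx:
rc = rp_obj.ResourceClass(ctx, name='CUSTOM_MAGIC')
rc.create()  # accepted: the name starts with orc.CUSTOM_NAMESPACE
try:
    rp_obj.ResourceClass(ctx, name='VCPU').create()
except exception.ResourceClassExists:
    pass  # standard class names are rejected
try:
    rp_obj.ResourceClass(ctx, name='MAGIC').create()
except exception.ObjectActionError:
    pass  # custom names must start with CUSTOM_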
expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), ('shared custom resource provider', magic_rc.name, 512)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('cn2', fields.ResourceClass.MEMORY_MB, 64), + [('cn2', orc.VCPU, 1), + ('cn2', orc.MEMORY_MB, 64), ('shared custom resource provider', magic_rc.name, 512)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'shared custom resource provider': set([ (magic_rc.name, 1024, 0) @@ -1025,21 +994,18 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # cn3 is not associated with the aggregate cn3 = self._create_provider('cn3') for cn in (cn1, cn2, cn3): - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, - allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) + tb.add_inventory(cn, orc.MEMORY_MB, 1024, min_unit=64, allocation_ratio=1.5) # Only cn3 has disk - tb.add_inventory(cn3, fields.ResourceClass.DISK_GB, 2000, - reserved=100, min_unit=10) + tb.add_inventory(cn3, orc.DISK_GB, 2000, reserved=100, min_unit=10) # Create the shared storage pool in the same aggregate as the first two # compute nodes ss = self._create_provider('shared storage', uuids.agg) # Give the shared storage pool some inventory of DISK_GB - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, reserved=100, - min_unit=10) + tb.add_inventory(ss, orc.DISK_GB, 2000, reserved=100, min_unit=10) tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE") @@ -1048,35 +1014,35 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # Expect cn1, cn2, cn3 and ss in the summaries expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'cn3': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), 'shared storage': set([ - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) # Expect three allocation requests: (cn1, ss), (cn2, ss), (cn3) expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('shared storage', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('cn2', fields.ResourceClass.MEMORY_MB, 64), - ('shared storage', fields.ResourceClass.DISK_GB, 1500)], - [('cn3', fields.ResourceClass.VCPU, 1), - ('cn3', fields.ResourceClass.MEMORY_MB, 64), - ('cn3', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('shared storage', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 1), + ('cn2', orc.MEMORY_MB, 64), + 
('shared storage', orc.DISK_GB, 1500)], + [('cn3', orc.VCPU, 1), + ('cn3', orc.MEMORY_MB, 64), + ('cn3', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) @@ -1116,20 +1082,20 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # provider in the summaries. expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), ]), 'cn3': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), 'shared storage': set([ - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1165,17 +1131,17 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # There should be only cn3 in the returned allocation candidates expected = [ - [('cn3', fields.ResourceClass.VCPU, 1), - ('cn3', fields.ResourceClass.MEMORY_MB, 64), - ('cn3', fields.ResourceClass.DISK_GB, 1500)], + [('cn3', orc.VCPU, 1), + ('cn3', orc.MEMORY_MB, 64), + ('cn3', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn3': set([ - (fields.ResourceClass.VCPU, 24 * 16.0, 0), - (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0), - (fields.ResourceClass.DISK_GB, 2000 - 100, 0), + (orc.VCPU, 24 * 16.0, 0), + (orc.MEMORY_MB, 1024 * 1.5, 0), + (orc.DISK_GB, 2000 - 100, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1188,46 +1154,46 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): def test_common_rc(self): """Candidates when cn and shared have inventory in the same class.""" cn = self._create_provider('cn', uuids.agg1) - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 2048) - tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(cn, orc.VCPU, 24) + tb.add_inventory(cn, orc.MEMORY_MB, 2048) + tb.add_inventory(cn, orc.DISK_GB, 1600) ss = self._create_provider('ss', uuids.agg1) tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(ss, orc.DISK_GB, 2000) alloc_cands = self._get_allocation_candidates() # One allocation_request should have cn + ss; the other should have # just the cn. 
expected = [ - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('cn', fields.ResourceClass.DISK_GB, 1500)], - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('ss', fields.ResourceClass.DISK_GB, 1500)], + [('cn', orc.VCPU, 1), + ('cn', orc.MEMORY_MB, 64), + ('cn', orc.DISK_GB, 1500)], + [('cn', orc.VCPU, 1), + ('cn', orc.MEMORY_MB, 64), + ('ss', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), + (orc.DISK_GB, 2000, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) # Next let's increase the requested DISK_GB requested_resources = { - fields.ResourceClass.VCPU: 1, - fields.ResourceClass.MEMORY_MB: 64, - fields.ResourceClass.DISK_GB: 1800, + orc.VCPU: 1, + orc.MEMORY_MB: 64, + orc.DISK_GB: 1800, } alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1237,21 +1203,21 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('ss', fields.ResourceClass.DISK_GB, 1800)], + [('cn', orc.VCPU, 1), + ('cn', orc.MEMORY_MB, 64), + ('ss', orc.DISK_GB, 1800)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), + (orc.DISK_GB, 2000, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1263,14 +1229,14 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # use_same_provider=False cn = self._create_provider('cn', uuids.agg1) - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 2048) - tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(cn, orc.VCPU, 24) + tb.add_inventory(cn, orc.MEMORY_MB, 2048) + tb.add_inventory(cn, orc.DISK_GB, 1600) # The compute node's disk is SSD tb.set_traits(cn, 'HW_CPU_X86_SSE', 'STORAGE_DISK_SSD') ss = self._create_provider('ss', uuids.agg1) - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss, orc.DISK_GB, 1600) # The shared storage's disk is RAID tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_RAID') @@ -1288,21 +1254,21 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # CUSTOM_RAID traits. 
# expected = [] expected = [ - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('ss', fields.ResourceClass.DISK_GB, 1500)], + [('cn', orc.VCPU, 1), + ('cn', orc.MEMORY_MB, 64), + ('ss', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) # expected = {} expected = { 'cn': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1310,9 +1276,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): def test_only_one_sharing_provider(self): ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16) - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24) + tb.add_inventory(ss1, orc.SRIOV_NET_VF, 16) + tb.add_inventory(ss1, orc.DISK_GB, 1600) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1326,17 +1292,17 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)] + [('ss1', orc.IPV4_ADDRESS, 2), + ('ss1', orc.SRIOV_NET_VF, 1), + ('ss1', orc.DISK_GB, 1500)] ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'ss1': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.IPV4_ADDRESS, 24, 0), + (orc.SRIOV_NET_VF, 16, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1344,11 +1310,11 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): def test_all_sharing_providers_no_rc_overlap(self): ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24) + tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24) ss2 = self._create_provider('ss2', uuids.agg1) tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss2, orc.DISK_GB, 1600) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1361,17 +1327,17 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], + [('ss1', orc.IPV4_ADDRESS, 2), + ('ss2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'ss1': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), + (orc.IPV4_ADDRESS, 24, 0), ]), 'ss2': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1379,12 +1345,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): def test_all_sharing_providers_no_rc_overlap_more_classes(self): ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, 
fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16) + tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24) + tb.add_inventory(ss1, orc.SRIOV_NET_VF, 16) ss2 = self._create_provider('ss2', uuids.agg1) tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss2, orc.DISK_GB, 1600) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1398,19 +1364,19 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss2', fields.ResourceClass.DISK_GB, 1500)] + [('ss1', orc.IPV4_ADDRESS, 2), + ('ss1', orc.SRIOV_NET_VF, 1), + ('ss2', orc.DISK_GB, 1500)] ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'ss1': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0) + (orc.IPV4_ADDRESS, 24, 0), + (orc.SRIOV_NET_VF, 16, 0) ]), 'ss2': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1418,14 +1384,14 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): def test_all_sharing_providers(self): ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16) - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss1, orc.IPV4_ADDRESS, 24) + tb.add_inventory(ss1, orc.SRIOV_NET_VF, 16) + tb.add_inventory(ss1, orc.DISK_GB, 1600) ss2 = self._create_provider('ss2', uuids.agg1) tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16) - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss2, orc.SRIOV_NET_VF, 16) + tb.add_inventory(ss2, orc.DISK_GB, 1600) alloc_cands = self._get_allocation_candidates(requests={ '': placement_lib.RequestGroup( @@ -1444,30 +1410,30 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # - gets the DISK_GB from ss2 and the rest from ss1, # - gets SRIOV_NET_VF and DISK_GB from ss2 and rest from ss1 expected = [ - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], + [('ss1', orc.IPV4_ADDRESS, 2), + ('ss1', orc.SRIOV_NET_VF, 1), + ('ss1', orc.DISK_GB, 1500)], + [('ss1', orc.IPV4_ADDRESS, 2), + ('ss1', orc.SRIOV_NET_VF, 1), + ('ss2', orc.DISK_GB, 1500)], + [('ss1', orc.IPV4_ADDRESS, 2), + ('ss2', orc.SRIOV_NET_VF, 1), + ('ss1', orc.DISK_GB, 1500)], + [('ss1', orc.IPV4_ADDRESS, 2), + ('ss2', orc.SRIOV_NET_VF, 1), + ('ss2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'ss1': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), - (fields.ResourceClass.DISK_GB, 1600, 0) + 
(orc.IPV4_ADDRESS, 24, 0), + (orc.SRIOV_NET_VF, 16, 0), + (orc.DISK_GB, 1600, 0) ]), 'ss2': set([ - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.SRIOV_NET_VF, 16, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1482,16 +1448,16 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # It is different from test_mix_local_and_shared as it uses two # different aggregates to connect the two CNs to the share RP cn1 = self._create_provider('cn1', uuids.agg1) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.VCPU, 24) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) cn2 = self._create_provider('cn2', uuids.agg2) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn2, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn2, orc.VCPU, 24) + tb.add_inventory(cn2, orc.MEMORY_MB, 2048) ss1 = self._create_provider('ss1', uuids.agg1, uuids.agg2) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss1, orc.DISK_GB, 1600) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1503,24 +1469,24 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): )} ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('ss1', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 2), + ('ss1', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1532,27 +1498,27 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # / agg3 \ agg1 / agg1 \ agg2 # SS3 (IPV4) SS1 (DISK_GB) SS2 (IPV4) cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg3) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.VCPU, 24) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) cn2 = self._create_provider('cn2', uuids.agg1, uuids.agg2) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn2, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn2, orc.VCPU, 24) + tb.add_inventory(cn2, orc.MEMORY_MB, 2048) # ss1 is connected to both cn1 and cn2 ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss1, orc.DISK_GB, 1600) # ss2 only connected to cn2 ss2 = self._create_provider('ss2', uuids.agg2) tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24) + tb.add_inventory(ss2, orc.IPV4_ADDRESS, 24) # ss3 only connected to cn1 ss3 = self._create_provider('ss3', uuids.agg3) tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss3, 
fields.ResourceClass.IPV4_ADDRESS, 24) + tb.add_inventory(ss3, orc.IPV4_ADDRESS, 24) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1566,32 +1532,32 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss3', fields.ResourceClass.IPV4_ADDRESS, 2)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2)], + [('cn1', orc.VCPU, 2), + ('ss1', orc.DISK_GB, 1500), + ('ss3', orc.IPV4_ADDRESS, 2)], + [('cn2', orc.VCPU, 2), + ('ss1', orc.DISK_GB, 1500), + ('ss2', orc.IPV4_ADDRESS, 2)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss2': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), + (orc.IPV4_ADDRESS, 24, 0), ]), 'ss3': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), + (orc.IPV4_ADDRESS, 24, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1606,49 +1572,49 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # ss2_1(MEM) ss2_2(DISK) with different resources. cn1 = self._create_provider('cn1', uuids.agg1) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.VCPU, 24) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) ss1 = self._create_provider('ss1', uuids.agg1) - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(ss1, orc.DISK_GB, 2000) tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE') cn2 = self._create_provider('cn2', uuids.agg2) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) + tb.add_inventory(cn2, orc.VCPU, 24) ss2_1 = self._create_provider('ss2_1', uuids.agg2) - tb.add_inventory(ss2_1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(ss2_1, orc.MEMORY_MB, 2048) tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE') ss2_2 = self._create_provider('ss2_2', uuids.agg2) - tb.add_inventory(ss2_2, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(ss2_2, orc.DISK_GB, 2000) tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE') alloc_cands = self._get_allocation_candidates() expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('ss2_1', fields.ResourceClass.MEMORY_MB, 64), - ('ss2_2', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('ss1', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 1), + ('ss2_1', orc.MEMORY_MB, 64), + ('ss2_2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), + (orc.DISK_GB, 2000, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), + (orc.VCPU, 24, 0), ]), 'ss2_1': set([ - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.MEMORY_MB, 
2048, 0), ]), 'ss2_2': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), + (orc.DISK_GB, 2000, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1663,57 +1629,57 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # ss2_1(MEM) ss2_2(DISK) with different resources. cn1 = self._create_provider('cn1', uuids.agg1) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.VCPU, 24) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) ss1_1 = self._create_provider('ss1_1', uuids.agg1) - tb.add_inventory(ss1_1, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(ss1_1, orc.DISK_GB, 2000) tb.set_traits(ss1_1, 'MISC_SHARES_VIA_AGGREGATE') ss1_2 = self._create_provider('ss1_2', uuids.agg1) - tb.add_inventory(ss1_2, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(ss1_2, orc.DISK_GB, 2000) tb.set_traits(ss1_2, 'MISC_SHARES_VIA_AGGREGATE') cn2 = self._create_provider('cn2', uuids.agg2) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) + tb.add_inventory(cn2, orc.VCPU, 24) ss2_1 = self._create_provider('ss2_1', uuids.agg2) - tb.add_inventory(ss2_1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(ss2_1, orc.MEMORY_MB, 2048) tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE') ss2_2 = self._create_provider('ss2_2', uuids.agg2) - tb.add_inventory(ss2_2, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(ss2_2, orc.DISK_GB, 2000) tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE') alloc_cands = self._get_allocation_candidates() expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('ss1_1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('ss1_2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('ss2_1', fields.ResourceClass.MEMORY_MB, 64), - ('ss2_2', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('ss1_1', orc.DISK_GB, 1500)], + [('cn1', orc.VCPU, 1), + ('cn1', orc.MEMORY_MB, 64), + ('ss1_2', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 1), + ('ss2_1', orc.MEMORY_MB, 64), + ('ss2_2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'ss1_1': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), + (orc.DISK_GB, 2000, 0), ]), 'ss1_2': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), + (orc.DISK_GB, 2000, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), + (orc.VCPU, 24, 0), ]), 'ss2_1': set([ - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'ss2_2': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), + (orc.DISK_GB, 2000, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1725,27 +1691,27 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # / agg1 \ agg2 / agg2 \ agg3 # SS1 (DISK_GB) SS2 (DISK_GB) SS3 (DISK_GB) cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(cn1, orc.VCPU, 24) + tb.add_inventory(cn1, orc.DISK_GB, 1600) cn2 = self._create_provider('cn2', uuids.agg2, uuids.agg3) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn2, 
fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(cn2, orc.VCPU, 24) + tb.add_inventory(cn2, orc.DISK_GB, 1600) # ss1 is connected to cn1 ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss1, orc.DISK_GB, 1600) # ss2 is connected to both cn1 and cn2 ss2 = self._create_provider('ss2', uuids.agg2) tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss2, orc.DISK_GB, 1600) # ss3 is connected to cn2 ss3 = self._create_provider('ss3', uuids.agg3) tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss3, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss3, orc.DISK_GB, 1600) # Let's get allocation candidates from agg1 alloc_cands = self._get_allocation_candidates( @@ -1760,20 +1726,20 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('cn1', orc.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('ss1', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1791,28 +1757,28 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('cn2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('cn1', orc.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('ss2', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 2), + ('cn2', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 2), + ('ss2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.DISK_GB, 1600, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss2': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1832,15 +1798,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('cn1', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1858,33 +1824,33 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', 
fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('cn2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('cn1', orc.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('ss1', orc.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('ss2', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 2), + ('cn2', orc.DISK_GB, 1500)], + [('cn2', orc.VCPU, 2), + ('ss2', orc.DISK_GB, 1500)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.DISK_GB, 1600, 0), ]), 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.VCPU, 24, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss2': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1903,16 +1869,16 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss1, orc.DISK_GB, 1600) cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.VCPU, 24) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) ss2 = self._create_provider('ss2', uuids.agg2) tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16) + tb.add_inventory(ss2, orc.IPV4_ADDRESS, 24) + tb.add_inventory(ss2, orc.SRIOV_NET_VF, 16) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1926,19 +1892,19 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1)], + [('ss1', orc.DISK_GB, 1500), + ('ss2', orc.IPV4_ADDRESS, 2), + ('ss2', orc.SRIOV_NET_VF, 1)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss2': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), + (orc.IPV4_ADDRESS, 24, 0), + (orc.SRIOV_NET_VF, 16, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -1956,16 +1922,16 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ss1 = self._create_provider('ss1', uuids.agg1) tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) + tb.add_inventory(ss1, orc.DISK_GB, 1600) cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.VCPU, 24) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) ss2 = self._create_provider('ss2', uuids.agg2) 
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16) + tb.add_inventory(ss2, orc.IPV4_ADDRESS, 24) + tb.add_inventory(ss2, orc.SRIOV_NET_VF, 16) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( @@ -1980,24 +1946,24 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): ) expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1)], + [('cn1', orc.VCPU, 2), + ('ss1', orc.DISK_GB, 1500), + ('ss2', orc.IPV4_ADDRESS, 2), + ('ss2', orc.SRIOV_NET_VF, 1)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), + (orc.VCPU, 24, 0), + (orc.MEMORY_MB, 2048, 0), ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), + (orc.DISK_GB, 1600, 0), ]), 'ss2': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), + (orc.IPV4_ADDRESS, 24, 0), + (orc.SRIOV_NET_VF, 16, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -2033,51 +1999,51 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # the only PF that has the required trait has no inventory left. cn = self._create_provider('cn') - tb.add_inventory(cn, fields.ResourceClass.VCPU, 16) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768) + tb.add_inventory(cn, orc.VCPU, 16) + tb.add_inventory(cn, orc.MEMORY_MB, 32768) numa_cell0 = self._create_provider('cn_numa0', parent=cn.uuid) numa_cell1 = self._create_provider('cn_numa1', parent=cn.uuid) pf0 = self._create_provider('cn_numa0_pf0', parent=numa_cell0.uuid) - tb.add_inventory(pf0, fields.ResourceClass.SRIOV_NET_VF, 8) + tb.add_inventory(pf0, orc.SRIOV_NET_VF, 8) pf1 = self._create_provider('cn_numa1_pf1', parent=numa_cell1.uuid) - tb.add_inventory(pf1, fields.ResourceClass.SRIOV_NET_VF, 8) + tb.add_inventory(pf1, orc.SRIOV_NET_VF, 8) tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 2, - fields.ResourceClass.MEMORY_MB: 256, - fields.ResourceClass.SRIOV_NET_VF: 1, + orc.VCPU: 2, + orc.MEMORY_MB: 256, + orc.SRIOV_NET_VF: 1, } )} ) expected = [ - [('cn', fields.ResourceClass.VCPU, 2), - ('cn', fields.ResourceClass.MEMORY_MB, 256), - ('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)], - [('cn', fields.ResourceClass.VCPU, 2), - ('cn', fields.ResourceClass.MEMORY_MB, 256), - ('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)], + [('cn', orc.VCPU, 2), + ('cn', orc.MEMORY_MB, 256), + ('cn_numa0_pf0', orc.SRIOV_NET_VF, 1)], + [('cn', orc.VCPU, 2), + ('cn', orc.MEMORY_MB, 256), + ('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn': set([ - (fields.ResourceClass.VCPU, 16, 0), - (fields.ResourceClass.MEMORY_MB, 32768, 0), + (orc.VCPU, 16, 0), + (orc.MEMORY_MB, 32768, 0), ]), 'cn_numa0': set([]), 'cn_numa1': set([]), 'cn_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), 'cn_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -2098,33 
+2064,33 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 2, - fields.ResourceClass.MEMORY_MB: 256, - fields.ResourceClass.SRIOV_NET_VF: 1, + orc.VCPU: 2, + orc.MEMORY_MB: 256, + orc.SRIOV_NET_VF: 1, }, required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE], )} ) expected = [ - [('cn', fields.ResourceClass.VCPU, 2), - ('cn', fields.ResourceClass.MEMORY_MB, 256), - ('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)], + [('cn', orc.VCPU, 2), + ('cn', orc.MEMORY_MB, 256), + ('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn': set([ - (fields.ResourceClass.VCPU, 16, 0), - (fields.ResourceClass.MEMORY_MB, 32768, 0), + (orc.VCPU, 16, 0), + (orc.MEMORY_MB, 32768, 0), ]), 'cn_numa0': set([]), 'cn_numa1': set([]), 'cn_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), 'cn_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -2144,29 +2110,29 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.SRIOV_NET_VF: 1, + orc.SRIOV_NET_VF: 1, }, )} ) expected = [ - [('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)], - [('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)], + [('cn_numa0_pf0', orc.SRIOV_NET_VF, 1)], + [('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn': set([ - (fields.ResourceClass.VCPU, 16, 0), - (fields.ResourceClass.MEMORY_MB, 32768, 0), + (orc.VCPU, 16, 0), + (orc.MEMORY_MB, 32768, 0), ]), 'cn_numa0': set([]), 'cn_numa1': set([]), 'cn_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), 'cn_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -2186,29 +2152,29 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): {'': placement_lib.RequestGroup( use_same_provider=True, resources={ - fields.ResourceClass.SRIOV_NET_VF: 1, + orc.SRIOV_NET_VF: 1, }, )} ) expected = [ - [('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)], - [('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)], + [('cn_numa0_pf0', orc.SRIOV_NET_VF, 1)], + [('cn_numa1_pf1', orc.SRIOV_NET_VF, 1)], ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn': set([ - (fields.ResourceClass.VCPU, 16, 0), - (fields.ResourceClass.MEMORY_MB, 32768, 0), + (orc.VCPU, 16, 0), + (orc.MEMORY_MB, 32768, 0), ]), 'cn_numa0': set([]), 'cn_numa1': set([]), 'cn_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), 'cn_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), + (orc.SRIOV_NET_VF, 8, 0), ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -2226,16 +2192,16 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # function (the one with HW_NIC_OFFLOAD_GENEVE associated with it) and # verify that the same request still results in 0 results since the # function with the required trait no longer has any inventory. 
- self.allocate_from_provider(pf1, fields.ResourceClass.SRIOV_NET_VF, 8) + self.allocate_from_provider(pf1, orc.SRIOV_NET_VF, 8) alloc_cands = self._get_allocation_candidates( {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 2, - fields.ResourceClass.MEMORY_MB: 256, - fields.ResourceClass.SRIOV_NET_VF: 1, + orc.VCPU: 2, + orc.MEMORY_MB: 256, + orc.SRIOV_NET_VF: 1, }, required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE], )} @@ -2265,12 +2231,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # NOTE(jaypipes): _get_trees_matching_all() expects a dict of resource # class internal identifiers, not string names resources = { - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.VCPU): 2, - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.MEMORY_MB): 256, - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.SRIOV_NET_VF): 1, + orc.STANDARDS.index(orc.VCPU): 2, + orc.STANDARDS.index(orc.MEMORY_MB): 256, + orc.STANDARDS.index(orc.SRIOV_NET_VF): 1, } req_traits = {} forbidden_traits = {} @@ -2299,8 +2262,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): cn_names.append(cn_name) cn = self._create_provider(name) - tb.add_inventory(cn, fields.ResourceClass.VCPU, 16) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768) + tb.add_inventory(cn, orc.VCPU, 16) + tb.add_inventory(cn, orc.MEMORY_MB, 32768) name = 'cn' + x + '_numa0' numa_cell0 = self._create_provider(name, parent=cn.uuid) @@ -2309,10 +2272,10 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): name = 'cn' + x + '_numa0_pf0' pf0 = self._create_provider(name, parent=numa_cell0.uuid) - tb.add_inventory(pf0, fields.ResourceClass.SRIOV_NET_VF, 8) + tb.add_inventory(pf0, orc.SRIOV_NET_VF, 8) name = 'cn' + x + '_numa1_pf1' pf1 = self._create_provider(name, parent=numa_cell1.uuid) - tb.add_inventory(pf1, fields.ResourceClass.SRIOV_NET_VF, 8) + tb.add_inventory(pf1, orc.SRIOV_NET_VF, 8) # Mark only the second PF on the third compute node as having # GENEVE offload enabled if x == '3': @@ -2343,13 +2306,11 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # _get_trees_matching_all() cn2_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, uuids.cn2_numa0_pf0) - self.allocate_from_provider(cn2_pf0, fields.ResourceClass.SRIOV_NET_VF, - 8) + self.allocate_from_provider(cn2_pf0, orc.SRIOV_NET_VF, 8) cn2_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, uuids.cn2_numa1_pf1) - self.allocate_from_provider(cn2_pf1, fields.ResourceClass.SRIOV_NET_VF, - 8) + self.allocate_from_provider(cn2_pf1, orc.SRIOV_NET_VF, 8) trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits, forbidden_traits, sharing, member_of) @@ -2450,22 +2411,18 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # no more providers are returned cn1_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, uuids.cn1_numa0_pf0) - self.allocate_from_provider( - cn1_pf0, fields.ResourceClass.SRIOV_NET_VF, 8) + self.allocate_from_provider(cn1_pf0, orc.SRIOV_NET_VF, 8) cn1_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, uuids.cn1_numa1_pf1) - self.allocate_from_provider( - cn1_pf1, fields.ResourceClass.SRIOV_NET_VF, 8) + self.allocate_from_provider(cn1_pf1, orc.SRIOV_NET_VF, 8) cn3_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, uuids.cn3_numa0_pf0) - self.allocate_from_provider( - cn3_pf0, fields.ResourceClass.SRIOV_NET_VF, 8) + self.allocate_from_provider(cn3_pf0, orc.SRIOV_NET_VF, 8) cn3_pf1 = 
rp_obj.ResourceProvider.get_by_uuid(self.ctx, uuids.cn3_numa1_pf1) - self.allocate_from_provider( - cn3_pf1, fields.ResourceClass.SRIOV_NET_VF, 8) + self.allocate_from_provider(cn3_pf1, orc.SRIOV_NET_VF, 8) trees = rp_obj._get_trees_matching_all(self.ctx, resources, req_traits, forbidden_traits, sharing, member_of) @@ -2507,8 +2464,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): cn1 = self._create_provider('cn1', uuids.agg1) cn2 = self._create_provider('cn2', uuids.agg2) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 16) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 16) + tb.add_inventory(cn1, orc.VCPU, 16) + tb.add_inventory(cn2, orc.VCPU, 16) numa1_0 = self._create_provider('cn1_numa0', parent=cn1.uuid) numa1_1 = self._create_provider('cn1_numa1', parent=cn1.uuid) @@ -2520,17 +2477,17 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): pf2_0 = self._create_provider('cn2_numa0_pf0', parent=numa2_0.uuid) pf2_1 = self._create_provider('cn2_numa1_pf1', parent=numa2_1.uuid) - tb.add_inventory(pf1_0, fields.ResourceClass.SRIOV_NET_VF, 8) - tb.add_inventory(pf1_1, fields.ResourceClass.SRIOV_NET_VF, 8) - tb.add_inventory(pf2_0, fields.ResourceClass.SRIOV_NET_VF, 8) - tb.add_inventory(pf2_1, fields.ResourceClass.SRIOV_NET_VF, 8) + tb.add_inventory(pf1_0, orc.SRIOV_NET_VF, 8) + tb.add_inventory(pf1_1, orc.SRIOV_NET_VF, 8) + tb.add_inventory(pf2_0, orc.SRIOV_NET_VF, 8) + tb.add_inventory(pf2_1, orc.SRIOV_NET_VF, 8) tb.set_traits(pf2_1, os_traits.HW_NIC_OFFLOAD_GENEVE) tb.set_traits(pf1_1, os_traits.HW_NIC_OFFLOAD_GENEVE) ss1 = self._create_provider('ss1', uuids.agg1) ss2 = self._create_provider('ss2', uuids.agg2) - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 2000) - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1000) + tb.add_inventory(ss1, orc.DISK_GB, 2000) + tb.add_inventory(ss2, orc.DISK_GB, 1000) tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE') tb.set_traits(ss2, 'MISC_SHARES_VIA_AGGREGATE') @@ -2538,9 +2495,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 2, - fields.ResourceClass.SRIOV_NET_VF: 1, - fields.ResourceClass.DISK_GB: 1500, + orc.VCPU: 2, + orc.SRIOV_NET_VF: 1, + orc.DISK_GB: 1500, }) } ) @@ -2548,30 +2505,30 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # cn2 is not in the allocation candidates because it doesn't have # enough DISK_GB resource with shared providers. 
expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)] + [('cn1', orc.VCPU, 2), + ('cn1_numa0_pf0', orc.SRIOV_NET_VF, 1), + ('ss1', orc.DISK_GB, 1500)], + [('cn1', orc.VCPU, 2), + ('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1), + ('ss1', orc.DISK_GB, 1500)] ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 16, 0) + (orc.VCPU, 16, 0) ]), 'cn1_numa0': set([]), 'cn1_numa1': set([]), 'cn1_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0) + (orc.SRIOV_NET_VF, 8, 0) ]), 'cn1_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0) + (orc.SRIOV_NET_VF, 8, 0) ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 2000, 0) + (orc.DISK_GB, 2000, 0) ]), } self._validate_provider_summary_resources(expected, alloc_cands) @@ -2582,9 +2539,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): {'': placement_lib.RequestGroup( use_same_provider=False, resources={ - fields.ResourceClass.VCPU: 2, - fields.ResourceClass.SRIOV_NET_VF: 1, - fields.ResourceClass.DISK_GB: 1500, + orc.VCPU: 2, + orc.SRIOV_NET_VF: 1, + orc.DISK_GB: 1500, }, required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE]) } @@ -2593,26 +2550,26 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase): # cn1_numa0_pf0 is not in the allocation candidates because it # doesn't have the required trait. expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)] + [('cn1', orc.VCPU, 2), + ('cn1_numa1_pf1', orc.SRIOV_NET_VF, 1), + ('ss1', orc.DISK_GB, 1500)] ] self._validate_allocation_requests(expected, alloc_cands) expected = { 'cn1': set([ - (fields.ResourceClass.VCPU, 16, 0) + (orc.VCPU, 16, 0) ]), 'cn1_numa0': set([]), 'cn1_numa1': set([]), 'cn1_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0) + (orc.SRIOV_NET_VF, 8, 0) ]), 'cn1_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0) + (orc.SRIOV_NET_VF, 8, 0) ]), 'ss1': set([ - (fields.ResourceClass.DISK_GB, 2000, 0) + (orc.DISK_GB, 2000, 0) ]), } self._validate_provider_summary_resources(expected, alloc_cands) diff --git a/placement/tests/functional/db/test_consumer.py b/placement/tests/functional/db/test_consumer.py index a2f38ac5b..8682b8219 100644 --- a/placement/tests/functional/db/test_consumer.py +++ b/placement/tests/functional/db/test_consumer.py @@ -10,6 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import os_resource_classes as orc from oslo_utils.fixture import uuidsentinel as uuids import sqlalchemy as sa @@ -19,7 +20,6 @@ from placement.objects import consumer as consumer_obj from placement.objects import project as project_obj from placement.objects import resource_provider as rp_obj from placement.objects import user as user_obj -from placement import rc_fields as fields from placement.tests.functional import base from placement.tests.functional.db import test_base as tb @@ -263,24 +263,24 @@ class DeleteConsumerIfNoAllocsTestCase(tb.PlacementDbBaseTestCase): # Create some inventory that we will allocate cn1 = self._create_provider('cn1') - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000) + tb.add_inventory(cn1, orc.VCPU, 8) + tb.add_inventory(cn1, orc.MEMORY_MB, 2048) + tb.add_inventory(cn1, orc.DISK_GB, 2000) # Now allocate some of that inventory to two different consumers allocs = [ rp_obj.Allocation( self.ctx, consumer=c1, resource_provider=cn1, - resource_class=fields.ResourceClass.VCPU, used=1), + resource_class=orc.VCPU, used=1), rp_obj.Allocation( self.ctx, consumer=c1, resource_provider=cn1, - resource_class=fields.ResourceClass.MEMORY_MB, used=512), + resource_class=orc.MEMORY_MB, used=512), rp_obj.Allocation( self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.VCPU, used=1), + resource_class=orc.VCPU, used=1), rp_obj.Allocation( self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.MEMORY_MB, used=512), + resource_class=orc.MEMORY_MB, used=512), ] alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs) alloc_list.replace_all() @@ -297,10 +297,10 @@ class DeleteConsumerIfNoAllocsTestCase(tb.PlacementDbBaseTestCase): allocs = [ rp_obj.Allocation( self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.VCPU, used=0), + resource_class=orc.VCPU, used=0), rp_obj.Allocation( self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.MEMORY_MB, used=0), + resource_class=orc.MEMORY_MB, used=0), ] alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs) alloc_list.replace_all() diff --git a/placement/tests/functional/db/test_resource_class_cache.py b/placement/tests/functional/db/test_resource_class_cache.py index 9c8e9b7c8..bba1161c6 100644 --- a/placement/tests/functional/db/test_resource_class_cache.py +++ b/placement/tests/functional/db/test_resource_class_cache.py @@ -16,66 +16,38 @@ import mock from oslo_utils import timeutils from placement import exception -from placement import rc_fields as fields from placement import resource_class_cache as rc_cache from placement.tests.functional import base class TestResourceClassCache(base.TestCase): - def setUp(self): - super(TestResourceClassCache, self).setUp() - db = self.placement_db - self.context = mock.Mock() - sess_mock = mock.Mock() - sess_mock.connection.side_effect = db.get_engine().connect - self.context.session = sess_mock - - @mock.patch('sqlalchemy.select') - def test_rc_cache_std_no_db(self, sel_mock): + def test_rc_cache_std_db(self): """Test that looking up either an ID or a string in the resource class - cache for a standardized resource class does not result in a DB - call. 
+ cache for a standardized resource class doesn't result in a DB call + once the cache is initialized. """ cache = rc_cache.ResourceClassCache(self.context) + rc_cache._refresh_from_db(self.context, cache) - self.assertEqual('VCPU', cache.string_from_id(0)) - self.assertEqual('MEMORY_MB', cache.string_from_id(1)) - self.assertEqual(0, cache.id_from_string('VCPU')) - self.assertEqual(1, cache.id_from_string('MEMORY_MB')) + with mock.patch('sqlalchemy.select') as sel_mock: + self.assertEqual('VCPU', cache.string_from_id(0)) + self.assertEqual('MEMORY_MB', cache.string_from_id(1)) + self.assertEqual(0, cache.id_from_string('VCPU')) + self.assertEqual(1, cache.id_from_string('MEMORY_MB')) - self.assertFalse(sel_mock.called) - - def test_standards(self): - cache = rc_cache.ResourceClassCache(self.context) - standards = cache.STANDARDS - - self.assertEqual(len(standards), len(fields.ResourceClass.STANDARD)) - names = (rc['name'] for rc in standards) - for name in fields.ResourceClass.STANDARD: - self.assertIn(name, names) - - cache = rc_cache.ResourceClassCache(self.context) - standards2 = cache.STANDARDS - self.assertEqual(id(standards), id(standards2)) - - def test_standards_have_time_fields(self): - cache = rc_cache.ResourceClassCache(self.context) - standards = cache.STANDARDS - - first_standard = standards[0] - self.assertIn('updated_at', first_standard) - self.assertIn('created_at', first_standard) - self.assertIsNone(first_standard['updated_at']) - self.assertIsNone(first_standard['created_at']) + self.assertFalse(sel_mock.called) def test_standard_has_time_fields(self): cache = rc_cache.ResourceClassCache(self.context) - vcpu_class = cache.all_from_string('VCPU') + vcpu_class = dict(cache.all_from_string('VCPU')) expected = {'id': 0, 'name': 'VCPU', 'updated_at': None, 'created_at': None} - self.assertEqual(expected, vcpu_class) + expected_fields = sorted(expected.keys()) + self.assertEqual(expected_fields, sorted(vcpu_class.keys())) + self.assertEqual(0, vcpu_class['id']) + self.assertEqual('VCPU', vcpu_class['name']) def test_rc_cache_custom(self): """Test that non-standard, custom resource classes hit the database and @@ -92,7 +64,7 @@ class TestResourceClassCache(base.TestCase): cache.id_from_string, "IRON_NFV") # Now add to the database and verify appropriate results... - with self.context.session.connection() as conn: + with self.placement_db.get_engine().connect() as conn: ins_stmt = rc_cache._RC_TBL.insert().values( id=1001, name='IRON_NFV' @@ -117,8 +89,8 @@ class TestResourceClassCache(base.TestCase): self.assertIsInstance(iron_nfv_class['created_at'], datetime.datetime) # Update IRON_NFV (this is a no-op but will set updated_at) - with self.context.session.connection() as conn: - # NOTE(cdent): When using explicit SQL that names columns, + with self.placement_db.get_engine().connect() as conn: + # NOTE(cdent): When using explicit SQL that names columns, # the automatic timestamp handling provided by the oslo_db # TimestampMixin is not provided. created_at is a default # but updated_at is an onupdate. 
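Aside on the pattern the reworked cache tests above exercise: after one refresh populates the cache, string_from_id and id_from_string are plain dict lookups and issue no further SELECT; only a miss triggers another refresh. A minimal standalone sketch of that refresh-once, read-from-dict shape (SimpleRCCache and load_rows are invented names for illustration, not the placement API):

    # Sketch only: load_rows stands in for the database query.
    class SimpleRCCache(object):
        def __init__(self, load_rows):
            self._load_rows = load_rows   # callable returning [(id, name), ...]
            self._id_to_str = {}
            self._str_to_id = {}

        def _refresh(self):
            rows = self._load_rows()      # the only step that touches the DB
            self._id_to_str = dict(rows)
            self._str_to_id = {name: rc_id for rc_id, name in rows}

        def id_from_string(self, name):
            rc_id = self._str_to_id.get(name)
            if rc_id is None:             # miss: refresh once, then retry
                self._refresh()
                rc_id = self._str_to_id[name]
            return rc_id

    cache = SimpleRCCache(lambda: [(0, 'VCPU'), (1, 'MEMORY_MB')])
    assert cache.id_from_string('VCPU') == 0  # first call refreshes; later calls are dict hits
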
diff --git a/placement/tests/functional/db/test_resource_provider.py b/placement/tests/functional/db/test_resource_provider.py index 9cf4b59db..3edea5ae6 100644 --- a/placement/tests/functional/db/test_resource_provider.py +++ b/placement/tests/functional/db/test_resource_provider.py @@ -13,6 +13,7 @@ import functools import mock +import os_resource_classes as orc import os_traits from oslo_db import exception as db_exc from oslo_utils.fixture import uuidsentinel @@ -23,7 +24,6 @@ from placement.db.sqlalchemy import models from placement import exception from placement.objects import consumer as consumer_obj from placement.objects import resource_provider as rp_obj -from placement import rc_fields as fields from placement.tests.functional.db import test_base as tb @@ -34,13 +34,13 @@ DISK_INVENTORY = dict( max_unit=5, step_size=1, allocation_ratio=1.0, - resource_class=fields.ResourceClass.DISK_GB + resource_class=orc.DISK_GB ) DISK_ALLOCATION = dict( consumer_id=uuidsentinel.disk_consumer, used=2, - resource_class=fields.ResourceClass.DISK_GB + resource_class=orc.DISK_GB ) @@ -241,7 +241,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): # Create some inventory in the grandchild, allocate some consumers to # the grandchild and then attempt to delete the root provider and child # provider, both of which should fail. - tb.add_inventory(grandchild_rp, fields.ResourceClass.VCPU, 1) + tb.add_inventory(grandchild_rp, orc.VCPU, 1) # Check all providers returned when getting by root UUID rps = rp_obj.ResourceProviderList.get_all_by_filters( @@ -362,7 +362,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid) alloc_list = self.allocate_from_provider( - grandchild_rp, fields.ResourceClass.VCPU, 1) + grandchild_rp, orc.VCPU, 1) self.assertRaises(exception.CannotDeleteParentResourceProvider, root_rp.destroy) @@ -557,12 +557,12 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): def test_set_inventory_over_capacity(self, mock_log): rp = self._create_provider(uuidsentinel.rp_name) - disk_inv = tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 2048, + disk_inv = tb.add_inventory(rp, orc.DISK_GB, 2048, reserved=15, min_unit=10, max_unit=600, step_size=10) - vcpu_inv = tb.add_inventory(rp, fields.ResourceClass.VCPU, 12, + vcpu_inv = tb.add_inventory(rp, orc.VCPU, 12, allocation_ratio=16.0) self.assertFalse(mock_log.warning.called) @@ -582,13 +582,13 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): rp = self._create_provider(uuidsentinel.rp_name) saved_generation = rp.generation - disk_inv = tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 1024, + disk_inv = tb.add_inventory(rp, orc.DISK_GB, 1024, reserved=15, min_unit=10, max_unit=100, step_size=10) - vcpu_inv = tb.add_inventory(rp, fields.ResourceClass.VCPU, 12, + vcpu_inv = tb.add_inventory(rp, orc.VCPU, 12, allocation_ratio=16.0) # generation has bumped once for each add @@ -599,8 +599,8 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): self.ctx, rp) self.assertEqual(2, len(new_inv_list)) resource_classes = [inv.resource_class for inv in new_inv_list] - self.assertIn(fields.ResourceClass.VCPU, resource_classes) - self.assertIn(fields.ResourceClass.DISK_GB, resource_classes) + self.assertIn(orc.VCPU, resource_classes) + self.assertIn(orc.DISK_GB, resource_classes) # reset list to just disk_inv inv_list = rp_obj.InventoryList(objects=[disk_inv]) @@ -614,14 +614,14 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): 
self.ctx, rp) self.assertEqual(1, len(new_inv_list)) resource_classes = [inv.resource_class for inv in new_inv_list] - self.assertNotIn(fields.ResourceClass.VCPU, resource_classes) - self.assertIn(fields.ResourceClass.DISK_GB, resource_classes) + self.assertNotIn(orc.VCPU, resource_classes) + self.assertIn(orc.DISK_GB, resource_classes) self.assertEqual(1024, new_inv_list[0].total) # update existing disk inv to new settings disk_inv = rp_obj.Inventory( resource_provider=rp, - resource_class=fields.ResourceClass.DISK_GB, + resource_class=orc.DISK_GB, total=2048, reserved=15, min_unit=10, @@ -640,7 +640,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): self.assertEqual(2048, new_inv_list[0].total) # delete inventory - rp.delete_inventory(fields.ResourceClass.DISK_GB) + rp.delete_inventory(orc.DISK_GB) # generation has bumped self.assertEqual(saved_generation + 1, rp.generation) @@ -648,10 +648,10 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): new_inv_list = rp_obj.InventoryList.get_all_by_resource_provider( self.ctx, rp) - result = new_inv_list.find(fields.ResourceClass.DISK_GB) + result = new_inv_list.find(orc.DISK_GB) self.assertIsNone(result) self.assertRaises(exception.NotFound, rp.delete_inventory, - fields.ResourceClass.DISK_GB) + orc.DISK_GB) # check inventory list is empty inv_list = rp_obj.InventoryList.get_all_by_resource_provider( @@ -718,7 +718,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): new_total = 1 disk_inv = rp_obj.Inventory( resource_provider=rp, - resource_class=fields.ResourceClass.DISK_GB, total=new_total) + resource_class=orc.DISK_GB, total=new_total) disk_inv.obj_set_defaults() rp.update_inventory(disk_inv) @@ -748,7 +748,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): for rp in (rp1, rp2): tb.add_inventory(rp, DISK_INVENTORY['resource_class'], DISK_INVENTORY['total']) - tb.add_inventory(rp, fields.ResourceClass.IPV4_ADDRESS, 10, + tb.add_inventory(rp, orc.IPV4_ADDRESS, 10, max_unit=2) # Get inventories for the first resource provider and validate @@ -780,65 +780,65 @@ class ResourceProviderListTestCase(tb.PlacementDbBaseTestCase): def test_get_all_by_filters_with_resources(self): for rp_i in ['1', '2']: rp = self._create_provider('rp_name_' + rp_i) - tb.add_inventory(rp, fields.ResourceClass.VCPU, 2) - tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 1024, + tb.add_inventory(rp, orc.VCPU, 2) + tb.add_inventory(rp, orc.DISK_GB, 1024, reserved=2) # Write a specific inventory for testing min/max units and steps - tb.add_inventory(rp, fields.ResourceClass.MEMORY_MB, 1024, + tb.add_inventory(rp, orc.MEMORY_MB, 1024, reserved=2, min_unit=2, max_unit=4, step_size=2) # Create the VCPU allocation only for the first RP if rp_i != '1': continue - self.allocate_from_provider(rp, fields.ResourceClass.VCPU, used=1) + self.allocate_from_provider(rp, orc.VCPU, used=1) # Both RPs should accept that request given the only current allocation # for the first RP is leaving one VCPU resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.VCPU: 1}}) + self.ctx, {'resources': {orc.VCPU: 1}}) self.assertEqual(2, len(resource_providers)) # Now, when asking for 2 VCPUs, only the second RP should accept that # given the current allocation for the first RP resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.VCPU: 2}}) + self.ctx, {'resources': {orc.VCPU: 2}}) self.assertEqual(1, len(resource_providers)) 
# Adding a second resource request should be okay for the 2nd RP # given it has enough disk but we also need to make sure that the # first RP is not acceptable because of the VCPU request resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.VCPU: 2, - fields.ResourceClass.DISK_GB: 1022}}) + self.ctx, {'resources': {orc.VCPU: 2, + orc.DISK_GB: 1022}}) self.assertEqual(1, len(resource_providers)) # Now, we are asking for both disk and VCPU resources that all the RPs # can't accept (as the 2nd RP is having a reserved size) resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.VCPU: 2, - fields.ResourceClass.DISK_GB: 1024}}) + self.ctx, {'resources': {orc.VCPU: 2, + orc.DISK_GB: 1024}}) self.assertEqual(0, len(resource_providers)) # We also want to verify that asking for a specific RP can also be # checking the resource usage. resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( self.ctx, {'name': u'rp_name_1', - 'resources': {fields.ResourceClass.VCPU: 1}}) + 'resources': {orc.VCPU: 1}}) self.assertEqual(1, len(resource_providers)) # Let's verify that the min and max units are checked too # Case 1: amount is in between min and max and modulo step_size resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 2}}) + self.ctx, {'resources': {orc.MEMORY_MB: 2}}) self.assertEqual(2, len(resource_providers)) # Case 2: amount is less than min_unit resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 1}}) + self.ctx, {'resources': {orc.MEMORY_MB: 1}}) self.assertEqual(0, len(resource_providers)) # Case 3: amount is more than min_unit resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 5}}) + self.ctx, {'resources': {orc.MEMORY_MB: 5}}) self.assertEqual(0, len(resource_providers)) # Case 4: amount is not modulo step_size resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 3}}) + self.ctx, {'resources': {orc.MEMORY_MB: 3}}) self.assertEqual(0, len(resource_providers)) def test_get_all_by_filters_with_resources_not_existing(self): @@ -1153,9 +1153,9 @@ class TestAllocation(tb.PlacementDbBaseTestCase): # Add same inventory to both source and destination host for cn in (cn_source, cn_dest): - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024, + tb.add_inventory(cn, orc.MEMORY_MB, 1024, min_unit=64, max_unit=1024, step_size=64, @@ -1176,25 +1176,25 @@ class TestAllocation(tb.PlacementDbBaseTestCase): context=self.ctx, consumer=inst_consumer, resource_provider=cn_source, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, used=1), rp_obj.Allocation( context=self.ctx, consumer=inst_consumer, resource_provider=cn_source, - resource_class=fields.ResourceClass.MEMORY_MB, + resource_class=orc.MEMORY_MB, used=256), rp_obj.Allocation( context=self.ctx, consumer=inst_consumer, resource_provider=cn_dest, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, used=1), rp_obj.Allocation( context=self.ctx, consumer=inst_consumer, resource_provider=cn_dest, - resource_class=fields.ResourceClass.MEMORY_MB, + 
resource_class=orc.MEMORY_MB, used=256), ]) alloc_list.replace_all() @@ -1226,13 +1226,13 @@ class TestAllocation(tb.PlacementDbBaseTestCase): context=self.ctx, consumer=inst_consumer, resource_provider=cn_dest, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, used=1), rp_obj.Allocation( context=self.ctx, consumer=inst_consumer, resource_provider=cn_dest, - resource_class=fields.ResourceClass.MEMORY_MB, + resource_class=orc.MEMORY_MB, used=256), ]) new_alloc_list.replace_all() @@ -1288,10 +1288,10 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): # Create one resource provider with 2 classes rp1_name = uuidsentinel.rp1_name rp1_uuid = uuidsentinel.rp1_uuid - rp1_class = fields.ResourceClass.DISK_GB + rp1_class = orc.DISK_GB rp1_used = 6 - rp2_class = fields.ResourceClass.IPV4_ADDRESS + rp2_class = orc.IPV4_ADDRESS rp2_used = 2 rp1 = self._create_provider(rp1_name, uuid=rp1_uuid) @@ -1341,12 +1341,12 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): # Create two resource providers rp1_name = uuidsentinel.rp1_name rp1_uuid = uuidsentinel.rp1_uuid - rp1_class = fields.ResourceClass.DISK_GB + rp1_class = orc.DISK_GB rp1_used = 6 rp2_name = uuidsentinel.rp2_name rp2_uuid = uuidsentinel.rp2_uuid - rp2_class = fields.ResourceClass.IPV4_ADDRESS + rp2_class = orc.IPV4_ADDRESS rp2_used = 2 rp1 = self._create_provider(rp1_name, uuid=rp1_uuid) @@ -1452,7 +1452,7 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): def _check_create_allocations(self, inventory_kwargs, bad_used, good_used): - rp_class = fields.ResourceClass.DISK_GB + rp_class = orc.DISK_GB rp = self._make_rp_and_inventory(resource_class=rp_class, **inventory_kwargs) @@ -1500,7 +1500,7 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): project=self.project_obj) inst_consumer.create() - rp_class = fields.ResourceClass.DISK_GB + rp_class = orc.DISK_GB target_rp = self._make_rp_and_inventory(resource_class=rp_class, max_unit=500) @@ -1628,9 +1628,9 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): full_rp = self._create_provider('full_rp') for rp in (empty_rp, full_rp): - tb.add_inventory(rp, fields.ResourceClass.VCPU, 24, + tb.add_inventory(rp, orc.VCPU, 24, allocation_ratio=16.0) - tb.add_inventory(rp, fields.ResourceClass.MEMORY_MB, 1024, + tb.add_inventory(rp, orc.MEMORY_MB, 1024, min_unit=64, max_unit=1024, step_size=64) @@ -1648,13 +1648,13 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): context=self.ctx, consumer=inst_consumer, resource_provider=full_rp, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, used=12), rp_obj.Allocation( context=self.ctx, consumer=inst_consumer, resource_provider=full_rp, - resource_class=fields.ResourceClass.MEMORY_MB, + resource_class=orc.MEMORY_MB, used=1024) ]) alloc_list.replace_all() @@ -1673,25 +1673,25 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): context=self.ctx, consumer=inst2_consumer, resource_provider=empty_rp, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, used=12), rp_obj.Allocation( context=self.ctx, consumer=inst2_consumer, resource_provider=empty_rp, - resource_class=fields.ResourceClass.MEMORY_MB, + resource_class=orc.MEMORY_MB, used=512), rp_obj.Allocation( context=self.ctx, consumer=inst2_consumer, resource_provider=full_rp, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, used=12), rp_obj.Allocation( context=self.ctx, consumer=inst2_consumer, 
resource_provider=full_rp, - resource_class=fields.ResourceClass.MEMORY_MB, + resource_class=orc.MEMORY_MB, used=512), ]) @@ -1714,9 +1714,9 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): # Create a single resource provider and give it some inventory. rp1 = self._create_provider('rp1') - tb.add_inventory(rp1, fields.ResourceClass.VCPU, 24, + tb.add_inventory(rp1, orc.VCPU, 24, allocation_ratio=16.0) - tb.add_inventory(rp1, fields.ResourceClass.MEMORY_MB, 1024, + tb.add_inventory(rp1, orc.MEMORY_MB, 1024, min_unit=64, max_unit=1024, step_size=64) @@ -1737,13 +1737,13 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase): context=self.ctx, consumer=inst_consumer, resource_provider=rp1, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, used=12), rp_obj.Allocation( context=self.ctx, consumer=inst_consumer, resource_provider=rp1, - resource_class=fields.ResourceClass.MEMORY_MB, + resource_class=orc.MEMORY_MB, used=1024) ]) @@ -1806,7 +1806,7 @@ class UsageListTestCase(tb.PlacementDbBaseTestCase): def test_get_all_one_allocation(self): db_rp, _ = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION) inv = rp_obj.Inventory(resource_provider=db_rp, - resource_class=fields.ResourceClass.DISK_GB, + resource_class=orc.DISK_GB, total=1024) inv.obj_set_defaults() inv_list = rp_obj.InventoryList(objects=[inv]) @@ -1816,24 +1816,24 @@ class UsageListTestCase(tb.PlacementDbBaseTestCase): self.ctx, db_rp.uuid) self.assertEqual(1, len(usage_list)) self.assertEqual(2, usage_list[0].usage) - self.assertEqual(fields.ResourceClass.DISK_GB, + self.assertEqual(orc.DISK_GB, usage_list[0].resource_class) def test_get_inventory_no_allocation(self): db_rp = self._create_provider('rp_no_inv') - tb.add_inventory(db_rp, fields.ResourceClass.DISK_GB, 1024) + tb.add_inventory(db_rp, orc.DISK_GB, 1024) usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid( self.ctx, db_rp.uuid) self.assertEqual(1, len(usage_list)) self.assertEqual(0, usage_list[0].usage) - self.assertEqual(fields.ResourceClass.DISK_GB, + self.assertEqual(orc.DISK_GB, usage_list[0].resource_class) def test_get_all_multiple_inv(self): db_rp = self._create_provider('rp_no_inv') - tb.add_inventory(db_rp, fields.ResourceClass.DISK_GB, 1024) - tb.add_inventory(db_rp, fields.ResourceClass.VCPU, 24) + tb.add_inventory(db_rp, orc.DISK_GB, 1024) + tb.add_inventory(db_rp, orc.VCPU, 24) usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid( self.ctx, db_rp.uuid) @@ -1848,7 +1848,7 @@ class ResourceClassListTestCase(tb.PlacementDbBaseTestCase): classes. 
""" rcs = rp_obj.ResourceClassList.get_all(self.ctx) - self.assertEqual(len(fields.ResourceClass.STANDARD), len(rcs)) + self.assertEqual(len(orc.STANDARDS), len(rcs)) def test_get_all_with_custom(self): """Test that if we add some custom resource classes, that we get a list @@ -1866,7 +1866,7 @@ class ResourceClassListTestCase(tb.PlacementDbBaseTestCase): conn.execute(ins) rcs = rp_obj.ResourceClassList.get_all(self.ctx) - expected_count = len(fields.ResourceClass.STANDARD) + len(customs) + expected_count = (len(orc.STANDARDS) + len(customs)) self.assertEqual(expected_count, len(rcs)) @@ -1875,13 +1875,11 @@ class ResourceClassTestCase(tb.PlacementDbBaseTestCase): def test_get_by_name(self): rc = rp_obj.ResourceClass.get_by_name( self.ctx, - fields.ResourceClass.VCPU - ) - vcpu_id = fields.ResourceClass.STANDARD.index( - fields.ResourceClass.VCPU + orc.VCPU ) + vcpu_id = orc.STANDARDS.index(orc.VCPU) self.assertEqual(vcpu_id, rc.id) - self.assertEqual(fields.ResourceClass.VCPU, rc.name) + self.assertEqual(orc.VCPU, rc.name) def test_get_by_name_not_found(self): self.assertRaises(exception.ResourceClassNotFound, @@ -1913,7 +1911,7 @@ class ResourceClassTestCase(tb.PlacementDbBaseTestCase): def test_create_duplicate_standard(self): rc = rp_obj.ResourceClass( context=self.ctx, - name=fields.ResourceClass.VCPU, + name=orc.VCPU, ) self.assertRaises(exception.ResourceClassExists, rc.create) @@ -2326,10 +2324,10 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase): """ def _requested_resources(self): - STANDARDS = fields.ResourceClass.STANDARD - VCPU_ID = STANDARDS.index(fields.ResourceClass.VCPU) - MEMORY_MB_ID = STANDARDS.index(fields.ResourceClass.MEMORY_MB) - DISK_GB_ID = STANDARDS.index(fields.ResourceClass.DISK_GB) + STANDARDS = orc.STANDARDS + VCPU_ID = STANDARDS.index(orc.VCPU) + MEMORY_MB_ID = STANDARDS.index(orc.MEMORY_MB) + DISK_GB_ID = STANDARDS.index(orc.DISK_GB) # The resources we will request resources = { VCPU_ID: 1, @@ -2352,15 +2350,15 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase): # DISK_GB. Both should be excluded from the result (one doesn't have # the requested resource; but neither is a sharing provider). 
for cn in (cn1, cn2): - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768, + tb.add_inventory(cn, orc.MEMORY_MB, 32768, min_unit=64, max_unit=32768, step_size=64, allocation_ratio=1.5) if cn is cn1: - tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 2000, + tb.add_inventory(cn, orc.DISK_GB, 2000, min_unit=10, max_unit=100, step_size=10) @@ -2369,7 +2367,7 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase): ss = self._create_provider('shared storage') # Give the shared storage pool some inventory of DISK_GB - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, + tb.add_inventory(ss, orc.DISK_GB, 2000, min_unit=10, max_unit=100, step_size=10) @@ -2382,7 +2380,7 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase): # the shared storage pool when we ask for DISK_GB got_ids = rp_obj._get_providers_with_shared_capacity( self.ctx, - fields.ResourceClass.STANDARD.index(fields.ResourceClass.DISK_GB), + orc.STANDARDS.index(orc.DISK_GB), 100, ) self.assertEqual([ss.id], got_ids) diff --git a/placement/tests/functional/fixtures/gabbits.py b/placement/tests/functional/fixtures/gabbits.py index bb074b075..9f970ca82 100644 --- a/placement/tests/functional/fixtures/gabbits.py +++ b/placement/tests/functional/fixtures/gabbits.py @@ -14,6 +14,7 @@ from __future__ import absolute_import import os from gabbi import fixture +import os_resource_classes as orc from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log.fixture import logging_error @@ -30,7 +31,6 @@ from placement.objects import project as project_obj from placement.objects import resource_provider as rp_obj from placement.objects import user as user_obj from placement import policies -from placement import rc_fields as fields from placement.tests import fixtures from placement.tests.functional.db import test_base as tb from placement.tests.functional.fixtures import capture @@ -257,21 +257,21 @@ class SharedStorageFixture(APIFixture): # Populate compute node inventory for VCPU and RAM for cn in (cn1, cn2): - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, + tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 128 * 1024, + tb.add_inventory(cn, orc.MEMORY_MB, 128 * 1024, allocation_ratio=1.5) tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2') # Populate shared storage provider with DISK_GB inventory and # mark it shared among any provider associated via aggregate - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, + tb.add_inventory(ss, orc.DISK_GB, 2000, reserved=100, allocation_ratio=1.0) tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE') # Populate PF inventory for VF for pf in (pf1_1, pf1_2, pf2_1, pf2_2): - tb.add_inventory(pf, fields.ResourceClass.SRIOV_NET_VF, + tb.add_inventory(pf, orc.SRIOV_NET_VF, 8, allocation_ratio=1.0) @@ -343,13 +343,13 @@ class NUMAAggregateFixture(APIFixture): # Populate compute node inventory for VCPU and RAM for numa in (numa1_1, numa1_2, numa2_1, numa2_2): - tb.add_inventory(numa, fields.ResourceClass.VCPU, 24, + tb.add_inventory(numa, orc.VCPU, 24, allocation_ratio=16.0) # Populate shared storage provider with DISK_GB inventory and # mark it shared among any provider associated via aggregate for ss in (ss1, ss2): - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, + tb.add_inventory(ss, orc.DISK_GB, 2000, reserved=100, allocation_ratio=1.0) tb.set_traits(ss, 
'MISC_SHARES_VIA_AGGREGATE') diff --git a/placement/tests/unit/objects/test_resource_provider.py b/placement/tests/unit/objects/test_resource_provider.py index e21fbca7f..beaf8bd15 100644 --- a/placement/tests/unit/objects/test_resource_provider.py +++ b/placement/tests/unit/objects/test_resource_provider.py @@ -11,6 +11,7 @@ # under the License. import mock +import os_resource_classes as orc from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils.fixture import uuidsentinel as uuids @@ -22,15 +23,14 @@ from placement import conf from placement import context from placement import exception from placement.objects import resource_provider -from placement import rc_fields as fields _RESOURCE_CLASS_NAME = 'DISK_GB' _RESOURCE_CLASS_ID = 2 -IPV4_ADDRESS_ID = fields.ResourceClass.STANDARD.index( - fields.ResourceClass.IPV4_ADDRESS) -VCPU_ID = fields.ResourceClass.STANDARD.index( - fields.ResourceClass.VCPU) +IPV4_ADDRESS_ID = orc.STANDARDS.index( + orc.IPV4_ADDRESS) +VCPU_ID = orc.STANDARDS.index( + orc.VCPU) _RESOURCE_PROVIDER_ID = 1 _RESOURCE_PROVIDER_UUID = uuids.resource_provider @@ -155,10 +155,10 @@ class TestProviderSummaryNoDB(_TestCase): def test_resource_class_names(self): psum = resource_provider.ProviderSummary(mock.sentinel.ctx) disk_psr = resource_provider.ProviderSummaryResource( - mock.sentinel.ctx, resource_class=fields.ResourceClass.DISK_GB, + mock.sentinel.ctx, resource_class=orc.DISK_GB, capacity=100, used=0) ram_psr = resource_provider.ProviderSummaryResource( - mock.sentinel.ctx, resource_class=fields.ResourceClass.MEMORY_MB, + mock.sentinel.ctx, resource_class=orc.MEMORY_MB, capacity=1024, used=0) psum.resources = [disk_psr, ram_psr] expected = set(['DISK_GB', 'MEMORY_MB']) @@ -243,23 +243,23 @@ class TestInventoryList(_TestCase): inv_list = resource_provider.InventoryList(objects=[ resource_provider.Inventory( resource_provider=rp, - resource_class=fields.ResourceClass.VCPU, + resource_class=orc.VCPU, total=24), resource_provider.Inventory( resource_provider=rp, - resource_class=fields.ResourceClass.MEMORY_MB, + resource_class=orc.MEMORY_MB, total=10240), ]) - found = inv_list.find(fields.ResourceClass.MEMORY_MB) + found = inv_list.find(orc.MEMORY_MB) self.assertIsNotNone(found) self.assertEqual(10240, found.total) - found = inv_list.find(fields.ResourceClass.VCPU) + found = inv_list.find(orc.VCPU) self.assertIsNotNone(found) self.assertEqual(24, found.total) - found = inv_list.find(fields.ResourceClass.DISK_GB) + found = inv_list.find(orc.DISK_GB) self.assertIsNone(found) # Try an integer resource class identifier... diff --git a/requirements.txt b/requirements.txt index 500399928..66fd1d852 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,5 +23,6 @@ oslo.i18n>=3.15.3 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.upgradecheck>=0.1.1 # Apache-2.0 oslo.versionedobjects>=1.31.2 # Apache-2.0 +os-resource-classes>=0.1.0 # Apache-2.0 os-traits>=0.4.0 # Apache-2.0 microversion-parse>=0.2.1 # Apache-2.0