Merge "Use os-resource-classes in placement"

This commit is contained in:
Zuul 2019-01-03 23:09:33 +00:00 committed by Gerrit Code Review
commit 1e213276bb
13 changed files with 735 additions and 863 deletions
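This change replaces the in-tree placement.rc_fields module with the os-resource-classes library and syncs the standard resource classes into the resource_classes table via ensure_resource_classes_sync(). A minimal sketch of the library surface the change relies on (values shown are inferred from how the diff uses the library, not verified against a specific release):

import os_resource_classes as orc

# Ordered list of standard resource class names; the ordering matters because
# each name's position becomes its database id during the sync added below.
print(orc.STANDARDS[:3])                 # e.g. ['VCPU', 'MEMORY_MB', 'DISK_GB']
print(orc.VCPU, orc.DISK_GB)             # plain string constants
print(orc.CUSTOM_NAMESPACE)              # 'CUSTOM_'
print(orc.is_custom('CUSTOM_IRON_NFV'))  # True
print(orc.is_custom(orc.MEMORY_MB))      # False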

View File

@ -41,6 +41,7 @@ mox3==0.20.0
msgpack-python==0.5.6
netaddr==0.7.18
netifaces==0.10.4
os-resource-classes==0.1.0
os-client-config==1.29.0
os-service-types==1.2.0
os-traits==0.4.0

View File

@ -97,6 +97,7 @@ def update_database():
"""
ctx = db_api.DbContext()
resource_provider.ensure_trait_sync(ctx)
resource_provider.ensure_resource_classes_sync(ctx)
resource_provider.ensure_rc_cache(ctx)

View File

@ -21,6 +21,7 @@ import random
# not be registered and there is no need to express VERSIONs nor handle
# obj_make_compatible.
import os_resource_classes as orc
import os_traits
from oslo_concurrency import lockutils
from oslo_db import api as oslo_db_api
@ -43,7 +44,6 @@ from placement.i18n import _
from placement.objects import consumer as consumer_obj
from placement.objects import project as project_obj
from placement.objects import user as user_obj
from placement import rc_fields
from placement import resource_class_cache as rc_cache
_TRAIT_TBL = models.Trait.__table__
@ -59,6 +59,8 @@ _PROJECT_TBL = models.Project.__table__
_USER_TBL = models.User.__table__
_CONSUMER_TBL = models.Consumer.__table__
_RC_CACHE = None
_RESOURCE_CLASSES_LOCK = 'resource_classes_sync'
_RESOURCE_CLASSES_SYNCED = False
_TRAIT_LOCK = 'trait_sync'
_TRAITS_SYNCED = False
@ -82,7 +84,7 @@ def ensure_rc_cache(ctx):
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
# Bug #1760322: If the caller raises an exception, we don't want the trait
# sync rolled back; so use an .independent transaction
@db_api.placement_context_manager.writer.independent
@db_api.placement_context_manager.writer
def _trait_sync(ctx):
"""Sync the os_traits symbols to the database.
@ -145,6 +147,47 @@ def ensure_trait_sync(ctx):
_TRAITS_SYNCED = True
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@db_api.placement_context_manager.writer
def _resource_classes_sync(ctx):
# Find the standard resource class names already present in the database.
sel = sa.select([_RC_TBL.c.name])
res = ctx.session.execute(sel).fetchall()
db_classes = [r[0] for r in res if not orc.is_custom(r[0])]
LOG.debug("Found existing resource classes in db: %s", db_classes)
# Determine those resource classes which are in os_resource_classes but not
# currently in the database, and insert them.
batch_args = [{'name': six.text_type(name), 'id': index}
for index, name in enumerate(orc.STANDARDS)
if name not in db_classes]
ins = _RC_TBL.insert()
if batch_args:
conn = ctx.session.connection()
if conn.engine.dialect.name == 'mysql':
# We need to do a literal insert of 0 to preserve the order
# of the resource class ids from the previous style of
# managing them. In some mysql settings a 0 is the same as
# "give me a default key".
conn.execute("SET SESSION SQL_MODE='NO_AUTO_VALUE_ON_ZERO'")
try:
ctx.session.execute(ins, batch_args)
LOG.info("Synced resource_classes from os_resource_classes: %s",
batch_args)
except db_exc.DBDuplicateEntry:
pass # some other process sync'd, just ignore
def ensure_resource_classes_sync(ctx):
global _RESOURCE_CLASSES_SYNCED
# If another thread is doing this work, wait for it to complete.
# When that thread is done _RESOURCE_CLASSES_SYNCED will be true in this
# thread and we'll simply return.
with lockutils.lock(_RESOURCE_CLASSES_LOCK):
if not _RESOURCE_CLASSES_SYNCED:
_resource_classes_sync(ctx)
_RESOURCE_CLASSES_SYNCED = True
def _usage_select(rc_ids):
usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id,
@ -310,7 +353,7 @@ def _add_inventory(context, rp, inventory):
"""Add one Inventory that wasn't already on the provider.
:raises `exception.ResourceClassNotFound` if inventory.resource_class
cannot be found in either the standard classes or the DB.
cannot be found in the DB.
"""
rc_id = _RC_CACHE.id_from_string(inventory.resource_class)
inv_list = InventoryList(objects=[inventory])
@ -324,7 +367,7 @@ def _update_inventory(context, rp, inventory):
"""Update an inventory already on the provider.
:raises `exception.ResourceClassNotFound` if inventory.resource_class
cannot be found in either the standard classes or the DB.
cannot be found in the DB.
"""
rc_id = _RC_CACHE.id_from_string(inventory.resource_class)
inv_list = InventoryList(objects=[inventory])
@ -339,7 +382,7 @@ def _delete_inventory(context, rp, resource_class):
"""Delete up to one Inventory of the given resource_class string.
:raises `exception.ResourceClassNotFound` if resource_class
cannot be found in either the standard classes or the DB.
cannot be found in the DB.
"""
rc_id = _RC_CACHE.id_from_string(resource_class)
if not _delete_inventory_from_provider(context, rp, [rc_id]):
@ -365,8 +408,7 @@ def _set_inventory(context, rp, inv_list):
allocations in between the time when this object was originally
read and the call to set the inventory.
:raises `exception.ResourceClassNotFound` if any resource class in any
inventory in inv_list cannot be found in either the standard
classes or the DB.
inventory in inv_list cannot be found in the DB.
:raises `exception.InventoryInUse` if we attempt to delete inventory
from a provider that has allocations for that resource class.
"""
@ -1558,7 +1600,7 @@ class Inventory(base.VersionedObject, base.TimestampedObject):
fields = {
'id': fields.IntegerField(read_only=True),
'resource_provider': fields.ObjectField('ResourceProvider'),
'resource_class': rc_fields.ResourceClassField(read_only=True),
'resource_class': fields.StringField(read_only=True),
'total': fields.NonNegativeIntegerField(),
'reserved': fields.NonNegativeIntegerField(default=0),
'min_unit': fields.NonNegativeIntegerField(default=1),
@ -1643,7 +1685,7 @@ class Allocation(base.VersionedObject, base.TimestampedObject):
'id': fields.IntegerField(),
'resource_provider': fields.ObjectField('ResourceProvider'),
'consumer': fields.ObjectField('Consumer', nullable=False),
'resource_class': rc_fields.ResourceClassField(),
'resource_class': fields.StringField(),
'used': fields.IntegerField(),
}
@ -2008,8 +2050,7 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
If there is not we roll back the entire set.
:raises `exception.ResourceClassNotFound` if any resource class in any
allocation in allocs cannot be found in either the standard
classes or the DB.
allocation in allocs cannot be found in the DB.
:raises `exception.InvalidAllocationCapacityExceeded` if any inventory
would be exhausted by the allocation.
:raises `InvalidAllocationConstraintsViolated` if any of the
@ -2225,7 +2266,7 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
class Usage(base.VersionedObject):
fields = {
'resource_class': rc_fields.ResourceClassField(read_only=True),
'resource_class': fields.StringField(read_only=True),
'usage': fields.NonNegativeIntegerField(),
}
@ -2321,7 +2362,7 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject):
fields = {
'id': fields.IntegerField(read_only=True),
'name': rc_fields.ResourceClassField(nullable=False),
'name': fields.StringField(nullable=False),
}
@staticmethod
@ -2355,7 +2396,7 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject):
"""
query = context.session.query(func.max(models.ResourceClass.id))
max_id = query.one()[0]
if not max_id:
if not max_id or max_id < ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID:
return ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID
else:
return max_id + 1
@ -2367,14 +2408,13 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject):
if 'name' not in self:
raise exception.ObjectActionError(action='create',
reason='name is required')
if self.name in rc_fields.ResourceClass.STANDARD:
if self.name in orc.STANDARDS:
raise exception.ResourceClassExists(resource_class=self.name)
if not self.name.startswith(rc_fields.ResourceClass.CUSTOM_NAMESPACE):
if not self.name.startswith(orc.CUSTOM_NAMESPACE):
raise exception.ObjectActionError(
action='create',
reason='name must start with ' +
rc_fields.ResourceClass.CUSTOM_NAMESPACE)
reason='name must start with ' + orc.CUSTOM_NAMESPACE)
updates = self.obj_get_changes()
# There is the possibility of a race when adding resource classes, as
@ -2420,9 +2460,8 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject):
if 'id' not in self:
raise exception.ObjectActionError(action='destroy',
reason='ID attribute not found')
# Never delete any standard resource class, since the standard resource
# classes don't even exist in the database table anyway.
if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS):
# Never delete any standard resource class.
if self.id < ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID:
raise exception.ResourceClassCannotDeleteStandard(
resource_class=self.name)
@ -2449,9 +2488,8 @@ class ResourceClass(base.VersionedObject, base.TimestampedObject):
raise exception.ObjectActionError(action='save',
reason='ID attribute not found')
updates = self.obj_get_changes()
# Never update any standard resource class, since the standard resource
# classes don't even exist in the database table anyway.
if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS):
# Never update any standard resource class.
if self.id < ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID:
raise exception.ResourceClassCannotUpdateStandard(
resource_class=self.name)
self._save(self._context, self.id, self.name, updates)
@ -2479,8 +2517,7 @@ class ResourceClassList(base.ObjectListBase, base.VersionedObject):
@staticmethod
@db_api.placement_context_manager.reader
def _get_all(context):
customs = list(context.session.query(models.ResourceClass).all())
return _RC_CACHE.STANDARDS + customs
return list(context.session.query(models.ResourceClass).all())
@classmethod
def get_all(cls, context):
@ -2631,7 +2668,7 @@ class AllocationRequestResource(base.VersionedObject):
fields = {
'resource_provider': fields.ObjectField('ResourceProvider'),
'resource_class': rc_fields.ResourceClassField(read_only=True),
'resource_class': fields.StringField(read_only=True),
'amount': fields.NonNegativeIntegerField(),
}
@ -2671,7 +2708,7 @@ class AllocationRequest(base.VersionedObject):
class ProviderSummaryResource(base.VersionedObject):
fields = {
'resource_class': rc_fields.ResourceClassField(read_only=True),
'resource_class': fields.StringField(read_only=True),
'capacity': fields.NonNegativeIntegerField(),
'used': fields.NonNegativeIntegerField(),
# Internal use only; not included when the object is serialized for

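A note on id assignment in _resource_classes_sync (earlier in this file's diff): each standard class is inserted with an id equal to its position in orc.STANDARDS, which keeps the ids identical to those implied by the old in-tree STANDARD tuple, while custom classes continue to start at MIN_CUSTOM_RESOURCE_CLASS_ID. A small illustrative sketch, assuming the ordering asserted by the tests later in this change:

import os_resource_classes as orc

# Mirrors the batch_args construction in _resource_classes_sync: the id of a
# standard class is simply its index in orc.STANDARDS.
batch_args = [{'name': name, 'id': index}
              for index, name in enumerate(orc.STANDARDS)]
print(batch_args[0])   # {'name': 'VCPU', 'id': 0}
print(batch_args[1])   # {'name': 'MEMORY_MB', 'id': 1}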
View File

@ -1,71 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Standard Resource Class Fields."""
# NOTE(cdent): This is kept as its own independent file as it is used by
# both the placement and nova sides of the placement interaction. On the
# placement side we don't want to import all the nova fields, nor all the
# nova objects (which are automatically loaded and registered if the
# nova.objects package is imported).
import re
from oslo_versionedobjects import fields
class ResourceClass(fields.StringField):
"""Classes of resources provided to consumers."""
CUSTOM_NAMESPACE = 'CUSTOM_'
"""All non-standard resource classes must begin with this string."""
VCPU = 'VCPU'
MEMORY_MB = 'MEMORY_MB'
DISK_GB = 'DISK_GB'
PCI_DEVICE = 'PCI_DEVICE'
SRIOV_NET_VF = 'SRIOV_NET_VF'
NUMA_SOCKET = 'NUMA_SOCKET'
NUMA_CORE = 'NUMA_CORE'
NUMA_THREAD = 'NUMA_THREAD'
NUMA_MEMORY_MB = 'NUMA_MEMORY_MB'
IPV4_ADDRESS = 'IPV4_ADDRESS'
VGPU = 'VGPU'
VGPU_DISPLAY_HEAD = 'VGPU_DISPLAY_HEAD'
# Standard resource class for network bandwidth egress measured in
# kilobits per second.
NET_BW_EGR_KILOBIT_PER_SEC = 'NET_BW_EGR_KILOBIT_PER_SEC'
# Standard resource class for network bandwidth ingress measured in
# kilobits per second.
NET_BW_IGR_KILOBIT_PER_SEC = 'NET_BW_IGR_KILOBIT_PER_SEC'
# The ordering here is relevant. If you must add a value, only
# append.
STANDARD = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF,
NUMA_SOCKET, NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB,
IPV4_ADDRESS, VGPU, VGPU_DISPLAY_HEAD,
NET_BW_EGR_KILOBIT_PER_SEC, NET_BW_IGR_KILOBIT_PER_SEC)
@classmethod
def normalize_name(cls, rc_name):
if rc_name is None:
return None
# Replace non-alphanumeric characters with underscores
norm_name = re.sub('[^0-9A-Za-z]+', '_', rc_name)
# Bug #1762789: Do .upper after replacing non alphanumerics.
norm_name = norm_name.upper()
norm_name = cls.CUSTOM_NAMESPACE + norm_name
return norm_name
class ResourceClassField(fields.AutoTypedField):
AUTO_TYPE = ResourceClass()

View File

@ -16,7 +16,6 @@ import sqlalchemy as sa
from placement.db.sqlalchemy import models
from placement import db_api
from placement import exception
from placement import rc_fields as fields
_RC_TBL = models.ResourceClass.__table__
_LOCKNAME = 'rc_cache'
@ -24,28 +23,21 @@ _LOCKNAME = 'rc_cache'
@db_api.placement_context_manager.reader
def _refresh_from_db(ctx, cache):
"""Grabs all custom resource classes from the DB table and populates the
"""Grabs all resource classes from the DB table and populates the
supplied cache object's internal integer and string identifier dicts.
:param cache: ResourceClassCache object to refresh.
"""
with db_api.placement_context_manager.reader.connection.using(ctx) as conn:
sel = sa.select([_RC_TBL.c.id, _RC_TBL.c.name, _RC_TBL.c.updated_at,
_RC_TBL.c.created_at])
res = conn.execute(sel).fetchall()
cache.id_cache = {r[1]: r[0] for r in res}
cache.str_cache = {r[0]: r[1] for r in res}
cache.all_cache = {r[1]: r for r in res}
sel = sa.select([_RC_TBL.c.id, _RC_TBL.c.name, _RC_TBL.c.updated_at,
_RC_TBL.c.created_at])
res = ctx.session.execute(sel).fetchall()
cache.id_cache = {r[1]: r[0] for r in res}
cache.str_cache = {r[0]: r[1] for r in res}
cache.all_cache = {r[1]: r for r in res}
class ResourceClassCache(object):
"""A cache of integer and string lookup values for resource classes."""
# List of dict of all standard resource classes, where every list item
# have a form {'id': <ID>, 'name': <NAME>}
STANDARDS = [{'id': fields.ResourceClass.STANDARD.index(s), 'name': s,
'updated_at': None, 'created_at': None}
for s in fields.ResourceClass.STANDARD]
"""A cache of integer and string lookup values for resource classes."""
def __init__(self, ctx):
"""Initialize the cache of resource class identifiers.
@ -66,29 +58,23 @@ class ResourceClassCache(object):
def id_from_string(self, rc_str):
"""Given a string representation of a resource class -- e.g. "DISK_GB"
or "IRON_SILVER" -- return the integer code for the resource class. For
standard resource classes, this integer code will match the list of
resource classes on the fields.ResourceClass field type. Other custom
resource classes will cause a DB lookup into the resource_classes
table, however the results of these DB lookups are cached since the
lookups are so frequent.
or "CUSTOM_IRON_SILVER" -- return the integer code for the resource
class by doing a DB lookup into the resource_classes table; however,
the results of these DB lookups are cached since the lookups are so
frequent.
:param rc_str: The string representation of the resource class to look
up a numeric identifier for.
:returns integer identifier for the resource class, or None, if no such
resource class was found in the list of standard resource
classes or the resource_classes database table.
:returns: Integer identifier for the resource class.
:raises `exception.ResourceClassNotFound` if rc_str cannot be found in
either the standard classes or the DB.
the DB.
"""
# First check the standard resource classes
if rc_str in fields.ResourceClass.STANDARD:
return fields.ResourceClass.STANDARD.index(rc_str)
rc_id = self.id_cache.get(rc_str)
if rc_id is not None:
return rc_id
# Otherwise, check the database table
with lockutils.lock(_LOCKNAME):
if rc_str in self.id_cache:
return self.id_cache[rc_str]
# Otherwise, check the database table
_refresh_from_db(self.ctx, self)
if rc_str in self.id_cache:
return self.id_cache[rc_str]
@ -101,22 +87,17 @@ class ResourceClassCache(object):
:param rc_str: The string representation of the resource class for
which to look up a resource_class.
:returns: dict representing the resource class fields, if the
resource class was found in the list of standard
resource classes or the resource_classes database table.
resource class was found in the resource_classes database
table.
:raises: `exception.ResourceClassNotFound` if rc_str cannot be found in
either the standard classes or the DB.
the DB.
"""
# First check the standard resource classes
if rc_str in fields.ResourceClass.STANDARD:
return {'id': fields.ResourceClass.STANDARD.index(rc_str),
'name': rc_str,
'updated_at': None,
'created_at': None}
rc_id_str = self.all_cache.get(rc_str)
if rc_id_str is not None:
return rc_id_str
# Otherwise, check the database table
with lockutils.lock(_LOCKNAME):
if rc_str in self.all_cache:
return self.all_cache[rc_str]
# Otherwise, check the database table
_refresh_from_db(self.ctx, self)
if rc_str in self.all_cache:
return self.all_cache[rc_str]
@ -125,29 +106,21 @@ class ResourceClassCache(object):
def string_from_id(self, rc_id):
"""The reverse of the id_from_string() method. Given a supplied numeric
identifier for a resource class, we look up the corresponding string
representation, either in the list of standard resource classes or via
a DB lookup. The results of these DB lookups are cached since the
lookups are so frequent.
representation, via a DB lookup. The results of these DB lookups are
cached since the lookups are so frequent.
:param rc_id: The numeric representation of the resource class to look
up a string identifier for.
:returns: string identifier for the resource class, or None, if no such
resource class was found in the list of standard resource
classes or the resource_classes database table.
:returns: String identifier for the resource class.
:raises `exception.ResourceClassNotFound` if rc_id cannot be found in
either the standard classes or the DB.
the DB.
"""
# First check the fields.ResourceClass.STANDARD values
try:
return fields.ResourceClass.STANDARD[rc_id]
except IndexError:
pass
rc_str = self.str_cache.get(rc_id)
if rc_str is not None:
return rc_str
# Otherwise, check the database table
with lockutils.lock(_LOCKNAME):
if rc_id in self.str_cache:
return self.str_cache[rc_id]
# Otherwise, check the database table
_refresh_from_db(self.ctx, self)
if rc_id in self.str_cache:
return self.str_cache[rc_id]
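The cache methods above all follow the same double-checked pattern: try the in-memory dict, then take the lock, re-check, refresh from the database, and only then give up. A minimal generic sketch of that pattern (standalone, with a plain dict standing in for the resource_classes table) looks like this:

import threading

_DB = {'CUSTOM_IRON_NFV': 1001}   # stand-in for the resource_classes table
_LOCK = threading.Lock()

class MiniCache(object):
    def __init__(self):
        self.id_cache = {}

    def _refresh_from_db(self):
        # In placement this is a SELECT against the resource_classes table.
        self.id_cache = dict(_DB)

    def id_from_string(self, rc_str):
        # Fast path: no lock, no DB access.
        rc_id = self.id_cache.get(rc_str)
        if rc_id is not None:
            return rc_id
        with _LOCK:
            # Another thread may have refreshed while we waited for the lock.
            if rc_str in self.id_cache:
                return self.id_cache[rc_str]
            self._refresh_from_db()
            if rc_str in self.id_cache:
                return self.id_cache[rc_str]
        raise KeyError(rc_str)   # placement raises ResourceClassNotFound here

print(MiniCache().id_from_string('CUSTOM_IRON_NFV'))   # 1001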

View File

@ -64,12 +64,15 @@ class Database(test_fixtures.GeneratesSchema, test_fixtures.AdHocDbFixture):
engine)
self.addCleanup(_reset_facade)
# Make sure db flags are correct at both the start and finish
# of the test.
self.addCleanup(self.cleanup)
resource_provider._TRAITS_SYNCED = False
resource_provider._RC_CACHE = None
self.cleanup()
# Sync traits and resource classes.
deploy.update_database()
self.addCleanup(self.cleanup)
def cleanup(self):
resource_provider._TRAITS_SYNCED = False
resource_provider._RESOURCE_CLASSES_SYNCED = False
resource_provider._RC_CACHE = None

File diff suppressed because it is too large

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import os_resource_classes as orc
from oslo_utils.fixture import uuidsentinel as uuids
import sqlalchemy as sa
@ -19,7 +20,6 @@ from placement.objects import consumer as consumer_obj
from placement.objects import project as project_obj
from placement.objects import resource_provider as rp_obj
from placement.objects import user as user_obj
from placement import rc_fields as fields
from placement.tests.functional import base
from placement.tests.functional.db import test_base as tb
@ -263,24 +263,24 @@ class DeleteConsumerIfNoAllocsTestCase(tb.PlacementDbBaseTestCase):
# Create some inventory that we will allocate
cn1 = self._create_provider('cn1')
tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8)
tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)
tb.add_inventory(cn1, orc.VCPU, 8)
tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
tb.add_inventory(cn1, orc.DISK_GB, 2000)
# Now allocate some of that inventory to two different consumers
allocs = [
rp_obj.Allocation(
self.ctx, consumer=c1, resource_provider=cn1,
resource_class=fields.ResourceClass.VCPU, used=1),
resource_class=orc.VCPU, used=1),
rp_obj.Allocation(
self.ctx, consumer=c1, resource_provider=cn1,
resource_class=fields.ResourceClass.MEMORY_MB, used=512),
resource_class=orc.MEMORY_MB, used=512),
rp_obj.Allocation(
self.ctx, consumer=c2, resource_provider=cn1,
resource_class=fields.ResourceClass.VCPU, used=1),
resource_class=orc.VCPU, used=1),
rp_obj.Allocation(
self.ctx, consumer=c2, resource_provider=cn1,
resource_class=fields.ResourceClass.MEMORY_MB, used=512),
resource_class=orc.MEMORY_MB, used=512),
]
alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
alloc_list.replace_all()
@ -297,10 +297,10 @@ class DeleteConsumerIfNoAllocsTestCase(tb.PlacementDbBaseTestCase):
allocs = [
rp_obj.Allocation(
self.ctx, consumer=c2, resource_provider=cn1,
resource_class=fields.ResourceClass.VCPU, used=0),
resource_class=orc.VCPU, used=0),
rp_obj.Allocation(
self.ctx, consumer=c2, resource_provider=cn1,
resource_class=fields.ResourceClass.MEMORY_MB, used=0),
resource_class=orc.MEMORY_MB, used=0),
]
alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
alloc_list.replace_all()

View File

@ -16,66 +16,38 @@ import mock
from oslo_utils import timeutils
from placement import exception
from placement import rc_fields as fields
from placement import resource_class_cache as rc_cache
from placement.tests.functional import base
class TestResourceClassCache(base.TestCase):
def setUp(self):
super(TestResourceClassCache, self).setUp()
db = self.placement_db
self.context = mock.Mock()
sess_mock = mock.Mock()
sess_mock.connection.side_effect = db.get_engine().connect
self.context.session = sess_mock
@mock.patch('sqlalchemy.select')
def test_rc_cache_std_no_db(self, sel_mock):
def test_rc_cache_std_db(self):
"""Test that looking up either an ID or a string in the resource class
cache for a standardized resource class does not result in a DB
call.
cache for a standardized resource class doesn't result in a DB call
once the cache is initialized.
"""
cache = rc_cache.ResourceClassCache(self.context)
rc_cache._refresh_from_db(self.context, cache)
self.assertEqual('VCPU', cache.string_from_id(0))
self.assertEqual('MEMORY_MB', cache.string_from_id(1))
self.assertEqual(0, cache.id_from_string('VCPU'))
self.assertEqual(1, cache.id_from_string('MEMORY_MB'))
with mock.patch('sqlalchemy.select') as sel_mock:
self.assertEqual('VCPU', cache.string_from_id(0))
self.assertEqual('MEMORY_MB', cache.string_from_id(1))
self.assertEqual(0, cache.id_from_string('VCPU'))
self.assertEqual(1, cache.id_from_string('MEMORY_MB'))
self.assertFalse(sel_mock.called)
def test_standards(self):
cache = rc_cache.ResourceClassCache(self.context)
standards = cache.STANDARDS
self.assertEqual(len(standards), len(fields.ResourceClass.STANDARD))
names = (rc['name'] for rc in standards)
for name in fields.ResourceClass.STANDARD:
self.assertIn(name, names)
cache = rc_cache.ResourceClassCache(self.context)
standards2 = cache.STANDARDS
self.assertEqual(id(standards), id(standards2))
def test_standards_have_time_fields(self):
cache = rc_cache.ResourceClassCache(self.context)
standards = cache.STANDARDS
first_standard = standards[0]
self.assertIn('updated_at', first_standard)
self.assertIn('created_at', first_standard)
self.assertIsNone(first_standard['updated_at'])
self.assertIsNone(first_standard['created_at'])
self.assertFalse(sel_mock.called)
def test_standard_has_time_fields(self):
cache = rc_cache.ResourceClassCache(self.context)
vcpu_class = cache.all_from_string('VCPU')
vcpu_class = dict(cache.all_from_string('VCPU'))
expected = {'id': 0, 'name': 'VCPU', 'updated_at': None,
'created_at': None}
self.assertEqual(expected, vcpu_class)
expected_fields = sorted(expected.keys())
self.assertEqual(expected_fields, sorted(vcpu_class.keys()))
self.assertEqual(0, vcpu_class['id'])
self.assertEqual('VCPU', vcpu_class['name'])
def test_rc_cache_custom(self):
"""Test that non-standard, custom resource classes hit the database and
@ -92,7 +64,7 @@ class TestResourceClassCache(base.TestCase):
cache.id_from_string, "IRON_NFV")
# Now add to the database and verify appropriate results...
with self.context.session.connection() as conn:
with self.placement_db.get_engine().connect() as conn:
ins_stmt = rc_cache._RC_TBL.insert().values(
id=1001,
name='IRON_NFV'
@ -117,8 +89,8 @@ class TestResourceClassCache(base.TestCase):
self.assertIsInstance(iron_nfv_class['created_at'], datetime.datetime)
# Update IRON_NFV (this is a no-op but will set updated_at)
with self.context.session.connection() as conn:
# NOTE(cdent): When using explicit SQL that names columns,
with self.placement_db.get_engine().connect() as conn:
# NOTE(cdent): When using explicit SQL that names columns,
# the automatic timestamp handling provided by the oslo_db
# TimestampMixin is not provided. created_at is a default
# but updated_at is an onupdate.

View File

@ -13,6 +13,7 @@
import functools
import mock
import os_resource_classes as orc
import os_traits
from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel
@ -23,7 +24,6 @@ from placement.db.sqlalchemy import models
from placement import exception
from placement.objects import consumer as consumer_obj
from placement.objects import resource_provider as rp_obj
from placement import rc_fields as fields
from placement.tests.functional.db import test_base as tb
@ -34,13 +34,13 @@ DISK_INVENTORY = dict(
max_unit=5,
step_size=1,
allocation_ratio=1.0,
resource_class=fields.ResourceClass.DISK_GB
resource_class=orc.DISK_GB
)
DISK_ALLOCATION = dict(
consumer_id=uuidsentinel.disk_consumer,
used=2,
resource_class=fields.ResourceClass.DISK_GB
resource_class=orc.DISK_GB
)
@ -241,7 +241,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
# Create some inventory in the grandchild, allocate some consumers to
# the grandchild and then attempt to delete the root provider and child
# provider, both of which should fail.
tb.add_inventory(grandchild_rp, fields.ResourceClass.VCPU, 1)
tb.add_inventory(grandchild_rp, orc.VCPU, 1)
# Check all providers returned when getting by root UUID
rps = rp_obj.ResourceProviderList.get_all_by_filters(
@ -362,7 +362,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid)
alloc_list = self.allocate_from_provider(
grandchild_rp, fields.ResourceClass.VCPU, 1)
grandchild_rp, orc.VCPU, 1)
self.assertRaises(exception.CannotDeleteParentResourceProvider,
root_rp.destroy)
@ -557,12 +557,12 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
def test_set_inventory_over_capacity(self, mock_log):
rp = self._create_provider(uuidsentinel.rp_name)
disk_inv = tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 2048,
disk_inv = tb.add_inventory(rp, orc.DISK_GB, 2048,
reserved=15,
min_unit=10,
max_unit=600,
step_size=10)
vcpu_inv = tb.add_inventory(rp, fields.ResourceClass.VCPU, 12,
vcpu_inv = tb.add_inventory(rp, orc.VCPU, 12,
allocation_ratio=16.0)
self.assertFalse(mock_log.warning.called)
@ -582,13 +582,13 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
rp = self._create_provider(uuidsentinel.rp_name)
saved_generation = rp.generation
disk_inv = tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 1024,
disk_inv = tb.add_inventory(rp, orc.DISK_GB, 1024,
reserved=15,
min_unit=10,
max_unit=100,
step_size=10)
vcpu_inv = tb.add_inventory(rp, fields.ResourceClass.VCPU, 12,
vcpu_inv = tb.add_inventory(rp, orc.VCPU, 12,
allocation_ratio=16.0)
# generation has bumped once for each add
@ -599,8 +599,8 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
self.ctx, rp)
self.assertEqual(2, len(new_inv_list))
resource_classes = [inv.resource_class for inv in new_inv_list]
self.assertIn(fields.ResourceClass.VCPU, resource_classes)
self.assertIn(fields.ResourceClass.DISK_GB, resource_classes)
self.assertIn(orc.VCPU, resource_classes)
self.assertIn(orc.DISK_GB, resource_classes)
# reset list to just disk_inv
inv_list = rp_obj.InventoryList(objects=[disk_inv])
@ -614,14 +614,14 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
self.ctx, rp)
self.assertEqual(1, len(new_inv_list))
resource_classes = [inv.resource_class for inv in new_inv_list]
self.assertNotIn(fields.ResourceClass.VCPU, resource_classes)
self.assertIn(fields.ResourceClass.DISK_GB, resource_classes)
self.assertNotIn(orc.VCPU, resource_classes)
self.assertIn(orc.DISK_GB, resource_classes)
self.assertEqual(1024, new_inv_list[0].total)
# update existing disk inv to new settings
disk_inv = rp_obj.Inventory(
resource_provider=rp,
resource_class=fields.ResourceClass.DISK_GB,
resource_class=orc.DISK_GB,
total=2048,
reserved=15,
min_unit=10,
@ -640,7 +640,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
self.assertEqual(2048, new_inv_list[0].total)
# delete inventory
rp.delete_inventory(fields.ResourceClass.DISK_GB)
rp.delete_inventory(orc.DISK_GB)
# generation has bumped
self.assertEqual(saved_generation + 1, rp.generation)
@ -648,10 +648,10 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
new_inv_list = rp_obj.InventoryList.get_all_by_resource_provider(
self.ctx, rp)
result = new_inv_list.find(fields.ResourceClass.DISK_GB)
result = new_inv_list.find(orc.DISK_GB)
self.assertIsNone(result)
self.assertRaises(exception.NotFound, rp.delete_inventory,
fields.ResourceClass.DISK_GB)
orc.DISK_GB)
# check inventory list is empty
inv_list = rp_obj.InventoryList.get_all_by_resource_provider(
@ -718,7 +718,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
new_total = 1
disk_inv = rp_obj.Inventory(
resource_provider=rp,
resource_class=fields.ResourceClass.DISK_GB, total=new_total)
resource_class=orc.DISK_GB, total=new_total)
disk_inv.obj_set_defaults()
rp.update_inventory(disk_inv)
@ -748,7 +748,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
for rp in (rp1, rp2):
tb.add_inventory(rp, DISK_INVENTORY['resource_class'],
DISK_INVENTORY['total'])
tb.add_inventory(rp, fields.ResourceClass.IPV4_ADDRESS, 10,
tb.add_inventory(rp, orc.IPV4_ADDRESS, 10,
max_unit=2)
# Get inventories for the first resource provider and validate
@ -780,65 +780,65 @@ class ResourceProviderListTestCase(tb.PlacementDbBaseTestCase):
def test_get_all_by_filters_with_resources(self):
for rp_i in ['1', '2']:
rp = self._create_provider('rp_name_' + rp_i)
tb.add_inventory(rp, fields.ResourceClass.VCPU, 2)
tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 1024,
tb.add_inventory(rp, orc.VCPU, 2)
tb.add_inventory(rp, orc.DISK_GB, 1024,
reserved=2)
# Write a specific inventory for testing min/max units and steps
tb.add_inventory(rp, fields.ResourceClass.MEMORY_MB, 1024,
tb.add_inventory(rp, orc.MEMORY_MB, 1024,
reserved=2, min_unit=2, max_unit=4, step_size=2)
# Create the VCPU allocation only for the first RP
if rp_i != '1':
continue
self.allocate_from_provider(rp, fields.ResourceClass.VCPU, used=1)
self.allocate_from_provider(rp, orc.VCPU, used=1)
# Both RPs should accept that request given the only current allocation
# for the first RP is leaving one VCPU
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.VCPU: 1}})
self.ctx, {'resources': {orc.VCPU: 1}})
self.assertEqual(2, len(resource_providers))
# Now, when asking for 2 VCPUs, only the second RP should accept that
# given the current allocation for the first RP
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.VCPU: 2}})
self.ctx, {'resources': {orc.VCPU: 2}})
self.assertEqual(1, len(resource_providers))
# Adding a second resource request should be okay for the 2nd RP
# given it has enough disk but we also need to make sure that the
# first RP is not acceptable because of the VCPU request
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.VCPU: 2,
fields.ResourceClass.DISK_GB: 1022}})
self.ctx, {'resources': {orc.VCPU: 2,
orc.DISK_GB: 1022}})
self.assertEqual(1, len(resource_providers))
# Now, we are asking for both disk and VCPU resources that all the RPs
# can't accept (as the 2nd RP is having a reserved size)
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.VCPU: 2,
fields.ResourceClass.DISK_GB: 1024}})
self.ctx, {'resources': {orc.VCPU: 2,
orc.DISK_GB: 1024}})
self.assertEqual(0, len(resource_providers))
# We also want to verify that asking for a specific RP can also be
# checking the resource usage.
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'name': u'rp_name_1',
'resources': {fields.ResourceClass.VCPU: 1}})
'resources': {orc.VCPU: 1}})
self.assertEqual(1, len(resource_providers))
# Let's verify that the min and max units are checked too
# Case 1: amount is in between min and max and modulo step_size
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 2}})
self.ctx, {'resources': {orc.MEMORY_MB: 2}})
self.assertEqual(2, len(resource_providers))
# Case 2: amount is less than min_unit
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 1}})
self.ctx, {'resources': {orc.MEMORY_MB: 1}})
self.assertEqual(0, len(resource_providers))
# Case 3: amount is more than min_unit
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 5}})
self.ctx, {'resources': {orc.MEMORY_MB: 5}})
self.assertEqual(0, len(resource_providers))
# Case 4: amount is not modulo step_size
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 3}})
self.ctx, {'resources': {orc.MEMORY_MB: 3}})
self.assertEqual(0, len(resource_providers))
def test_get_all_by_filters_with_resources_not_existing(self):
@ -1153,9 +1153,9 @@ class TestAllocation(tb.PlacementDbBaseTestCase):
# Add same inventory to both source and destination host
for cn in (cn_source, cn_dest):
tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
tb.add_inventory(cn, orc.VCPU, 24,
allocation_ratio=16.0)
tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
tb.add_inventory(cn, orc.MEMORY_MB, 1024,
min_unit=64,
max_unit=1024,
step_size=64,
@ -1176,25 +1176,25 @@ class TestAllocation(tb.PlacementDbBaseTestCase):
context=self.ctx,
consumer=inst_consumer,
resource_provider=cn_source,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
used=1),
rp_obj.Allocation(
context=self.ctx,
consumer=inst_consumer,
resource_provider=cn_source,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
used=256),
rp_obj.Allocation(
context=self.ctx,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
used=1),
rp_obj.Allocation(
context=self.ctx,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
used=256),
])
alloc_list.replace_all()
@ -1226,13 +1226,13 @@ class TestAllocation(tb.PlacementDbBaseTestCase):
context=self.ctx,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
used=1),
rp_obj.Allocation(
context=self.ctx,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
used=256),
])
new_alloc_list.replace_all()
@ -1288,10 +1288,10 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Create one resource provider with 2 classes
rp1_name = uuidsentinel.rp1_name
rp1_uuid = uuidsentinel.rp1_uuid
rp1_class = fields.ResourceClass.DISK_GB
rp1_class = orc.DISK_GB
rp1_used = 6
rp2_class = fields.ResourceClass.IPV4_ADDRESS
rp2_class = orc.IPV4_ADDRESS
rp2_used = 2
rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
@ -1341,12 +1341,12 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Create two resource providers
rp1_name = uuidsentinel.rp1_name
rp1_uuid = uuidsentinel.rp1_uuid
rp1_class = fields.ResourceClass.DISK_GB
rp1_class = orc.DISK_GB
rp1_used = 6
rp2_name = uuidsentinel.rp2_name
rp2_uuid = uuidsentinel.rp2_uuid
rp2_class = fields.ResourceClass.IPV4_ADDRESS
rp2_class = orc.IPV4_ADDRESS
rp2_used = 2
rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
@ -1452,7 +1452,7 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
def _check_create_allocations(self, inventory_kwargs,
bad_used, good_used):
rp_class = fields.ResourceClass.DISK_GB
rp_class = orc.DISK_GB
rp = self._make_rp_and_inventory(resource_class=rp_class,
**inventory_kwargs)
@ -1500,7 +1500,7 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
project=self.project_obj)
inst_consumer.create()
rp_class = fields.ResourceClass.DISK_GB
rp_class = orc.DISK_GB
target_rp = self._make_rp_and_inventory(resource_class=rp_class,
max_unit=500)
@ -1628,9 +1628,9 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
full_rp = self._create_provider('full_rp')
for rp in (empty_rp, full_rp):
tb.add_inventory(rp, fields.ResourceClass.VCPU, 24,
tb.add_inventory(rp, orc.VCPU, 24,
allocation_ratio=16.0)
tb.add_inventory(rp, fields.ResourceClass.MEMORY_MB, 1024,
tb.add_inventory(rp, orc.MEMORY_MB, 1024,
min_unit=64,
max_unit=1024,
step_size=64)
@ -1648,13 +1648,13 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
context=self.ctx,
consumer=inst_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
used=12),
rp_obj.Allocation(
context=self.ctx,
consumer=inst_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
used=1024)
])
alloc_list.replace_all()
@ -1673,25 +1673,25 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
context=self.ctx,
consumer=inst2_consumer,
resource_provider=empty_rp,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
used=12),
rp_obj.Allocation(
context=self.ctx,
consumer=inst2_consumer,
resource_provider=empty_rp,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
used=512),
rp_obj.Allocation(
context=self.ctx,
consumer=inst2_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
used=12),
rp_obj.Allocation(
context=self.ctx,
consumer=inst2_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
used=512),
])
@ -1714,9 +1714,9 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Create a single resource provider and give it some inventory.
rp1 = self._create_provider('rp1')
tb.add_inventory(rp1, fields.ResourceClass.VCPU, 24,
tb.add_inventory(rp1, orc.VCPU, 24,
allocation_ratio=16.0)
tb.add_inventory(rp1, fields.ResourceClass.MEMORY_MB, 1024,
tb.add_inventory(rp1, orc.MEMORY_MB, 1024,
min_unit=64,
max_unit=1024,
step_size=64)
@ -1737,13 +1737,13 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
context=self.ctx,
consumer=inst_consumer,
resource_provider=rp1,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
used=12),
rp_obj.Allocation(
context=self.ctx,
consumer=inst_consumer,
resource_provider=rp1,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
used=1024)
])
@ -1806,7 +1806,7 @@ class UsageListTestCase(tb.PlacementDbBaseTestCase):
def test_get_all_one_allocation(self):
db_rp, _ = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION)
inv = rp_obj.Inventory(resource_provider=db_rp,
resource_class=fields.ResourceClass.DISK_GB,
resource_class=orc.DISK_GB,
total=1024)
inv.obj_set_defaults()
inv_list = rp_obj.InventoryList(objects=[inv])
@ -1816,24 +1816,24 @@ class UsageListTestCase(tb.PlacementDbBaseTestCase):
self.ctx, db_rp.uuid)
self.assertEqual(1, len(usage_list))
self.assertEqual(2, usage_list[0].usage)
self.assertEqual(fields.ResourceClass.DISK_GB,
self.assertEqual(orc.DISK_GB,
usage_list[0].resource_class)
def test_get_inventory_no_allocation(self):
db_rp = self._create_provider('rp_no_inv')
tb.add_inventory(db_rp, fields.ResourceClass.DISK_GB, 1024)
tb.add_inventory(db_rp, orc.DISK_GB, 1024)
usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid(
self.ctx, db_rp.uuid)
self.assertEqual(1, len(usage_list))
self.assertEqual(0, usage_list[0].usage)
self.assertEqual(fields.ResourceClass.DISK_GB,
self.assertEqual(orc.DISK_GB,
usage_list[0].resource_class)
def test_get_all_multiple_inv(self):
db_rp = self._create_provider('rp_no_inv')
tb.add_inventory(db_rp, fields.ResourceClass.DISK_GB, 1024)
tb.add_inventory(db_rp, fields.ResourceClass.VCPU, 24)
tb.add_inventory(db_rp, orc.DISK_GB, 1024)
tb.add_inventory(db_rp, orc.VCPU, 24)
usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid(
self.ctx, db_rp.uuid)
@ -1848,7 +1848,7 @@ class ResourceClassListTestCase(tb.PlacementDbBaseTestCase):
classes.
"""
rcs = rp_obj.ResourceClassList.get_all(self.ctx)
self.assertEqual(len(fields.ResourceClass.STANDARD), len(rcs))
self.assertEqual(len(orc.STANDARDS), len(rcs))
def test_get_all_with_custom(self):
"""Test that if we add some custom resource classes, that we get a list
@ -1866,7 +1866,7 @@ class ResourceClassListTestCase(tb.PlacementDbBaseTestCase):
conn.execute(ins)
rcs = rp_obj.ResourceClassList.get_all(self.ctx)
expected_count = len(fields.ResourceClass.STANDARD) + len(customs)
expected_count = (len(orc.STANDARDS) + len(customs))
self.assertEqual(expected_count, len(rcs))
@ -1875,13 +1875,11 @@ class ResourceClassTestCase(tb.PlacementDbBaseTestCase):
def test_get_by_name(self):
rc = rp_obj.ResourceClass.get_by_name(
self.ctx,
fields.ResourceClass.VCPU
)
vcpu_id = fields.ResourceClass.STANDARD.index(
fields.ResourceClass.VCPU
orc.VCPU
)
vcpu_id = orc.STANDARDS.index(orc.VCPU)
self.assertEqual(vcpu_id, rc.id)
self.assertEqual(fields.ResourceClass.VCPU, rc.name)
self.assertEqual(orc.VCPU, rc.name)
def test_get_by_name_not_found(self):
self.assertRaises(exception.ResourceClassNotFound,
@ -1913,7 +1911,7 @@ class ResourceClassTestCase(tb.PlacementDbBaseTestCase):
def test_create_duplicate_standard(self):
rc = rp_obj.ResourceClass(
context=self.ctx,
name=fields.ResourceClass.VCPU,
name=orc.VCPU,
)
self.assertRaises(exception.ResourceClassExists, rc.create)
@ -2326,10 +2324,10 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase):
"""
def _requested_resources(self):
STANDARDS = fields.ResourceClass.STANDARD
VCPU_ID = STANDARDS.index(fields.ResourceClass.VCPU)
MEMORY_MB_ID = STANDARDS.index(fields.ResourceClass.MEMORY_MB)
DISK_GB_ID = STANDARDS.index(fields.ResourceClass.DISK_GB)
STANDARDS = orc.STANDARDS
VCPU_ID = STANDARDS.index(orc.VCPU)
MEMORY_MB_ID = STANDARDS.index(orc.MEMORY_MB)
DISK_GB_ID = STANDARDS.index(orc.DISK_GB)
# The resources we will request
resources = {
VCPU_ID: 1,
@ -2352,15 +2350,15 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase):
# DISK_GB. Both should be excluded from the result (one doesn't have
# the requested resource; but neither is a sharing provider).
for cn in (cn1, cn2):
tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
tb.add_inventory(cn, orc.VCPU, 24,
allocation_ratio=16.0)
tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768,
tb.add_inventory(cn, orc.MEMORY_MB, 32768,
min_unit=64,
max_unit=32768,
step_size=64,
allocation_ratio=1.5)
if cn is cn1:
tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 2000,
tb.add_inventory(cn, orc.DISK_GB, 2000,
min_unit=10,
max_unit=100,
step_size=10)
@ -2369,7 +2367,7 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase):
ss = self._create_provider('shared storage')
# Give the shared storage pool some inventory of DISK_GB
tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
tb.add_inventory(ss, orc.DISK_GB, 2000,
min_unit=10,
max_unit=100,
step_size=10)
@ -2382,7 +2380,7 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase):
# the shared storage pool when we ask for DISK_GB
got_ids = rp_obj._get_providers_with_shared_capacity(
self.ctx,
fields.ResourceClass.STANDARD.index(fields.ResourceClass.DISK_GB),
orc.STANDARDS.index(orc.DISK_GB),
100,
)
self.assertEqual([ss.id], got_ids)

View File

@ -14,6 +14,7 @@ from __future__ import absolute_import
import os
from gabbi import fixture
import os_resource_classes as orc
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log.fixture import logging_error
@ -30,7 +31,6 @@ from placement.objects import project as project_obj
from placement.objects import resource_provider as rp_obj
from placement.objects import user as user_obj
from placement import policies
from placement import rc_fields as fields
from placement.tests import fixtures
from placement.tests.functional.db import test_base as tb
from placement.tests.functional.fixtures import capture
@ -257,21 +257,21 @@ class SharedStorageFixture(APIFixture):
# Populate compute node inventory for VCPU and RAM
for cn in (cn1, cn2):
tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
tb.add_inventory(cn, orc.VCPU, 24,
allocation_ratio=16.0)
tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 128 * 1024,
tb.add_inventory(cn, orc.MEMORY_MB, 128 * 1024,
allocation_ratio=1.5)
tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')
# Populate shared storage provider with DISK_GB inventory and
# mark it shared among any provider associated via aggregate
tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
tb.add_inventory(ss, orc.DISK_GB, 2000,
reserved=100, allocation_ratio=1.0)
tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')
# Populate PF inventory for VF
for pf in (pf1_1, pf1_2, pf2_1, pf2_2):
tb.add_inventory(pf, fields.ResourceClass.SRIOV_NET_VF,
tb.add_inventory(pf, orc.SRIOV_NET_VF,
8, allocation_ratio=1.0)
@ -343,13 +343,13 @@ class NUMAAggregateFixture(APIFixture):
# Populate compute node inventory for VCPU and RAM
for numa in (numa1_1, numa1_2, numa2_1, numa2_2):
tb.add_inventory(numa, fields.ResourceClass.VCPU, 24,
tb.add_inventory(numa, orc.VCPU, 24,
allocation_ratio=16.0)
# Populate shared storage provider with DISK_GB inventory and
# mark it shared among any provider associated via aggregate
for ss in (ss1, ss2):
tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
tb.add_inventory(ss, orc.DISK_GB, 2000,
reserved=100, allocation_ratio=1.0)
tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')

View File

@ -11,6 +11,7 @@
# under the License.
import mock
import os_resource_classes as orc
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils.fixture import uuidsentinel as uuids
@ -22,15 +23,14 @@ from placement import conf
from placement import context
from placement import exception
from placement.objects import resource_provider
from placement import rc_fields as fields
_RESOURCE_CLASS_NAME = 'DISK_GB'
_RESOURCE_CLASS_ID = 2
IPV4_ADDRESS_ID = fields.ResourceClass.STANDARD.index(
fields.ResourceClass.IPV4_ADDRESS)
VCPU_ID = fields.ResourceClass.STANDARD.index(
fields.ResourceClass.VCPU)
IPV4_ADDRESS_ID = orc.STANDARDS.index(
orc.IPV4_ADDRESS)
VCPU_ID = orc.STANDARDS.index(
orc.VCPU)
_RESOURCE_PROVIDER_ID = 1
_RESOURCE_PROVIDER_UUID = uuids.resource_provider
@ -155,10 +155,10 @@ class TestProviderSummaryNoDB(_TestCase):
def test_resource_class_names(self):
psum = resource_provider.ProviderSummary(mock.sentinel.ctx)
disk_psr = resource_provider.ProviderSummaryResource(
mock.sentinel.ctx, resource_class=fields.ResourceClass.DISK_GB,
mock.sentinel.ctx, resource_class=orc.DISK_GB,
capacity=100, used=0)
ram_psr = resource_provider.ProviderSummaryResource(
mock.sentinel.ctx, resource_class=fields.ResourceClass.MEMORY_MB,
mock.sentinel.ctx, resource_class=orc.MEMORY_MB,
capacity=1024, used=0)
psum.resources = [disk_psr, ram_psr]
expected = set(['DISK_GB', 'MEMORY_MB'])
@ -243,23 +243,23 @@ class TestInventoryList(_TestCase):
inv_list = resource_provider.InventoryList(objects=[
resource_provider.Inventory(
resource_provider=rp,
resource_class=fields.ResourceClass.VCPU,
resource_class=orc.VCPU,
total=24),
resource_provider.Inventory(
resource_provider=rp,
resource_class=fields.ResourceClass.MEMORY_MB,
resource_class=orc.MEMORY_MB,
total=10240),
])
found = inv_list.find(fields.ResourceClass.MEMORY_MB)
found = inv_list.find(orc.MEMORY_MB)
self.assertIsNotNone(found)
self.assertEqual(10240, found.total)
found = inv_list.find(fields.ResourceClass.VCPU)
found = inv_list.find(orc.VCPU)
self.assertIsNotNone(found)
self.assertEqual(24, found.total)
found = inv_list.find(fields.ResourceClass.DISK_GB)
found = inv_list.find(orc.DISK_GB)
self.assertIsNone(found)
# Try an integer resource class identifier...

View File

@ -23,5 +23,6 @@ oslo.i18n>=3.15.3 # Apache-2.0
oslo.middleware>=3.31.0 # Apache-2.0
oslo.upgradecheck>=0.1.1 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
os-resource-classes>=0.1.0 # Apache-2.0
os-traits>=0.4.0 # Apache-2.0
microversion-parse>=0.2.1 # Apache-2.0