Merge "Switch to using os-resource-classes"

This commit is contained in:
Zuul 2019-02-14 19:08:52 +00:00 committed by Gerrit Code Review
commit 38cefd9ece
28 changed files with 188 additions and 195 deletions

View File

@ -68,6 +68,7 @@ numpy==1.14.2
openstacksdk==0.12.0
os-brick==2.6.1
os-client-config==1.29.0
os-resource-classes==0.1.0
os-service-types==1.2.0
os-traits==0.4.0
os-vif==1.14.0

View File

@ -22,6 +22,7 @@ import collections
import copy
from keystoneauth1 import exceptions as ks_exc
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
import retrying
@ -40,7 +41,6 @@ from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rc_fields as fields
from nova import rpc
from nova.scheduler.client import query
from nova.scheduler.client import report
@ -101,22 +101,22 @@ def _normalize_inventory_from_cn_obj(inv_data, cn):
returned from virt driver's get_inventory() method
:param compute_node: `objects.ComputeNode` describing the compute node
"""
if fields.ResourceClass.VCPU in inv_data:
cpu_inv = inv_data[fields.ResourceClass.VCPU]
if orc.VCPU in inv_data:
cpu_inv = inv_data[orc.VCPU]
if 'allocation_ratio' not in cpu_inv:
cpu_inv['allocation_ratio'] = cn.cpu_allocation_ratio
if 'reserved' not in cpu_inv:
cpu_inv['reserved'] = CONF.reserved_host_cpus
if fields.ResourceClass.MEMORY_MB in inv_data:
mem_inv = inv_data[fields.ResourceClass.MEMORY_MB]
if orc.MEMORY_MB in inv_data:
mem_inv = inv_data[orc.MEMORY_MB]
if 'allocation_ratio' not in mem_inv:
mem_inv['allocation_ratio'] = cn.ram_allocation_ratio
if 'reserved' not in mem_inv:
mem_inv['reserved'] = CONF.reserved_host_memory_mb
if fields.ResourceClass.DISK_GB in inv_data:
disk_inv = inv_data[fields.ResourceClass.DISK_GB]
if orc.DISK_GB in inv_data:
disk_inv = inv_data[orc.DISK_GB]
if 'allocation_ratio' not in disk_inv:
disk_inv['allocation_ratio'] = cn.disk_allocation_ratio
if 'reserved' not in disk_inv:

View File

@ -22,6 +22,7 @@ import math
import traceback
import netifaces
import os_resource_classes as orc
from oslo_log import log
from oslo_serialization import jsonutils
import six
@ -48,7 +49,6 @@ from nova.notifications.objects import server_group as sg_notification
from nova.notifications.objects import volume as volume_notification
from nova import objects
from nova.objects import fields
from nova import rc_fields
from nova import rpc
from nova import safe_utils
from nova import utils
@ -1364,7 +1364,7 @@ def compute_node_to_inventory_dict(compute_node):
# NOTE(jaypipes): Ironic virt driver will return 0 values for vcpus,
# memory_mb and disk_gb if the Ironic node is not available/operable
if compute_node.vcpus > 0:
result[rc_fields.ResourceClass.VCPU] = {
result[orc.VCPU] = {
'total': compute_node.vcpus,
'reserved': CONF.reserved_host_cpus,
'min_unit': 1,
@ -1373,7 +1373,7 @@ def compute_node_to_inventory_dict(compute_node):
'allocation_ratio': compute_node.cpu_allocation_ratio,
}
if compute_node.memory_mb > 0:
result[rc_fields.ResourceClass.MEMORY_MB] = {
result[orc.MEMORY_MB] = {
'total': compute_node.memory_mb,
'reserved': CONF.reserved_host_memory_mb,
'min_unit': 1,
@ -1386,7 +1386,7 @@ def compute_node_to_inventory_dict(compute_node):
# or start tracking DISK_MB.
reserved_disk_gb = convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb)
result[rc_fields.ResourceClass.DISK_GB] = {
result[orc.DISK_GB] = {
'total': compute_node.local_gb,
'reserved': reserved_disk_gb,
'min_unit': 1,

View File

@ -12,11 +12,10 @@
# under the License.
"""Standard Resource Class Fields."""
# NOTE(cdent): This is kept as its own independent file as it is used by
# both the placement and nova sides of the placement interaction. On the
# placement side we don't want to import all the nova fields, nor all the
# nova objects (which are automatically loaded and registered if the
# nova.objects package is imported).
# NOTE(cdent): This file is only used by the placement code within
# nova. Other uses of resource classes in nova make use of the
# os-resource-classes library. The placement code within nova
# continues to use this so that that code can remain unchanged.
import re

View File

@ -22,6 +22,7 @@ import re
import time
from keystoneauth1 import exceptions as ks_exc
import os_resource_classes as orc
import os_traits
from oslo_log import log as logging
from oslo_middleware import request_id
@ -34,7 +35,6 @@ import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova import rc_fields as fields
from nova.scheduler import utils as scheduler_utils
from nova import utils
@ -1200,7 +1200,7 @@ class SchedulerReportClient(object):
# resource class.
version = '1.7'
to_ensure = set(n for n in names
if n.startswith(fields.ResourceClass.CUSTOM_NAMESPACE))
if n.startswith(orc.CUSTOM_NAMESPACE))
for name in to_ensure:
# no payload on the put request

View File

@ -19,6 +19,7 @@ import re
import sys
import traceback
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from six.moves.urllib import parse
@ -32,7 +33,6 @@ from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import instance as obj_instance
from nova import rc_fields as fields
from nova import rpc
from nova.scheduler.filters import utils as filters_utils
@ -88,8 +88,8 @@ class ResourceRequest(object):
def _add_resource(self, groupid, rclass, amount):
# Validate the class.
if not (rclass.startswith(fields.ResourceClass.CUSTOM_NAMESPACE) or
rclass in fields.ResourceClass.STANDARD):
if not (rclass.startswith(orc.CUSTOM_NAMESPACE) or
rclass in orc.STANDARDS):
LOG.warning(
"Received an invalid ResourceClass '%(key)s' in extra_specs.",
{"key": rclass})
@ -376,9 +376,9 @@ def resources_from_flavor(instance, flavor):
swap_in_gb + flavor.ephemeral_gb)
resources = {
fields.ResourceClass.VCPU: flavor.vcpus,
fields.ResourceClass.MEMORY_MB: flavor.memory_mb,
fields.ResourceClass.DISK_GB: disk,
orc.VCPU: flavor.vcpus,
orc.MEMORY_MB: flavor.memory_mb,
orc.DISK_GB: disk,
}
if "extra_specs" in flavor:
# TODO(efried): This method is currently only used from places that
@ -414,8 +414,8 @@ def resources_from_request_spec(spec_obj):
traits, and aggregates it represents.
"""
spec_resources = {
fields.ResourceClass.VCPU: spec_obj.vcpus,
fields.ResourceClass.MEMORY_MB: spec_obj.memory_mb,
orc.VCPU: spec_obj.vcpus,
orc.MEMORY_MB: spec_obj.memory_mb,
}
requested_disk_mb = ((1024 * spec_obj.ephemeral_gb) +
@ -441,7 +441,7 @@ def resources_from_request_spec(spec_obj):
# NOTE(sbauza): Some flavors provide zero size for disk values, we need
# to avoid asking for disk usage.
if requested_disk_gb != 0:
spec_resources[fields.ResourceClass.DISK_GB] = requested_disk_gb
spec_resources[orc.DISK_GB] = requested_disk_gb
# Process extra_specs
if "extra_specs" in spec_obj.flavor:

View File

@ -28,6 +28,7 @@ import warnings
import fixtures
import mock
from neutronclient.common import exceptions as neutron_client_exc
import os_resource_classes as orc
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
@ -54,7 +55,6 @@ from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import quota as nova_quota
from nova import rc_fields
from nova import rpc
from nova import service
from nova.tests.functional.api import client
@ -1270,8 +1270,8 @@ class NeutronFixture(fixtures.Fixture):
'tenant_id': tenant_id,
'resource_request': {
"resources": {
rc_fields.ResourceClass.NET_BW_IGR_KILOBIT_PER_SEC: 1000,
rc_fields.ResourceClass.NET_BW_EGR_KILOBIT_PER_SEC: 1000},
orc.NET_BW_IGR_KILOBIT_PER_SEC: 1000,
orc.NET_BW_EGR_KILOBIT_PER_SEC: 1000},
"required": ["CUSTOM_PHYSNET_2", "CUSTOM_VNIC_TYPE_NORMAL"]
}
}

View File

@ -11,6 +11,7 @@
# under the License.
import mock
import os_resource_classes as orc
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import power_state
@ -21,7 +22,6 @@ from nova.compute import vm_states
from nova import conf
from nova import context
from nova import objects
from nova import rc_fields as fields
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.functional import test_report_client as test_base
@ -29,9 +29,9 @@ from nova.virt import driver as virt_driver
CONF = conf.CONF
VCPU = fields.ResourceClass.VCPU
MEMORY_MB = fields.ResourceClass.MEMORY_MB
DISK_GB = fields.ResourceClass.DISK_GB
VCPU = orc.VCPU
MEMORY_MB = orc.MEMORY_MB
DISK_GB = orc.DISK_GB
COMPUTE_HOST = 'compute-host'

View File

@ -14,6 +14,7 @@
import copy
from keystoneauth1 import exceptions as kse
import mock
import os_resource_classes as orc
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils.fixture import uuidsentinel as uuids
@ -32,7 +33,6 @@ from nova import context
# and is not testing the placement service itself.
from nova import exception
from nova import objects
from nova import rc_fields as fields
from nova.scheduler.client import report
from nova.scheduler import utils
from nova import test
@ -179,7 +179,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
# TODO(cdent): We should probably also have a test that
# tests that when allocation or inventory errors happen, we
# are resilient.
res_class = fields.ResourceClass.VCPU
res_class = orc.VCPU
with self._interceptor():
# When we start out there are no resource providers.
rp = self.client._get_resource_provider(self.context,
@ -356,7 +356,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
parent_provider_uuid=self.compute_uuid)
self.client.set_inventory_for_provider(
self.context, uuid, {
fields.ResourceClass.SRIOV_NET_VF: {
orc.SRIOV_NET_VF: {
'total': 24 * x,
'reserved': x,
'min_unit': 1,
@ -388,7 +388,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
name=name)
self.client.set_inventory_for_provider(
self.context, uuid, {
fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': 100 * x,
'reserved': x,
'min_unit': 1,
@ -411,7 +411,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
name='sip')
self.client.set_inventory_for_provider(
self.context, uuids.sip, {
fields.ResourceClass.IPV4_ADDRESS: {
orc.IPV4_ADDRESS: {
'total': 128,
'reserved': 0,
'min_unit': 1,
@ -482,7 +482,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
# Make sure we can set reserved value equal to total
inv = {
fields.ResourceClass.SRIOV_NET_VF: {
orc.SRIOV_NET_VF: {
'total': 24,
'reserved': 24,
'min_unit': 1,
@ -503,7 +503,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
"""
with self._interceptor():
inv = {
fields.ResourceClass.SRIOV_NET_VF: {
orc.SRIOV_NET_VF: {
'total': 24,
'reserved': 1,
'min_unit': 1,
@ -538,7 +538,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
# Make sure we can change it
inv = {
fields.ResourceClass.SRIOV_NET_VF: {
orc.SRIOV_NET_VF: {
'total': 24,
'reserved': 1,
'min_unit': 1,
@ -546,7 +546,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
'step_size': 1,
'allocation_ratio': 1.0,
},
fields.ResourceClass.IPV4_ADDRESS: {
orc.IPV4_ADDRESS: {
'total': 128,
'reserved': 0,
'min_unit': 1,
@ -566,7 +566,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
self.assertFalse(
self.client.get('/resource_classes/CUSTOM_BANDWIDTH'))
inv = {
fields.ResourceClass.SRIOV_NET_VF: {
orc.SRIOV_NET_VF: {
'total': 24,
'reserved': 1,
'min_unit': 1,
@ -574,7 +574,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
'step_size': 1,
'allocation_ratio': 1.0,
},
fields.ResourceClass.IPV4_ADDRESS: {
orc.IPV4_ADDRESS: {
'total': 128,
'reserved': 0,
'min_unit': 1,
@ -624,7 +624,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
# Create a generation conflict by doing an "out of band" update
oob_inv = {
fields.ResourceClass.IPV4_ADDRESS: {
orc.IPV4_ADDRESS: {
'total': 128,
'reserved': 0,
'min_unit': 1,
@ -646,7 +646,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
# Now try to update again.
inv = {
fields.ResourceClass.SRIOV_NET_VF: {
orc.SRIOV_NET_VF: {
'total': 24,
'reserved': 1,
'min_unit': 1,
@ -687,7 +687,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
self.assertTrue(
self.client.put_allocations(
self.context, uuids.cn, uuids.consumer,
{fields.ResourceClass.SRIOV_NET_VF: 1},
{orc.SRIOV_NET_VF: 1},
uuids.proj, uuids.user, None))
# ...and trying to delete the provider's VF inventory
bad_inv = {
@ -779,7 +779,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2'])
new_tree.new_root('ssp', uuids.ssp)
new_tree.update_inventory('ssp', {
fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': 100,
'reserved': 1,
'min_unit': 1,
@ -794,7 +794,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
# Swizzle properties
# Give the root some everything
new_tree.update_inventory(uuids.root, {
fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': 10,
'reserved': 0,
'min_unit': 1,
@ -802,7 +802,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
'step_size': 1,
'allocation_ratio': 10.0,
},
fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': 1048576,
'reserved': 2048,
'min_unit': 1024,
@ -818,7 +818,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
new_tree.update_aggregates(uuids.child1, [])
# Grandchild gets some inventory
ipv4_inv = {
fields.ResourceClass.IPV4_ADDRESS: {
orc.IPV4_ADDRESS: {
'total': 128,
'reserved': 0,
'min_unit': 1,

View File

@ -10,12 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import os_resource_classes as orc
import six
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import rc_fields
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
@ -60,9 +60,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
# Make sure the resource provider has inventory.
inventories = self._get_provider_inventory(rp_uuid)
# Expect a minimal set of inventory for the fake virt driver.
for resource_class in [rc_fields.ResourceClass.VCPU,
rc_fields.ResourceClass.MEMORY_MB,
rc_fields.ResourceClass.DISK_GB]:
for resource_class in [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB]:
self.assertIn(resource_class, inventories)
# Now create a server so that the resource provider has some allocation

View File

@ -15,6 +15,7 @@ import datetime
from keystoneauth1 import exceptions as ks_exc
import mock
import os_resource_classes as orc
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@ -34,7 +35,6 @@ from nova.objects import base as obj_base
from nova.objects import fields as obj_fields
from nova.objects import pci_device
from nova.pci import manager as pci_manager
from nova import rc_fields
from nova.scheduler.client import report
from nova import test
from nova.tests.unit import fake_notifier
@ -1403,7 +1403,7 @@ class TestUpdateComputeNode(BaseTestCase):
client instead.
"""
fake_inv = {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': 2,
'min_unit': 1,
'max_unit': 2,
@ -1411,7 +1411,7 @@ class TestUpdateComputeNode(BaseTestCase):
'allocation_ratio': 16.0,
'reserved': 1,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': 4096,
'min_unit': 1,
'max_unit': 4096,
@ -1419,7 +1419,7 @@ class TestUpdateComputeNode(BaseTestCase):
'allocation_ratio': 1.5,
'reserved': 512,
},
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': 500,
'min_unit': 1,
'max_unit': 500,
@ -1466,13 +1466,13 @@ class TestUpdateComputeNode(BaseTestCase):
self.driver_mock.get_inventory.assert_not_called()
exp_inv = copy.deepcopy(fake_inv)
# These ratios and reserved amounts come from fake_upt
exp_inv[rc_fields.ResourceClass.VCPU]['allocation_ratio'] = 16.0
exp_inv[rc_fields.ResourceClass.MEMORY_MB]['allocation_ratio'] = 1.5
exp_inv[rc_fields.ResourceClass.DISK_GB]['allocation_ratio'] = 1.0
exp_inv[rc_fields.ResourceClass.VCPU]['reserved'] = 1
exp_inv[rc_fields.ResourceClass.MEMORY_MB]['reserved'] = 512
exp_inv[orc.VCPU]['allocation_ratio'] = 16.0
exp_inv[orc.MEMORY_MB]['allocation_ratio'] = 1.5
exp_inv[orc.DISK_GB]['allocation_ratio'] = 1.0
exp_inv[orc.VCPU]['reserved'] = 1
exp_inv[orc.MEMORY_MB]['reserved'] = 512
# 1024MB in GB
exp_inv[rc_fields.ResourceClass.DISK_GB]['reserved'] = 1
exp_inv[orc.DISK_GB]['reserved'] = 1
self.assertEqual(exp_inv, ptree.data(new_compute.uuid).inventory)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@ -1605,19 +1605,19 @@ class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
# allocation_ratio or reserved amounts for some resources. Verify that
# the information on the compute node fills in this information...
inv = {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': vcpus,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': memory_mb,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
},
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': disk_gb,
'min_unit': 1,
'max_unit': disk_gb,
@ -1625,7 +1625,7 @@ class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
},
}
expected = {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': vcpus,
'reserved': 1,
'min_unit': 1,
@ -1633,7 +1633,7 @@ class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
'step_size': 1,
'allocation_ratio': 16.0,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': memory_mb,
'reserved': 10,
'min_unit': 1,
@ -1641,7 +1641,7 @@ class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
'step_size': 1,
'allocation_ratio': 1.5,
},
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': disk_gb,
'reserved': 1, # Rounded up from CONF.reserved_host_disk_mb
'min_unit': 1,
@ -1678,7 +1678,7 @@ class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
# blocks for VCPU, MEMORY_MB, DISK_GB and the custom resource class
# inventory items
inv = {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': vcpus,
'reserved': 0,
'min_unit': 1,
@ -1686,7 +1686,7 @@ class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
'step_size': 1,
'allocation_ratio': 1.0,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': memory_mb,
'reserved': 0,
'min_unit': 1,
@ -1694,7 +1694,7 @@ class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase):
'step_size': 1,
'allocation_ratio': 1.0,
},
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': disk_gb,
'reserved': 0,
'min_unit': 1,

View File

@ -24,7 +24,6 @@ import six
from nova import exception
from nova.network import model as network_model
from nova.objects import fields
from nova import rc_fields
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
@ -333,38 +332,6 @@ class TestVMMode(TestField):
'invalid')
class TestResourceClass(TestString):
def setUp(self):
super(TestResourceClass, self).setUp()
self.field = rc_fields.ResourceClassField()
# NOTE(gibi): We assume that the input value of a STANDARD RC is always
# the same as the coerced value
self.coerce_good_values = [(v, v) for v in
rc_fields.ResourceClass.STANDARD]
self.coerce_bad_values = [object(), dict()]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_normalize_name(self):
values = [
("foo", "CUSTOM_FOO"),
("VCPU", "CUSTOM_VCPU"),
("CUSTOM_BOB", "CUSTOM_CUSTOM_BOB"),
("CUSTM_BOB", "CUSTOM_CUSTM_BOB"),
]
for test_value, expected in values:
result = rc_fields.ResourceClass.normalize_name(test_value)
self.assertEqual(expected, result)
def test_normalize_name_bug_1762789(self):
"""The .upper() builtin treats sharp S (\xdf) differently in py2 vs.
py3. Make sure normalize_name handles it properly.
"""
name = u'Fu\xdfball'
self.assertEqual(u'CUSTOM_FU_BALL',
rc_fields.ResourceClass.normalize_name(name))
class TestInteger(TestField):
def setUp(self):
super(TestInteger, self).setUp()

View File

@ -15,6 +15,7 @@ import time
import fixtures
from keystoneauth1 import exceptions as ks_exc
import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from six.moves.urllib import parse
@ -23,7 +24,6 @@ import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova import rc_fields as fields
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
@ -3042,7 +3042,7 @@ class TestAllocations(SchedulerReportClientTestCase):
mock_vbi.return_value = False
specs = {
'resources:CUSTOM_DAN': '123',
'resources:%s' % fields.ResourceClass.VCPU: '4',
'resources:%s' % orc.VCPU: '4',
'resources:NOTATHING': '456',
'resources:NOTEVENANUMBER': 'catfood',
'resources:': '7',

View File

@ -1419,3 +1419,24 @@ class RunOnceTests(test.NoDBTestCase):
self.assertRaises(ValueError, f.reset)
self.assertFalse(f.called)
mock_clean.assert_called_once_with()
class TestResourceClassNormalize(test.NoDBTestCase):
    """Tests for nova.utils.normalize_rc_name."""

    def test_normalize_name(self):
        # Each raw name is pushed into the CUSTOM_ namespace with
        # non-alphanumerics collapsed to underscores.
        cases = (
            ("foo", "CUSTOM_FOO"),
            ("VCPU", "CUSTOM_VCPU"),
            ("CUSTOM_BOB", "CUSTOM_CUSTOM_BOB"),
            ("CUSTM_BOB", "CUSTOM_CUSTM_BOB"),
        )
        for raw, normalized in cases:
            self.assertEqual(normalized, utils.normalize_rc_name(raw))

    def test_normalize_name_bug_1762789(self):
        """The .upper() builtin treats sharp S (\xdf) differently in py2
        vs. py3. Make sure normalize_rc_name handles it properly.
        """
        name = u'Fu\xdfball'
        self.assertEqual(u'CUSTOM_FU_BALL', utils.normalize_rc_name(name))

View File

@ -36,7 +36,6 @@ from nova import context as nova_context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import rc_fields as fields
from nova import servicegroup
from nova import test
from nova.tests import fixtures
@ -45,6 +44,7 @@ from nova.tests.unit import fake_instance
from nova.tests.unit import matchers as nova_matchers
from nova.tests.unit import utils
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova import utils as nova_utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
@ -2890,7 +2890,7 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
self.assertTrue(mock_get_by_uuid.called)
self.assertTrue(mock_save.called)
@mock.patch.object(fields.ResourceClass, 'normalize_name')
@mock.patch.object(nova_utils, 'normalize_rc_name')
@mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
def test_pike_flavor_migration_empty_node(self, mock_node_from_cache,
mock_normalize):
@ -2898,7 +2898,7 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
self.driver._pike_flavor_migration([uuids.node])
mock_normalize.assert_not_called()
@mock.patch.object(fields.ResourceClass, 'normalize_name')
@mock.patch.object(nova_utils, 'normalize_rc_name')
@mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
def test_pike_flavor_migration_already_migrated(self, mock_node_from_cache,
mock_normalize):

View File

@ -40,6 +40,7 @@ import mock
from os_brick import encryptors
from os_brick import exception as brick_exception
from os_brick.initiator import connector
import os_resource_classes as orc
import os_vif
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
@ -77,7 +78,6 @@ from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
from nova import rc_fields
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_diagnostics
@ -18131,7 +18131,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
def _get_inventory(self):
return {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': self.vcpus,
'min_unit': 1,
'max_unit': self.vcpus,
@ -18139,7 +18139,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'allocation_ratio': 16.0,
'reserved': 0,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': self.memory_mb,
'min_unit': 1,
'max_unit': self.memory_mb,
@ -18147,7 +18147,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'allocation_ratio': 1.5,
'reserved': 512,
},
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': self.disk_gb,
'min_unit': 1,
'max_unit': self.disk_gb,
@ -18183,10 +18183,10 @@ class TestUpdateProviderTree(test.NoDBTestCase):
self._test_update_provider_tree(total_vgpus=8)
inventory = self._get_inventory()
# Add VGPU in the expected inventory
inventory[rc_fields.ResourceClass.VGPU] = {'step_size': 1,
'min_unit': 1,
'max_unit': 8,
'total': 8}
inventory[orc.VGPU] = {'step_size': 1,
'min_unit': 1,
'max_unit': 8,
'total': 8}
self.assertEqual(inventory,
(self.pt.data(self.cn_rp['uuid'])).inventory)
@ -18207,7 +18207,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
"""
shared_rp_inv = {
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': self.disk_gb,
'min_unit': 1,
'max_unit': self.disk_gb,
@ -18227,7 +18227,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
inventory = self._get_inventory()
# Remove DISK_GB resource from inventory as you don't expect it to be
# reported by the compute node resource provider.
del inventory[rc_fields.ResourceClass.DISK_GB]
del inventory[orc.DISK_GB]
self.assertEqual(inventory,
(self.pt.data(self.cn_rp['uuid'])).inventory)
@ -20616,7 +20616,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
'rp1': {
'resources': {
# Just any resource class but VGPU
rc_fields.ResourceClass.VCPU: 1,
orc.VCPU: 1,
}
}
}
@ -20629,7 +20629,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
allocations = {
'rp1': {
'resources': {
rc_fields.ResourceClass.VGPU: 1,
orc.VGPU: 1,
}
}
}
@ -20651,7 +20651,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
allocations = {
'rp1': {
'resources': {
rc_fields.ResourceClass.VGPU: 1,
orc.VGPU: 1,
}
}
}
@ -20685,7 +20685,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
allocations = {
'rp1': {
'resources': {
rc_fields.ResourceClass.VGPU: 1,
orc.VGPU: 1,
}
}
}

View File

@ -20,6 +20,7 @@ import traceback
import fixtures
import mock
import netaddr
import os_resource_classes as orc
import os_vif
from oslo_log import log as logging
from oslo_serialization import jsonutils
@ -33,7 +34,6 @@ from nova.console import type as ctype
from nova import context
from nova import exception
from nova import objects
from nova import rc_fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_block_device
@ -818,9 +818,9 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(ram_allocation_ratio=1.6)
self.flags(disk_allocation_ratio=1.1)
expeced_ratios = {
rc_fields.ResourceClass.VCPU: CONF.cpu_allocation_ratio,
rc_fields.ResourceClass.MEMORY_MB: CONF.ram_allocation_ratio,
rc_fields.ResourceClass.DISK_GB: CONF.disk_allocation_ratio
orc.VCPU: CONF.cpu_allocation_ratio,
orc.MEMORY_MB: CONF.ram_allocation_ratio,
orc.DISK_GB: CONF.disk_allocation_ratio
}
# If conf is set, return conf
self.assertEqual(expeced_ratios,
@ -833,27 +833,21 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(initial_ram_allocation_ratio=1.4)
self.flags(initial_disk_allocation_ratio=0.9)
expeced_ratios = {
rc_fields.ResourceClass.VCPU:
CONF.initial_cpu_allocation_ratio,
rc_fields.ResourceClass.MEMORY_MB:
CONF.initial_ram_allocation_ratio,
rc_fields.ResourceClass.DISK_GB:
CONF.initial_disk_allocation_ratio
orc.VCPU: CONF.initial_cpu_allocation_ratio,
orc.MEMORY_MB: CONF.initial_ram_allocation_ratio,
orc.DISK_GB: CONF.initial_disk_allocation_ratio
}
# if conf is unset and inv doesn't exists, return init conf
self.assertEqual(expeced_ratios,
self.connection._get_allocation_ratios(inv))
inv = {rc_fields.ResourceClass.VCPU: {'allocation_ratio': 3.0},
rc_fields.ResourceClass.MEMORY_MB: {'allocation_ratio': 3.1},
rc_fields.ResourceClass.DISK_GB: {'allocation_ratio': 3.2}}
inv = {orc.VCPU: {'allocation_ratio': 3.0},
orc.MEMORY_MB: {'allocation_ratio': 3.1},
orc.DISK_GB: {'allocation_ratio': 3.2}}
expeced_ratios = {
rc_fields.ResourceClass.VCPU:
inv[rc_fields.ResourceClass.VCPU]['allocation_ratio'],
rc_fields.ResourceClass.MEMORY_MB:
inv[rc_fields.ResourceClass.MEMORY_MB]['allocation_ratio'],
rc_fields.ResourceClass.DISK_GB:
inv[rc_fields.ResourceClass.DISK_GB]['allocation_ratio']
orc.VCPU: inv[orc.VCPU]['allocation_ratio'],
orc.MEMORY_MB: inv[orc.MEMORY_MB]['allocation_ratio'],
orc.DISK_GB: inv[orc.DISK_GB]['allocation_ratio']
}
# if conf is unset and inv exists, return inv
self.assertEqual(expeced_ratios,

View File

@ -24,6 +24,7 @@ import datetime
from eventlet import greenthread
import mock
import os_resource_classes as orc
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
from oslo_utils import units
@ -43,7 +44,6 @@ from nova import exception
from nova.image import glance
from nova.network import model as network_model
from nova import objects
from nova import rc_fields as fields
from nova import test
from nova.tests.unit import fake_diagnostics
from nova.tests.unit import fake_instance
@ -2139,21 +2139,21 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
mock_get_avail_ds.return_value = [ds1, ds2, ds3]
inv = self.conn.get_inventory(self.node_name)
expected = {
fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': 32,
'reserved': 0,
'min_unit': 1,
'max_unit': 16,
'step_size': 1,
},
fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': 2048,
'reserved': 512,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
},
fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': 95,
'reserved': 0,
'min_unit': 1,

View File

@ -16,12 +16,12 @@
import math
import mock
import os_resource_classes as orc
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from nova import exception
from nova.objects import fields as obj_fields
from nova import rc_fields
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import driver
from nova.virt import fake
@ -252,25 +252,25 @@ class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
@mock.patch.object(host.HostState, 'get_host_stats')
def test_get_inventory(self, mock_get_stats):
expected_inv = {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': 50,
'min_unit': 1,
'max_unit': 50,
'step_size': 1,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': 3,
'min_unit': 1,
'max_unit': 3,
'step_size': 1,
},
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': 5,
'min_unit': 1,
'max_unit': 5,
'step_size': 1,
},
rc_fields.ResourceClass.VGPU: {
orc.VGPU: {
'total': 7,
'min_unit': 1,
'max_unit': 1,
@ -296,7 +296,7 @@ class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
inv = drv.get_inventory(mock.sentinel.nodename)
# check if the inventory data does NOT contain VGPU.
self.assertNotIn(rc_fields.ResourceClass.VGPU, inv)
self.assertNotIn(orc.VGPU, inv)
def test_get_vgpu_total_single_grp(self):
# Test when only one group included in the host_stats.

View File

@ -34,6 +34,7 @@ import eventlet
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import loading as ks_loading
import netaddr
import os_resource_classes as orc
from os_service_types import service_types
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
@ -1363,3 +1364,15 @@ def run_once(message, logger, cleanup=None):
wrapper.reset = functools.partial(reset, wrapper)
return wrapper
return outer_wrapper
def normalize_rc_name(rc_name):
    """Normalize a resource class name to its standard CUSTOM_* form.

    Runs of characters outside [0-9A-Za-z] are collapsed to a single
    underscore, the result is upper-cased, and the custom resource class
    namespace prefix from os-resource-classes is prepended.

    :param rc_name: Raw resource class name, or None.
    :returns: The normalized custom resource class name, or None when
        rc_name is None.
    """
    if rc_name is None:
        return None
    # Bug #1762789: upper-case only AFTER squashing non-alphanumerics so
    # characters with unusual case-folding cannot survive normalization.
    squashed = re.sub('[^0-9A-Za-z]+', '_', rc_name)
    return orc.CUSTOM_NAMESPACE + squashed.upper()

View File

@ -22,13 +22,13 @@ Driver base-classes:
import sys
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_utils import importutils
import six
import nova.conf
from nova.i18n import _
from nova import rc_fields
from nova.virt import event as virtevent
CONF = nova.conf.CONF
@ -887,9 +887,9 @@ class ComputeDriver(object):
information.
:returns: Return a dict, keyed by resource class, of allocation ratio
"""
keys = {'cpu': rc_fields.ResourceClass.VCPU,
'ram': rc_fields.ResourceClass.MEMORY_MB,
'disk': rc_fields.ResourceClass.DISK_GB}
keys = {'cpu': orc.VCPU,
'ram': orc.MEMORY_MB,
'disk': orc.DISK_GB}
result = {}
for res, rc in keys.items():
attr = '%s_allocation_ratio' % res

View File

@ -28,6 +28,7 @@ import contextlib
import copy
import time
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
@ -41,7 +42,6 @@ from nova import exception
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields as obj_fields
from nova.objects import migrate_data
from nova import rc_fields
from nova.virt import driver
from nova.virt import hardware
from nova.virt import virtapi
@ -519,7 +519,7 @@ class FakeDriver(driver.ComputeDriver):
'min_unit': 1,
'max_unit': self.vcpus,
'step_size': 1,
'allocation_ratio': ratios[rc_fields.ResourceClass.VCPU],
'allocation_ratio': ratios[orc.VCPU],
'reserved': CONF.reserved_host_cpus,
},
'MEMORY_MB': {
@ -527,7 +527,7 @@ class FakeDriver(driver.ComputeDriver):
'min_unit': 1,
'max_unit': self.memory_mb,
'step_size': 1,
'allocation_ratio': ratios[rc_fields.ResourceClass.MEMORY_MB],
'allocation_ratio': ratios[orc.MEMORY_MB],
'reserved': CONF.reserved_host_memory_mb,
},
'DISK_GB': {
@ -535,7 +535,7 @@ class FakeDriver(driver.ComputeDriver):
'min_unit': 1,
'max_unit': self.local_gb,
'step_size': 1,
'allocation_ratio': ratios[rc_fields.ResourceClass.DISK_GB],
'allocation_ratio': ratios[orc.DISK_GB],
'reserved': self._get_reserved_host_disk_gb_from_config(),
},
}

View File

@ -46,7 +46,6 @@ from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import fields as obj_fields
from nova import rc_fields
from nova import servicegroup
from nova import utils
from nova.virt import configdrive
@ -512,7 +511,7 @@ class IronicDriver(virt_driver.ComputeDriver):
@staticmethod
def _pike_flavor_migration_for_node(ctx, node_rc, instance_uuid):
normalized_rc = rc_fields.ResourceClass.normalize_name(node_rc)
normalized_rc = utils.normalize_rc_name(node_rc)
instance = objects.Instance.get_by_uuid(ctx, instance_uuid,
expected_attrs=["flavor"])
specs = instance.flavor.extra_specs
@ -817,7 +816,7 @@ class IronicDriver(virt_driver.ComputeDriver):
if rc_name is None:
raise exception.NoResourceClass(node=nodename)
norm_name = rc_fields.ResourceClass.normalize_name(rc_name)
norm_name = utils.normalize_rc_name(rc_name)
if norm_name is not None:
result[norm_name] = {
'total': 1,

View File

@ -52,6 +52,7 @@ from os_brick import encryptors
from os_brick.encryptors import luks as luks_encryptor
from os_brick import exception as brick_exception
from os_brick.initiator import connector
import os_resource_classes as orc
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import base64
@ -93,7 +94,6 @@ from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
import nova.privsep.utils
from nova import rc_fields
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
@ -6183,7 +6183,7 @@ class LibvirtDriver(driver.ComputeDriver):
if not allocations:
# If no allocations, there is no vGPU request.
return {}
RC_VGPU = rc_fields.ResourceClass.VGPU
RC_VGPU = orc.VGPU
vgpu_allocations = {}
for rp in allocations:
res = allocations[rp]['resources']
@ -6262,7 +6262,7 @@ class LibvirtDriver(driver.ComputeDriver):
'while at the moment libvirt only supports one. Only '
'the first allocation will be looked up.')
alloc = six.next(six.itervalues(vgpu_allocations))
vgpus_asked = alloc['resources'][rc_fields.ResourceClass.VGPU]
vgpus_asked = alloc['resources'][orc.VGPU]
requested_types = self._get_supported_vgpu_types()
# Which mediated devices are created but not assigned to a guest ?
@ -6557,20 +6557,20 @@ class LibvirtDriver(driver.ComputeDriver):
inv = provider_tree.data(nodename).inventory
ratios = self._get_allocation_ratios(inv)
result = {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': vcpus,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
'allocation_ratio': ratios[rc_fields.ResourceClass.VCPU],
'allocation_ratio': ratios[orc.VCPU],
'reserved': CONF.reserved_host_cpus,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': memory_mb,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
'allocation_ratio': ratios[rc_fields.ResourceClass.MEMORY_MB],
'allocation_ratio': ratios[orc.MEMORY_MB],
'reserved': CONF.reserved_host_memory_mb,
},
}
@ -6580,20 +6580,20 @@ class LibvirtDriver(driver.ComputeDriver):
# the compute node provider.
# TODO(efried): Reinstate non-reporting of shared resource by the
# compute RP once the issues from bug #1784020 have been resolved.
if provider_tree.has_sharing_provider(rc_fields.ResourceClass.DISK_GB):
if provider_tree.has_sharing_provider(orc.DISK_GB):
LOG.debug('Ignoring sharing provider - see bug #1784020')
result[rc_fields.ResourceClass.DISK_GB] = {
result[orc.DISK_GB] = {
'total': disk_gb,
'min_unit': 1,
'max_unit': disk_gb,
'step_size': 1,
'allocation_ratio': ratios[rc_fields.ResourceClass.DISK_GB],
'allocation_ratio': ratios[orc.DISK_GB],
'reserved': self._get_reserved_host_disk_gb_from_config(),
}
if vgpus > 0:
# Only provide VGPU resource classes if the driver supports it.
result[rc_fields.ResourceClass.VGPU] = {
result[orc.VGPU] = {
'total': vgpus,
'min_unit': 1,
'max_unit': vgpus,

View File

@ -13,6 +13,7 @@
# under the License.
"""Connection to PowerVM hypervisor through NovaLink."""
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
@ -34,7 +35,6 @@ from nova.console import type as console_type
from nova import exception as exc
from nova.i18n import _
from nova import image
from nova import rc_fields
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.powervm import host as pvm_host
@ -203,23 +203,23 @@ class PowerVMDriver(driver.ComputeDriver):
disk_reserved = self._get_reserved_host_disk_gb_from_config()
inventory = {
rc_fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': data['vcpus'],
'max_unit': data['vcpus'],
'allocation_ratio': ratios[rc_fields.ResourceClass.VCPU],
'allocation_ratio': ratios[orc.VCPU],
'reserved': cpu_reserved,
},
rc_fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': data['memory_mb'],
'max_unit': data['memory_mb'],
'allocation_ratio': ratios[rc_fields.ResourceClass.MEMORY_MB],
'allocation_ratio': ratios[orc.MEMORY_MB],
'reserved': mem_reserved,
},
rc_fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
# TODO(efried): Proper DISK_GB sharing when SSP driver in play
'total': int(data['local_gb']),
'max_unit': int(data['local_gb']),
'allocation_ratio': ratios[rc_fields.ResourceClass.DISK_GB],
'allocation_ratio': ratios[orc.DISK_GB],
'reserved': disk_reserved,
},
}

View File

@ -22,6 +22,7 @@ A connection to the VMware vCenter platform.
import os
import re
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
@ -40,7 +41,6 @@ from nova import exception
from nova.i18n import _
from nova import objects
import nova.privsep.path
from nova import rc_fields as fields
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
@ -432,21 +432,21 @@ class VMwareVCDriver(driver.ComputeDriver):
reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb)
result = {
fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': stats['cpu']['vcpus'],
'reserved': CONF.reserved_host_cpus,
'min_unit': 1,
'max_unit': stats['cpu']['max_vcpus_per_host'],
'step_size': 1,
},
fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': stats['mem']['total'],
'reserved': CONF.reserved_host_memory_mb,
'min_unit': 1,
'max_unit': stats['mem']['max_mem_mb_per_host'],
'step_size': 1,
},
fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': total_disk_capacity // units.Gi,
'reserved': reserved_disk_gb,
'min_unit': 1,

View File

@ -25,6 +25,7 @@ A driver for XenServer or Xen Cloud Platform.
import math
import os_resource_classes as orc
from os_xenapi.client import session
from oslo_log import log as logging
from oslo_serialization import jsonutils
@ -36,7 +37,6 @@ import six.moves.urllib.parse as urlparse
import nova.conf
from nova import exception
from nova.i18n import _
from nova import rc_fields as fields
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
@ -175,7 +175,7 @@ class XenAPIDriver(driver.ComputeDriver):
if not allocations:
# If no allocations, there is no vGPU request.
return False
RC_VGPU = fields.ResourceClass.VGPU
RC_VGPU = orc.VGPU
for rp in allocations:
res = allocations[rp]['resources']
if res and RC_VGPU in res and res[RC_VGPU] > 0:
@ -481,19 +481,19 @@ class XenAPIDriver(driver.ComputeDriver):
vgpus = self._get_vgpu_total(host_stats['vgpu_stats'])
result = {
fields.ResourceClass.VCPU: {
orc.VCPU: {
'total': vcpus,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
},
fields.ResourceClass.MEMORY_MB: {
orc.MEMORY_MB: {
'total': memory_mb,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
},
fields.ResourceClass.DISK_GB: {
orc.DISK_GB: {
'total': disk_gb,
'min_unit': 1,
'max_unit': disk_gb,
@ -506,7 +506,7 @@ class XenAPIDriver(driver.ComputeDriver):
# so max_unit is 1.
result.update(
{
fields.ResourceClass.VGPU: {
orc.VGPU: {
'total': vgpus,
'min_unit': 1,
'max_unit': 1,

View File

@ -56,6 +56,7 @@ oslo.middleware>=3.31.0 # Apache-2.0
psutil>=3.2.2 # BSD
oslo.versionedobjects>=1.33.3 # Apache-2.0
os-brick>=2.6.1 # Apache-2.0
os-resource-classes>=0.1.0 # Apache-2.0
os-traits>=0.4.0 # Apache-2.0
os-vif>=1.14.0 # Apache-2.0
os-win>=3.0.0 # Apache-2.0