placement: produce set of allocation candidates

We want to allow the scheduler to claim resources against multiple
resource providers involved in a single allocation request, including
providers of shared resources and, eventually, child providers in a
hierarchy. To do so, we need to return to the scheduler a set of
allocation request structures along with some details about the
providers involved in those alternative claim possibilities.

This patch adds new objects to the resource provider object model that
describe these allocation choices and the underlying providers of
resources. A top-level AllocationCandidates object contains two fields,
allocation_requests and provider_summaries, which store lists of
AllocationRequest and ProviderSummary objects respectively. The
AllocationCandidates.get_by_filters() method accepts a dict of
resources, in a similar fashion to
ResourceProviderList.get_all_by_filters(), and will be used in
follow-up patches to expose the proposed GET /allocation_requests REST
endpoint being discussed in the placement-allocation-requests spec
(https://review.openstack.org/#/c/471927/).
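
For illustration, a caller such as the scheduler might consume the new
objects roughly as follows (a minimal sketch only; it assumes the module
is imported as rp_obj, as in the functional tests below, and the actual
claim step is out of scope for this patch):

    # Ask for candidate allocations matching a resource request.
    candidates = rp_obj.AllocationCandidates.get_by_filters(
        context,
        filters={'resources': {'VCPU': 1, 'MEMORY_MB': 64, 'DISK_GB': 100}},
    )
    # Each AllocationRequest is one complete way to satisfy the request,
    # possibly spanning several providers (e.g. a compute node plus a
    # shared storage pool).
    for alloc_req in candidates.allocation_requests:
        for rr in alloc_req.resource_requests:
            print(rr.resource_provider.uuid, rr.resource_class, rr.amount)
    # Provider summaries carry per-provider capacity and usage that the
    # caller may use to sort and weigh the candidates.
    for summary in candidates.provider_summaries:
        for res in summary.resources:
            print(summary.resource_provider.uuid, res.resource_class,
                  res.capacity, res.used)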

Following patches will also add support for traits and filtering by
UUIDs and member_of aggregate lists.

Change-Id: Ibc049496fb21fb34e7b91aa869fdff42127fb384
blueprint: placement-allocation-requests
Jay Pipes 2017-06-12 07:09:50 -04:00
parent 3e732d332a
commit 604f17d60c
2 changed files with 753 additions and 0 deletions


@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
# NOTE(cdent): The resource provider objects are designed to never be
# used over RPC. Remote manipulation is done with the placement HTTP
@@ -2170,3 +2171,373 @@ class TraitList(base.ObjectListBase, base.NovaObject):
def get_all(cls, context, filters=None):
db_traits = cls._get_all_from_db(context, filters)
return base.obj_make_list(context, cls(context), Trait, db_traits)
@base.NovaObjectRegistry.register_if(False)
class AllocationRequestResource(base.NovaObject):
# 1.0: Initial version
VERSION = '1.0'
fields = {
'resource_provider': fields.ObjectField('ResourceProvider'),
'resource_class': fields.ResourceClassField(read_only=True),
'amount': fields.NonNegativeIntegerField(),
}
@base.NovaObjectRegistry.register_if(False)
class AllocationRequest(base.NovaObject):
# 1.0: Initial version
VERSION = '1.0'
fields = {
'resource_requests': fields.ListOfObjectsField(
'AllocationRequestResource'
),
}
@base.NovaObjectRegistry.register_if(False)
class ProviderSummaryResource(base.NovaObject):
# 1.0: Initial version
VERSION = '1.0'
fields = {
'resource_class': fields.ResourceClassField(read_only=True),
'capacity': fields.NonNegativeIntegerField(),
'used': fields.NonNegativeIntegerField(),
}
@base.NovaObjectRegistry.register_if(False)
class ProviderSummary(base.NovaObject):
# 1.0: Initial version
VERSION = '1.0'
fields = {
'resource_provider': fields.ObjectField('ResourceProvider'),
'resources': fields.ListOfObjectsField('ProviderSummaryResource'),
'traits': fields.ListOfObjectsField('Trait'),
}
@db_api.api_context_manager.reader
def _get_usages_by_provider_and_rc(ctx, rp_ids, rc_ids):
"""Returns a row iterator of usage records grouped by resource provider ID
and resource class ID for all resource providers and resource classes
involved in our request
"""
# We build up a SQL expression that looks like this:
# SELECT
# rp.id as resource_provider_id
# , rp.uuid as resource_provider_uuid
# , inv.resource_class_id
# , inv.total
# , inv.reserved
# , inv.allocation_ratio
# , usage.used
# FROM resource_providers AS rp
# JOIN inventories AS inv
# ON rp.id = inv.resource_provider_id
# LEFT JOIN (
# SELECT resource_provider_id, resource_class_id, SUM(used) as used
# FROM allocations
# WHERE resource_provider_id IN ($rp_ids)
# AND resource_class_id IN ($rc_ids)
# GROUP BY resource_provider_id, resource_class_id
# )
# AS usage
# ON inv.resource_provider_id = usage.resource_provider_id
# AND inv.resource_class_id = usage.resource_class_id
# WHERE rp.id IN ($rp_ids)
# AND inv.resource_class_id IN ($rc_ids)
rpt = sa.alias(_RP_TBL, name="rp")
inv = sa.alias(_INV_TBL, name="inv")
# Build our derived table (subquery in the FROM clause) that sums used
# amounts for resource provider and resource class
usage = sa.alias(
sa.select([
_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id,
sql.func.sum(_ALLOC_TBL.c.used).label('used'),
]).where(
sa.and_(
_ALLOC_TBL.c.resource_provider_id.in_(rp_ids),
_ALLOC_TBL.c.resource_class_id.in_(rc_ids),
),
).group_by(
_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id
),
name='usage',
)
# Build a join between the resource providers and inventories table
rpt_inv_join = sa.join(rpt, inv, rpt.c.id == inv.c.resource_provider_id)
# And then join to the derived table of usages
usage_join = sa.outerjoin(
rpt_inv_join,
usage,
sa.and_(
usage.c.resource_provider_id == inv.c.resource_provider_id,
usage.c.resource_class_id == inv.c.resource_class_id,
),
)
query = sa.select([
rpt.c.id.label("resource_provider_id"),
rpt.c.uuid.label("resource_provider_uuid"),
inv.c.resource_class_id,
inv.c.total,
inv.c.reserved,
inv.c.allocation_ratio,
usage.c.used,
]).select_from(usage_join)
return ctx.session.execute(query).fetchall()
@base.NovaObjectRegistry.register_if(False)
class AllocationCandidates(base.NovaObject):
"""The AllocationCandidates object is a collection of possible allocations
that match some request for resources, along with some summary information
about the resource providers involved in these allocation candidates.
"""
# 1.0: Initial version
VERSION = '1.0'
fields = {
# A collection of allocation possibilities that can be attempted by the
# caller that would, at the time of calling, meet the requested
# resource constraints
'allocation_requests': fields.ListOfObjectsField('AllocationRequest'),
# Information about usage and inventory that relate to any provider
# contained in any of the AllocationRequest objects in the
# allocation_requests field
'provider_summaries': fields.ListOfObjectsField('ProviderSummary'),
}
@classmethod
def get_by_filters(cls, context, filters):
"""Returns an AllocationCandidates object containing all resource
providers matching a set of supplied resource constraints, with a set
of allocation requests constructed from that list of resource
providers.
:param filters: A dict of filters containing one or more of the
following keys:
'resources': A dict, keyed by resource class name, of amounts of
that resource being requested. The resource provider
must either have capacity for the amount being
requested or be associated via aggregate to a provider
that shares this resource and has capacity for the
requested amount.
"""
_ensure_rc_cache(context)
alloc_reqs, provider_summaries = cls._get_by_filters(context, filters)
return cls(
context,
allocation_requests=alloc_reqs,
provider_summaries=provider_summaries,
)
# TODO(jaypipes): See what we can pull out of here into helper functions to
# minimize the complexity of this method.
@staticmethod
@db_api.api_context_manager.reader
def _get_by_filters(context, filters):
# We first get the list of "root providers" that either have the
# requested resources or are associated with the providers that
# share one or more of the requested resource(s)
resources = filters.get('resources')
if not resources:
raise ValueError(_("Supply a resources collection in filters."))
# Transform resource string names to internal integer IDs
resources = {
_RC_CACHE.id_from_string(key): value
for key, value in resources.items()
}
roots = [r[0] for r in _get_all_with_shared(context, resources)]
if not roots:
return [], []
# Contains a set of resource provider IDs for each resource class
# requested
sharing_providers = {
rc_id: _get_providers_with_shared_capacity(context, rc_id, amount)
for rc_id, amount in resources.items()
}
# We need to grab usage information for all the providers identified as
# potentially fulfilling part of the resource request. This includes
# "root providers" returned from _get_all_with_shared() as well as all
# the providers of shared resources. Here, we simply grab a unique set
# of all those resource provider internal IDs by set union'ing them
# together
all_rp_ids = set(roots)
for rps in sharing_providers.values():
all_rp_ids |= set(rps)
# Grab usage summaries for each provider (local or sharing) and
# resource class requested
usages = _get_usages_by_provider_and_rc(
context,
all_rp_ids,
list(resources.keys()),
)
# Build up a dict, keyed by internal resource provider ID, of usage
# information from which we will then build both allocation request and
# provider summary information
summaries = {}
for usage in usages:
u_rp_id = usage['resource_provider_id']
u_rp_uuid = usage['resource_provider_uuid']
u_rc_id = usage['resource_class_id']
# NOTE(jaypipes): usage['used'] may be None due to the LEFT JOIN of
# the usages subquery, so we coerce NULL values to 0 here.
used = usage['used'] or 0
allocation_ratio = usage['allocation_ratio']
cap = int((usage['total'] - usage['reserved']) * allocation_ratio)
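# Worked example (illustration only, using the inventory numbers from
# the functional tests below): total=24, reserved=0,
# allocation_ratio=16.0 gives int((24 - 0) * 16.0) == 384; total=2000,
# reserved=100, allocation_ratio=1.0 gives 1900.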
summary = summaries.get(u_rp_id)
if not summary:
summary = {
'uuid': u_rp_uuid,
'resources': {},
# TODO(jaypipes): Fill in the provider's traits...
'traits': [],
}
summaries[u_rp_id] = summary
summary['resources'][u_rc_id] = {
'capacity': cap,
'used': used,
}
# Next, build up a list of allocation requests. These allocation
# requests are AllocationRequest objects, containing resource provider
# UUIDs, resource class names and amounts to consume from that resource
# provider
alloc_request_objs = []
# Build a dict, keyed by resource class ID, of
# AllocationRequestResource objects that represent each resource
# provider for a shared resource
sharing_resource_requests = collections.defaultdict(list)
for shared_rc_id in sharing_providers.keys():
sharing = sharing_providers[shared_rc_id]
for sharing_rp_id in sharing:
sharing_summary = summaries[sharing_rp_id]
sharing_rp_uuid = sharing_summary['uuid']
sharing_res_req = AllocationRequestResource(
context,
resource_provider=ResourceProvider(
context,
uuid=sharing_rp_uuid,
),
resource_class=_RC_CACHE.string_from_id(shared_rc_id),
amount=resources[shared_rc_id],
)
sharing_resource_requests[shared_rc_id].append(sharing_res_req)
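# Illustration only: in the shared-disk scenario exercised by the
# functional tests below, sharing_resource_requests would map the
# DISK_GB resource class ID to a one-element list holding an
# AllocationRequestResource that points at the shared storage provider
# with amount=100.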
for root_rp_id in roots:
root_summary = summaries[root_rp_id]
root_rp_uuid = root_summary['uuid']
local_resources = set(
rc_id for rc_id in resources.keys()
if rc_id in root_summary['resources']
)
shared_resources = set(
rc_id for rc_id in resources.keys()
if rc_id not in root_summary['resources']
)
# Determine if the root provider actually has all the resources
# requested. If not, we need to add an AllocationRequest
# alternative containing this resource for each sharing provider
has_all = len(shared_resources) == 0
if has_all:
resource_requests = [
AllocationRequestResource(
context,
resource_provider=ResourceProvider(
context,
uuid=root_rp_uuid,
),
resource_class=_RC_CACHE.string_from_id(rc_id),
amount=amount,
) for rc_id, amount in resources.items()
]
req_obj = AllocationRequest(
context,
resource_requests=resource_requests,
)
alloc_request_objs.append(req_obj)
# If there are no resource providers sharing resources involved in
# this request, there's no point building a set of allocation
# requests that involve resource providers other than the "root
# providers" that have all the local resources on them
if not sharing_resource_requests:
continue
# add an AllocationRequest that includes local resources from the
# root provider and shared resources from each sharing provider of
# that resource class
non_shared_resources = local_resources - shared_resources
non_shared_requests = [
AllocationRequestResource(
context,
resource_provider=ResourceProvider(
context,
uuid=root_rp_uuid,
),
resource_class=_RC_CACHE.string_from_id(rc_id),
amount=amount,
) for rc_id, amount in resources.items()
if rc_id in non_shared_resources
]
sharing_request_tuples = zip(
sharing_resource_requests[shared_rc_id]
for shared_rc_id in shared_resources
)
# sharing_request_tuples will now contain a list of tuples with the
# tuples being AllocationRequestResource objects for each provider
# of a shared resource
for shared_request_tuple in sharing_request_tuples:
shared_requests = list(*shared_request_tuple)
resource_requests = non_shared_requests + shared_requests
req_obj = AllocationRequest(
context,
resource_requests=resource_requests,
)
alloc_request_objs.append(req_obj)
# Finally, construct the object representations for the provider
# summaries we built above. These summaries may be used by the
# scheduler (or any other caller) to sort and weigh for its eventual
# placement and claim decisions
summary_objs = []
for rp_id, summary in summaries.items():
rp_uuid = summary['uuid']
rps_resources = []
for rc_id, usage in summary['resources'].items():
rc_name = _RC_CACHE.string_from_id(rc_id)
rpsr_obj = ProviderSummaryResource(
context,
resource_class=rc_name,
capacity=usage['capacity'],
used=usage['used'],
)
rps_resources.append(rpsr_obj)
summary_obj = ProviderSummary(
context,
resource_provider=ResourceProvider(
context,
uuid=rp_uuid,
),
resources=rps_resources,
)
summary_objs.append(summary_obj)
return alloc_request_objs, summary_objs
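
As a reading aid for the method above, the following standalone sketch
(plain dicts only, not part of the patch) shows how the per-provider
capacity and usage summaries are derived from inventory and allocation
rows; the numbers are taken from the functional tests below:

    # Illustrative stand-in for the rows returned by
    # _get_usages_by_provider_and_rc(): (uuid, resource class, total,
    # reserved, allocation_ratio, used). used is None when there are no
    # allocations yet, mirroring the LEFT JOIN in the real query.
    usage_rows = [
        ('cn1', 'VCPU', 24, 0, 16.0, None),
        ('cn1', 'MEMORY_MB', 32768, 0, 1.5, None),
        ('cn1', 'DISK_GB', 2000, 100, 1.0, None),
    ]

    summaries = {}
    for rp_uuid, rc_name, total, reserved, ratio, used in usage_rows:
        summary = summaries.setdefault(rp_uuid, {'resources': {}})
        summary['resources'][rc_name] = {
            'capacity': int((total - reserved) * ratio),  # e.g. 384 VCPU
            'used': used or 0,                            # NULL -> 0
        }

    print(summaries['cn1']['resources']['VCPU'])
    # {'capacity': 384, 'used': 0}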


@@ -2207,3 +2207,385 @@ class SharedProviderTestCase(ResourceProviderBaseCase):
)
got_ids = [rp.id for rp in got_rps]
self.assertEqual([cn1.id], got_ids)
class AllocationCandidatesTestCase(ResourceProviderBaseCase):
"""Tests a variety of scenarios with both shared and non-shared resource
providers that the AllocationCandidates.get_by_filters() method returns a
set of alternative allocation requests and provider summaries that may be
used by the scheduler to sort/weigh the options it has for claiming
resources against providers.
"""
def _requested_resources(self):
# The resources we will request
resources = {
fields.ResourceClass.VCPU: 1,
fields.ResourceClass.MEMORY_MB: 64,
fields.ResourceClass.DISK_GB: 100,
}
return resources
def _find_summary_for_provider(self, p_sums, rp_uuid):
for summary in p_sums:
if summary.resource_provider.uuid == rp_uuid:
return summary
def _find_summary_for_resource(self, p_sum, rc_name):
for resource in p_sum.resources:
if resource.resource_class == rc_name:
return resource
def _find_requests_for_provider(self, reqs, rp_uuid):
res = []
for ar in reqs:
for rr in ar.resource_requests:
if rr.resource_provider.uuid == rp_uuid:
res.append(rr)
return res
def _find_request_for_resource(self, res_reqs, rc_name):
for rr in res_reqs:
if rr.resource_class == rc_name:
return rr
def test_all_local(self):
"""Create some resource providers that can satisfy the request for
resources with local (non-shared) resources and verify that the
allocation requests returned by AllocationCandidates correspond with
each of these resource providers.
"""
# Create two compute node providers with VCPU, RAM and local disk
cn1_uuid = uuidsentinel.cn1
cn1 = objects.ResourceProvider(
self.context,
name='cn1',
uuid=cn1_uuid,
)
cn1.create()
cn2_uuid = uuidsentinel.cn2
cn2 = objects.ResourceProvider(
self.context,
name='cn2',
uuid=cn2_uuid,
)
cn2.create()
for cn in (cn1, cn2):
vcpu = objects.Inventory(
resource_provider=cn,
resource_class=fields.ResourceClass.VCPU,
total=24,
reserved=0,
min_unit=1,
max_unit=24,
step_size=1,
allocation_ratio=16.0,
)
memory_mb = objects.Inventory(
resource_provider=cn,
resource_class=fields.ResourceClass.MEMORY_MB,
total=32768,
reserved=0,
min_unit=64,
max_unit=32768,
step_size=64,
allocation_ratio=1.5,
)
disk_gb = objects.Inventory(
resource_provider=cn,
resource_class=fields.ResourceClass.DISK_GB,
total=2000,
reserved=100,
min_unit=10,
max_unit=100,
step_size=10,
allocation_ratio=1.0,
)
disk_gb.obj_set_defaults()
inv_list = objects.InventoryList(objects=[
vcpu,
memory_mb,
disk_gb,
])
cn.set_inventory(inv_list)
# Ask for the alternative placement possibilities and verify each
# provider is returned
requested_resources = self._requested_resources()
p_alts = rp_obj.AllocationCandidates.get_by_filters(
self.context,
filters={
'resources': requested_resources,
},
)
# Verify the provider summary information indicates 0 usage and
# capacity calculated from above inventory numbers for both compute
# nodes
p_sums = p_alts.provider_summaries
self.assertEqual(2, len(p_sums))
p_sum_rps = set([ps.resource_provider.uuid for ps in p_sums])
self.assertEqual(set([cn1_uuid, cn2_uuid]), p_sum_rps)
cn1_p_sum = self._find_summary_for_provider(p_sums, cn1_uuid)
self.assertIsNotNone(cn1_p_sum)
self.assertEqual(3, len(cn1_p_sum.resources))
cn1_p_sum_vcpu = self._find_summary_for_resource(cn1_p_sum, 'VCPU')
self.assertIsNotNone(cn1_p_sum_vcpu)
expected_capacity = (24 * 16.0)
self.assertEqual(expected_capacity, cn1_p_sum_vcpu.capacity)
self.assertEqual(0, cn1_p_sum_vcpu.used)
# Let's verify the disk for the second compute node
cn2_p_sum = self._find_summary_for_provider(p_sums, cn2_uuid)
self.assertIsNotNone(cn2_p_sum)
self.assertEqual(3, len(cn2_p_sum.resources))
cn2_p_sum_disk = self._find_summary_for_resource(cn2_p_sum, 'DISK_GB')
self.assertIsNotNone(cn2_p_sum_disk)
expected_capacity = ((2000 - 100) * 1.0)
self.assertEqual(expected_capacity, cn2_p_sum_disk.capacity)
self.assertEqual(0, cn2_p_sum_disk.used)
# Verify the allocation requests that are returned. There should be 2
# allocation requests, one for each compute node, containing 3
# resources in each allocation request, one each for VCPU, RAM, and
# disk. The amounts of the requests should correspond to the requested
# resource amounts in the filter:resources dict passed to
# AllocationCandidates.get_by_filters().
a_reqs = p_alts.allocation_requests
self.assertEqual(2, len(a_reqs))
a_req_rps = set()
for ar in a_reqs:
for rr in ar.resource_requests:
a_req_rps.add(rr.resource_provider.uuid)
self.assertEqual(set([cn1_uuid, cn2_uuid]), a_req_rps)
cn1_reqs = self._find_requests_for_provider(a_reqs, cn1_uuid)
# There should be a req object for each resource we have requested
self.assertEqual(3, len(cn1_reqs))
cn1_req_vcpu = self._find_request_for_resource(cn1_reqs, 'VCPU')
self.assertIsNotNone(cn1_req_vcpu)
self.assertEqual(requested_resources['VCPU'], cn1_req_vcpu.amount)
cn1_req_disk = self._find_request_for_resource(cn1_reqs, 'DISK_GB')
self.assertIsNotNone(cn1_req_disk)
self.assertEqual(requested_resources['DISK_GB'], cn1_req_disk.amount)
def test_local_with_shared_disk(self):
"""Create some resource providers that can satisfy the request for
resources with local VCPU and MEMORY_MB but rely on a shared storage
pool to satisfy DISK_GB and verify that the allocation requests
returned by AllocationCandidates have DISK_GB served up by the shared
storage pool resource provider and VCPU/MEMORY_MB by the compute node
providers
"""
# The aggregate that will be associated with everything...
agg_uuid = uuidsentinel.agg
# Create two compute node providers with VCPU, RAM and NO local disk
cn1_uuid = uuidsentinel.cn1
cn1 = objects.ResourceProvider(
self.context,
name='cn1',
uuid=cn1_uuid,
)
cn1.create()
cn2_uuid = uuidsentinel.cn2
cn2 = objects.ResourceProvider(
self.context,
name='cn2',
uuid=cn2_uuid,
)
cn2.create()
# Populate the two compute node providers with inventory, sans DISK_GB
for cn in (cn1, cn2):
vcpu = objects.Inventory(
resource_provider=cn,
resource_class=fields.ResourceClass.VCPU,
total=24,
reserved=0,
min_unit=1,
max_unit=24,
step_size=1,
allocation_ratio=16.0,
)
memory_mb = objects.Inventory(
resource_provider=cn,
resource_class=fields.ResourceClass.MEMORY_MB,
total=1024,
reserved=0,
min_unit=64,
max_unit=1024,
step_size=1,
allocation_ratio=1.5,
)
inv_list = objects.InventoryList(objects=[vcpu, memory_mb])
cn.set_inventory(inv_list)
# Create the shared storage pool
ss_uuid = uuidsentinel.ss
ss = objects.ResourceProvider(
self.context,
name='shared storage',
uuid=ss_uuid,
)
ss.create()
# Give the shared storage pool some inventory of DISK_GB
disk_gb = objects.Inventory(
resource_provider=ss,
resource_class=fields.ResourceClass.DISK_GB,
total=2000,
reserved=100,
min_unit=10,
max_unit=100,
step_size=1,
allocation_ratio=1.0,
)
inv_list = objects.InventoryList(objects=[disk_gb])
ss.set_inventory(inv_list)
# Mark the shared storage pool as having inventory shared among any
# provider associated via aggregate
t = objects.Trait(
self.context,
name="MISC_SHARES_VIA_AGGREGATE",
)
# TODO(jaypipes): Once MISC_SHARES_VIA_AGGREGATE is a standard
# os-traits trait, we won't need to create() here. Instead, we will
# just do:
# t = objects.Trait.get_by_name(
# self.context,
# "MISC_SHARES_VIA_AGGREGATE",
# )
t.create()
ss.set_traits(objects.TraitList(objects=[t]))
# Now associate the shared storage pool and both compute nodes with the
# same aggregate
cn1.set_aggregates([agg_uuid])
cn2.set_aggregates([agg_uuid])
ss.set_aggregates([agg_uuid])
# Ask for the alternative placement possibilities and verify each
# compute node provider is listed in the allocation requests as well as
# the shared storage pool provider
requested_resources = self._requested_resources()
p_alts = rp_obj.AllocationCandidates.get_by_filters(
self.context,
filters={
'resources': requested_resources,
},
)
# Verify the provider summary information indicates 0 usage and
# capacity calculated from the above inventory numbers for the two
# compute nodes and the shared storage provider
p_sums = p_alts.provider_summaries
self.assertEqual(3, len(p_sums))
p_sum_rps = set([ps.resource_provider.uuid for ps in p_sums])
self.assertEqual(set([cn1_uuid, cn2_uuid, ss_uuid]), p_sum_rps)
cn1_p_sum = self._find_summary_for_provider(p_sums, cn1_uuid)
self.assertIsNotNone(cn1_p_sum)
self.assertEqual(2, len(cn1_p_sum.resources))
cn1_p_sum_vcpu = self._find_summary_for_resource(cn1_p_sum, 'VCPU')
self.assertIsNotNone(cn1_p_sum_vcpu)
expected_capacity = (24 * 16.0)
self.assertEqual(expected_capacity, cn1_p_sum_vcpu.capacity)
self.assertEqual(0, cn1_p_sum_vcpu.used)
# Let's verify memory for the second compute node
cn2_p_sum = self._find_summary_for_provider(p_sums, cn2_uuid)
self.assertIsNotNone(cn2_p_sum)
self.assertEqual(2, len(cn2_p_sum.resources))
cn2_p_sum_ram = self._find_summary_for_resource(cn2_p_sum, 'MEMORY_MB')
self.assertIsNotNone(cn2_p_sum_ram)
expected_capacity = (1024 * 1.5)
self.assertEqual(expected_capacity, cn2_p_sum_ram.capacity)
self.assertEqual(0, cn2_p_sum_ram.used)
# Let's verify the disk for the shared storage pool
ss_p_sum = self._find_summary_for_provider(p_sums, ss_uuid)
self.assertIsNotNone(ss_p_sum)
self.assertEqual(1, len(ss_p_sum.resources))
ss_p_sum_disk = self._find_summary_for_resource(ss_p_sum, 'DISK_GB')
self.assertIsNotNone(ss_p_sum_disk)
expected_capacity = ((2000 - 100) * 1.0)
self.assertEqual(expected_capacity, ss_p_sum_disk.capacity)
self.assertEqual(0, ss_p_sum_disk.used)
# Verify the allocation requests that are returned. There should be 2
# allocation requests, one for each compute node, containing 3
# resources in each allocation request, one each for VCPU, RAM, and
# disk. The amounts of the requests should correspond to the requested
# resource amounts in the filter:resources dict passed to
# AllocationCandidates.get_by_filters(). The providers for VCPU and
# MEMORY_MB should be the compute nodes while the provider for the
# DISK_GB should be the shared storage pool
a_reqs = p_alts.allocation_requests
self.assertEqual(2, len(a_reqs))
a_req_rps = set()
for ar in a_reqs:
for rr in ar.resource_requests:
a_req_rps.add(rr.resource_provider.uuid)
self.assertEqual(set([cn1_uuid, cn2_uuid, ss_uuid]), a_req_rps)
cn1_reqs = self._find_requests_for_provider(a_reqs, cn1_uuid)
# There should be a req object for only VCPU and MEMORY_MB
self.assertEqual(2, len(cn1_reqs))
cn1_req_vcpu = self._find_request_for_resource(cn1_reqs, 'VCPU')
self.assertIsNotNone(cn1_req_vcpu)
self.assertEqual(requested_resources['VCPU'], cn1_req_vcpu.amount)
cn2_reqs = self._find_requests_for_provider(a_reqs, cn2_uuid)
# There should NOT be an allocation resource request that lists a
# compute node provider UUID for DISK_GB, since the shared storage pool
# is the thing that is providing the disk
cn1_req_disk = self._find_request_for_resource(cn1_reqs, 'DISK_GB')
self.assertIsNone(cn1_req_disk)
cn2_req_disk = self._find_request_for_resource(cn2_reqs, 'DISK_GB')
self.assertIsNone(cn2_req_disk)
# Let's check the second compute node for MEMORY_MB
cn2_req_ram = self._find_request_for_resource(cn2_reqs, 'MEMORY_MB')
self.assertIsNotNone(cn2_req_ram)
self.assertEqual(requested_resources['MEMORY_MB'], cn2_req_ram.amount)
# We should find the shared storage pool providing the DISK_GB for each
# of the allocation requests
ss_reqs = self._find_requests_for_provider(a_reqs, ss_uuid)
self.assertEqual(2, len(ss_reqs))
# Shared storage shouldn't be listed as providing anything but disk...
ss_req_ram = self._find_request_for_resource(ss_reqs, 'MEMORY_MB')
self.assertIsNone(ss_req_ram)
ss_req_disk = self._find_request_for_resource(ss_reqs, 'DISK_GB')
self.assertIsNotNone(ss_req_disk)
self.assertEqual(requested_resources['DISK_GB'], ss_req_disk.amount)
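
To summarize what the shared-disk test above asserts, the two allocation
requests it expects could be written out roughly like this (an
illustration only, not actual object reprs; providers are shown by their
uuidsentinel names):

    expected = [
        # One candidate per compute node; DISK_GB always comes from the
        # shared storage pool, never from the compute nodes themselves.
        [('cn1', 'VCPU', 1), ('cn1', 'MEMORY_MB', 64), ('ss', 'DISK_GB', 100)],
        [('cn2', 'VCPU', 1), ('cn2', 'MEMORY_MB', 64), ('ss', 'DISK_GB', 100)],
    ]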