diff --git a/placement/tests/functional/db/test_allocation.py b/placement/tests/functional/db/test_allocation.py
index 7b9c90bf9..912130391 100644
--- a/placement/tests/functional/db/test_allocation.py
+++ b/placement/tests/functional/db/test_allocation.py
@@ -22,6 +22,168 @@ from placement.objects import usage as usage_obj
 from placement.tests.functional.db import test_base as tb
 
 
+class TestAllocation(tb.PlacementDbBaseTestCase):
+
+    def test_create_list_and_delete_allocation(self):
+        rp, _ = self._make_allocation(tb.DISK_INVENTORY, tb.DISK_ALLOCATION)
+
+        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
+
+        self.assertEqual(1, len(allocations))
+
+        self.assertEqual(tb.DISK_ALLOCATION['used'],
+                         allocations[0].used)
+
+        alloc_obj.delete_all(self.ctx, allocations)
+
+        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
+
+        self.assertEqual(0, len(allocations))
+
+    def test_delete_all_with_multiple_consumers(self):
+        """Tests the fix for LP #1781430, where alloc_obj.delete_all(),
+        when issued for a list of allocations returned by
+        alloc_obj.get_all_by_resource_provider() for a resource provider
+        that had multiple consumers allocated against it, left the DB in
+        an inconsistent state.
+        """
+        # Create a single resource provider and allocate resources for two
+        # instances from it. Then grab all the provider's allocations with
+        # alloc_obj.get_all_by_resource_provider() and attempt to delete
+        # them all with alloc_obj.delete_all(). After which, another call
+        # to alloc_obj.get_all_by_resource_provider() should return an
+        # empty list.
+        cn1 = self._create_provider('cn1')
+        tb.add_inventory(cn1, 'VCPU', 8)
+
+        c1_uuid = uuidsentinel.consumer1
+        c2_uuid = uuidsentinel.consumer2
+
+        for c_uuid in (c1_uuid, c2_uuid):
+            self.allocate_from_provider(cn1, 'VCPU', 1, consumer_id=c_uuid)
+
+        allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn1)
+        self.assertEqual(2, len(allocs))
+
+        alloc_obj.delete_all(self.ctx, allocs)
+
+        allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn1)
+        self.assertEqual(0, len(allocs))
+
+    def test_multi_provider_allocation(self):
+        """Tests that an allocation that includes more than one resource
+        provider can be created, listed and deleted properly.
+
+        Bug #1707669 highlighted a situation that arose when attempting to
+        remove part of an allocation for a source host during a resize
+        operation where the existing allocation was not being properly
+        deleted.
+        """
+        cn_source = self._create_provider('cn_source')
+        cn_dest = self._create_provider('cn_dest')
+
+        # Add the same inventory to both source and destination hosts
+        for cn in (cn_source, cn_dest):
+            tb.add_inventory(cn, orc.VCPU, 24,
+                             allocation_ratio=16.0)
+            tb.add_inventory(cn, orc.MEMORY_MB, 1024,
+                             min_unit=64,
+                             max_unit=1024,
+                             step_size=64,
+                             allocation_ratio=1.5)
+
+        # Create a consumer representing the instance
+        inst_consumer = consumer_obj.Consumer(
+            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
+            project=self.project_obj)
+        inst_consumer.create()
+
+        # Now create an allocation that represents a move operation where the
+        # scheduler has selected cn_dest as the target host and created a
+        # "doubled-up" allocation for the duration of the move operation
+        alloc_list = [
+            alloc_obj.Allocation(
+                consumer=inst_consumer,
+                resource_provider=cn_source,
+                resource_class=orc.VCPU,
+                used=1),
+            alloc_obj.Allocation(
+                consumer=inst_consumer,
+                resource_provider=cn_source,
+                resource_class=orc.MEMORY_MB,
+                used=256),
+            alloc_obj.Allocation(
+                consumer=inst_consumer,
+                resource_provider=cn_dest,
+                resource_class=orc.VCPU,
+                used=1),
+            alloc_obj.Allocation(
+                consumer=inst_consumer,
+                resource_provider=cn_dest,
+                resource_class=orc.MEMORY_MB,
+                used=256),
+        ]
+        alloc_obj.replace_all(self.ctx, alloc_list)
+
+        src_allocs = alloc_obj.get_all_by_resource_provider(
+            self.ctx, cn_source)
+
+        self.assertEqual(2, len(src_allocs))
+
+        dest_allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn_dest)
+
+        self.assertEqual(2, len(dest_allocs))
+
+        consumer_allocs = alloc_obj.get_all_by_consumer_id(
+            self.ctx, uuidsentinel.instance)
+
+        self.assertEqual(4, len(consumer_allocs))
+
+        # Validate that when we create an allocation for a consumer, we
+        # delete any existing allocation and replace it with the new one.
+        # Here, we're emulating the step that occurs on confirm_resize() where
+        # the source host pulls the existing allocation for the instance and
+        # removes any resources that refer to itself and saves the allocation
+        # back to placement
+        new_alloc_list = [
+            alloc_obj.Allocation(
+                consumer=inst_consumer,
+                resource_provider=cn_dest,
+                resource_class=orc.VCPU,
+                used=1),
+            alloc_obj.Allocation(
+                consumer=inst_consumer,
+                resource_provider=cn_dest,
+                resource_class=orc.MEMORY_MB,
+                used=256),
+        ]
+        alloc_obj.replace_all(self.ctx, new_alloc_list)
+
+        src_allocs = alloc_obj.get_all_by_resource_provider(
+            self.ctx, cn_source)
+
+        self.assertEqual(0, len(src_allocs))
+
+        dest_allocs = alloc_obj.get_all_by_resource_provider(
+            self.ctx, cn_dest)
+
+        self.assertEqual(2, len(dest_allocs))
+
+        consumer_allocs = alloc_obj.get_all_by_consumer_id(
+            self.ctx, uuidsentinel.instance)
+
+        self.assertEqual(2, len(consumer_allocs))
+
+    def test_get_all_by_resource_provider(self):
+        rp, allocation = self._make_allocation(tb.DISK_INVENTORY,
+                                               tb.DISK_ALLOCATION)
+        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
+        self.assertEqual(1, len(allocations))
+        self.assertEqual(rp.id, allocations[0].resource_provider.id)
+        self.assertEqual(allocation.resource_provider.id,
+                         allocations[0].resource_provider.id)
+
+
 class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
 
     def test_allocation_checking(self):
diff --git a/placement/tests/functional/db/test_resource_provider.py b/placement/tests/functional/db/test_resource_provider.py
index 18f0dc5a2..1c2eb4265 100644
--- a/placement/tests/functional/db/test_resource_provider.py
+++ b/placement/tests/functional/db/test_resource_provider.py
@@ -20,7 +20,6 @@ import sqlalchemy as sa
 from placement.db.sqlalchemy import models
 from placement import exception
 from placement.objects import allocation as alloc_obj
-from placement.objects import consumer as consumer_obj
 from placement.objects import inventory as inv_obj
 from placement.objects import resource_provider as rp_obj
 from placement.objects import trait as trait_obj
@@ -1167,168 +1166,6 @@ class TestResourceProviderAggregates(tb.PlacementDbBaseTestCase):
             self.ctx, [s1.id, s2.id, s3.id, s4.id, s5.id], get_id=True))
 
 
-class TestAllocation(tb.PlacementDbBaseTestCase):
-
-    def test_create_list_and_delete_allocation(self):
-        rp, _ = self._make_allocation(tb.DISK_INVENTORY, tb.DISK_ALLOCATION)
-
-        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
-
-        self.assertEqual(1, len(allocations))
-
-        self.assertEqual(tb.DISK_ALLOCATION['used'],
-                         allocations[0].used)
-
-        alloc_obj.delete_all(self.ctx, allocations)
-
-        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
-
-        self.assertEqual(0, len(allocations))
-
-    def test_delete_all_with_multiple_consumers(self):
-        """Tests fix for LP #1781430 where alloc_obj.delete_all() when
-        issued for a list of allocations returned by
-        alloc_obj.get_by_resource_provider() where the resource provider
-        had multiple consumers allocated against it, left the DB in an
-        inconsistent state.
-        """
-        # Create a single resource provider and allocate resources for two
-        # instances from it. Then grab all the provider's allocations with
-        # alloc_obj.get_all_by_resource_provider() and attempt to delete
-        # them all with alloc_obj.delete_all(). After which, another call
-        # to alloc_obj.get_all_by_resource_provider() should return an
-        # empty list.
-        cn1 = self._create_provider('cn1')
-        tb.add_inventory(cn1, 'VCPU', 8)
-
-        c1_uuid = uuidsentinel.consumer1
-        c2_uuid = uuidsentinel.consumer2
-
-        for c_uuid in (c1_uuid, c2_uuid):
-            self.allocate_from_provider(cn1, 'VCPU', 1, consumer_id=c_uuid)
-
-        allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn1)
-        self.assertEqual(2, len(allocs))
-
-        alloc_obj.delete_all(self.ctx, allocs)
-
-        allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn1)
-        self.assertEqual(0, len(allocs))
-
-    def test_multi_provider_allocation(self):
-        """Tests that an allocation that includes more than one resource
-        provider can be created, listed and deleted properly.
-
-        Bug #1707669 highlighted a situation that arose when attempting to
-        remove part of an allocation for a source host during a resize
-        operation where the exiting allocation was not being properly
-        deleted.
-        """
-        cn_source = self._create_provider('cn_source')
-        cn_dest = self._create_provider('cn_dest')
-
-        # Add same inventory to both source and destination host
-        for cn in (cn_source, cn_dest):
-            tb.add_inventory(cn, orc.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, orc.MEMORY_MB, 1024,
-                             min_unit=64,
-                             max_unit=1024,
-                             step_size=64,
-                             allocation_ratio=1.5)
-
-        # Create a consumer representing the instance
-        inst_consumer = consumer_obj.Consumer(
-            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
-            project=self.project_obj)
-        inst_consumer.create()
-
-        # Now create an allocation that represents a move operation where the
-        # scheduler has selected cn_dest as the target host and created a
-        # "doubled-up" allocation for the duration of the move operation
-        alloc_list = [
-            alloc_obj.Allocation(
-                consumer=inst_consumer,
-                resource_provider=cn_source,
-                resource_class=orc.VCPU,
-                used=1),
-            alloc_obj.Allocation(
-                consumer=inst_consumer,
-                resource_provider=cn_source,
-                resource_class=orc.MEMORY_MB,
-                used=256),
-            alloc_obj.Allocation(
-                consumer=inst_consumer,
-                resource_provider=cn_dest,
-                resource_class=orc.VCPU,
-                used=1),
-            alloc_obj.Allocation(
-                consumer=inst_consumer,
-                resource_provider=cn_dest,
-                resource_class=orc.MEMORY_MB,
-                used=256),
-        ]
-        alloc_obj.replace_all(self.ctx, alloc_list)
-
-        src_allocs = alloc_obj.get_all_by_resource_provider(
-            self.ctx, cn_source)
-
-        self.assertEqual(2, len(src_allocs))
-
-        dest_allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn_dest)
-
-        self.assertEqual(2, len(dest_allocs))
-
-        consumer_allocs = alloc_obj.get_all_by_consumer_id(
-            self.ctx, uuidsentinel.instance)
-
-        self.assertEqual(4, len(consumer_allocs))
-
-        # Validate that when we create an allocation for a consumer that we
-        # delete any existing allocation and replace it with what the new.
-        # Here, we're emulating the step that occurs on confirm_resize() where
-        # the source host pulls the existing allocation for the instance and
-        # removes any resources that refer to itself and saves the allocation
-        # back to placement
-        new_alloc_list = [
-            alloc_obj.Allocation(
-                consumer=inst_consumer,
-                resource_provider=cn_dest,
-                resource_class=orc.VCPU,
-                used=1),
-            alloc_obj.Allocation(
-                consumer=inst_consumer,
-                resource_provider=cn_dest,
-                resource_class=orc.MEMORY_MB,
-                used=256),
-        ]
-        alloc_obj.replace_all(self.ctx, new_alloc_list)
-
-        src_allocs = alloc_obj.get_all_by_resource_provider(
-            self.ctx, cn_source)
-
-        self.assertEqual(0, len(src_allocs))
-
-        dest_allocs = alloc_obj.get_all_by_resource_provider(
-            self.ctx, cn_dest)
-
-        self.assertEqual(2, len(dest_allocs))
-
-        consumer_allocs = alloc_obj.get_all_by_consumer_id(
-            self.ctx, uuidsentinel.instance)
-
-        self.assertEqual(2, len(consumer_allocs))
-
-    def test_get_all_by_resource_provider(self):
-        rp, allocation = self._make_allocation(tb.DISK_INVENTORY,
-                                               tb.DISK_ALLOCATION)
-        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
-        self.assertEqual(1, len(allocations))
-        self.assertEqual(rp.id, allocations[0].resource_provider.id)
-        self.assertEqual(allocation.resource_provider.id,
-                         allocations[0].resource_provider.id)
-
-
 class SharedProviderTestCase(tb.PlacementDbBaseTestCase):
     """Tests that the queries used to determine placement in deployments with
     shared resource providers such as a shared disk pool result in accurate