Remove provider allocs in confirm/revert resize

Now that the scheduler creates a doubled-up allocation for the duration
of a move operation (with part of the allocation referring to the
source and part referring to the destination host), we need to remove
the source provider when confirming the resize and remove the
destination provider from the allocation when reverting a resize. This
patch adds this logic in the RT's drop_move_claim() method.

Change-Id: I6f8afe6680f83125da9381c812016b3623503825
Co-Authored-By: Dan Smith <dms@danplanet.com>
Closes-Bug: #1707071
Jay Pipes 2017-08-02 17:48:38 -04:00
parent 09e169bd7a
commit 5390210a4f
6 changed files with 416 additions and 55 deletions

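For context, the "doubled-up" allocation that the scheduler writes during a move holds resources against both the source and destination providers under a single consumer. As an illustration only (hypothetical provider keys and flavor-sized values, mirroring the test fixtures below), the shape looks roughly like:

    # Hypothetical doubled-up allocation for one instance mid-resize.
    # Confirming the resize drops the source entry; reverting the resize
    # drops the destination entry.
    doubled_up = {
        'allocations': {
            'SOURCE_RP_UUID': {
                'resources': {'VCPU': 1, 'MEMORY_MB': 1024},
            },
            'DEST_RP_UUID': {
                'resources': {'VCPU': 2, 'MEMORY_MB': 2048},
            },
        },
    }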

@@ -41,6 +41,7 @@ from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova.virt import hardware
@@ -465,6 +466,36 @@ class ResourceTracker(object):
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
# NOTE(jaypipes): This sucks, but because confirm_resize() only runs on
# the source host and revert_resize() runs on the destination host, we
# need to do this here. Basically, what we're doing here is grabbing the
# existing allocations for this instance from the placement API, dropping
# the resources in the doubled-up allocation set that refer to this
# host's UUID and calling PUT /allocations back to the placement API. The
# allocation that gets PUT'd back to placement will only include the
# destination host and any shared providers in the case of a
# confirm_resize operation, and the source host and shared providers for
# a revert_resize operation.
my_resources = scheduler_utils.resources_from_flavor(instance,
instance_type or instance.flavor)
cn_uuid = self.compute_nodes[nodename].uuid
operation = 'Confirming'
source_or_dest = 'source'
if prefix == 'new_':
operation = 'Reverting'
source_or_dest = 'destination'
LOG.debug("%s resize on %s host. Removing resources claimed on "
"provider %s from allocation",
operation, source_or_dest, cn_uuid, instance=instance)
res = self.reportclient.remove_provider_from_instance_allocation(
instance.uuid, cn_uuid, instance.user_id,
instance.project_id, my_resources)
if not res:
LOG.error("Failed to save manipulated allocation when "
"%s resize on %s host %s.",
operation.lower(), source_or_dest, cn_uuid,
instance=instance)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an

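The resource dict that drop_move_claim() passes to remove_provider_from_instance_allocation() above comes from scheduler_utils.resources_from_flavor(). A simplified sketch of what that helper derives from a flavor (the real version also handles swap and other details):

    # Simplified sketch, not nova's actual implementation: the per-flavor
    # resource amounts that drop_move_claim() asks placement to subtract
    # from the doubled-up allocation.
    def resources_from_flavor_sketch(flavor):
        return {
            'VCPU': flavor['vcpus'],
            'MEMORY_MB': flavor['memory_mb'],
            'DISK_GB': flavor['root_gb'] + flavor['ephemeral_gb'],
        }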

@@ -1000,6 +1000,104 @@ class SchedulerReportClient(object):
'text': r.text})
return r.status_code == 204
@safe_connect
def remove_provider_from_instance_allocation(self, consumer_uuid, rp_uuid,
user_id, project_id,
resources):
"""Grabs an allocation for a particular consumer UUID, strips parts of
the allocation that refer to a supplied resource provider UUID, and
then PUTs the resulting allocation back to the placement API for the
consumer.
We call this method on two occasions: on the source host during a
confirm_resize() operation and on the destination host during a
revert_resize() operation. This is important to reconcile the
"doubled-up" allocation that the scheduler constructs when claiming
resources against the destination host during a move operation.
If the move was between hosts, the entire allocation for rp_uuid will
be dropped. If the move is a resize on the same host, then we will
subtract resources from the single allocation to ensure we do not
exceed the reserved or max_unit amounts for the resource on the host.
:param consumer_uuid: The instance/consumer UUID
:param rp_uuid: The UUID of the provider whose resources we wish to
remove from the consumer's allocation
:param user_id: The instance's user
:param project_id: The instance's project
:param resources: The resources to be dropped from the allocation
"""
url = '/allocations/%s' % consumer_uuid
# Grab the "doubled-up" allocation that we will manipulate
r = self.get(url)
if r.status_code != 200:
LOG.warning("Failed to retrieve allocations for %s. Got HTTP %s",
consumer_uuid, r.status_code)
return False
current_allocs = r.json()['allocations']
if not current_allocs:
LOG.error("Expected to find current allocations for %s, but "
"found none.", consumer_uuid)
return False
# If the host isn't in the current allocation for the instance, don't
# do anything
if rp_uuid not in current_allocs:
LOG.warning("Expected to find allocations referencing resource "
"provider %s for %s, but found none.",
rp_uuid, consumer_uuid)
return True
compute_providers = [uuid for uuid, alloc in current_allocs.items()
if 'VCPU' in alloc['resources']]
LOG.debug('Current allocations for instance: %s', current_allocs,
instance_uuid=consumer_uuid)
LOG.debug('Instance %s has resources on %i compute nodes',
consumer_uuid, len(compute_providers))
new_allocs = [
{
'resource_provider': {
'uuid': alloc_rp_uuid,
},
'resources': alloc['resources'],
}
for alloc_rp_uuid, alloc in current_allocs.items()
if alloc_rp_uuid != rp_uuid
]
if len(compute_providers) == 1:
# NOTE(danms): We are in a resize-to-same-host scenario. Since we
# are the only provider, we need to merge the doubled-up allocation
# back in with our part subtracted.
peer_alloc = {
'resource_provider': {
'uuid': rp_uuid,
},
'resources': current_allocs[rp_uuid]['resources']
}
LOG.debug('Original resources from same-host '
'allocation: %s', peer_alloc['resources'])
scheduler_utils.merge_resources(peer_alloc['resources'],
resources, -1)
LOG.debug('Subtracting old resources from same-host '
'allocation: %s', peer_alloc['resources'])
new_allocs.append(peer_alloc)
payload = {'allocations': new_allocs}
payload['project_id'] = project_id
payload['user_id'] = user_id
LOG.debug("Sending updated allocation %s for instance %s after "
"removing resources for %s.",
new_allocs, consumer_uuid, rp_uuid)
r = self.put(url, payload, version='1.10')
if r.status_code != 204:
LOG.warning("Failed to save allocation for %s. Got HTTP %s: %s",
consumer_uuid, r.status_code, r.text)
return r.status_code == 204
@safe_connect
def put_allocations(self, rp_uuid, consumer_uuid, alloc_data, project_id,
user_id):

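The same-host branch above subtracts the instance's resources from the single doubled-up allocation via scheduler_utils.merge_resources() with a sign of -1. A self-contained sketch of that subtraction, re-implementing the helper under its assumed signature merge_resources(original, new, sign=1):

    # Assumed re-implementation of merge_resources() for illustration; a
    # negative sign turns the merge into a subtraction.
    def merge_resources(original, new, sign=1):
        for rc, amount in new.items():
            original[rc] = original.get(rc, 0) + sign * amount

    # Same-host resize: one provider holds old + new flavor resources.
    doubled = {'VCPU': 3, 'MEMORY_MB': 3072}  # old (1, 1024) + new (2, 2048)
    merge_resources(doubled, {'VCPU': 1, 'MEMORY_MB': 1024}, sign=-1)
    assert doubled == {'VCPU': 2, 'MEMORY_MB': 2048}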

@@ -1173,22 +1173,12 @@ class ServerMovingTests(test.TestCase, integrated_helpers.InstanceHelperMixin):
self._test_resize_revert(dest_hostname='host1')
def test_resize_revert_reverse(self):
# NOTE(danms): This will run the test the other direction,
# but with the periodics running in the same order. When
# bug 1707071 is fixed, we should be able to pass with things
# running both ways. Until then, skip.
self.skipTest('Bug 1707071')
self._test_resize_revert(dest_hostname='host2')
def test_resize_confirm(self):
self._test_resize_confirm(dest_hostname='host1')
def test_resize_confirm_reverse(self):
# NOTE(danms): This will run the test the other direction,
# but with the periodics running in the same order. When
# bug 1707071 is fixed, we should be able to pass with things
# running both ways. Until then, skip.
self.skipTest('Bug 1707071')
self._test_resize_confirm(dest_hostname='host2')
def assertFlavorMatchesAllocation(self, flavor, allocation):
@@ -1367,21 +1357,15 @@ class ServerMovingTests(test.TestCase, integrated_helpers.InstanceHelperMixin):
allocations = self._get_allocations_by_server_uuid(server['id'])
self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
# NOTE(jaypipes): This should be uncommented when bug 1707071 is fixed.
# Currently, we're not cleaning up the destination allocation on
# confirm and the source allocation on resize
# dest_usages = self._get_provider_usages(dest_rp_uuid)
# self.assertEqual({'VCPU': 0,
# 'MEMORY_MB': 0,
# 'DISK_GB': 0}, dest_usages,
# 'Target host %s still has usage after the resize has '
# 'been reverted' % dest_hostname)
dest_usages = self._get_provider_usages(dest_rp_uuid)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, dest_usages,
'Target host %s still has usage after the resize '
'has been reverted' % dest_hostname)
# NOTE(jaypipes): This should be uncommented when bug 1707071 is fixed.
# Currently, we're not cleaning up the destination allocation on
# confirm and the source allocation on resize
# Check that the server only allocates resource from the original host
# self.assertEqual(1, len(allocations))
self.assertEqual(1, len(allocations))
source_allocation = allocations[source_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
@@ -1406,35 +1390,42 @@ class ServerMovingTests(test.TestCase, integrated_helpers.InstanceHelperMixin):
server['id'], post, check_response_status=[204])
self._wait_for_state_change(self.api, server, 'ACTIVE')
# Fetch allocations post-confirm
# After confirming, we should have an allocation only on the
# destination host
allocations = self._get_allocations_by_server_uuid(server['id'])
self.assertEqual(2, len(allocations))
self._run_periodics()
self.assertEqual(1, len(allocations))
source_usages = self._get_provider_usages(source_rp_uuid)
dest_usages = self._get_provider_usages(dest_rp_uuid)
# NOTE(jaypipes): This should be uncommented when bug 1707071 is fixed.
# Currently, we're not cleaning up the destination allocation on
# confirm and the source allocation on resize
# source_usages = self._get_provider_usages(source_rp_uuid)
# self.assertEqual({'VCPU': 0,
# 'MEMORY_MB': 0,
# 'DISK_GB': 0}, source_usages,
# 'The source host %s still has usages after the '
# 'resize has been confirmed' % source_hostname)
# and the target host allocation should be according to the new flavor
self.assertFlavorMatchesAllocation(self.flavor2, dest_usages)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, source_usages,
'The source host %s still has usages after the '
'resize has been confirmed' % source_hostname)
self._run_periodics()
# Check we're still accurate after running the periodics
dest_usages = self._get_provider_usages(dest_rp_uuid)
source_usages = self._get_provider_usages(source_rp_uuid)
# and the target host allocation should be according to the new flavor
self.assertFlavorMatchesAllocation(self.flavor2, dest_usages)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, source_usages,
'The source host %s still has usages after the '
'resize has been confirmed' % source_hostname)
allocations = self._get_allocations_by_server_uuid(server['id'])
# NOTE(jaypipes): This should be uncommented when bug 1707071 is fixed.
# Currently, we're not cleaning up the destination allocation on
# confirm and the source allocation on resize
# and the server allocates only from the target host
# self.assertEqual(1, len(allocations))
self.assertEqual(1, len(allocations))
dest_allocation = allocations[dest_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(self.flavor2, dest_allocation)
@@ -1487,10 +1478,6 @@ class ServerMovingTests(test.TestCase, integrated_helpers.InstanceHelperMixin):
self.assertEqual(1, len(allocations))
allocation = allocations[rp_uuid]['resources']
# NOTE(gibi): This is bug 1707071 where the compute "healing" periodic
# tramples on the doubled allocations created in the scheduler.
self.assertFlavorMatchesAllocation(new_flavor, allocation)
# NOTE(gibi): After fixing bug 1707252 the following is expected
# self.assertEqual(old_flavor['vcpus'] + new_flavor['vcpus'],
# allocation['VCPU'])

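The commented-out assertion above points at bug 1707252: once fixed, a same-host resize should leave the single provider's allocation holding the sum of the old and new flavors while the migration is in flight. In hypothetical numbers:

    # Hypothetical expectation once bug 1707252 is fixed: the doubled-up
    # same-host allocation sums both flavors until confirm/revert.
    old_flavor = {'vcpus': 1}
    new_flavor = {'vcpus': 2}
    assert old_flavor['vcpus'] + new_flavor['vcpus'] == 3  # expected VCPU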

@@ -5599,8 +5599,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
fake_rt.tracked_migrations[self.instance['uuid']] = (
self.migration, None)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'drop_move_claim')
@mock.patch('nova.compute.rpcapi.ComputeAPI.finish_revert_resize')
@mock.patch.object(fake_rt, '_get_instance_type', return_value=None)
@mock.patch.object(self.instance, 'revert_migration_context')
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@mock.patch.object(self.compute, '_is_instance_storage_shared')
@@ -5627,8 +5628,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
mock_is_instance_storage_shared,
mock_get_instance_nw_info,
mock_revert_migration_context,
mock_get_itype,
mock_finish_revert):
mock_finish_revert,
mock_drop_move_claim):
self.instance.migration_context = objects.MigrationContext()
self.migration.source_compute = self.instance['host']
@@ -5638,6 +5639,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
migration=self.migration,
instance=self.instance,
reservations=None)
mock_drop_move_claim.assert_called_once_with(self.context,
self.instance, self.instance.node)
self.assertIsNotNone(self.instance.migration_context)
@mock.patch.object(self.compute, "_notify_about_instance_usage")


@@ -187,6 +187,7 @@ _INSTANCE_FIXTURES = [
task_state=None,
os_type='fake-os', # Used by the stats collector.
project_id='fake-project', # Used by the stats collector.
user_id=uuids.user_id,
flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
@@ -209,6 +210,7 @@ _INSTANCE_FIXTURES = [
task_state=None,
os_type='fake-os',
project_id='fake-project-2',
user_id=uuids.user_id,
flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
@@ -1684,6 +1686,8 @@ class TestInstanceClaim(BaseTestCase):
class TestResize(BaseTestCase):
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
@@ -1693,7 +1697,8 @@ class TestResize(BaseTestCase):
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
def test_resize_claim_same_host(self, save_mock, get_mock, migr_mock,
get_cn_mock, pci_mock, instance_pci_mock):
get_cn_mock, pci_mock, instance_pci_mock,
is_bfv_mock):
# Resize an existing instance from its current flavor (instance type
# 1) to a new flavor (instance type 2) and verify that the compute
# node's resources are appropriately updated to account for the new
@@ -1781,6 +1786,8 @@ class TestResize(BaseTestCase):
self.assertEqual(128, cn.memory_mb_used)
self.assertEqual(0, len(self.rt.tracked_migrations))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
@@ -1802,6 +1809,7 @@ class TestResize(BaseTestCase):
pci_get_by_compute_node_mock,
pci_get_by_instance_mock,
pci_get_by_instance_uuid_mock,
is_bfv_mock,
revert=False):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
@@ -1995,7 +2003,8 @@ class TestResize(BaseTestCase):
self.assertEqual(request, pci_req_mock.return_value.requests[0])
alloc_mock.assert_called_once_with(instance)
def test_drop_move_claim_on_revert(self):
@mock.patch('nova.scheduler.utils.resources_from_flavor')
def test_drop_move_claim_on_revert(self, mock_resources):
self._setup_rt()
cn = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = cn
@@ -2010,7 +2019,7 @@ class TestResize(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.task_state = task_states.RESIZE_MIGRATING
instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance.migration_context = objects.MigrationContext()
instance.migration_context.new_pci_devices = objects.PciDeviceList(
objects=pci_devs)
@@ -2438,6 +2447,33 @@ class TestUpdateUsageFromInstance(BaseTestCase):
# Scheduled instances should not have their allocations removed
rc.delete_allocation_for_instance.assert_not_called()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocations_for_resource_provider')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete_allocation_for_instance')
@mock.patch('nova.objects.Instance.get_by_uuid')
def test_remove_deleted_instances_allocations_move_ops(self, mock_get,
mock_delete_allocs, mock_get_allocs):
"""Test that we do NOT delete allocations for instances that are
currently undergoing move operations.
"""
self.rt.tracked_instances = {}
# Create 1 instance
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.uuid = uuids.moving_instance
instance.host = uuids.destination
# Instances in resizing/move will be ACTIVE or STOPPED
instance.vm_state = vm_states.ACTIVE
# Mock out the allocation call
allocs = {uuids.inst0: mock.sentinel.moving_instance}
mock_get_allocs.return_value = allocs
mock_get.return_value = instance
cn = self.rt.compute_nodes[_NODENAME]
ctx = mock.sentinel.ctx
self.rt._remove_deleted_instances_allocations(ctx, cn)
mock_delete_allocs.assert_not_called()
@mock.patch('nova.objects.Instance.get_by_uuid')
def test_remove_deleted_instances_allocations_no_instance(self,
mock_inst_get):
@@ -2488,7 +2524,9 @@ class TestUpdateUsageFromInstance(BaseTestCase):
@mock.patch.object(self.rt,
'_remove_deleted_instances_allocations')
@mock.patch.object(self.rt, '_update_usage_from_instance')
def test(uufi, rdia):
@mock.patch('nova.objects.Service.get_minimum_version',
return_value=22)
def test(version_mock, uufi, rdia):
self.rt._update_usage_from_instances('ctxt', [], 'foo')
test()


@@ -239,7 +239,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
def test_claim_resources_success(self):
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': [], # build instance, not move
'allocations': {}, # build instance, not move
}
self.ks_sess_mock.get.return_value = get_resp_mock
resp_mock = mock.Mock(status_code=204)
@@ -662,7 +662,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
def test_claim_resources_fail_retry_success(self):
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': [], # build instance, not move
'allocations': {}, # build instance, not move
}
self.ks_sess_mock.get.return_value = get_resp_mock
resp_mocks = [
@@ -718,7 +718,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
def test_claim_resources_failure(self, mock_log):
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': [], # build instance, not move
'allocations': {}, # build instance, not move
}
self.ks_sess_mock.get.return_value = get_resp_mock
resp_mock = mock.Mock(status_code=409)
@@ -755,6 +755,210 @@ class TestPutAllocations(SchedulerReportClientTestCase):
self.assertFalse(res)
self.assertTrue(mock_log.called)
def test_remove_provider_from_inst_alloc_no_shared(self):
"""Tests that the method which manipulates an existing doubled-up
allocation for a move operation to remove the source host results in
sending placement the proper payload to PUT
/allocations/{consumer_uuid} call.
"""
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': {
uuids.source: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
uuids.destination: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
},
}
self.ks_sess_mock.get.return_value = get_resp_mock
resp_mock = mock.Mock(status_code=204)
self.ks_sess_mock.put.return_value = resp_mock
consumer_uuid = uuids.consumer_uuid
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
expected_url = "/allocations/%s" % consumer_uuid
# New allocations should only include the destination...
expected_payload = {
'allocations': [
{
'resource_provider': {
'uuid': uuids.destination,
},
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
],
}
expected_payload['project_id'] = project_id
expected_payload['user_id'] = user_id
# We have to pull the json body from the mock call_args and validate
# it separately; otherwise hash seed issues get in the way.
actual_payload = self.ks_sess_mock.put.call_args[1]['json']
sort_by_uuid = lambda x: x['resource_provider']['uuid']
expected_allocations = sorted(expected_payload['allocations'],
key=sort_by_uuid)
actual_allocations = sorted(actual_payload['allocations'],
key=sort_by_uuid)
self.assertEqual(expected_allocations, actual_allocations)
self.ks_sess_mock.put.assert_called_once_with(
expected_url, endpoint_filter=mock.ANY,
headers={'OpenStack-API-Version': 'placement 1.10'},
json=mock.ANY, raise_exc=False)
self.assertTrue(res)
def test_remove_provider_from_inst_alloc_with_shared(self):
"""Tests that the method which manipulates an existing doubled-up
allocation with DISK_GB being consumed from a shared storage provider
for a move operation to remove the source host results in sending
placement the proper payload to PUT /allocations/{consumer_uuid}
call.
"""
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': {
uuids.source: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
uuids.shared_storage: {
'resource_provider_generation': 42,
'resources': {
'DISK_GB': 100,
},
},
uuids.destination: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
},
}
self.ks_sess_mock.get.return_value = get_resp_mock
resp_mock = mock.Mock(status_code=204)
self.ks_sess_mock.put.return_value = resp_mock
consumer_uuid = uuids.consumer_uuid
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
expected_url = "/allocations/%s" % consumer_uuid
# New allocations should only include the destination and the shared
# storage provider...
expected_payload = {
'allocations': [
{
'resource_provider': {
'uuid': uuids.shared_storage,
},
'resources': {
'DISK_GB': 100,
},
},
{
'resource_provider': {
'uuid': uuids.destination,
},
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
],
}
expected_payload['project_id'] = project_id
expected_payload['user_id'] = user_id
# We have to pull the json body from the mock call_args and validate
# it separately; otherwise hash seed issues get in the way.
actual_payload = self.ks_sess_mock.put.call_args[1]['json']
sort_by_uuid = lambda x: x['resource_provider']['uuid']
expected_allocations = sorted(expected_payload['allocations'],
key=sort_by_uuid)
actual_allocations = sorted(actual_payload['allocations'],
key=sort_by_uuid)
self.assertEqual(expected_allocations, actual_allocations)
self.ks_sess_mock.put.assert_called_once_with(
expected_url, endpoint_filter=mock.ANY,
headers={'OpenStack-API-Version': 'placement 1.10'},
json=mock.ANY, raise_exc=False)
self.assertTrue(res)
def test_remove_provider_from_inst_alloc_no_source(self):
"""Tests that if remove_provider_from_instance_allocation() fails to
find any allocations for the source host, it just returns True and
does not attempt to rewrite the allocation for the consumer.
"""
get_resp_mock = mock.Mock(status_code=200)
# Act like the allocations already did not include the source host for
# some reason
get_resp_mock.json.return_value = {
'allocations': {
uuids.shared_storage: {
'resource_provider_generation': 42,
'resources': {
'DISK_GB': 100,
},
},
uuids.destination: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
},
}
self.ks_sess_mock.get.return_value = get_resp_mock
consumer_uuid = uuids.consumer_uuid
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
self.ks_sess_mock.get.assert_called()
self.ks_sess_mock.put.assert_not_called()
self.assertTrue(res)
def test_remove_provider_from_inst_alloc_fail_get_allocs(self):
"""Tests that we gracefully exit with False from
remove_provider_from_instance_allocation() if the call to get the
existing allocations fails for some reason
"""
get_resp_mock = mock.Mock(status_code=500)
self.ks_sess_mock.get.return_value = get_resp_mock
consumer_uuid = uuids.consumer_uuid
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
self.ks_sess_mock.get.assert_called()
self.ks_sess_mock.put.assert_not_called()
self.assertFalse(res)
class TestProviderOperations(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'