Merge "consumer gen: support claim_resources"
commit 1c8b14d24c
@@ -921,8 +921,23 @@ class ComputeTaskManager(base.Base):
         if host != instance.host:
             # If a destination host is forced for evacuate, create
             # allocations against it in Placement.
-            self._allocate_for_evacuate_dest_host(
-                context, instance, host, request_spec)
+            try:
+                self._allocate_for_evacuate_dest_host(
+                    context, instance, host, request_spec)
+            except exception.AllocationUpdateFailed as ex:
+                with excutils.save_and_reraise_exception():
+                    if migration:
+                        migration.status = 'error'
+                        migration.save()
+                    self._set_vm_state_and_notify(
+                        context,
+                        instance.uuid,
+                        'rebuild_server',
+                        {'vm_state': vm_states.ERROR,
+                         'task_state': None}, ex, request_spec)
+                    LOG.warning('Rebuild failed: %s',
+                                six.text_type(ex), instance=instance)
 
         else:
             # At this point, the user is either:
             #
@@ -973,7 +988,8 @@ class ComputeTaskManager(base.Base):
                 host, node, limits = (selection.service_host,
                                       selection.nodename, selection.limits)
             except (exception.NoValidHost,
-                    exception.UnsupportedPolicyException) as ex:
+                    exception.UnsupportedPolicyException,
+                    exception.AllocationUpdateFailed) as ex:
                 if migration:
                     migration.status = 'error'
                     migration.save()
@@ -106,10 +106,17 @@ class LiveMigrationTask(base.TaskBase):
         # scheduler filters, which honors the 'force' flag in the API.
         # This raises NoValidHost which will be handled in
         # ComputeTaskManager.
+        # NOTE(gibi): consumer_generation = None as we expect that the
+        # source host allocation is held by the migration therefore the
+        # instance is a new, empty consumer for the dest allocation. If
+        # this assumption fails then placement will return a consumer
+        # generation conflict and this call raises an AllocationUpdateFailed
+        # exception. We let that propagate here to abort the migration.
         scheduler_utils.claim_resources_on_destination(
             self.context, self.scheduler_client.reportclient,
             self.instance, source_node, dest_node,
-            source_node_allocations=self._held_allocations)
+            source_node_allocations=self._held_allocations,
+            consumer_generation=None)
 
         # dest_node is a ComputeNode object, so we need to get the actual
         # node name off it to set in the Migration object below.
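The expectation encoded by consumer_generation=None above can be illustrated with a minimal sketch; the PlacementStub class is illustrative only and not part of Nova. Claiming with a None generation asserts that the consumer does not exist yet, so an already-existing consumer generation is reported back as a conflict:

    class AllocationUpdateFailed(Exception):
        pass

    class PlacementStub(object):
        """Toy stand-in for placement's consumer generation bookkeeping."""
        def __init__(self):
            self.generations = {}  # consumer_uuid -> generation

        def put_allocations(self, consumer_uuid, expected_generation):
            current = self.generations.get(consumer_uuid)
            if current != expected_generation:
                # the real service answers HTTP 409 with the
                # placement.concurrent_update error code here
                raise AllocationUpdateFailed(consumer_uuid)
            self.generations[consumer_uuid] = (current or 0) + 1

    placement = PlacementStub()
    placement.put_allocations('inst-1', None)      # new consumer: succeeds
    try:
        placement.put_allocations('inst-1', None)  # not new anymore: conflict
    except AllocationUpdateFailed:
        pass  # the live migration task lets this abort the migration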
@@ -350,7 +350,10 @@ class SchedulerReportClient(object):
         "Candidates are in either 'foo' or 'bar', but definitely in 'baz'"
 
         """
-        version = GRANULAR_AC_VERSION
+        # NOTE(gibi): claim_resources will use this version as well, so to
+        # support consumer generations we need to do the allocation
+        # candidate query with a high enough version as well.
+        version = CONSUMER_GENERATION_VERSION
         qparams = resources.to_querystring()
         url = "/allocation_candidates?%s" % qparams
         resp = self.get(url, version=version,
@@ -1722,6 +1725,14 @@ class SchedulerReportClient(object):
 
     def get_allocations_for_consumer_by_provider(self, context, rp_uuid,
                                                  consumer):
+        """Return allocations for a consumer and a resource provider.
+
+        :param context: The nova.context.RequestContext auth context
+        :param rp_uuid: UUID of the resource provider
+        :param consumer: UUID of the consumer
+        :return: the resources dict of the consumer's allocation keyed by
+                 resource classes
+        """
         # NOTE(cdent): This trims to just the allocations being
         # used on this resource provider. In the future when there
         # are shared resources there might be other providers.
@@ -1743,7 +1754,8 @@ class SchedulerReportClient(object):
     @safe_connect
     @retries
     def claim_resources(self, context, consumer_uuid, alloc_request,
-                        project_id, user_id, allocation_request_version=None):
+                        project_id, user_id, allocation_request_version,
+                        consumer_generation=None):
         """Creates allocation records for the supplied instance UUID against
         the supplied resource providers.
 
@@ -1766,31 +1778,17 @@ class SchedulerReportClient(object):
        :param user_id: The user_id associated with the allocations.
        :param allocation_request_version: The microversion used to request
                                           the allocations.
+       :param consumer_generation: The expected generation of the consumer.
+                                   None if a new consumer is expected
        :returns: True if the allocations were created, False otherwise.
+       :raise AllocationUpdateFailed: If consumer_generation in the
+                                      alloc_request does not match with the
+                                      placement view.
        """
-       # Older clients might not send the allocation_request_version, so
-       # default to 1.10.
-       # TODO(alex_xu): In Rocky, all the clients should send the
-       # allocation_request_version. So remove this default value.
-       allocation_request_version = allocation_request_version or '1.10'
        # Ensure we don't change the supplied alloc request since it's used in
        # a loop within the scheduler against multiple instance claims
        ar = copy.deepcopy(alloc_request)
 
-       # If the allocation_request_version is less than 1.12, then convert
-       # the allocation array format to the dict format. This conversion can
-       # be removed in the Rocky release.
-       if versionutils.convert_version_to_tuple(
-               allocation_request_version) < (1, 12):
-           ar = {
-               'allocations': {
-                   alloc['resource_provider']['uuid']: {
-                       'resources': alloc['resources']
-                   } for alloc in ar['allocations']
-               }
-           }
-           allocation_request_version = '1.12'
-
        url = '/allocations/%s' % consumer_uuid
 
        payload = ar
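For reference, the legacy conversion removed above can be written as a standalone helper. This is taken directly from the deleted lines: microversion < 1.12 allocation requests carry a list of provider entries, while >= 1.12 uses a dict keyed by provider UUID:

    def legacy_alloc_list_to_dict(ar):
        # convert the pre-1.12 'allocations' list into the 1.12+ dict form
        return {
            'allocations': {
                alloc['resource_provider']['uuid']: {
                    'resources': alloc['resources']
                } for alloc in ar['allocations']
            }
        }

    # Example:
    # legacy_alloc_list_to_dict({'allocations': [
    #     {'resource_provider': {'uuid': 'rp1'}, 'resources': {'VCPU': 1}}]})
    # => {'allocations': {'rp1': {'resources': {'VCPU': 1}}}}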
@@ -1798,20 +1796,65 @@ class SchedulerReportClient(object):
        # We first need to determine if this is a move operation and if so
        # create the "doubled-up" allocation that exists for the duration of
        # the move operation against both the source and destination hosts
-       r = self.get(url, global_request_id=context.global_id)
+       r = self.get(url, global_request_id=context.global_id,
+                    version=CONSUMER_GENERATION_VERSION)
        if r.status_code == 200:
-           current_allocs = r.json()['allocations']
+           body = r.json()
+           current_allocs = body['allocations']
            if current_allocs:
+               if 'consumer_generation' not in ar:
+                   # this is non-forced evacuation. Evacuation does not use
+                   # the migration.uuid to hold the source host allocation
+                   # therefore when the scheduler calls claim_resources()
+                   # the two allocations need to be combined. The scheduler
+                   # does not know that this is not a new consumer as it
+                   # only sees allocation candidates.
+                   # Therefore we need to use the consumer generation from
+                   # the above GET.
+                   # If between the GET and the PUT the consumer generation
+                   # changes in placement then we raise
+                   # AllocationUpdateFailed.
+                   # NOTE(gibi): This only detects a small portion of the
+                   # possible cases when the allocation is modified outside
+                   # of this code path. The rest could only be detected if
+                   # nova cached at least the consumer generation of the
+                   # instance.
+                   consumer_generation = body['consumer_generation']
+               else:
+                   # this is forced evacuation and the caller
+                   # claim_resources_on_destination() provides the consumer
+                   # generation it sees in the conductor when it generates
+                   # the request.
+                   consumer_generation = ar['consumer_generation']
                payload = _move_operation_alloc_request(current_allocs, ar)
 
        payload['project_id'] = project_id
        payload['user_id'] = user_id
+
+       if (versionutils.convert_version_to_tuple(
+               allocation_request_version) >=
+               versionutils.convert_version_to_tuple(
+                   CONSUMER_GENERATION_VERSION)):
+           payload['consumer_generation'] = consumer_generation
+
        r = self.put(url, payload, version=allocation_request_version,
                     global_request_id=context.global_id)
        if r.status_code != 204:
-           # NOTE(jaypipes): Yes, it sucks doing string comparison like this
-           # but we have no error codes, only error messages.
-           if 'concurrently updated' in r.text:
+           err = r.json()['errors'][0]
+           if err['code'] == 'placement.concurrent_update':
+               # NOTE(jaypipes): Yes, it sucks doing string comparison like
+               # this but we have no error codes, only error messages.
+               # TODO(gibi): Use more granular error codes when available
+               if 'consumer generation conflict' in err['detail']:
+                   reason = ('another process changed the consumer %s after '
+                             'the report client read the consumer state '
+                             'during the claim ' % consumer_uuid)
+                   raise exception.AllocationUpdateFailed(
+                       consumer_uuid=consumer_uuid, error=reason)
+
+               # this is not a consumer generation conflict so it can only
+               # be a resource provider generation conflict. The caller does
+               # not provide a resource provider generation so this is just
+               # a placement internal race. We can blindly retry locally.
                reason = ('another process changed the resource providers '
                          'involved in our attempt to put allocations for '
                          'consumer %s' % consumer_uuid)
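The GET-then-PUT handshake implemented above can be sketched in isolation. This is a minimal sketch assuming a requests-like `session` object; the URL shape and the 409 error body follow the hunk, while the helper name and the RuntimeError are illustrative:

    def put_with_consumer_generation(session, consumer_uuid, new_allocs):
        url = '/allocations/%s' % consumer_uuid
        body = session.get(url).json()
        payload = dict(new_allocs)
        # echo back the generation we just read; placement rejects the PUT
        # with HTTP 409 if another writer bumped it in the meantime
        payload['consumer_generation'] = body.get('consumer_generation')
        r = session.put(url, json=payload)
        if r.status_code == 409:
            err = r.json()['errors'][0]
            if (err['code'] == 'placement.concurrent_update'
                    and 'consumer generation conflict' in err['detail']):
                # unrecoverable here: the consumer changed between our GET
                # and PUT, so the caller has to abort or re-read
                raise RuntimeError('consumer %s changed concurrently'
                                   % consumer_uuid)
        return r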
@@ -473,7 +473,7 @@ def resources_from_request_spec(spec_obj):
 # some sort of skip_filters flag.
 def claim_resources_on_destination(
         context, reportclient, instance, source_node, dest_node,
-        source_node_allocations=None):
+        source_node_allocations=None, consumer_generation=None):
     """Copies allocations from source node to dest node in Placement
 
     Normally the scheduler will allocate resources on a chosen destination
@@ -492,21 +492,50 @@ def claim_resources_on_destination(
         lives
     :param dest_node: destination ComputeNode where the instance is being
         moved
     :param source_node_allocations: The consumer's current allocation on the
         source compute
+    :param consumer_generation: The expected generation of the consumer.
+        None if a new consumer is expected
     :raises NoValidHost: If the allocation claim on the destination
         node fails.
     :raises: keystoneauth1.exceptions.base.ClientException on failure to
         communicate with the placement API
+    :raises: ConsumerAllocationRetrievalFailed if the placement API call fails
+    :raises: AllocationUpdateFailed: If a parallel consumer update changed
+        the consumer
     """
     # Get the current allocations for the source node and the instance.
     if not source_node_allocations:
-        source_node_allocations = (
-            reportclient.get_allocations_for_consumer_by_provider(
-                context, source_node.uuid, instance.uuid))
+        # NOTE(gibi): This is the forced evacuate case where the caller did
+        # not provide any allocation request. So we ask placement here for
+        # the current allocation and consumer generation and use that for
+        # the new allocation on the dest_node. If the allocation fails due
+        # to a consumer generation conflict then the claim will raise and
+        # the operation will be aborted.
+        # NOTE(gibi): This only detects a small portion of the possible
+        # cases when the allocation is modified outside of this code path.
+        # The rest could only be detected if nova cached at least the
+        # consumer generation of the instance.
+        allocations = reportclient.get_allocs_for_consumer(
+            context, instance.uuid)
+        source_node_allocations = allocations.get(
+            'allocations', {}).get(source_node.uuid, {}).get('resources')
+        consumer_generation = allocations.get('consumer_generation')
+    else:
+        # NOTE(gibi) This is the live migrate case. The caller provided the
+        # allocation that needs to be used on the dest_node along with the
+        # expected consumer_generation of the consumer (which is the
+        # instance).
+        pass
 
+    if source_node_allocations:
         # Generate an allocation request for the destination node.
         alloc_request = {
             'allocations': {
                 dest_node.uuid: {'resources': source_node_allocations}
-            }
+            },
         }
+        # import locally to avoid cyclic import
+        from nova.scheduler.client import report
         # The claim_resources method will check for existing allocations
         # for the instance and effectively "double up" the allocations for
         # both the source and destination node. That's why when requesting
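The two branches above resolve the same pair of values from two different sources. A sketch of just that decision, where the `reportclient` call mirrors the one in the hunk and everything else is illustrative:

    def resolve_source_allocation(reportclient, context, instance,
                                  source_node, source_node_allocations,
                                  consumer_generation):
        if not source_node_allocations:
            # forced evacuate: read both the allocation and the consumer
            # generation from placement in one GET
            allocations = reportclient.get_allocs_for_consumer(
                context, instance.uuid)
            source_node_allocations = allocations.get(
                'allocations', {}).get(source_node.uuid, {}).get('resources')
            consumer_generation = allocations.get('consumer_generation')
        # live migrate: the caller already supplied both values, so they
        # pass through unchanged
        return source_node_allocations, consumer_generation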
@@ -515,7 +544,8 @@ def claim_resources_on_destination(
         if reportclient.claim_resources(
                 context, instance.uuid, alloc_request,
                 instance.project_id, instance.user_id,
-                allocation_request_version='1.12'):
+                allocation_request_version=report.CONSUMER_GENERATION_VERSION,
+                consumer_generation=consumer_generation):
             LOG.debug('Instance allocations successfully created on '
                       'destination node %(dest)s: %(alloc_request)s',
                       {'dest': dest_node.uuid,
@@ -974,8 +1004,17 @@ def claim_resources(ctx, client, spec_obj, instance_uuid, alloc_req,
     # the spec object?
     user_id = ctx.user_id
 
+    # NOTE(gibi): this could raise AllocationUpdateFailed which means there
+    # is a serious issue with the instance_uuid as a consumer. Every caller
+    # of utils.claim_resources() assumes that instance_uuid will be a new
+    # consumer and therefore we pass None as the expected
+    # consumer_generation to reportclient.claim_resources() here. If the
+    # claim fails due to a consumer generation conflict, which in this case
+    # means the consumer is not new, then we let the AllocationUpdateFailed
+    # propagate and fail the build / migrate as the instance is in an
+    # inconsistent state.
     return client.claim_resources(ctx, instance_uuid, alloc_req, project_id,
-            user_id, allocation_request_version=allocation_request_version)
+            user_id, allocation_request_version=allocation_request_version,
+            consumer_generation=None)
 
 
 def remove_allocation_from_compute(context, instance, compute_node_uuid,
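A hedged usage sketch of the scheduler-side contract described in the NOTE above; `reportclient` stands in for the report client shown in this change, and the wrapper name is illustrative:

    def claim_for_new_consumer(reportclient, context, instance_uuid,
                               alloc_req, project_id, user_id):
        # None encodes the scheduler's assumption that instance_uuid is a
        # brand new consumer; a generation conflict then means the consumer
        # already exists and AllocationUpdateFailed propagates to the caller
        # instead of being retried.
        return reportclient.claim_resources(
            context, instance_uuid, alloc_req, project_id, user_id,
            allocation_request_version='1.28', consumer_generation=None)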
@@ -4451,6 +4451,85 @@ class ConsumerGenerationConflictTest(
         self.compute1 = self._start_compute('compute1')
         self.compute2 = self._start_compute('compute2')
 
+    def test_create_server_fails_as_placement_reports_consumer_conflict(self):
+        server_req = self._build_minimal_create_server_request(
+            self.api, 'some-server', flavor_id=self.flavor['id'],
+            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+            networks='none')
+
+        # We cannot pre-create a consumer with the uuid of the instance
+        # created below as that uuid is generated. Instead we have to
+        # simulate that Placement returns 409, consumer generation conflict,
+        # for the PUT /allocations request the scheduler does for the
+        # instance.
+        with mock.patch('keystoneauth1.adapter.Adapter.put') as mock_put:
+            rsp = fake_requests.FakeResponse(
+                409,
+                jsonutils.dumps(
+                    {'errors': [
+                        {'code': 'placement.concurrent_update',
+                         'detail': 'consumer generation conflict'}]}))
+            mock_put.return_value = rsp
+
+            created_server = self.api.post_server({'server': server_req})
+            server = self._wait_for_state_change(
+                self.admin_api, created_server, 'ERROR')
+
+        # This is not a conflict that the API user can ever resolve. It is a
+        # serious inconsistency in our database or a bug in the scheduler
+        # code doing the claim.
+        self.assertEqual(500, server['fault']['code'])
+        self.assertIn('Failed to update allocations for consumer',
+                      server['fault']['message'])
+
+        allocations = self._get_allocations_by_server_uuid(server['id'])
+        self.assertEqual(0, len(allocations))
+
+        self._delete_and_check_allocations(server)
+
+    def test_migrate_claim_on_dest_fails(self):
+        source_hostname = self.compute1.host
+        source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
+
+        server = self._boot_and_check_allocations(self.flavor, source_hostname)
+
+        # We have to simulate that Placement returns 409, consumer generation
+        # conflict, for the PUT /allocations request the scheduler does on
+        # the destination host for the instance.
+        with mock.patch('keystoneauth1.adapter.Adapter.put') as mock_put:
+            rsp = fake_requests.FakeResponse(
+                409,
+                jsonutils.dumps(
+                    {'errors': [
+                        {'code': 'placement.concurrent_update',
+                         'detail': 'consumer generation conflict'}]}))
+            mock_put.return_value = rsp
+
+            request = {'migrate': None}
+            exception = self.assertRaises(client.OpenStackApiException,
+                                          self.api.post_server_action,
+                                          server['id'], request)
+
+        # I know that HTTP 500 is a harsh code but I think this conflict case
+        # signals either a serious db inconsistency or a bug in nova's
+        # claim code.
+        self.assertEqual(500, exception.response.status_code)
+
+        # The migration is aborted so the instance is ACTIVE on the source
+        # host instead of being in VERIFY_RESIZE state.
+        server = self.api.get_server(server['id'])
+        self.assertEqual('ACTIVE', server['status'])
+        self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
+
+        source_usages = self._get_provider_usages(source_rp_uuid)
+        self.assertFlavorMatchesAllocation(self.flavor, source_usages)
+
+        allocations = self._get_allocations_by_server_uuid(server['id'])
+        self.assertEqual(1, len(allocations))
+        alloc = allocations[source_rp_uuid]['resources']
+        self.assertFlavorMatchesAllocation(self.flavor, alloc)
+
+        self._delete_and_check_allocations(server)
+
     def test_migrate_move_allocation_fails_due_to_conflict(self):
         source_hostname = self.compute1.host
         source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
@@ -4655,6 +4734,48 @@ class ConsumerGenerationConflictTest(
                                       migrations[0]['uuid'])
         self.assertEqual(1, len(allocations))
 
+    def test_force_live_migrate_claim_on_dest_fails(self):
+        # Normal live migrate moves the source allocation from the instance
+        # to the migration like a normal migrate tested above.
+        # Normal live migrate claims on dest like a normal boot tested above.
+        source_hostname = self.compute1.host
+        dest_hostname = self.compute2.host
+
+        server = self._boot_and_check_allocations(
+            self.flavor, source_hostname)
+
+        rsp = fake_requests.FakeResponse(
+            409,
+            jsonutils.dumps(
+                {'errors': [
+                    {'code': 'placement.concurrent_update',
+                     'detail': 'consumer generation conflict'}]}))
+        with mock.patch('keystoneauth1.adapter.Adapter.put',
+                        autospec=True) as mock_put:
+            mock_put.return_value = rsp
+
+            post = {
+                'os-migrateLive': {
+                    'host': dest_hostname,
+                    'block_migration': True,
+                    'force': True,
+                }
+            }
+
+            self.api.post_server_action(server['id'], post)
+            server = self._wait_for_state_change(self.api, server, 'ERROR')
+
+        self.assertEqual(1, mock_put.call_count)
+
+        # This is not a conflict that the API user can ever resolve. It is a
+        # serious inconsistency in our database or a bug in the scheduler
+        # code doing the claim.
+        self.assertEqual(500, server['fault']['code'])
+        # The instance is in ERROR state so the allocations are in limbo but
+        # at least we expect that when the instance is deleted the
+        # allocations are cleaned up properly.
+        self._delete_and_check_allocations(server)
+
     def test_live_migrate_drop_allocation_on_source_fails(self):
         source_hostname = self.compute1.host
         dest_hostname = self.compute2.host
@@ -4749,6 +4870,70 @@ class ConsumerGenerationConflictTest(
         allocations = self._get_allocations_by_server_uuid(migration_uuid)
         self.assertEqual(1, len(allocations))
 
+    def _test_evacuate_fails_allocating_on_dest_host(self, force):
+        source_hostname = self.compute1.host
+        dest_hostname = self.compute2.host
+
+        server = self._boot_and_check_allocations(
+            self.flavor, source_hostname)
+
+        source_compute_id = self.admin_api.get_services(
+            host=source_hostname, binary='nova-compute')[0]['id']
+
+        self.compute1.stop()
+        # force it down to avoid waiting for the service group to time out
+        self.admin_api.put_service(
+            source_compute_id, {'forced_down': 'true'})
+
+        rsp = fake_requests.FakeResponse(
+            409,
+            jsonutils.dumps(
+                {'errors': [
+                    {'code': 'placement.concurrent_update',
+                     'detail': 'consumer generation conflict'}]}))
+
+        with mock.patch('keystoneauth1.adapter.Adapter.put',
+                        autospec=True) as mock_put:
+            mock_put.return_value = rsp
+            post = {
+                'evacuate': {
+                    'force': force
+                }
+            }
+            if force:
+                post['evacuate']['host'] = dest_hostname
+
+            self.api.post_server_action(server['id'], post)
+            server = self._wait_for_state_change(self.api, server, 'ERROR')
+
+        self.assertEqual(1, mock_put.call_count)
+
+        # As nova failed to allocate on the dest host we only expect an
+        # allocation on the source
+        source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
+        dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
+
+        source_usages = self._get_provider_usages(source_rp_uuid)
+        self.assertFlavorMatchesAllocation(self.flavor, source_usages)
+
+        dest_usages = self._get_provider_usages(dest_rp_uuid)
+        self.assertEqual({'VCPU': 0,
+                          'MEMORY_MB': 0,
+                          'DISK_GB': 0}, dest_usages)
+
+        allocations = self._get_allocations_by_server_uuid(server['id'])
+        self.assertEqual(1, len(allocations))
+        source_allocation = allocations[source_rp_uuid]['resources']
+        self.assertFlavorMatchesAllocation(self.flavor, source_allocation)
+
+        self._delete_and_check_allocations(server)
+
+    def test_force_evacuate_fails_allocating_on_dest_host(self):
+        self._test_evacuate_fails_allocating_on_dest_host(force=True)
+
+    def test_evacuate_fails_allocating_on_dest_host(self):
+        self._test_evacuate_fails_allocating_on_dest_host(force=False)
+
     def test_server_delete_fails_due_to_conflict(self):
         source_hostname = self.compute1.host
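The same fake 409 body is built in each of the tests above; a small helper would capture the intent in one place. This is illustrative only, using the same `fake_requests` and `jsonutils` modules these tests already rely on:

    def consumer_conflict_response():
        # the payload placement sends for a consumer generation conflict:
        # HTTP 409 with the placement.concurrent_update error code
        return fake_requests.FakeResponse(
            409,
            jsonutils.dumps(
                {'errors': [
                    {'code': 'placement.concurrent_update',
                     'detail': 'consumer generation conflict'}]}))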
@@ -105,7 +105,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
         mock_claim.assert_called_once_with(
             self.context, self.task.scheduler_client.reportclient,
             self.instance, mock.sentinel.source_node, dest_node,
-            source_node_allocations=allocs)
+            source_node_allocations=allocs, consumer_generation=None)
         mock_mig.assert_called_once_with(
             self.context,
             host=self.instance_host,
@@ -375,51 +375,6 @@ class TestPutAllocations(SchedulerReportClientTestCase):
             mock.call(expected_url, mock.ANY, version='1.28',
                       global_request_id=self.context.global_id)] * 3)
 
-    def test_claim_resources_success_with_old_version(self):
-        get_resp_mock = mock.Mock(status_code=200)
-        get_resp_mock.json.return_value = {
-            'allocations': {},  # build instance, not move
-        }
-        self.ks_adap_mock.get.return_value = get_resp_mock
-        resp_mock = mock.Mock(status_code=204)
-        self.ks_adap_mock.put.return_value = resp_mock
-        consumer_uuid = uuids.consumer_uuid
-        alloc_req = {
-            'allocations': [
-                {
-                    'resource_provider': {
-                        'uuid': uuids.cn1
-                    },
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024,
-                    }
-                },
-            ],
-        }
-
-        project_id = uuids.project_id
-        user_id = uuids.user_id
-        res = self.client.claim_resources(
-            self.context, consumer_uuid, alloc_req, project_id, user_id)
-
-        expected_url = "/allocations/%s" % consumer_uuid
-        expected_payload = {
-            'allocations': {
-                alloc['resource_provider']['uuid']: {
-                    'resources': alloc['resources']
-                }
-                for alloc in alloc_req['allocations']
-            }
-        }
-        expected_payload['project_id'] = project_id
-        expected_payload['user_id'] = user_id
-        self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.12', json=expected_payload,
-            headers={'X-Openstack-Request-Id': self.context.global_id})
-
-        self.assertTrue(res)
-
     def test_claim_resources_success(self):
         get_resp_mock = mock.Mock(status_code=200)
         get_resp_mock.json.return_value = {
@@ -458,35 +413,24 @@ class TestPutAllocations(SchedulerReportClientTestCase):
 
         self.assertTrue(res)
 
-    def test_claim_resources_success_move_operation_no_shared(self):
-        """Tests that when a move operation is detected (existing allocations
-        for the same instance UUID) that we end up constructing an appropriate
-        allocation that contains the original resources on the source host
-        as well as the resources on the destination host.
+    def test_claim_resources_older_alloc_req(self):
+        """Test the case when a stale allocation request is sent to the report
+        client to claim
         """
         get_resp_mock = mock.Mock(status_code=200)
         get_resp_mock.json.return_value = {
-            'allocations': {
-                uuids.source: {
-                    'resource_provider_generation': 42,
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024,
-                    },
-                },
-            },
+            'allocations': {},  # build instance, not move
         }
 
         self.ks_adap_mock.get.return_value = get_resp_mock
         resp_mock = mock.Mock(status_code=204)
         self.ks_adap_mock.put.return_value = resp_mock
         consumer_uuid = uuids.consumer_uuid
         alloc_req = {
             'allocations': {
-                uuids.destination: {
+                uuids.cn1: {
                     'resources': {
                         'VCPU': 1,
-                        'MEMORY_MB': 1024
+                        'MEMORY_MB': 1024,
                     }
                 },
             },
@@ -499,147 +443,32 @@ class TestPutAllocations(SchedulerReportClientTestCase):
                                           allocation_request_version='1.12')
 
         expected_url = "/allocations/%s" % consumer_uuid
-        # New allocation should include resources claimed on both the source
-        # and destination hosts
         expected_payload = {
             'allocations': {
-                uuids.source: {
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024
-                    }
-                },
-                uuids.destination: {
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024
-                    }
-                },
-            },
-        }
-        expected_payload['project_id'] = project_id
-        expected_payload['user_id'] = user_id
+                rp_uuid: res
+                for rp_uuid, res in alloc_req['allocations'].items()},
+            # no consumer generation in the payload as the caller requested
+            # an older microversion to be used
+            'project_id': project_id,
+            'user_id': user_id}
         self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.12', json=mock.ANY,
+            expected_url, microversion='1.12', json=expected_payload,
             headers={'X-Openstack-Request-Id': self.context.global_id})
-        # We have to pull the json body from the mock call_args to validate
-        # it separately otherwise hash seed issues get in the way.
-        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
-        self.assertEqual(expected_payload, actual_payload)
 
         self.assertTrue(res)
 
-    def test_claim_resources_success_move_operation_with_shared(self):
-        """Tests that when a move operation is detected (existing allocations
-        for the same instance UUID) that we end up constructing an appropriate
-        allocation that contains the original resources on the source host
-        as well as the resources on the destination host but that when a
-        shared storage provider is claimed against in both the original
-        allocation as well as the new allocation request, we don't double
-        that allocation resource request up.
-        """
-        get_resp_mock = mock.Mock(status_code=200)
-        get_resp_mock.json.return_value = {
-            'allocations': {
-                uuids.source: {
-                    'resource_provider_generation': 42,
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024,
-                    },
-                },
-                uuids.shared_storage: {
-                    'resource_provider_generation': 42,
-                    'resources': {
-                        'DISK_GB': 100,
-                    },
-                },
-            },
-        }
-
-        self.ks_adap_mock.get.return_value = get_resp_mock
-        resp_mock = mock.Mock(status_code=204)
-        self.ks_adap_mock.put.return_value = resp_mock
-        consumer_uuid = uuids.consumer_uuid
-        alloc_req = {
-            'allocations': {
-                uuids.destination: {
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024,
-                    }
-                },
-                uuids.shared_storage: {
-                    'resources': {
-                        'DISK_GB': 100,
-                    }
-                },
-            }
-        }
-
-        project_id = uuids.project_id
-        user_id = uuids.user_id
-        res = self.client.claim_resources(self.context, consumer_uuid,
-                                          alloc_req, project_id, user_id,
-                                          allocation_request_version='1.12')
-
-        expected_url = "/allocations/%s" % consumer_uuid
-        # New allocation should include resources claimed on both the source
-        # and destination hosts but not have a doubled-up request for the
-        # disk resources on the shared provider
-        expected_payload = {
-            'allocations': {
-                uuids.source: {
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024
-                    }
-                },
-                uuids.shared_storage: {
-                    'resources': {
-                        'DISK_GB': 100
-                    }
-                },
-                uuids.destination: {
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024
-                    }
-                },
-            },
-        }
-        expected_payload['project_id'] = project_id
-        expected_payload['user_id'] = user_id
-        self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.12', json=mock.ANY,
-            headers={'X-Openstack-Request-Id': self.context.global_id})
-        # We have to pull the allocations from the json body from the
-        # mock call_args to validate it separately otherwise hash seed
-        # issues get in the way.
-        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
-        self.assertEqual(expected_payload, actual_payload)
-
-        self.assertTrue(res)
-
     def test_claim_resources_success_resize_to_same_host_no_shared(self):
-        """Tests that when a resize to the same host operation is detected
-        (existing allocations for the same instance UUID and same resource
-        provider) that we end up constructing an appropriate allocation that
-        contains the original resources on the source host as well as the
-        resources on the destination host, which in this case are the same.
+        """Tests the resize to the same host operation. In this case the
+        allocation exists against the same host RP but with the
+        migration_uuid.
         """
         get_current_allocations_resp_mock = mock.Mock(status_code=200)
+        # source host allocation is held by the migration_uuid so it is not
+        # returned to the claim code as that asks for the instance_uuid
+        # consumer
         get_current_allocations_resp_mock.json.return_value = {
-            'allocations': {
-                uuids.same_host: {
-                    'resource_provider_generation': 42,
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024,
-                        'DISK_GB': 20
-                    },
-                },
-            },
+            'allocations': {},
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
         }
 
         self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
@@ -648,8 +477,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         consumer_uuid = uuids.consumer_uuid
         # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
         # are all being increased but on the same host. We also throw a custom
-        # resource class in the new allocation to make sure it's not lost and
-        # that we don't have a KeyError when merging the allocations.
+        # resource class in the new allocation to make sure it's not lost
        alloc_req = {
             'allocations': {
                 uuids.same_host: {
@@ -661,33 +489,36 @@ class TestPutAllocations(SchedulerReportClientTestCase):
                     }
                 },
             },
+            # this allocation request comes from the scheduler therefore it
+            # does not have consumer_generation in it.
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
         }
 
         project_id = uuids.project_id
         user_id = uuids.user_id
         res = self.client.claim_resources(self.context, consumer_uuid,
                                           alloc_req, project_id, user_id,
-                                          allocation_request_version='1.12')
+                                          allocation_request_version='1.28')
 
         expected_url = "/allocations/%s" % consumer_uuid
-        # New allocation should include doubled resources claimed on the same
-        # host.
         expected_payload = {
             'allocations': {
                 uuids.same_host: {
                     'resources': {
-                        'VCPU': 3,
-                        'MEMORY_MB': 3072,
-                        'DISK_GB': 60,
+                        'VCPU': 2,
+                        'MEMORY_MB': 2048,
+                        'DISK_GB': 40,
                         'CUSTOM_FOO': 1
                     }
                 },
             },
-        }
-        expected_payload['project_id'] = project_id
-        expected_payload['user_id'] = user_id
+            # report client assumes a new consumer in this case
+            'consumer_generation': None,
+            'project_id': project_id,
+            'user_id': user_id}
         self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.12', json=mock.ANY,
+            expected_url, microversion='1.28', json=mock.ANY,
             headers={'X-Openstack-Request-Id': self.context.global_id})
         # We have to pull the json body from the mock call_args to validate
         # it separately otherwise hash seed issues get in the way.
@@ -697,31 +528,19 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         self.assertTrue(res)
 
     def test_claim_resources_success_resize_to_same_host_with_shared(self):
-        """Tests that when a resize to the same host operation is detected
-        (existing allocations for the same instance UUID and same resource
-        provider) that we end up constructing an appropriate allocation that
-        contains the original resources on the source host as well as the
-        resources on the destination host, which in this case are the same.
-        This test adds the fun wrinkle of throwing a shared storage provider
-        in the mix when doing resize to the same host.
+        """Tests the resize to the same host operation. In this case the
+        allocation exists against the same host RP and the shared RP but
+        with the migration_uuid.
         """
         get_current_allocations_resp_mock = mock.Mock(status_code=200)
+        # source host allocation is held by the migration_uuid so it is not
+        # returned to the claim code as that asks for the instance_uuid
+        # consumer
         get_current_allocations_resp_mock.json.return_value = {
-            'allocations': {
-                uuids.same_host: {
-                    'resource_provider_generation': 42,
-                    'resources': {
-                        'VCPU': 1,
-                        'MEMORY_MB': 1024
-                    },
-                },
-                uuids.shared_storage: {
-                    'resource_provider_generation': 42,
-                    'resources': {
-                        'DISK_GB': 20,
-                    },
-                },
-            },
+            'allocations': {},
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
         }
 
         self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
@@ -729,13 +548,15 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         self.ks_adap_mock.put.return_value = put_allocations_resp_mock
         consumer_uuid = uuids.consumer_uuid
         # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
-        # are all being increased but DISK_GB is on a shared storage provider.
+        # are all being increased but on the same host. We also throw a custom
+        # resource class in the new allocation to make sure it's not lost
         alloc_req = {
             'allocations': {
                 uuids.same_host: {
                     'resources': {
                         'VCPU': 2,
-                        'MEMORY_MB': 2048
+                        'MEMORY_MB': 2048,
+                        'CUSTOM_FOO': 1
                     }
                 },
                 uuids.shared_storage: {
@@ -744,36 +565,40 @@ class TestPutAllocations(SchedulerReportClientTestCase):
                     }
                 },
             },
+            # this allocation request comes from the scheduler therefore it
+            # does not have consumer_generation in it.
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
         }
 
         project_id = uuids.project_id
         user_id = uuids.user_id
         res = self.client.claim_resources(self.context, consumer_uuid,
                                           alloc_req, project_id, user_id,
-                                          allocation_request_version='1.12')
+                                          allocation_request_version='1.28')
 
         expected_url = "/allocations/%s" % consumer_uuid
-        # New allocation should include doubled resources claimed on the same
-        # host.
         expected_payload = {
             'allocations': {
                 uuids.same_host: {
                     'resources': {
-                        'VCPU': 3,
-                        'MEMORY_MB': 3072
+                        'VCPU': 2,
+                        'MEMORY_MB': 2048,
+                        'CUSTOM_FOO': 1
                     }
                 },
                 uuids.shared_storage: {
                     'resources': {
-                        'DISK_GB': 60
+                        'DISK_GB': 40,
                     }
                 },
             },
-        }
-        expected_payload['project_id'] = project_id
-        expected_payload['user_id'] = user_id
+            # report client assumes a new consumer in this case
+            'consumer_generation': None,
+            'project_id': project_id,
+            'user_id': user_id}
         self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.12', json=mock.ANY,
+            expected_url, microversion='1.28', json=mock.ANY,
             headers={'X-Openstack-Request-Id': self.context.global_id})
         # We have to pull the json body from the mock call_args to validate
         # it separately otherwise hash seed issues get in the way.
@@ -782,19 +607,399 @@ class TestPutAllocations(SchedulerReportClientTestCase):
 
         self.assertTrue(res)
 
-    def test_claim_resources_fail_retry_success(self):
+    def test_claim_resources_success_evacuate_no_shared(self):
+        """Tests non-forced evacuate. In this case both the source and the
+        dest allocation are held by the instance_uuid in placement. So the
+        claim code needs to merge allocations. The second claim comes from
+        the scheduler and therefore it does not have consumer_generation in
+        it.
+        """
+        # the source allocation is also held by the instance_uuid so the
+        # report client will see it.
+        current_allocs = {
+            'allocations': {
+                uuids.source_host: {
+                    'generation': 42,
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20
+                    },
+                },
+            },
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
+            status_code=200,
+            content=jsonutils.dumps(current_allocs))
+        put_allocations_resp_mock = fake_requests.FakeResponse(status_code=204)
+        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
+        consumer_uuid = uuids.consumer_uuid
+        # this is an evacuate so we have the same resources request towards
+        # the dest host
+        alloc_req = {
+            'allocations': {
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20,
+                    }
+                },
+            },
+            # this allocation request comes from the scheduler therefore it
+            # does not have consumer_generation in it.
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+
+        project_id = uuids.project_id
+        user_id = uuids.user_id
+        res = self.client.claim_resources(self.context, consumer_uuid,
+                                          alloc_req, project_id, user_id,
+                                          allocation_request_version='1.28')
+
+        expected_url = "/allocations/%s" % consumer_uuid
+        # we expect that both the source and dest allocations are here
+        expected_payload = {
+            'allocations': {
+                uuids.source_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20
+                    },
+                },
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20,
+                    }
+                },
+            },
+            # report client uses the consumer_generation that it got from
+            # placement when asked for the existing allocations
+            'consumer_generation': 1,
+            'project_id': project_id,
+            'user_id': user_id}
+        self.ks_adap_mock.put.assert_called_once_with(
+            expected_url, microversion='1.28', json=mock.ANY,
+            headers={'X-Openstack-Request-Id': self.context.global_id})
+        # We have to pull the json body from the mock call_args to validate
+        # it separately otherwise hash seed issues get in the way.
+        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
+        self.assertEqual(expected_payload, actual_payload)
+
+        self.assertTrue(res)
+
+    def test_claim_resources_success_evacuate_with_shared(self):
+        """Similar to test_claim_resources_success_evacuate_no_shared but
+        adds shared disk into the mix.
+        """
+        # the source allocation is also held by the instance_uuid so the
+        # report client will see it.
+        current_allocs = {
+            'allocations': {
+                uuids.source_host: {
+                    'generation': 42,
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+                uuids.shared_storage: {
+                    'generation': 42,
+                    'resources': {
+                        'DISK_GB': 20,
+                    },
+                },
+            },
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
+            status_code=200,
+            content=jsonutils.dumps(current_allocs))
+        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
+            status_code=204)
+        consumer_uuid = uuids.consumer_uuid
+        # this is an evacuate so we have the same resources request towards
+        # the dest host
+        alloc_req = {
+            'allocations': {
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+                uuids.shared_storage: {
+                    'generation': 42,
+                    'resources': {
+                        'DISK_GB': 20,
+                    },
+                },
+            },
+            # this allocation request comes from the scheduler therefore it
+            # does not have consumer_generation in it.
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+
+        project_id = uuids.project_id
+        user_id = uuids.user_id
+        res = self.client.claim_resources(self.context, consumer_uuid,
+                                          alloc_req, project_id, user_id,
+                                          allocation_request_version='1.28')
+
+        expected_url = "/allocations/%s" % consumer_uuid
+        # we expect that both the source and dest allocations are here plus
+        # the shared storage allocation
+        expected_payload = {
+            'allocations': {
+                uuids.source_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    }
+                },
+                uuids.shared_storage: {
+                    'resources': {
+                        'DISK_GB': 20,
+                    },
+                },
+            },
+            # report client uses the consumer_generation that it got from
+            # placement when asked for the existing allocations
+            'consumer_generation': 1,
+            'project_id': project_id,
+            'user_id': user_id}
+        self.ks_adap_mock.put.assert_called_once_with(
+            expected_url, microversion='1.28', json=mock.ANY,
+            headers={'X-Openstack-Request-Id': self.context.global_id})
+        # We have to pull the json body from the mock call_args to validate
+        # it separately otherwise hash seed issues get in the way.
+        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
+        self.assertEqual(expected_payload, actual_payload)
+
+        self.assertTrue(res)
+
+    def test_claim_resources_success_force_evacuate_no_shared(self):
+        """Tests forced evacuate. In this case both the source and the
+        dest allocation are held by the instance_uuid in placement. So the
+        claim code needs to merge allocations. The second claim comes from
+        the conductor and therefore it does have consumer_generation in it.
+        """
+        # the source allocation is also held by the instance_uuid so the
+        # report client will see it.
+        current_allocs = {
+            'allocations': {
+                uuids.source_host: {
+                    'generation': 42,
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20
+                    },
+                },
+            },
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+
+        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
+            status_code=200,
+            content=jsonutils.dumps(current_allocs))
+        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
+            status_code=204)
+        consumer_uuid = uuids.consumer_uuid
+        # this is an evacuate so we have the same resources request towards
+        # the dest host
+        alloc_req = {
+            'allocations': {
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20,
+                    }
+                },
+            },
+            # this allocation request comes from the conductor that read the
+            # allocation from placement therefore it has consumer_generation
+            # in it.
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+
+        project_id = uuids.project_id
+        user_id = uuids.user_id
+        res = self.client.claim_resources(self.context, consumer_uuid,
+                                          alloc_req, project_id, user_id,
+                                          allocation_request_version='1.28')
+
+        expected_url = "/allocations/%s" % consumer_uuid
+        # we expect that both the source and dest allocations are here
+        expected_payload = {
+            'allocations': {
+                uuids.source_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20
+                    },
+                },
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                        'DISK_GB': 20,
+                    }
+                },
+            },
+            # report client uses the consumer_generation that it got in the
+            # allocation request
+            'consumer_generation': 1,
+            'project_id': project_id,
+            'user_id': user_id}
+        self.ks_adap_mock.put.assert_called_once_with(
+            expected_url, microversion='1.28', json=mock.ANY,
+            headers={'X-Openstack-Request-Id': self.context.global_id})
+        # We have to pull the json body from the mock call_args to validate
+        # it separately otherwise hash seed issues get in the way.
+        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
+        self.assertEqual(expected_payload, actual_payload)
+
+        self.assertTrue(res)
+
+    def test_claim_resources_success_force_evacuate_with_shared(self):
+        """Similar to test_claim_resources_success_force_evacuate_no_shared
+        but adds shared disk into the mix.
+        """
+        # the source allocation is also held by the instance_uuid so the
+        # report client will see it.
+        current_allocs = {
+            'allocations': {
+                uuids.source_host: {
+                    'generation': 42,
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+                uuids.shared_storage: {
+                    'generation': 42,
+                    'resources': {
+                        'DISK_GB': 20,
+                    },
+                },
+            },
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+
+        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
+            status_code=200,
+            content=jsonutils.dumps(current_allocs))
+        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
+            status_code=204)
+        consumer_uuid = uuids.consumer_uuid
+        # this is an evacuate so we have the same resources request towards
+        # the dest host
+        alloc_req = {
+            'allocations': {
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+                uuids.shared_storage: {
+                    'generation': 42,
+                    'resources': {
+                        'DISK_GB': 20,
+                    },
+                },
+            },
+            # this allocation request comes from the conductor that read the
+            # allocation from placement therefore it has consumer_generation
+            # in it.
+            "consumer_generation": 1,
+            "project_id": uuids.project_id,
+            "user_id": uuids.user_id
+        }
+
+        project_id = uuids.project_id
+        user_id = uuids.user_id
+        res = self.client.claim_resources(self.context, consumer_uuid,
+                                          alloc_req, project_id, user_id,
+                                          allocation_request_version='1.28')
+
+        expected_url = "/allocations/%s" % consumer_uuid
+        # we expect that both the source and dest allocations are here plus
+        # the shared storage allocation
+        expected_payload = {
+            'allocations': {
+                uuids.source_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+                uuids.dest_host: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    }
+                },
+                uuids.shared_storage: {
+                    'resources': {
+                        'DISK_GB': 20,
+                    },
+                },
+            },
+            # report client uses the consumer_generation that it got in the
+            # allocation request
+            'consumer_generation': 1,
+            'project_id': project_id,
+            'user_id': user_id}
+        self.ks_adap_mock.put.assert_called_once_with(
+            expected_url, microversion='1.28', json=mock.ANY,
+            headers={'X-Openstack-Request-Id': self.context.global_id})
+        # We have to pull the json body from the mock call_args to validate
+        # it separately otherwise hash seed issues get in the way.
+        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
+        self.assertEqual(expected_payload, actual_payload)
+
+        self.assertTrue(res)
+
+    def test_claim_resources_fail_due_to_rp_generation_retry_success(self):
         get_resp_mock = mock.Mock(status_code=200)
         get_resp_mock.json.return_value = {
             'allocations': {},  # build instance, not move
         }
         self.ks_adap_mock.get.return_value = get_resp_mock
         resp_mocks = [
-            mock.Mock(
-                status_code=409,
-                text='Inventory changed while attempting to allocate: '
-                     'Another thread concurrently updated the data. '
-                     'Please retry your update'),
-            mock.Mock(status_code=204),
+            fake_requests.FakeResponse(
+                409,
+                jsonutils.dumps(
+                    {'errors': [
+                        {'code': 'placement.concurrent_update',
+                         'detail': ''}]})),
+            fake_requests.FakeResponse(204)
         ]
         self.ks_adap_mock.put.side_effect = resp_mocks
         consumer_uuid = uuids.consumer_uuid
@@ -813,7 +1018,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         user_id = uuids.user_id
         res = self.client.claim_resources(self.context, consumer_uuid,
                                           alloc_req, project_id, user_id,
-                                          allocation_request_version='1.12')
+                                          allocation_request_version='1.28')
 
         expected_url = "/allocations/%s" % consumer_uuid
         expected_payload = {
@@ -823,10 +1028,11 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         }
         expected_payload['project_id'] = project_id
         expected_payload['user_id'] = user_id
+        expected_payload['consumer_generation'] = None
         # We should have exactly two calls to the placement API that look
         # identical since we're retrying the same HTTP request
         expected_calls = [
-            mock.call(expected_url, microversion='1.12', json=expected_payload,
+            mock.call(expected_url, microversion='1.28', json=expected_payload,
                       headers={'X-Openstack-Request-Id':
                                self.context.global_id})] * 2
         self.assertEqual(len(expected_calls),
@@ -842,7 +1048,13 @@ class TestPutAllocations(SchedulerReportClientTestCase):
             'allocations': {},  # build instance, not move
         }
         self.ks_adap_mock.get.return_value = get_resp_mock
-        resp_mock = mock.Mock(status_code=409, text='not cool')
+        resp_mock = fake_requests.FakeResponse(
+            409,
+            jsonutils.dumps(
+                {'errors': [
+                    {'code': 'something else',
+                     'detail': 'not cool'}]}))
+
         self.ks_adap_mock.put.return_value = resp_mock
         consumer_uuid = uuids.consumer_uuid
         alloc_req = {
@@ -860,7 +1072,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         user_id = uuids.user_id
         res = self.client.claim_resources(self.context, consumer_uuid,
                                           alloc_req, project_id, user_id,
-                                          allocation_request_version='1.12')
+                                          allocation_request_version='1.28')
 
         expected_url = "/allocations/%s" % consumer_uuid
         expected_payload = {
@@ -870,13 +1082,59 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         }
         expected_payload['project_id'] = project_id
         expected_payload['user_id'] = user_id
+        expected_payload['consumer_generation'] = None
         self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.12', json=expected_payload,
+            expected_url, microversion='1.28', json=expected_payload,
             headers={'X-Openstack-Request-Id': self.context.global_id})
 
         self.assertFalse(res)
         self.assertTrue(mock_log.called)
 
+    def test_claim_resources_consumer_generation_failure(self):
+        get_resp_mock = mock.Mock(status_code=200)
+        get_resp_mock.json.return_value = {
+            'allocations': {},  # build instance, not move
+        }
+        self.ks_adap_mock.get.return_value = get_resp_mock
+        resp_mock = fake_requests.FakeResponse(
+            409,
+            jsonutils.dumps(
+                {'errors': [
+                    {'code': 'placement.concurrent_update',
+                     'detail': 'consumer generation conflict'}]}))
+
+        self.ks_adap_mock.put.return_value = resp_mock
+        consumer_uuid = uuids.consumer_uuid
+        alloc_req = {
+            'allocations': {
+                uuids.cn1: {
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    }
+                },
+            },
+        }
+
+        project_id = uuids.project_id
+        user_id = uuids.user_id
+        self.assertRaises(exception.AllocationUpdateFailed,
+                          self.client.claim_resources, self.context,
+                          consumer_uuid, alloc_req, project_id, user_id,
+                          allocation_request_version='1.28')
+
+        expected_url = "/allocations/%s" % consumer_uuid
+        expected_payload = {
+            'allocations': {
+                rp_uuid: res
+                for rp_uuid, res in alloc_req['allocations'].items()},
+            'project_id': project_id,
+            'user_id': user_id,
+            'consumer_generation': None}
+        self.ks_adap_mock.put.assert_called_once_with(
+            expected_url, microversion='1.28', json=expected_payload,
+            headers={'X-Openstack-Request-Id': self.context.global_id})
+
     def test_remove_provider_from_inst_alloc_no_shared(self):
         """Tests that the method which manipulates an existing doubled-up
         allocation for a move operation to remove the source host results in
@@ -1637,7 +1895,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         expected_url = '/allocation_candidates?%s' % parse.urlencode(
             expected_query)
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, microversion='1.25',
+            expected_url, microversion='1.28',
             headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
         self.assertEqual(mock.sentinel.p_sums, p_sums)
@@ -1677,7 +1935,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
             expected_query)
         self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, microversion='1.25',
+            expected_url, microversion='1.28',
             headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertEqual(mock.sentinel.p_sums, p_sums)
@@ -1699,7 +1957,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         res = self.client.get_allocation_candidates(self.context, resources)
 
         self.ks_adap_mock.get.assert_called_once_with(
-            mock.ANY, microversion='1.25',
+            mock.ANY, microversion='1.28',
             headers={'X-Openstack-Request-Id': self.context.global_id})
         url = self.ks_adap_mock.get.call_args[0][0]
         split_url = parse.urlsplit(url)
@@ -757,7 +757,7 @@ class TestUtils(test.NoDBTestCase):
         dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
 
         @mock.patch.object(reportclient,
-                           'get_allocations_for_consumer_by_provider',
+                           'get_allocs_for_consumer',
                            return_value={})
         @mock.patch.object(reportclient,
                            'claim_resources',
@@ -766,7 +766,7 @@ class TestUtils(test.NoDBTestCase):
             utils.claim_resources_on_destination(
                 self.context, reportclient, instance, source_node, dest_node)
             mock_get_allocs.assert_called_once_with(
-                self.context, uuids.source_node, instance.uuid)
+                self.context, instance.uuid)
 
         test()
 
@@ -780,21 +780,35 @@ class TestUtils(test.NoDBTestCase):
             uuid=uuids.source_node, host=instance.host)
         dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
         source_res_allocs = {
-            'VCPU': instance.vcpus,
-            'MEMORY_MB': instance.memory_mb,
-            # This would really include ephemeral and swap too but we're lazy.
-            'DISK_GB': instance.root_gb
+            'allocations': {
+                uuids.source_node: {
+                    'resources': {
+                        'VCPU': instance.vcpus,
+                        'MEMORY_MB': instance.memory_mb,
+                        # This would really include ephemeral and swap too
+                        # but we're lazy.
+                        'DISK_GB': instance.root_gb
+                    }
+                }
+            },
+            'consumer_generation': 1,
+            'project_id': uuids.project_id,
+            'user_id': uuids.user_id
         }
         dest_alloc_request = {
             'allocations': {
                 uuids.dest_node: {
-                    'resources': source_res_allocs
+                    'resources': {
+                        'VCPU': instance.vcpus,
+                        'MEMORY_MB': instance.memory_mb,
+                        'DISK_GB': instance.root_gb
+                    }
                 }
-            }
+            },
         }
 
         @mock.patch.object(reportclient,
-                           'get_allocations_for_consumer_by_provider',
+                           'get_allocs_for_consumer',
                            return_value=source_res_allocs)
         @mock.patch.object(reportclient,
                            'claim_resources', return_value=False)
@@ -806,11 +820,11 @@ class TestUtils(test.NoDBTestCase):
                 self.context, reportclient, instance,
                 source_node, dest_node)
             mock_get_allocs.assert_called_once_with(
-                self.context, uuids.source_node, instance.uuid)
+                self.context, instance.uuid)
             mock_claim.assert_called_once_with(
                 self.context, instance.uuid, dest_alloc_request,
                 instance.project_id, instance.user_id,
-                allocation_request_version='1.12')
+                allocation_request_version='1.28', consumer_generation=1)
 
         test()
 
@@ -830,24 +844,28 @@ class TestUtils(test.NoDBTestCase):
         dest_alloc_request = {
             'allocations': {
                 uuids.dest_node: {
-                    'resources': source_res_allocs
+                    'resources': {
+                        'VCPU': instance.vcpus,
+                        'MEMORY_MB': instance.memory_mb,
+                        'DISK_GB': instance.root_gb
+                    }
                 }
-            }
+            },
         }
 
         @mock.patch.object(reportclient,
-                           'get_allocations_for_consumer_by_provider')
+                           'get_allocations_for_consumer')
         @mock.patch.object(reportclient,
                            'claim_resources', return_value=True)
         def test(mock_claim, mock_get_allocs):
             utils.claim_resources_on_destination(
                 self.context, reportclient, instance, source_node, dest_node,
-                source_res_allocs)
+                source_res_allocs, consumer_generation=None)
             self.assertFalse(mock_get_allocs.called)
             mock_claim.assert_called_once_with(
                 self.context, instance.uuid, dest_alloc_request,
                 instance.project_id, instance.user_id,
-                allocation_request_version='1.12')
+                allocation_request_version='1.28', consumer_generation=None)
 
         test()
 
@@ -869,7 +887,8 @@ class TestUtils(test.NoDBTestCase):
 
         mock_client.claim_resources.assert_called_once_with(
             ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
-            uuids.user_id, allocation_request_version=None)
+            uuids.user_id, allocation_request_version=None,
+            consumer_generation=None)
         self.assertTrue(res)
 
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient')