[placement] Send the global request ID on POST

Add the 'X-Openstack-Request-Id' header to POST requests issued by
SchedulerReportClient, namely when creating a resource provider and
when creating a resource class.

Subsequent patches will add the header to the remaining request types.

Change-Id: I39d8c71432b3adf7e5bdde1c6cb6f089a9c79614
Partial-Bug: #1734625
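
The mechanism, in brief: the report client's post() wrapper grows a
global_request_id keyword and forwards it as the inbound request-id
header. A minimal sketch of the pattern, distilled from the diff below
(the standalone function is illustrative, not the actual method):

    from oslo_middleware import request_id

    # request_id.INBOUND_HEADER is 'X-Openstack-Request-Id'.
    def post(session, url, data, version=None, global_request_id=None):
        # Only send the header when a global request ID is present.
        headers = ({request_id.INBOUND_HEADER: global_request_id}
                   if global_request_id else {})
        return session.post(url, json=data, raise_exc=False,
                            microversion=version, headers=headers)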
Takashi NATSUME 2017-12-09 14:04:34 +09:00
parent 38eb95384f
commit 6079b722cb
9 changed files with 163 additions and 102 deletions

View File

@@ -4030,7 +4030,7 @@ class ComputeManager(manager.Manager):
                  instance=instance)
         # TODO(cdent): Should we be doing anything with return values here?
         self.reportclient.set_and_clear_allocations(
-            cn_uuid, instance.uuid, orig_alloc, instance.project_id,
+            context, cn_uuid, instance.uuid, orig_alloc, instance.project_id,
             instance.user_id, consumer_to_clear=migration.uuid)
         return True

View File

@@ -121,7 +121,8 @@ class LiveMigrationTask(base.TaskBase):
         # except to call the compute method, that has no matching
         # rollback call right now.
         if self._held_allocations:
-            migrate.revert_allocation_for_migration(self._source_cn,
+            migrate.revert_allocation_for_migration(self.context,
+                                                    self._source_cn,
                                                     self.instance,
                                                     self.migration,
                                                     self._held_allocations)

View File

@@ -57,8 +57,8 @@ def replace_allocation_with_migration(context, instance, migration):
     # against only one provider. So, this may overwrite allocations against
     # a shared provider, if we had one.
     success = reportclient.set_and_clear_allocations(
-        source_cn.uuid, migration.uuid, orig_alloc, instance.project_id,
-        instance.user_id, consumer_to_clear=instance.uuid)
+        context, source_cn.uuid, migration.uuid, orig_alloc,
+        instance.project_id, instance.user_id, consumer_to_clear=instance.uuid)
     if not success:
         LOG.error('Unable to replace resource claim on source '
                   'host %(host)s node %(node)s for instance',
@@ -77,7 +77,7 @@ def replace_allocation_with_migration(context, instance, migration):
     return source_cn, orig_alloc
-def revert_allocation_for_migration(source_cn, instance, migration,
+def revert_allocation_for_migration(context, source_cn, instance, migration,
                                     orig_alloc):
     """Revert an allocation made for a migration back to the instance."""
@@ -88,8 +88,9 @@ def revert_allocation_for_migration(source_cn, instance, migration,
     # against only one provider. So, this may overwrite allocations against
     # a shared provider, if we had one.
     success = reportclient.set_and_clear_allocations(
-        source_cn.uuid, instance.uuid, orig_alloc, instance.project_id,
-        instance.user_id, consumer_to_clear=migration.uuid)
+        context, source_cn.uuid, instance.uuid, orig_alloc,
+        instance.project_id, instance.user_id,
+        consumer_to_clear=migration.uuid)
     if not success:
         LOG.error('Unable to replace resource claim on source '
                   'host %(host)s node %(node)s for instance',
@@ -315,6 +316,6 @@ class MigrationTask(base.TaskBase):
         # do that cleanup but we never got that far, so do it here and
         # now.
-        revert_allocation_for_migration(self._source_cn, self.instance,
-                                        self._migration,
+        revert_allocation_for_migration(self.context, self._source_cn,
+                                        self.instance, self._migration,
                                         self._held_allocations)

View File

@@ -276,13 +276,15 @@ class SchedulerReportClient(object):
     def get(self, url, version=None):
         return self._client.get(url, raise_exc=False, microversion=version)
-    def post(self, url, data, version=None):
+    def post(self, url, data, version=None, global_request_id=None):
+        headers = ({request_id.INBOUND_HEADER: global_request_id}
+                   if global_request_id else {})
         # NOTE(sdague): using json= instead of data= sets the
         # media type to application/json for us. Placement API is
         # more sensitive to this than other APIs in the OpenStack
         # ecosystem.
         return self._client.post(url, json=data, raise_exc=False,
-                                 microversion=version)
+                                 microversion=version, headers=headers)
     def put(self, url, data, version=None):
         # NOTE(sdague): using json= instead of data= sets the
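
With global_request_id supplied, the outgoing request gains exactly one
extra header. A sketch of the wire-level effect (header values invented
for illustration; the exact header set depends on the keystoneauth
session configuration):

    POST /resource_providers HTTP/1.1
    Content-Type: application/json
    OpenStack-API-Version: placement 1.14
    X-Openstack-Request-Id: req-1f4cf2af-e9d5-4441-8c77-efb0eecb1476
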
@@ -517,10 +519,11 @@ class SchedulerReportClient(object):
         raise exception.ResourceProviderRetrievalFailed(uuid=uuid)
     @safe_connect
-    def _create_resource_provider(self, uuid, name,
+    def _create_resource_provider(self, context, uuid, name,
                                   parent_provider_uuid=None):
         """Calls the placement API to create a new resource provider record.
+        :param context: The security context
         :param uuid: UUID of the new resource provider
         :param name: Name of the resource provider
         :param parent_provider_uuid: Optional UUID of the immediate parent
@@ -537,7 +540,8 @@ class SchedulerReportClient(object):
         if parent_provider_uuid is not None:
             payload['parent_provider_uuid'] = parent_provider_uuid
-        resp = self.post(url, payload, version=NESTED_PROVIDER_API_VERSION)
+        resp = self.post(url, payload, version=NESTED_PROVIDER_API_VERSION,
+                         global_request_id=context.global_id)
         placement_req_id = get_placement_request_id(resp)
         if resp.status_code == 201:
             msg = ("[%(placement_req_id)s] Created resource provider record "
@@ -585,7 +589,7 @@ class SchedulerReportClient(object):
             LOG.error(msg, args)
             raise exception.ResourceProviderCreationFailed(name=name)
-    def _ensure_resource_provider(self, uuid, name=None,
+    def _ensure_resource_provider(self, context, uuid, name=None,
                                   parent_provider_uuid=None):
         """Ensures that the placement API has a record of a resource provider
         with the supplied UUID. If not, creates the resource provider record in
@@ -612,6 +616,7 @@ class SchedulerReportClient(object):
         reduces to just the specified provider as a root, with no aggregates or
         traits.
+        :param context: The security context
         :param uuid: UUID identifier for the resource provider to ensure exists
         :param name: Optional name for the resource provider if the record
                      does not exist. If empty, the name is set to the UUID
@@ -634,7 +639,7 @@ class SchedulerReportClient(object):
             rps_to_refresh = self._get_providers_in_tree(uuid)
             if not rps_to_refresh:
                 created_rp = self._create_resource_provider(
-                    uuid, name or uuid,
+                    context, uuid, name or uuid,
                     parent_provider_uuid=parent_provider_uuid)
                 # Don't add the created_rp to rps_to_refresh. Since we just
                 # created it, it has no aggregates or traits.
@@ -789,9 +794,10 @@ class SchedulerReportClient(object):
         refresh_time = self.association_refresh_time.get(uuid, 0)
         return (time.time() - refresh_time) > ASSOCIATION_REFRESH
-    def _update_inventory_attempt(self, rp_uuid, inv_data):
+    def _update_inventory_attempt(self, context, rp_uuid, inv_data):
         """Update the inventory for this resource provider if needed.
+        :param context: The security context
         :param rp_uuid: The resource provider UUID for the operation
         :param inv_data: The new inventory for the resource provider
         :returns: True if the inventory was updated (or did not need to be),
@@ -865,7 +871,7 @@ class SchedulerReportClient(object):
             # NOTE(jaypipes): We don't need to pass a name parameter to
             # _ensure_resource_provider() because we know the resource provider
             # record already exists. We're just reloading the record here.
-            self._ensure_resource_provider(rp_uuid)
+            self._ensure_resource_provider(context, rp_uuid)
             return False
         elif not result:
             placement_req_id = get_placement_request_id(result)
@@ -904,7 +910,7 @@ class SchedulerReportClient(object):
         return True
     @safe_connect
-    def _update_inventory(self, rp_uuid, inv_data):
+    def _update_inventory(self, context, rp_uuid, inv_data):
         for attempt in (1, 2, 3):
             if not self._provider_tree.exists(rp_uuid):
                 # NOTE(danms): Either we failed to fetch/create the RP
@@ -913,7 +919,7 @@ class SchedulerReportClient(object):
                 # it. Bail and try again next time.
                 LOG.warning('Unable to refresh my resource provider record')
                 return False
-            if self._update_inventory_attempt(rp_uuid, inv_data):
+            if self._update_inventory_attempt(context, rp_uuid, inv_data):
                 return True
             time.sleep(1)
         return False
@@ -1006,7 +1012,7 @@ class SchedulerReportClient(object):
             msg_args['err'] = r.text
             LOG.error(msg, msg_args)
-    def get_provider_tree_and_ensure_root(self, rp_uuid, name=None,
+    def get_provider_tree_and_ensure_root(self, context, rp_uuid, name=None,
                                           parent_provider_uuid=None):
         """Returns a fresh ProviderTree representing all providers which are in
         the same tree or in the same aggregate as the specified provider,
@@ -1015,6 +1021,7 @@ class SchedulerReportClient(object):
         If the specified provider does not exist, it is created with the
         specified UUID, name, and parent provider (which *must* already exist).
+        :param context: The security context
         :param rp_uuid: UUID of the resource provider for which to populate the
                         tree. (This doesn't need to be the UUID of the root.)
         :param name: Optional name for the resource provider if the record
@@ -1031,7 +1038,8 @@ class SchedulerReportClient(object):
         # return a deep copy of the local _provider_tree cache.
         # (Re)populate the local ProviderTree
         self._ensure_resource_provider(
-            rp_uuid, name=name, parent_provider_uuid=parent_provider_uuid)
+            context, rp_uuid, name=name,
+            parent_provider_uuid=parent_provider_uuid)
         # Ensure inventories are up to date (for *all* cached RPs)
         for uuid in self._provider_tree.get_provider_uuids():
             self._refresh_and_get_inventory(uuid)
@@ -1058,15 +1066,16 @@ class SchedulerReportClient(object):
            name does not meet the placement API's format requirements.
         """
         self._ensure_resource_provider(
-            rp_uuid, rp_name, parent_provider_uuid=parent_provider_uuid)
+            context, rp_uuid, rp_name,
+            parent_provider_uuid=parent_provider_uuid)
         # Auto-create custom resource classes coming from a virt driver
-        list(map(self._ensure_resource_class,
-                 (rc_name for rc_name in inv_data
-                  if rc_name not in fields.ResourceClass.STANDARD)))
+        for rc_name in inv_data:
+            if rc_name not in fields.ResourceClass.STANDARD:
+                self._ensure_resource_class(context, rc_name)
         if inv_data:
-            self._update_inventory(rp_uuid, inv_data)
+            self._update_inventory(context, rp_uuid, inv_data)
         else:
             self._delete_inventory(context, rp_uuid)
@@ -1218,7 +1227,7 @@ class SchedulerReportClient(object):
         raise exception.ResourceProviderUpdateFailed(url=url, error=resp.text)
     @safe_connect
-    def _ensure_resource_class(self, name):
+    def _ensure_resource_class(self, context, name):
         """Make sure a custom resource class exists.
         First attempt to PUT the resource class using microversion 1.7. If
@@ -1227,6 +1236,7 @@ class SchedulerReportClient(object):
         Returns the name of the resource class if it was successfully
         created or already exists. Otherwise None.
+        :param context: The security context
         :param name: String name of the resource class to check/create.
         :raises: `exception.InvalidResourceClass` upon error.
         """
@@ -1241,7 +1251,7 @@ class SchedulerReportClient(object):
            # call and the associated code.
            LOG.debug('Falling back to placement API microversion 1.2 '
                      'for resource class management.')
-           return self._get_or_create_resource_class(name)
+           return self._get_or_create_resource_class(context, name)
        else:
            msg = ("Failed to ensure resource class record with placement API "
                   "for resource class %(rc_name)s. Got %(status_code)d: "
@@ -1254,19 +1264,20 @@ class SchedulerReportClient(object):
            LOG.error(msg, args)
            raise exception.InvalidResourceClass(resource_class=name)
-    def _get_or_create_resource_class(self, name):
+    def _get_or_create_resource_class(self, context, name):
         """Queries the placement API for a resource class supplied resource
         class string name. If the resource class does not exist, creates it.
         Returns the resource class name if exists or was created, else None.
+        :param context: The security context
         :param name: String name of the resource class to check/create.
         """
         resp = self.get("/resource_classes/%s" % name, version="1.2")
         if 200 <= resp.status_code < 300:
             return name
         elif resp.status_code == 404:
-            self._create_resource_class(name)
+            self._create_resource_class(context, name)
             return name
         else:
             msg = ("Failed to retrieve resource class record from placement "
@@ -1280,9 +1291,10 @@ class SchedulerReportClient(object):
             LOG.error(msg, args)
             return None
-    def _create_resource_class(self, name):
+    def _create_resource_class(self, context, name):
         """Calls the placement API to create a new resource class.
+        :param context: The security context
         :param name: String name of the resource class to create.
         :returns: None on successful creation.
@@ -1292,7 +1304,8 @@ class SchedulerReportClient(object):
         payload = {
             'name': name,
         }
-        resp = self.post(url, payload, version="1.2")
+        resp = self.post(url, payload, version="1.2",
+                         global_request_id=context.global_id)
         if 200 <= resp.status_code < 300:
             msg = ("Created resource class record via placement API for "
                    "resource class %s.")
@@ -1324,11 +1337,11 @@ class SchedulerReportClient(object):
            resource classes that would be deleted by an update to the
            placement API.
         """
-        self._ensure_resource_provider(compute_node.uuid,
+        self._ensure_resource_provider(context, compute_node.uuid,
                                        compute_node.hypervisor_hostname)
         inv_data = _compute_node_to_inventory_dict(compute_node)
         if inv_data:
-            self._update_inventory(compute_node.uuid, inv_data)
+            self._update_inventory(context, compute_node.uuid, inv_data)
         else:
             self._delete_inventory(context, compute_node.uuid)
@@ -1562,8 +1575,8 @@ class SchedulerReportClient(object):
     @safe_connect
     @retries
-    def set_and_clear_allocations(self, rp_uuid, consumer_uuid, alloc_data,
-                                  project_id, user_id,
+    def set_and_clear_allocations(self, context, rp_uuid, consumer_uuid,
+                                  alloc_data, project_id, user_id,
                                   consumer_to_clear=None):
         """Create allocation records for the supplied consumer UUID while
         simultaneously clearing any allocations identified by the uuid
@@ -1575,6 +1588,7 @@ class SchedulerReportClient(object):
         Once shared storage and things like NUMA allocations are a
         reality, this will change to allocate against multiple providers.
+        :param context: The security context
         :param rp_uuid: The UUID of the resource provider to allocate against.
         :param consumer_uuid: The consumer UUID for which allocations are
                               being set.
@@ -1608,7 +1622,8 @@ class SchedulerReportClient(object):
             'user_id': user_id,
         }
         r = self.post('/allocations', payload,
-                      version=POST_ALLOCATIONS_API_VERSION)
+                      version=POST_ALLOCATIONS_API_VERSION,
+                      global_request_id=context.global_id)
         if r.status_code != 204:
             # NOTE(jaypipes): Yes, it sucks doing string comparison like this
             # but we have no error codes, only error messages.

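Taken together, the hunks above thread the security context from each
public entry point of SchedulerReportClient down to the HTTP layer. A
condensed sketch of the resulting call chain (abbreviated from the diff,
not a literal excerpt):

    # 'context' is the nova RequestContext; its global_id attribute
    # carries the global request ID that arrived with the user's request.
    reportclient.set_and_clear_allocations(
        context, rp_uuid, consumer_uuid, alloc_data, project_id, user_id)
    # -> self.post('/allocations', payload,
    #              version=POST_ALLOCATIONS_API_VERSION,
    #              global_request_id=context.global_id)
    # -> keystoneauth POST with
    #    headers={'X-Openstack-Request-Id': context.global_id}
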
View File

@@ -113,7 +113,7 @@ class SchedulerReportClientTests(test.TestCase):
         # But get_provider_tree_and_ensure_root creates one (via
         # _ensure_resource_provider)
         ptree = self.client.get_provider_tree_and_ensure_root(
-            self.compute_uuid)
+            self.context, self.compute_uuid)
         self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())
         # Now let's update status for our compute node.
@@ -147,7 +147,7 @@ class SchedulerReportClientTests(test.TestCase):
         # Providers and inventory show up nicely in the provider tree
         ptree = self.client.get_provider_tree_and_ensure_root(
-            self.compute_uuid)
+            self.context, self.compute_uuid)
         self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())
         self.assertTrue(ptree.has_inventory(self.compute_uuid))
@@ -193,7 +193,7 @@ class SchedulerReportClientTests(test.TestCase):
         # Build the provider tree afresh.
         ptree = self.client.get_provider_tree_and_ensure_root(
-            self.compute_uuid)
+            self.context, self.compute_uuid)
         # The compute node is still there
         self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())
         # But the inventory is gone
@@ -233,6 +233,11 @@ class SchedulerReportClientTests(test.TestCase):
                 app=lambda: assert_app, url=self.url):
             self.client._delete_provider(self.compute_uuid,
                                          global_request_id=global_request_id)
+            payload = {
+                'name': 'test-resource-provider'
+            }
+            self.client.post('/resource_providers', payload,
+                             global_request_id=global_request_id)
     def test_get_provider_tree_with_nested_and_aggregates(self):
         """A more in-depth test of get_provider_tree_and_ensure_root with
@@ -344,7 +349,7 @@ class SchedulerReportClientTests(test.TestCase):
         # Setup is done. Grab the ProviderTree
         prov_tree = self.client.get_provider_tree_and_ensure_root(
-            self.compute_uuid)
+            self.context, self.compute_uuid)
         # All providers show up because we used set_inventory_for_provider
         self.assertEqual(set([self.compute_uuid, uuids.ss1, uuids.ss2,

View File

@@ -6202,7 +6202,7 @@ class ComputeTestCase(BaseTestCase,
                                       migrate_data=migrate_data)
         mock_setup.assert_called_once_with(c, instance, self.compute.host)
         mock_client.set_and_clear_allocations.assert_called_once_with(
-            mock.sentinel.source, instance.uuid,
+            c, mock.sentinel.source, instance.uuid,
             mock.sentinel.allocs,
             instance.project_id, instance.user_id,
             consumer_to_clear=migration.uuid)

View File

@@ -6427,7 +6427,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
         self.assertTrue(r)
         mock_report.set_and_clear_allocations.assert_called_once_with(
-            cu, self.instance.uuid, {'DISK_GB': 1},
+            mock.sentinel.ctx, cu, self.instance.uuid, {'DISK_GB': 1},
             self.instance.project_id, self.instance.user_id,
             consumer_to_clear=self.migration.uuid)

View File

@@ -14,6 +14,7 @@ import mock
 from nova.compute import rpcapi as compute_rpcapi
 from nova.conductor.tasks import migrate
+from nova import context
 from nova import exception
 from nova import objects
 from nova.scheduler import client as scheduler_client
@@ -213,8 +214,8 @@ class MigrationTaskTestCase(test.NoDBTestCase):
         task._migration.create.assert_called_once_with()
         task._migration.save.assert_called_once_with()
         self.assertEqual('error', task._migration.status)
-        mock_ra.assert_called_once_with(task._source_cn, task.instance,
-                                        task._migration,
+        mock_ra.assert_called_once_with(task.context, task._source_cn,
+                                        task.instance, task._migration,
                                         task._held_allocations)
@@ -253,6 +254,7 @@ class MigrationTaskAllocationUtils(test.NoDBTestCase):
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
     def test_replace_allocation_with_migration_allocs_fail(self, mock_cn,
                                                            mock_ga, mock_pa):
+        ctxt = context.get_admin_context()
         migration = objects.Migration(uuid=uuids.migration)
         instance = objects.Instance(uuid=uuids.instance,
                                     user_id='fake', project_id='fake',
@@ -261,5 +263,5 @@ class MigrationTaskAllocationUtils(test.NoDBTestCase):
         self.assertRaises(exception.NoValidHost,
                           migrate.replace_allocation_with_migration,
-                          mock.sentinel.context,
+                          ctxt,
                           instance, migration)

View File

@@ -1072,13 +1072,14 @@ class TestSetAndClearAllocations(SchedulerReportClientTestCase):
         expected_microversion = '1.13'
         resp = self.client.set_and_clear_allocations(
-            self.rp_uuid, self.consumer_uuid, self.data,
+            self.context, self.rp_uuid, self.consumer_uuid, self.data,
             self.project_id, self.user_id)
         self.assertTrue(resp)
         self.mock_post.assert_called_once_with(
             self.expected_url, mock.ANY,
-            version=expected_microversion)
+            version=expected_microversion,
+            global_request_id=self.context.global_id)
     def test_payload_no_clear(self):
         expected_payload = {
@@ -1096,7 +1097,7 @@ class TestSetAndClearAllocations(SchedulerReportClientTestCase):
         }
         resp = self.client.set_and_clear_allocations(
-            self.rp_uuid, self.consumer_uuid, self.data,
+            self.context, self.rp_uuid, self.consumer_uuid, self.data,
             self.project_id, self.user_id)
         self.assertTrue(resp)
@@ -1125,7 +1126,7 @@ class TestSetAndClearAllocations(SchedulerReportClientTestCase):
         }
         resp = self.client.set_and_clear_allocations(
-            self.rp_uuid, self.consumer_uuid, self.data,
+            self.context, self.rp_uuid, self.consumer_uuid, self.data,
             self.project_id, self.user_id,
             consumer_to_clear=mock.sentinel.migration_uuid)
@@ -1139,7 +1140,7 @@ class TestSetAndClearAllocations(SchedulerReportClientTestCase):
         self.mock_post.return_value.text = 'concurrently updated'
         resp = self.client.set_and_clear_allocations(
-            self.rp_uuid, self.consumer_uuid, self.data,
+            self.context, self.rp_uuid, self.consumer_uuid, self.data,
             self.project_id, self.user_id,
             consumer_to_clear=mock.sentinel.migration_uuid)
@@ -1154,7 +1155,7 @@ class TestSetAndClearAllocations(SchedulerReportClientTestCase):
         self.mock_post.return_value.text = error_message
         resp = self.client.set_and_clear_allocations(
-            self.rp_uuid, self.consumer_uuid, self.data,
+            self.context, self.rp_uuid, self.consumer_uuid, self.data,
             self.project_id, self.user_id,
             consumer_to_clear=mock.sentinel.migration_uuid)
@@ -1212,7 +1213,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
                 'generation': 2,
             },
         ]
-        self.client._ensure_resource_provider(cn.uuid)
+        self.client._ensure_resource_provider(self.context, cn.uuid)
         get_pia_mock.assert_called_once_with(set([uuids.agg1, uuids.agg2]))
         self.assertTrue(self.client._provider_tree.exists(uuids.shr1))
         self.assertTrue(self.client._provider_tree.exists(uuids.shr2))
@@ -1269,7 +1270,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         get_trait_mock.return_value = set(['CUSTOM_GOLD'])
         get_pia_mock.return_value = []
-        self.client._ensure_resource_provider(uuids.compute_node)
+        self.client._ensure_resource_provider(self.context, uuids.compute_node)
         get_rpt_mock.assert_called_once_with(uuids.compute_node)
         self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
@@ -1308,11 +1309,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.assertRaises(
             exception.ResourceProviderCreationFailed,
-            self.client._ensure_resource_provider, uuids.compute_node)
+            self.client._ensure_resource_provider, self.context,
+            uuids.compute_node)
         get_rpt_mock.assert_called_once_with(uuids.compute_node)
         create_rp_mock.assert_called_once_with(
-            uuids.compute_node, uuids.compute_node, parent_provider_uuid=None)
+            self.context, uuids.compute_node, uuids.compute_node,
+            parent_provider_uuid=None)
         self.assertFalse(self.client._provider_tree.exists(uuids.compute_node))
         self.assertFalse(refresh_mock.called)
         self.assertRaises(
@@ -1341,7 +1344,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         }
         self.assertEqual(
             uuids.compute_node,
-            self.client._ensure_resource_provider(uuids.compute_node))
+            self.client._ensure_resource_provider(self.context,
+                                                  uuids.compute_node))
         self._validate_provider(uuids.compute_node, name='compute-name',
                                 generation=1, parent_uuid=None,
                                 aggregates=set(), traits=set())
@@ -1350,6 +1354,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         refresh_mock.assert_not_called()
         get_rpt_mock.assert_called_once_with(uuids.compute_node)
         create_rp_mock.assert_called_once_with(
+            self.context,
             uuids.compute_node,
             uuids.compute_node,  # name param defaults to UUID if None
             parent_provider_uuid=None,
@@ -1360,7 +1365,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.assertEqual(
             uuids.compute_node,
-            self.client._ensure_resource_provider(uuids.compute_node))
+            self.client._ensure_resource_provider(self.context,
+                                                  uuids.compute_node))
         self._validate_provider(uuids.compute_node, name='compute-name',
                                 generation=1, parent_uuid=None)
@@ -1368,9 +1374,10 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.assertFalse(create_rp_mock.called)
         # Validate the path where we specify a name (don't default to the UUID)
-        self.client._ensure_resource_provider(uuids.cn2, 'a-name')
+        self.client._ensure_resource_provider(
+            self.context, uuids.cn2, 'a-name')
         create_rp_mock.assert_called_once_with(
-            uuids.cn2, 'a-name', parent_provider_uuid=None)
+            self.context, uuids.cn2, 'a-name', parent_provider_uuid=None)
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                 '_refresh_associations', new=mock.Mock())
@@ -1380,7 +1387,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
                 '_get_providers_in_tree')
     def test_ensure_resource_provider_tree(self, get_rpt_mock, create_rp_mock):
         """Test _ensure_resource_provider with a tree of providers."""
-        def _create_resource_provider(uuid, name, parent_provider_uuid=None):
+        def _create_resource_provider(context, uuid, name,
+                                      parent_provider_uuid=None):
             """Mock side effect for creating the RP with the specified args."""
             return {
                 'uuid': uuid,
@@ -1394,28 +1402,31 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         get_rpt_mock.return_value = []
         # Create the root
-        root = self.client._ensure_resource_provider(uuids.root)
+        root = self.client._ensure_resource_provider(self.context, uuids.root)
         self.assertEqual(uuids.root, root)
         # Now create a child
         child1 = self.client._ensure_resource_provider(
-            uuids.child1, name='junior', parent_provider_uuid=uuids.root)
+            self.context, uuids.child1, name='junior',
+            parent_provider_uuid=uuids.root)
         self.assertEqual(uuids.child1, child1)
         # If we re-ensure the child, we get the object from the tree, not a
         # newly-created one - i.e. the early .find() works like it should.
         self.assertIs(child1,
-                      self.client._ensure_resource_provider(uuids.child1))
+                      self.client._ensure_resource_provider(self.context,
+                                                            uuids.child1))
         # Make sure we can create a grandchild
         grandchild = self.client._ensure_resource_provider(
-            uuids.grandchild, parent_provider_uuid=uuids.child1)
+            self.context, uuids.grandchild,
+            parent_provider_uuid=uuids.child1)
         self.assertEqual(uuids.grandchild, grandchild)
         # Now create a second child of the root and make sure it doesn't wind
         # up in some crazy wrong place like under child1 or grandchild
         child2 = self.client._ensure_resource_provider(
-            uuids.child2, parent_provider_uuid=uuids.root)
+            self.context, uuids.child2, parent_provider_uuid=uuids.root)
         self.assertEqual(uuids.child2, child2)
         # At this point we should get all the providers.
@@ -1436,7 +1447,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         tree_uuids = [uuids.root, uuids.one, uuids.two]
         mock_gpu.return_value = tree_uuids
         self.assertEqual(uuids.root,
-                         self.client._ensure_resource_provider(uuids.root))
+                         self.client._ensure_resource_provider(self.context,
+                                                               uuids.root))
         mock_exists.assert_called_once_with(uuids.root)
         mock_gpu.assert_called_once_with(uuids.root)
         mock_refresh.assert_has_calls(
@@ -1455,7 +1467,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         mock_gpit.return_value = [{'uuid': u, 'name': u, 'generation': 42}
                                   for u in tree_uuids]
         self.assertEqual(uuids.root,
-                         self.client._ensure_resource_provider(uuids.root))
+                         self.client._ensure_resource_provider(self.context,
+                                                               uuids.root))
         mock_gpit.assert_called_once_with(uuids.root)
         mock_refresh.assert_has_calls(
             [mock.call(uuid, generation=42, force=True)
@@ -1476,9 +1489,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         mock_create.return_value = {'name': 'cn', 'uuid': uuids.cn,
                                     'generation': 42}
         self.assertEqual(uuids.root,
-                         self.client._ensure_resource_provider(uuids.root))
+                         self.client._ensure_resource_provider(self.context,
+                                                               uuids.root))
         mock_gpit.assert_called_once_with(uuids.root)
-        mock_create.assert_called_once_with(uuids.root, uuids.root,
+        mock_create.assert_called_once_with(self.context, uuids.root,
+                                            uuids.root,
                                             parent_provider_uuid=None)
         mock_refresh.assert_not_called()
         self.assertEqual([uuids.cn],
@@ -1762,7 +1777,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         resp_mock = mock.Mock(status_code=201)
         self.ks_adap_mock.post.return_value = resp_mock
-        self.client._create_resource_provider(uuid, name)
+        self.client._create_resource_provider(self.context, uuid, name)
         expected_payload = {
             'uuid': uuid,
@@ -1772,7 +1787,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         expected_url = '/resource_providers'
         self.ks_adap_mock.post.assert_called_once_with(
             expected_url, json=expected_payload, raise_exc=False,
-            microversion='1.14')
+            microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
     def test_create_resource_provider_with_parent(self):
         """Test that when specifying a parent provider UUID, that the
@@ -1785,6 +1801,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.ks_adap_mock.post.return_value = resp_mock
         result = self.client._create_resource_provider(
+            self.context,
             uuid,
             name,
             parent_provider_uuid=parent_uuid,
@@ -1804,7 +1821,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         expected_url = '/resource_providers'
         self.ks_adap_mock.post.assert_called_once_with(
             expected_url, json=expected_payload, raise_exc=False,
-            microversion='1.14')
+            microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertEqual(expected_provider_dict, result)
     @mock.patch.object(report.LOG, 'info')
@@ -1826,7 +1844,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         get_rp_mock.return_value = mock.sentinel.get_rp
-        result = self.client._create_resource_provider(uuid, name)
+        result = self.client._create_resource_provider(self.context, uuid,
+                                                       name)
         expected_payload = {
             'uuid': uuid,
@@ -1835,7 +1854,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         expected_url = '/resource_providers'
         self.ks_adap_mock.post.assert_called_once_with(
             expected_url, json=expected_payload, raise_exc=False,
-            microversion='1.14')
+            microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertEqual(mock.sentinel.get_rp, result)
         # The 409 response will produce a message to the info log.
         self.assertTrue(logging_mock.called)
@@ -1852,7 +1872,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.assertRaises(
             exception.ResourceProviderCreationFailed,
-            self.client._create_resource_provider, uuids.compute_node, 'foo')
+            self.client._create_resource_provider, self.context,
+            uuids.compute_node, 'foo')
     @mock.patch.object(report.LOG, 'error')
     def test_create_resource_provider_error(self, logging_mock):
@@ -1868,7 +1889,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.assertRaises(
             exception.ResourceProviderCreationFailed,
-            self.client._create_resource_provider, uuid, name)
+            self.client._create_resource_provider, self.context, uuid, name)
         expected_payload = {
             'uuid': uuid,
@@ -1877,7 +1898,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         expected_url = '/resource_providers'
         self.ks_adap_mock.post.assert_called_once_with(
             expected_url, json=expected_payload, raise_exc=False,
-            microversion='1.14')
+            microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         # A 503 Service Unavailable should log an error that
         # includes the placement request id and
         # _create_resource_provider() should return None
@@ -2381,7 +2403,8 @@ class TestInventory(SchedulerReportClientTestCase):
     def test_update_compute_node(self, mock_ui, mock_delete, mock_erp):
         cn = self.compute_node
         self.client.update_compute_node(self.context, cn)
-        mock_erp.assert_called_once_with(cn.uuid, cn.hypervisor_hostname)
+        mock_erp.assert_called_once_with(self.context, cn.uuid,
+                                         cn.hypervisor_hostname)
         expected_inv_data = {
             'VCPU': {
                 'total': 8,
@@ -2409,6 +2432,7 @@ class TestInventory(SchedulerReportClientTestCase):
             },
         }
         mock_ui.assert_called_once_with(
+            self.context,
             cn.uuid,
             expected_inv_data,
         )
@@ -2430,7 +2454,8 @@ class TestInventory(SchedulerReportClientTestCase):
         cn.memory_mb = 0
         cn.local_gb = 0
         self.client.update_compute_node(self.context, cn)
-        mock_erp.assert_called_once_with(cn.uuid, cn.hypervisor_hostname)
+        mock_erp.assert_called_once_with(self.context, cn.uuid,
+                                         cn.hypervisor_hostname)
         mock_delete.assert_called_once_with(self.context, cn.uuid)
         self.assertFalse(mock_ui.called)
@@ -2755,7 +2780,7 @@ There was a conflict when trying to complete your request.
         inv_data = report._compute_node_to_inventory_dict(compute_node)
         result = self.client._update_inventory_attempt(
-            compute_node.uuid, inv_data
+            self.context, compute_node.uuid, inv_data
         )
         self.assertTrue(result)
@@ -2831,7 +2856,7 @@ There was a conflict when trying to complete your request.
         # Make a change to trigger the update...
         inv_data['VCPU']['total'] = new_vcpus_total
         result = self.client._update_inventory_attempt(
-            compute_node.uuid, inv_data
+            self.context, compute_node.uuid, inv_data
         )
         self.assertTrue(result)
@@ -2917,7 +2942,7 @@ There was a conflict when trying to complete your request.
         }
         inv_data = report._compute_node_to_inventory_dict(compute_node)
         result = self.client._update_inventory_attempt(
-            compute_node.uuid, inv_data
+            self.context, compute_node.uuid, inv_data
         )
         self.assertTrue(result)
         exp_url = '/resource_providers/%s/inventories' % uuid
@@ -2958,14 +2983,14 @@ There was a conflict when trying to complete your request.
         inv_data = report._compute_node_to_inventory_dict(compute_node)
         result = self.client._update_inventory_attempt(
-            compute_node.uuid, inv_data
+            self.context, compute_node.uuid, inv_data
         )
         self.assertFalse(result)
         # Invalidated the cache
         self.assertFalse(self.client._provider_tree.exists(uuid))
         # Refreshed our resource provider
-        mock_ensure.assert_called_once_with(uuid)
+        mock_ensure.assert_called_once_with(self.context, uuid)
         # Logged the request id in the log message
         self.assertEqual(uuids.request_id,
                          mock_info.call_args[0][1]['placement_req_id'])
@@ -3000,6 +3025,7 @@ There was a conflict when trying to complete your request.
         self.assertRaises(
             exception.InventoryInUse,
             self.client._update_inventory_attempt,
+            self.context,
             compute_node.uuid,
             inv_data,
         )
@@ -3035,7 +3061,7 @@ There was a conflict when trying to complete your request.
         inv_data = report._compute_node_to_inventory_dict(compute_node)
         result = self.client._update_inventory_attempt(
-            compute_node.uuid, inv_data
+            self.context, compute_node.uuid, inv_data
         )
         self.assertFalse(result)
@@ -3075,7 +3101,7 @@ There was a conflict when trying to complete your request.
         inv_data = report._compute_node_to_inventory_dict(compute_node)
         result = self.client._update_inventory_attempt(
-            compute_node.uuid, inv_data
+            self.context, compute_node.uuid, inv_data
         )
         self.assertFalse(result)
@@ -3106,7 +3132,7 @@ There was a conflict when trying to complete your request.
             42,
         )
         result = self.client._update_inventory(
-            cn.uuid, mock.sentinel.inv_data
+            self.context, cn.uuid, mock.sentinel.inv_data
         )
         self.assertTrue(result)
@@ -3131,7 +3157,7 @@ There was a conflict when trying to complete your request.
             42,
         )
         result = self.client._update_inventory(
-            cn.uuid, mock.sentinel.inv_data
+            self.context, cn.uuid, mock.sentinel.inv_data
        )
        self.assertFalse(result)
@@ -3140,9 +3166,9 @@ There was a conflict when trying to complete your request.
         # Three attempts to update
         mock_update.assert_has_calls([
-            mock.call(cn.uuid, mock.sentinel.inv_data),
-            mock.call(cn.uuid, mock.sentinel.inv_data),
-            mock.call(cn.uuid, mock.sentinel.inv_data),
+            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
+            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
+            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
         ])
         # Slept three times
@@ -3196,6 +3222,7 @@ There was a conflict when trying to complete your request.
             inv_data,
         )
         mock_erp.assert_called_once_with(
+            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
@@ -3204,6 +3231,7 @@ There was a conflict when trying to complete your request.
         self.assertFalse(mock_erc.called)
         self.assertFalse(mock_gocr.called)
         mock_upd.assert_called_once_with(
+            self.context,
             mock.sentinel.rp_uuid,
             inv_data,
         )
@@ -3232,6 +3260,7 @@ There was a conflict when trying to complete your request.
             inv_data,
         )
         mock_erp.assert_called_once_with(
+            self.context,
             mock.sentinel.rp_uuid,
             mock.sentinel.rp_name,
             parent_provider_uuid=None,
@@ -3299,12 +3328,14 @@ There was a conflict when trying to complete your request.
             inv_data,
         )
         mock_erp.assert_called_once_with(
+            self.context,
             mock.sentinel.rp_uuid,
             mock.sentinel.rp_name,
             parent_provider_uuid=None,
         )
-        mock_erc.assert_called_once_with('CUSTOM_IRON_SILVER')
+        mock_erc.assert_called_once_with(self.context, 'CUSTOM_IRON_SILVER')
         mock_upd.assert_called_once_with(
+            self.context,
             mock.sentinel.rp_uuid,
             inv_data,
         )
@@ -3323,7 +3354,8 @@ There was a conflict when trying to complete your request.
             self.context, uuids.child, 'junior', {},
             parent_provider_uuid=uuids.parent)
         mock_erp.assert_called_once_with(
-            uuids.child, 'junior', parent_provider_uuid=uuids.parent)
+            self.context, uuids.child, 'junior',
+            parent_provider_uuid=uuids.parent)
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                 'put')
@@ -3332,8 +3364,8 @@ There was a conflict when trying to complete your request.
     def test_ensure_resource_class_microversion_failover(self, mock_gocr,
                                                          mock_put):
         mock_put.return_value.status_code = 406
-        self.client._ensure_resource_class('CUSTOM_IRON_SILVER')
-        mock_gocr.assert_called_once_with('CUSTOM_IRON_SILVER')
+        self.client._ensure_resource_class(self.context, 'CUSTOM_IRON_SILVER')
+        mock_gocr.assert_called_once_with(self.context, 'CUSTOM_IRON_SILVER')
 class TestAllocations(SchedulerReportClientTestCase):
@@ -3644,7 +3676,8 @@ class TestResourceClass(SchedulerReportClientTestCase):
         resp_mock = mock.Mock(status_code=200)
         mock_get.return_value = resp_mock
         rc_name = 'CUSTOM_FOO'
-        result = self.client._get_or_create_resource_class(rc_name)
+        result = self.client._get_or_create_resource_class(self.context,
+                                                           rc_name)
         mock_get.assert_called_once_with(
             '/resource_classes/' + rc_name,
             version="1.2",
@@ -3659,12 +3692,13 @@ class TestResourceClass(SchedulerReportClientTestCase):
         resp_mock = mock.Mock(status_code=404)
         mock_get.return_value = resp_mock
         rc_name = 'CUSTOM_FOO'
-        result = self.client._get_or_create_resource_class(rc_name)
+        result = self.client._get_or_create_resource_class(self.context,
+                                                           rc_name)
         mock_get.assert_called_once_with(
             '/resource_classes/' + rc_name,
             version="1.2",
         )
-        mock_crc.assert_called_once_with(rc_name)
+        mock_crc.assert_called_once_with(self.context, rc_name)
         self.assertEqual(rc_name, result)
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -3674,7 +3708,8 @@ class TestResourceClass(SchedulerReportClientTestCase):
         resp_mock = mock.Mock(status_code=500, text='server error')
         mock_get.return_value = resp_mock
         rc_name = 'CUSTOM_FOO'
-        result = self.client._get_or_create_resource_class(rc_name)
+        result = self.client._get_or_create_resource_class(self.context,
+                                                           rc_name)
         mock_get.assert_called_once_with(
             '/resource_classes/' + rc_name,
             version="1.2",
@@ -3687,11 +3722,12 @@ class TestResourceClass(SchedulerReportClientTestCase):
         resp_mock = mock.Mock(status_code=201)
         mock_post.return_value = resp_mock
         rc_name = 'CUSTOM_FOO'
-        result = self.client._create_resource_class(rc_name)
+        result = self.client._create_resource_class(self.context, rc_name)
         mock_post.assert_called_once_with(
             '/resource_classes',
             {'name': rc_name},
             version="1.2",
+            global_request_id=self.context.global_id
         )
         self.assertIsNone(result)
@@ -3701,11 +3737,12 @@ class TestResourceClass(SchedulerReportClientTestCase):
         resp_mock = mock.Mock(status_code=409)
         mock_post.return_value = resp_mock
         rc_name = 'CUSTOM_FOO'
-        result = self.client._create_resource_class(rc_name)
+        result = self.client._create_resource_class(self.context, rc_name)
         mock_post.assert_called_once_with(
             '/resource_classes',
             {'name': rc_name},
             version="1.2",
+            global_request_id=self.context.global_id
         )
         self.assertIsNone(result)
         self.assertIn('Another thread already', mock_log.call_args[0][0])
self.assertIn('Another thread already', mock_log.call_args[0][0])