[placement] Add sending global request ID in delete (2)

Add the 'X-Openstack-Request-Id' header to DELETE requests sent to the
placement API. In this patch the header is added when deleting
allocations for a server (instance).

Subsequent patches will add the header in the other cases.

Change-Id: If38e4a6d49910f0aa5016e1bcb61aac2be416fa7
Partial-Bug: #1734625
Author: Takashi NATSUME
Date:   2017-12-07 11:25:37 +09:00
Parent: 90a92d33ed
Commit: 0cbe9b2333
13 changed files with 88 additions and 67 deletions
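
For orientation before the per-file hunks: the end result of this series
is that the caller's global request ID travels to the placement service
as an 'X-Openstack-Request-Id' header. Below is a minimal stand-alone
sketch of what the resulting DELETE looks like on the wire, using plain
requests rather than Nova's keystoneauth-based report client
('placement_url' and 'delete_allocations' are illustrative names, not
Nova code):

import requests


def delete_allocations(placement_url, consumer_uuid, global_request_id=None):
    """Delete a consumer's allocations, forwarding the global request ID.

    Illustrative only: Nova's SchedulerReportClient goes through a
    keystoneauth session, but the header it sends is the same.
    """
    headers = {}
    if global_request_id:
        headers['X-Openstack-Request-Id'] = global_request_id
    # DELETE /allocations/{consumer_uuid} removes that consumer's allocations.
    return requests.delete(
        '%s/allocations/%s' % (placement_url, consumer_uuid),
        headers=headers)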

View File

@@ -750,7 +750,7 @@ class ComputeManager(manager.Manager):
self._update_resource_tracker(context, instance)
rt = self._get_resource_tracker()
rt.reportclient.delete_allocation_for_instance(instance.uuid)
rt.reportclient.delete_allocation_for_instance(context, instance.uuid)
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
@@ -1774,7 +1774,8 @@ class ComputeManager(manager.Manager):
# call this for a reschedule, as the allocations will
# have already been removed in
# self._do_build_and_run_instance().
self._delete_allocation_for_instance(instance.uuid)
self._delete_allocation_for_instance(context,
instance.uuid)
if result in (build_results.FAILED,
build_results.RESCHEDULED):
@@ -1791,9 +1792,9 @@ class ComputeManager(manager.Manager):
requested_networks, security_groups,
block_device_mapping, node, limits, host_list)
def _delete_allocation_for_instance(self, instance_uuid):
def _delete_allocation_for_instance(self, context, instance_uuid):
rt = self._get_resource_tracker()
rt.reportclient.delete_allocation_for_instance(instance_uuid)
rt.reportclient.delete_allocation_for_instance(context, instance_uuid)
def _check_device_tagging(self, requested_networks, block_device_mapping):
tagging_requested = False
@@ -1904,7 +1905,7 @@ class ComputeManager(manager.Manager):
# to unclaim those resources before casting to the conductor, so
# that if there are alternate hosts available for a retry, it can
# claim resources on that new host for the instance.
self._delete_allocation_for_instance(instance.uuid)
self._delete_allocation_for_instance(context, instance.uuid)
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
@@ -3689,7 +3690,7 @@ class ComputeManager(manager.Manager):
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, migration.source_node,
old_instance_type, prefix='old_')
self._delete_allocation_after_move(instance, migration,
self._delete_allocation_after_move(context, instance, migration,
old_instance_type,
migration.source_node)
instance.drop_migration_context()
@@ -3718,8 +3719,8 @@ class ComputeManager(manager.Manager):
context, instance, "resize.confirm.end",
network_info=network_info)
def _delete_allocation_after_move(self, instance, migration, flavor,
nodename):
def _delete_allocation_after_move(self, context, instance, migration,
flavor, nodename):
rt = self._get_resource_tracker()
cn_uuid = rt.get_node_uuid(nodename)
@@ -3728,7 +3729,7 @@ class ComputeManager(manager.Manager):
# NOTE(danms): We're finishing on the source node, so try to
# delete the allocation based on the migration uuid
deleted = self.reportclient.delete_allocation_for_instance(
migration.uuid)
context, migration.uuid)
if deleted:
LOG.info(_('Source node %(node)s confirmed migration '
'%(mig)s; deleted migration-based '
@@ -3851,7 +3852,7 @@ class ComputeManager(manager.Manager):
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, instance.node)
self._delete_allocation_after_move(instance, migration,
self._delete_allocation_after_move(context, instance, migration,
instance.flavor,
instance.node)
@@ -4775,7 +4776,7 @@ class ComputeManager(manager.Manager):
self._update_resource_tracker(context, instance)
rt = self._get_resource_tracker()
rt.delete_allocation_for_shelve_offloaded_instance(instance)
rt.delete_allocation_for_shelve_offloaded_instance(context, instance)
# NOTE(sfinucan): RPC calls should no longer be attempted against this
# instance, so ensure any calls result in errors
@@ -4881,7 +4882,8 @@ class ComputeManager(manager.Manager):
# the instance claim failed with ComputeResourcesUnavailable
# or if we did claim but the spawn failed, because aborting the
# instance claim will not remove the allocations.
rt.reportclient.delete_allocation_for_instance(instance.uuid)
rt.reportclient.delete_allocation_for_instance(context,
instance.uuid)
# FIXME: Umm, shouldn't we be rolling back volume connections
# and port bindings?
@@ -6260,7 +6262,8 @@ class ComputeManager(manager.Manager):
if allocs:
# We had a migration-based allocation that we need to handle
self._delete_allocation_after_move(instance,
self._delete_allocation_after_move(ctxt,
instance,
migrate_data.migration,
instance.flavor,
source_node)
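
All of the compute-manager changes above are mechanical: each helper
between the manager and the report client gains a context parameter so
that context.global_id is available where the placement request is
built (see the SchedulerReportClient hunks below). A toy sketch of what
such a property amounts to; this is a hypothetical stand-alone class,
not Nova's, so check nova.context.RequestContext for the exact
fallback behaviour:

class ToyRequestContext(object):
    """Toy stand-in for a request context that carries request IDs."""

    def __init__(self, request_id, global_request_id=None):
        self.request_id = request_id                # this service's own ID
        self.global_request_id = global_request_id  # ID shared across services

    @property
    def global_id(self):
        # Prefer the global ID handed in by the calling service; otherwise
        # fall back to the local request ID.
        return self.global_request_id or self.request_id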

View File

@@ -1094,8 +1094,8 @@ class ResourceTracker(object):
sign=sign)
if require_allocation_refresh:
LOG.debug("Auto-correcting allocations.")
self.reportclient.update_instance_allocation(cn, instance,
sign)
self.reportclient.update_instance_allocation(context, cn,
instance, sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance), nodename,
sign=sign)
@@ -1260,7 +1260,8 @@ class ResourceTracker(object):
"Deleting allocations that remained for this "
"instance against this compute host: %s.",
instance_uuid, alloc)
self.reportclient.delete_allocation_for_instance(instance_uuid)
self.reportclient.delete_allocation_for_instance(context,
instance_uuid)
continue
if not instance.host:
# Allocations related to instances being scheduled should not
@@ -1383,8 +1384,10 @@ class ResourceTracker(object):
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, instance):
self.reportclient.delete_allocation_for_instance(instance.uuid)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(context,
instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",

View File

@@ -1157,7 +1157,7 @@ class ComputeTaskManager(base.Base):
# This is a placeholder in case the quota recheck fails.
instances.append(None)
rc = self.scheduler_client.reportclient
rc.delete_allocation_for_instance(instance.uuid)
rc.delete_allocation_for_instance(context, instance.uuid)
continue
else:
instance.availability_zone = (

View File

@@ -1372,9 +1372,9 @@ class SchedulerReportClient(object):
return r.status_code == 204
@safe_connect
def delete_allocation_for_instance(self, uuid):
def delete_allocation_for_instance(self, context, uuid):
url = '/allocations/%s' % uuid
r = self.delete(url)
r = self.delete(url, global_request_id=context.global_id)
if r:
LOG.info('Deleted allocation for instance %s', uuid)
return True
@@ -1389,11 +1389,12 @@ class SchedulerReportClient(object):
'text': r.text})
return False
def update_instance_allocation(self, compute_node, instance, sign):
def update_instance_allocation(self, context, compute_node, instance,
sign):
if sign > 0:
self._allocate_for_instance(compute_node.uuid, instance)
else:
self.delete_allocation_for_instance(instance.uuid)
self.delete_allocation_for_instance(context, instance.uuid)
@safe_connect
def get_allocations_for_resource_provider(self, rp_uuid):
@@ -1425,7 +1426,7 @@ class SchedulerReportClient(object):
instances = objects.InstanceList.get_by_host_and_node(context,
host, nodename)
for instance in instances:
self.delete_allocation_for_instance(instance.uuid)
self.delete_allocation_for_instance(context, instance.uuid)
url = "/resource_providers/%s" % rp_uuid
resp = self.delete(url, global_request_id=context.global_id)
if resp:
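
The low-level delete() helper already accepts a global_request_id
keyword (note the pre-existing self.delete(url,
global_request_id=context.global_id) call at the end of the last hunk
above); this patch simply routes the allocation-delete calls through it
as well. A hedged sketch of what such a helper does with the keyword,
assuming a keystoneauth1 Session/Adapter-style client whose delete()
accepts a headers argument (names are illustrative, not the exact Nova
code):

REQUEST_ID_HEADER = 'X-Openstack-Request-Id'


def placement_delete(client, url, global_request_id=None):
    """Issue a DELETE against placement, attaching the request ID header."""
    headers = {}
    if global_request_id:
        headers[REQUEST_ID_HEADER] = global_request_id
    # keystoneauth1 sessions/adapters forward 'headers' onto the HTTP request.
    return client.delete(url, headers=headers)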

View File

@@ -172,8 +172,8 @@ class FilterScheduler(driver.Scheduler):
# is None, that indicates an older conductor, so we need to return
# the objects without alternates. They will be converted back to
# the older dict format representing HostState objects.
return self._legacy_find_hosts(num_instances, spec_obj, hosts,
num_alts)
return self._legacy_find_hosts(context, num_instances, spec_obj,
hosts, num_alts)
# A list of the instance UUIDs that were successfully claimed against
# in the placement API. If we are not able to successfully claim for
@@ -239,7 +239,7 @@ class FilterScheduler(driver.Scheduler):
# Check if we were able to fulfill the request. If not, this call will
# raise a NoValidHost exception.
self._ensure_sufficient_hosts(claimed_hosts, num_instances,
self._ensure_sufficient_hosts(context, claimed_hosts, num_instances,
claimed_instance_uuids)
# We have selected and claimed hosts for each instance. Now we need to
@@ -249,7 +249,7 @@ class FilterScheduler(driver.Scheduler):
alloc_reqs_by_rp_uuid, allocation_request_version)
return selections_to_return
def _ensure_sufficient_hosts(self, hosts, required_count,
def _ensure_sufficient_hosts(self, context, hosts, required_count,
claimed_uuids=None):
"""Checks that we have selected a host for each requested instance. If
not, log this failure, remove allocations for any claimed instances,
@@ -260,7 +260,7 @@ class FilterScheduler(driver.Scheduler):
return
if claimed_uuids:
self._cleanup_allocations(claimed_uuids)
self._cleanup_allocations(context, claimed_uuids)
# NOTE(Rui Chen): If multiple creates failed, set the updated time
# of selected HostState to None so that these HostStates are
# refreshed according to database in next schedule, and release
@@ -279,15 +279,16 @@ class FilterScheduler(driver.Scheduler):
reason = _('There are not enough hosts available.')
raise exception.NoValidHost(reason=reason)
def _cleanup_allocations(self, instance_uuids):
def _cleanup_allocations(self, context, instance_uuids):
"""Removes allocations for the supplied instance UUIDs."""
if not instance_uuids:
return
LOG.debug("Cleaning up allocations for %s", instance_uuids)
for uuid in instance_uuids:
self.placement_client.delete_allocation_for_instance(uuid)
self.placement_client.delete_allocation_for_instance(context, uuid)
def _legacy_find_hosts(self, num_instances, spec_obj, hosts, num_alts):
def _legacy_find_hosts(self, context, num_instances, spec_obj, hosts,
num_alts):
"""Some schedulers do not do claiming, or we can sometimes not be able
to if the Placement service is not reachable. Additionally, we may be
working with older conductors that don't pass in instance_uuids.
@@ -312,7 +313,7 @@ class FilterScheduler(driver.Scheduler):
# Check if we were able to fulfill the request. If not, this call will
# raise a NoValidHost exception.
self._ensure_sufficient_hosts(selected_hosts, num_instances)
self._ensure_sufficient_hosts(context, selected_hosts, num_instances)
selections_to_return = self._get_alternate_hosts(selected_hosts,
spec_obj, hosts, num, num_alts)

View File

@@ -1757,7 +1757,7 @@ class PlacementFixture(fixtures.Fixture):
headers=headers,
raise_exc=False)
def _fake_delete(self, *args):
def _fake_delete(self, *args, **kwargs):
(url,) = args[1:]
# TODO(sbauza): The current placement NoAuthMiddleware returns a 401
# in case a token is not provided. We should change that by creating

View File

@@ -19,6 +19,7 @@ from wsgi_intercept import interceptor
from nova.api.openstack.placement import deploy
from nova import conf
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
@@ -84,6 +85,7 @@ class SchedulerReportClientTests(test.TestCase):
memory_mb=1024,
vcpus=2,
extra_specs={}))
self.context = context.get_admin_context()
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@@ -135,7 +137,7 @@ class SchedulerReportClientTests(test.TestCase):
# Update allocations with our instance
self.client.update_instance_allocation(
self.compute_node, self.instance, 1)
self.context, self.compute_node, self.instance, 1)
# Check that allocations were made
resp = self.client.get('/allocations/%s' % self.instance_uuid)
@@ -152,7 +154,7 @@ class SchedulerReportClientTests(test.TestCase):
# Delete allocations with our instance
self.client.update_instance_allocation(
self.compute_node, self.instance, -1)
self.context, self.compute_node, self.instance, -1)
# No usage
resp = self.client.get('/resource_providers/%s/usages' %

View File

@@ -298,7 +298,8 @@ class IronicResourceTrackerTest(test.TestCase):
# allocation for us. So, we can use our old update routine
# here to mimic that before we go do the compute RT claim,
# and then the checks below.
self.rt.reportclient.update_instance_allocation(cn1_obj,
self.rt.reportclient.update_instance_allocation(self.ctx,
cn1_obj,
inst,
1)
with self.rt.instance_claim(self.ctx, inst, cn1_nodename):

View File

@@ -4878,7 +4878,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_dbari.return_value = build_results.FAILED
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(None, instance, None,
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
service = mock_service.return_value
self.assertTrue(service.disabled)
@@ -4896,7 +4896,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_dbari.return_value = build_results.FAILED
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(None, instance, None,
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
service = mock_service.return_value
self.assertFalse(service.save.called)
@@ -4919,7 +4919,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_dbari.side_effect = _fake_build
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(None, instance, None,
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
service = mock_service.return_value
self.assertFalse(service.save.called)
@@ -4931,7 +4931,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_dbari.return_value = build_results.RESCHEDULED
instance = objects.Instance(uuid=uuids.instance)
for i in range(0, 10):
self.compute.build_and_run_instance(None, instance, None,
self.compute.build_and_run_instance(self.context, instance, None,
None, None)
service = mock_service.return_value
self.assertTrue(service.disabled)
@@ -4952,7 +4952,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
for i in range(0, 10):
self.assertRaises(test.TestingException,
self.compute.build_and_run_instance,
None, instance, None,
self.context, instance, None,
None, None)
service = mock_service.return_value
self.assertTrue(service.disabled)
@@ -6050,8 +6050,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
mock_drop_move_claim.assert_called_once_with(self.context,
self.instance, self.instance.node)
mock_delete_allocation.assert_called_once_with(
self.instance, self.migration, self.instance.flavor,
self.instance.node)
self.context, self.instance, self.migration,
self.instance.flavor, self.instance.node)
self.assertIsNotNone(self.instance.migration_context)
# Three fake BDMs:
@@ -6139,7 +6139,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
self.migration.source_node = self.instance['node']
self.compute._confirm_resize(self.context, self.instance,
self.migration)
mock_delete.assert_called_once_with(self.instance, self.migration,
mock_delete.assert_called_once_with(self.context, self.instance,
self.migration,
self.instance.old_flavor,
self.migration.source_node)
@@ -6152,7 +6153,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def do_it(mock_rc, mock_grt):
instance = mock.MagicMock()
migration = mock.MagicMock()
self.compute._delete_allocation_after_move(instance,
self.compute._delete_allocation_after_move(self.context,
instance,
migration,
mock.sentinel.flavor,
mock.sentinel.node)
@@ -6178,12 +6180,13 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
self.migration.source_node = 'src'
self.migration.uuid = uuids.migration
self.migration.status = 'confirmed'
self.compute._delete_allocation_after_move(self.instance,
self.compute._delete_allocation_after_move(self.context,
self.instance,
self.migration,
mock.sentinel.flavor,
'src')
mock_report.delete_allocation_for_instance.assert_called_once_with(
self.migration.uuid)
self.context, self.migration.uuid)
old = mock_report.remove_provider_from_instance_allocation
if new_rules:
@@ -6209,7 +6212,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
self.migration.source_node = 'src'
self.migration.uuid = uuids.migration
self.migration.status = 'failed'
self.compute._delete_allocation_after_move(self.instance,
self.compute._delete_allocation_after_move(self.context,
self.instance,
self.migration,
mock.sentinel.flavor,
'src')
@@ -6242,7 +6246,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
self.migration.source_node = 'src'
self.migration.dest_node = 'dst'
self.migration.uuid = uuids.migration
self.compute._delete_allocation_after_move(self.instance,
self.compute._delete_allocation_after_move(self.context,
self.instance,
self.migration,
mock.sentinel.flavor,
'dst')
@@ -6825,7 +6830,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
mock.sentinel.allocs)
self._call_post_live_migration(migrate_data=md)
# ...so we should have called the new style delete
mock_delete.assert_called_once_with(self.instance,
mock_delete.assert_called_once_with(self.context,
self.instance,
migration,
self.instance.flavor,
self.instance.node)

View File

@@ -2572,7 +2572,7 @@ class TestUpdateUsageFromInstance(BaseTestCase):
# Only one call should be made to delete allocations, and that should
# be for the first instance created above
rc.delete_allocation_for_instance.assert_called_once_with(
uuids.deleted)
ctx, uuids.deleted)
mock_inst_get.assert_called_once_with(
ctx.elevated.return_value,
uuids.deleted,
@@ -2619,7 +2619,7 @@ class TestUpdateUsageFromInstance(BaseTestCase):
# Only one call should be made to delete allocations, and that should
# be for the first instance created above
rc.delete_allocation_for_instance.assert_called_once_with(
uuids.deleted)
ctx, uuids.deleted)
@mock.patch('nova.objects.Instance.get_by_uuid')
def test_remove_deleted_instances_allocations_scheduled_instance(self,
@@ -2739,11 +2739,13 @@ class TestUpdateUsageFromInstance(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.uuid = uuids.inst0
self.rt.delete_allocation_for_shelve_offloaded_instance(instance)
self.rt.delete_allocation_for_shelve_offloaded_instance(
mock.sentinel.ctx, instance)
rc = self.rt.reportclient
mock_remove_allocation = rc.delete_allocation_for_instance
mock_remove_allocation.assert_called_once_with(instance.uuid)
mock_remove_allocation.assert_called_once_with(
mock.sentinel.ctx, instance.uuid)
def test_update_usage_from_instances_goes_negative(self):
# NOTE(danms): The resource tracker _should_ report negative resources

View File

@@ -244,7 +244,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.context, instance)
mock_update_resource_tracker.assert_called_once_with(self.context,
instance)
mock_delete_alloc.assert_called_once_with(instance)
mock_delete_alloc.assert_called_once_with(self.context, instance)
mock_event.assert_called_once_with(self.context,
'compute_shelve_offload_instance',
instance.uuid)

View File

@@ -3000,7 +3000,7 @@ class TestAllocations(SchedulerReportClientTestCase):
'project_id': inst.project_id,
'user_id': inst.user_id,
}
self.client.update_instance_allocation(cn, inst, 1)
self.client.update_instance_allocation(self.context, cn, inst, 1)
mock_put.assert_called_once_with(
'/allocations/%s' % inst.uuid,
expected, version='1.8')
@@ -3029,7 +3029,7 @@ class TestAllocations(SchedulerReportClientTestCase):
'DISK_GB': 123,
'MEMORY_MB': 456,
}
self.client.update_instance_allocation(cn, inst, 1)
self.client.update_instance_allocation(self.context, cn, inst, 1)
self.assertFalse(mock_put.called)
mock_get.assert_called_once_with(
'/allocations/%s' % inst.uuid)
@@ -3051,7 +3051,7 @@ class TestAllocations(SchedulerReportClientTestCase):
except AttributeError:
# NOTE(danms): LOL @ py3
mock_put.return_value.__bool__.return_value = False
self.client.update_instance_allocation(cn, inst, 1)
self.client.update_instance_allocation(self.context, cn, inst, 1)
self.assertTrue(mock_warn.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -3059,9 +3059,10 @@ class TestAllocations(SchedulerReportClientTestCase):
def test_update_instance_allocation_delete(self, mock_delete):
cn = objects.ComputeNode(uuid=uuids.cn)
inst = objects.Instance(uuid=uuids.inst)
self.client.update_instance_allocation(cn, inst, -1)
self.client.update_instance_allocation(self.context, cn, inst, -1)
mock_delete.assert_called_once_with(
'/allocations/%s' % inst.uuid)
'/allocations/%s' % inst.uuid,
global_request_id=self.context.global_id)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete')
@@ -3075,7 +3076,7 @@ class TestAllocations(SchedulerReportClientTestCase):
except AttributeError:
# NOTE(danms): LOL @ py3
mock_delete.return_value.__bool__.return_value = False
self.client.update_instance_allocation(cn, inst, -1)
self.client.update_instance_allocation(self.context, cn, inst, -1)
self.assertTrue(mock_warn.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -3093,7 +3094,7 @@ class TestAllocations(SchedulerReportClientTestCase):
# py3 uses __bool__
mock_response.__bool__.return_value = False
mock_delete.return_value = mock_response
self.client.delete_allocation_for_instance(uuids.rp_uuid)
self.client.delete_allocation_for_instance(self.context, uuids.rp_uuid)
# make sure we didn't screw up the logic or the mock
mock_log.info.assert_not_called()
# make sure warning wasn't called for the 404
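
The unit tests above assert, at the client-wrapper level, that
global_request_id=context.global_id reaches delete(). To check the
actual header on the wire instead, a requests_mock-based check could
look like the following; it is illustrative only and not part of this
change, and the URL and ID values are made up:

import requests
import requests_mock


def test_request_id_header_sent_on_delete():
    with requests_mock.Mocker() as m:
        m.delete('http://placement.example/allocations/some-uuid',
                 status_code=204)
        requests.delete('http://placement.example/allocations/some-uuid',
                        headers={'X-Openstack-Request-Id': 'req-abc123'})
        # The mock records the request it received, header included.
        assert m.last_request.headers['X-Openstack-Request-Id'] == 'req-abc123'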

View File

@@ -330,7 +330,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
mock.sentinel.provider_summaries)
# Ensure we cleaned up the first successfully-claimed instance
mock_cleanup.assert_called_once_with([uuids.instance1])
mock_cleanup.assert_called_once_with(ctx, [uuids.instance1])
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
@@ -681,13 +681,14 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# allocations for
pc = self.placement_client
self.driver._cleanup_allocations(instance_uuids)
self.driver._cleanup_allocations(self.context, instance_uuids)
self.assertFalse(pc.delete_allocation_for_instance.called)
instance_uuids = [uuids.instance1, uuids.instance2]
self.driver._cleanup_allocations(instance_uuids)
self.driver._cleanup_allocations(self.context, instance_uuids)
exp_calls = [mock.call(uuids.instance1), mock.call(uuids.instance2)]
exp_calls = [mock.call(self.context, uuids.instance1),
mock.call(self.context, uuids.instance2)]
pc.delete_allocation_for_instance.assert_has_calls(exp_calls)
def test_add_retry_host(self):