Handle legacy request spec dict in ComputeTaskManager._cold_migrate
Prior to change I4244f7dd8fe74565180f73684678027067b4506e in Stein, conductor would pass a legacy dict request spec to compute during cold migrate / resize and if compute rescheduled it would not pass the request spec back to conductor, so the _cold_migrate method in conductor would have to create a new RequestSpec from components it had available. As of that change, compute will send the request spec it got back to conductor and _cold_migrate avoids the RequestSpec.from_components call. There are two issues here: 1. Technically if conductor RPC API is pinned to less than 1.13 the ComputeTaskAPI.migrate_server method will remove the request spec from the call to conductor. So conductor (server-side) can still not get a RequestSpec and need to use from_components. As a result the TODO in the _cold_migrate method needs to be updated since we require an RPC API major version bump to make request spec required. 2. Just because conductor is passing compute a RequestSpec object, if compute RPC API versions are pinned to less than 5.1, conductor will pass a legacy request spec dict to compute and compute will send that back to conductor, so the _cold_migrate method needs to handle getting a request spec that is a dict and convert it to an object. A new test is added for that case. Change-Id: I188b7aa9cb220f93e69a68f0c3592b28d41ba5b6 Closes-Bug: #1843090
This commit is contained in:
parent
ffd4dbf817
commit
b36c44c449
|
@ -813,6 +813,7 @@ class ComputeAPI(object):
|
|||
client = self.router.client(ctxt)
|
||||
return client.can_send_version('5.2')
|
||||
|
||||
# TODO(mriedem): Drop compat for request_spec being a legacy dict in v6.0.
|
||||
def prep_resize(self, ctxt, instance, image, instance_type, host,
|
||||
migration, request_spec, filter_properties, node,
|
||||
clean_shutdown, host_list):
|
||||
|
|
|
@ -309,9 +309,8 @@ class ComputeTaskManager(base.Base):
|
|||
# it only provides filter_properties legacy dict back to the
|
||||
# conductor with no RequestSpec part of the payload for <Stein
|
||||
# computes.
|
||||
# TODO(mriedem): We should be able to remove this in Train when we
|
||||
# only support >=Stein computes and request_spec is passed back to
|
||||
# conductor on reschedule.
|
||||
# TODO(mriedem): We can remove this compat code for no request spec
|
||||
# coming to conductor in ComputeTaskAPI RPC API version 2.0
|
||||
if not request_spec:
|
||||
# Make sure we hydrate a new RequestSpec object with the new flavor
|
||||
# and not the nested one from the instance
|
||||
|
@ -320,6 +319,19 @@ class ComputeTaskManager(base.Base):
|
|||
flavor, instance.numa_topology, instance.pci_requests,
|
||||
filter_properties, None, instance.availability_zone,
|
||||
project_id=instance.project_id, user_id=instance.user_id)
|
||||
elif not isinstance(request_spec, objects.RequestSpec):
|
||||
# Prior to compute RPC API 5.1 conductor would pass a legacy dict
|
||||
# version of the request spec to compute and Stein compute
|
||||
# could be sending that back to conductor on reschedule, so if we
|
||||
# got a dict convert it to an object.
|
||||
# TODO(mriedem): We can drop this compat code when we only support
|
||||
# compute RPC API >=6.0.
|
||||
request_spec = objects.RequestSpec.from_primitives(
|
||||
context, request_spec, filter_properties)
|
||||
# We don't have to set the new flavor on the request spec because
|
||||
# if we got here it was due to a reschedule from the compute and
|
||||
# the request spec would already have the new flavor in it from the
|
||||
# else block below.
|
||||
else:
|
||||
# NOTE(sbauza): Resizes means new flavor, so we need to update the
|
||||
# original RequestSpec object for make sure the scheduler verifies
|
||||
|
|
|
@ -304,6 +304,8 @@ class ComputeTaskAPI(object):
|
|||
|
||||
# TODO(melwitt): Remove the reservations parameter in version 2.0 of the
|
||||
# RPC API.
|
||||
# TODO(mriedem): Make request_spec required *and* a RequestSpec object
|
||||
# rather than a legacy dict in version 2.0 of the RPC API.
|
||||
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
|
||||
flavor, block_migration, disk_over_commit,
|
||||
reservations=None, clean_shutdown=True, request_spec=None,
|
||||
|
|
|
@ -130,73 +130,4 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
|
|||
self._test_reschedule_migration_with_compute_rpc_pin('5.1')
|
||||
|
||||
def test_reschedule_migration_5_0(self):
|
||||
# This should work the same as test_reschedule_migration_5_1 so the
|
||||
# commented out call below should pass but it doesn't. This is
|
||||
# bug 1843090.
|
||||
# self._test_reschedule_migration_with_compute_rpc_pin('5.0')
|
||||
|
||||
# So the above call is inlined below and modified to assert the wrong
|
||||
# behavior
|
||||
self.flags(compute='5.0', group='upgrade_levels')
|
||||
|
||||
server_req = self._build_minimal_create_server_request(
|
||||
self.api, 'server1',
|
||||
networks=[],
|
||||
image_uuid=fake_image.get_valid_image_id(),
|
||||
flavor_id=self.flavor1['id'])
|
||||
server = self.api.post_server({'server': server_req})
|
||||
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
|
||||
|
||||
orig_claim = nova.compute.resource_tracker.ResourceTracker.resize_claim
|
||||
claim_calls = []
|
||||
|
||||
def fake_orig_claim(
|
||||
_self, context, instance, instance_type, nodename,
|
||||
*args, **kwargs):
|
||||
if not claim_calls:
|
||||
claim_calls.append(nodename)
|
||||
raise exception.ComputeResourcesUnavailable(
|
||||
reason='Simulated claim failure')
|
||||
else:
|
||||
claim_calls.append(nodename)
|
||||
return orig_claim(
|
||||
_self, context, instance, instance_type, nodename, *args,
|
||||
**kwargs)
|
||||
|
||||
with mock.patch(
|
||||
'nova.compute.resource_tracker.ResourceTracker.resize_claim',
|
||||
new=fake_orig_claim):
|
||||
# Now migrate the server which is going to fail on the first
|
||||
# destination but then will be rescheduled.
|
||||
self.api.post_server_action(server['id'], {'migrate': None})
|
||||
|
||||
# bug 1843090: The migration failed and the instance remained on
|
||||
# the source host
|
||||
server = self._wait_for_server_parameter(
|
||||
self.api, server,
|
||||
{
|
||||
'OS-EXT-SRV-ATTR:host': 'host1',
|
||||
'OS-EXT-STS:task_state': None,
|
||||
'status': 'ERROR'})
|
||||
|
||||
# there was only one resize_claim call as no reschedule happened
|
||||
self.assertEqual(['host2'], claim_calls)
|
||||
|
||||
# bug 1843090: The following stack trace is printed in the log when the
|
||||
# re-schedule is initiated:
|
||||
#
|
||||
# Traceback (most recent call last):
|
||||
# [snip]
|
||||
# File "nova/conductor/manager.py", line 95, in wrapper
|
||||
# return fn(self, context, *args, **kwargs)
|
||||
# File "nova/compute/utils.py", line 1372, in decorated_function
|
||||
# return function(self, context, *args, **kwargs)
|
||||
# File "nova/conductor/manager.py", line 299, in migrate_server
|
||||
# host_list)
|
||||
# File "nova/conductor/manager.py", line 327, in _cold_migrate
|
||||
# request_spec.flavor = flavor
|
||||
# AttributeError: 'dict' object has no attribute 'flavor'
|
||||
#
|
||||
self.assertIn(
|
||||
"AttributeError: 'dict' object has no attribute 'flavor'",
|
||||
self.stdlog.logger.output)
|
||||
self._test_reschedule_migration_with_compute_rpc_pin('5.0')
|
||||
|
|
|
@ -2893,6 +2893,36 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
|
|||
# ...and persisted
|
||||
spec_save_mock.assert_called_once_with()
|
||||
|
||||
@mock.patch('nova.objects.RequestSpec.from_primitives')
@mock.patch.object(objects.RequestSpec, 'save')
def test_cold_migrate_reschedule_legacy_request_spec(
        self, mock_spec_save, mock_from_primitives):
    """Test conversion of a legacy request spec dict during cold migrate.

    With the compute RPC API pinned below 5.1, conductor hands compute a
    legacy dict version of the request spec; on a reschedule from
    prep_resize compute returns that same dict, so _cold_migrate must
    turn it back into a RequestSpec object before proceeding.
    """
    inst = objects.Instance(system_metadata={})
    spec_obj = fake_request_spec.fake_spec_obj()
    mock_from_primitives.return_value = spec_obj
    legacy_dict = spec_obj.to_legacy_request_spec_dict()
    filter_properties = {}
    do_clean_shutdown = True
    fake_host_list = mock.sentinel.host_list

    with mock.patch.object(
            self.conductor, '_build_cold_migrate_task') as mock_build_task:
        self.conductor._cold_migrate(
            self.context, inst, self.flavor, filter_properties,
            do_clean_shutdown, legacy_dict, fake_host_list)
    # The legacy dict must have been converted back into an object...
    mock_from_primitives.assert_called_once_with(
        self.context, legacy_dict, filter_properties)
    # ...that object is what the migrate task was built with...
    mock_build_task.assert_called_once_with(
        self.context, inst, self.flavor,
        spec_obj, do_clean_shutdown, fake_host_list)
    # ...and the spec was persisted.
    mock_spec_save.assert_called_once_with()
|
||||
|
||||
def test_resize_no_valid_host_error_msg(self):
|
||||
flavor_new = objects.Flavor.get_by_name(self.ctxt, 'm1.small')
|
||||
inst_obj = objects.Instance(
|
||||
|
|
Loading…
Reference in New Issue