Fix migrating a single instance that was created concurrently

When a user requests the creation of multiple instances at once, we
create a single RequestSpec per instance (each of them holding the
corresponding instance UUID), but we also track how many instances were
created concurrently by setting a field named 'num_instances', which we
persist.
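
For illustration, a hedged sketch (plain dicts standing in for the real
RequestSpec objects): booting three instances at once yields three
persisted records, one per instance UUID, all carrying num_instances=3.

    # Hedged sketch: three concurrent boots -> three persisted records,
    # one per instance UUID, each remembering the batch size.
    request_specs = [
        {'instance_uuid': uuid, 'num_instances': 3}
        for uuid in ('uuid-1', 'uuid-2', 'uuid-3')
    ]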

Unfortunately, due to Ifc5cf482209e4f6f4e3e39b24389bd3563d86444 we now
look up that field in the scheduler to know how many allocations to
make. Since a move operation passes only the single instance UUID to
migrate to the scheduler, there is a discrepancy that can lead to an
ugly IndexError. Fix this by sizing the scheduling loop from what is
passed over RPC, and fall back to what we know from the RequestSpec
record only when no instance UUIDs are provided.

Change-Id: If7da79356174be57481ef246618221e3b2ff8200
Closes-Bug: #1708961
Author:    Sylvain Bauza
Date:      2017-08-07 13:03:37 +02:00
Committer: EdLeafe
Parent:    5971dde5d9
Commit:    2bd7df84dc

2 changed files, 21 insertions(+), 6 deletions(-)
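
A minimal runnable sketch of the fixed sizing logic (the helper name is
illustrative, not Nova's actual API):

    # pick_num_instances is a hypothetical stand-in for the fixed logic
    # in FilterScheduler._schedule(): prefer the UUID list passed over
    # RPC, and fall back to the persisted RequestSpec.num_instances only
    # when no UUIDs were given.
    def pick_num_instances(instance_uuids, spec_num_instances):
        return len(instance_uuids) if instance_uuids else spec_num_instances

    assert pick_num_instances(['u1', 'u2', 'u3'], 3) == 3  # initial boot
    assert pick_num_instances(['u1'], 3) == 1  # move: the RPC list wins
    assert pick_num_instances(None, 3) == 3  # old callers: use the record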

nova/scheduler/filter_scheduler.py

@@ -117,8 +117,7 @@ class FilterScheduler(driver.Scheduler):
 
         :param context: The RequestContext object
         :param spec_obj: The RequestSpec object
-        :param instance_uuids: List of UUIDs, one for each value of the spec
-                               object's num_instances attribute
+        :param instance_uuids: List of instance UUIDs to place or move.
         :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider
                                       UUID, of the allocation requests that may
                                       be used to claim resources against
@@ -159,7 +158,13 @@ class FilterScheduler(driver.Scheduler):
 
         claimed_instance_uuids = []
         selected_hosts = []
-        num_instances = spec_obj.num_instances
+        # NOTE(sbauza): The RequestSpec.num_instances field contains the
+        # number of instances created when the RequestSpec was used to first
+        # boot some instances. This is incorrect when doing a move or resize
+        # operation, so prefer the length of instance_uuids unless it is
+        # None.
+        num_instances = (len(instance_uuids) if instance_uuids
+                         else spec_obj.num_instances)
         for num in range(num_instances):
             hosts = self._get_sorted_hosts(spec_obj, hosts, num)
             if not hosts:
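
For context, the pre-fix failure looked roughly like this (a hedged
illustration: the positional instance_uuids[num] access paraphrases the
claiming logic rather than quoting it):

    # Before the fix, the loop ran RequestSpec.num_instances times even
    # though a move passes a single instance UUID over RPC.
    instance_uuids = ['uuid-of-moved-instance']  # what a migration sends
    num_instances = 3  # stale value persisted at multi-instance boot time

    try:
        for num in range(num_instances):
            instance_uuid = instance_uuids[num]  # raises at num == 1
    except IndexError:
        print('IndexError: the discrepancy this change fixes')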

nova/tests/unit/scheduler/test_filter_scheduler.py

@@ -147,10 +147,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
                 '_get_all_host_states')
     @mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
                 '_get_sorted_hosts')
-    def test_schedule_successful_claim(self, mock_get_hosts,
-                                       mock_get_all_states, mock_claim):
+    def _test_schedule_successful_claim(self, mock_get_hosts,
+                                        mock_get_all_states, mock_claim,
+                                        num_instances=1):
         spec_obj = objects.RequestSpec(
-            num_instances=1,
+            num_instances=num_instances,
             flavor=objects.Flavor(memory_mb=512,
                                   root_gb=512,
                                   ephemeral_gb=0,
@@ -188,6 +188,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         # Ensure that we have consumed the resources on the chosen host states
         host_state.consume_from_request.assert_called_once_with(spec_obj)
 
+    def test_schedule_successful_claim(self):
+        self._test_schedule_successful_claim()
+
+    def test_schedule_old_reqspec_and_move_operation(self):
+        """This test is for verifying that in case of a move operation with an
+        original RequestSpec created for 3 concurrent instances, we only verify
+        the instance that is moved.
+        """
+        self._test_schedule_successful_claim(num_instances=3)
+
     @mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
                 '_cleanup_allocations')
     @mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'