RT: track evacuation migrations

The previous patch set the stage for tracking resources during
evacuation, but we still have to make sure that
_update_usage_from_migration actually updates the resources. To do
that, we have to fetch the right flavor for 'evacuation' migrations,
and also stop skipping over them entirely.

Once that's done, claims and resource tracking work as expected when
evacuating instances.

DocImpact

Change-Id: Ie74939e543155bc42705b28e1b44d943ef54ebdc
Related-bug: #1417667
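
For reference, the flavor-selection rule this patch introduces boils
down to the following (a minimal sketch of the behavior; pick_flavor is
a hypothetical stand-in for the _get_instance_type method changed in the
diff below):

    def pick_flavor(instance, migration, prefix):
        # Resize is the only migration type that changes the flavor, so
        # only resize migrations have stashed 'old_'/'new_' flavors worth
        # consulting. Evacuation (like live-migration and plain migrate)
        # keeps the instance's flavor, so no stashed copy is needed.
        if migration.migration_type == 'resize':
            return getattr(instance, '%sflavor' % prefix)
        return instance.flavor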
Nikola Dipanov 2015-09-22 16:58:20 +01:00
parent dc0221d724
commit 4ee4f9f2ec
3 changed files with 124 additions and 14 deletions


@@ -100,7 +100,7 @@ def _instance_in_resize_state(instance):
     if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
             and task in [task_states.RESIZE_PREP,
             task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
-            task_states.RESIZE_FINISH]):
+            task_states.RESIZE_FINISH, task_states.REBUILDING]):
         return True
 
     return False
@@ -326,7 +326,8 @@ class ResourceTracker(object):
         if not instance_type:
             ctxt = context.elevated()
-            instance_type = self._get_instance_type(ctxt, instance, prefix)
+            instance_type = self._get_instance_type(ctxt, instance, prefix,
+                                                    migration)
 
         if image_meta is None:
             image_meta = objects.ImageMeta.from_instance(instance)
@@ -679,11 +680,12 @@ class ResourceTracker(object):
         self.compute_node.numa_topology = updated_numa_topology
 
     def _is_trackable_migration(self, migration):
-        # Only look at resize/migrate migration records
+        # Only look at resize/migrate migration and evacuation records
         # NOTE(danms): RT should probably examine live migration
         # records as well and do something smart. However, ignore
         # those for now to avoid them being included in below calculations.
-        return migration.migration_type in ('resize', 'migration')
+        return migration.migration_type in ('resize', 'migration',
+                                            'evacuation')
 
     def _get_migration_context_resource(self, resource, instance,
                                         prefix='new_', itype=None):
@@ -720,28 +722,28 @@ class ResourceTracker(object):
             if (instance['instance_type_id'] ==
                     migration.old_instance_type_id):
                 itype = self._get_instance_type(context, instance, 'new_',
-                                                migration.new_instance_type_id)
+                                                migration)
                 numa_topology = self._get_migration_context_resource(
                     'numa_topology', instance)
             else:
                 # instance record already has new flavor, hold space for a
                 # possible revert to the old instance type:
                 itype = self._get_instance_type(context, instance, 'old_',
-                                                migration.old_instance_type_id)
+                                                migration)
                 numa_topology = self._get_migration_context_resource(
                     'numa_topology', instance, prefix='old_')
         elif incoming and not record:
             # instance has not yet migrated here:
             itype = self._get_instance_type(context, instance, 'new_',
-                                            migration.new_instance_type_id)
+                                            migration)
             numa_topology = self._get_migration_context_resource(
                 'numa_topology', instance)
         elif outbound and not record:
             # instance migrated, but record usage for a possible revert:
             itype = self._get_instance_type(context, instance, 'old_',
-                                            migration.old_instance_type_id)
+                                            migration)
             numa_topology = self._get_migration_context_resource(
                 'numa_topology', instance, prefix='old_')
@@ -909,10 +911,16 @@ class ResourceTracker(object):
             reason = _("Missing keys: %s") % missing_keys
             raise exception.InvalidInput(reason=reason)
 
-    def _get_instance_type(self, context, instance, prefix,
-                           instance_type_id=None):
+    def _get_instance_type(self, context, instance, prefix, migration):
         """Get the instance type from instance."""
-        return getattr(instance, '%sflavor' % prefix)
+        stashed_flavors = migration.migration_type in ('resize',)
+        if stashed_flavors:
+            return getattr(instance, '%sflavor' % prefix)
+        else:
+            # NOTE(ndipanov): Certain migration types (all but resize)
+            # do not change flavors so there is no need to stash
+            # them. In that case - just get the instance flavor.
+            return instance.flavor
 
     def _get_usage_dict(self, object_or_dict, **updates):
         """Make a usage dict _update methods expect.


@@ -1149,8 +1149,8 @@ class _MoveClaimTestCase(BaseTrackerTestCase):
     @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                 return_value=objects.InstancePCIRequests(requests=[]))
     def test_move_type_not_tracked(self, mock_get, mock_save):
-        self.claim_method(self.context, self.instance,
-            self.instance_type, limits=self.limits, move_type="evacuation")
+        self.claim_method(self.context, self.instance, self.instance_type,
+                          limits=self.limits, move_type="live-migration")
 
         mock_save.assert_called_once_with()
 
         self._assert(0, 'memory_mb_used')
@@ -1163,6 +1163,8 @@ class _MoveClaimTestCase(BaseTrackerTestCase):
     def test_existing_migration(self, save_mock, save_inst_mock):
         migration = objects.Migration(self.context, id=42,
                                       instance_uuid=self.instance.uuid,
+                                      source_compute='fake-other-compute',
+                                      source_node='fake-other-node',
                                       status='accepted',
                                       migration_type='evacuation')
         self.claim_method(self.context, self.instance, self.instance_type,
@@ -1170,7 +1172,7 @@ class _MoveClaimTestCase(BaseTrackerTestCase):
         self.assertEqual(self.tracker.host, migration.dest_compute)
         self.assertEqual(self.tracker.nodename, migration.dest_node)
         self.assertEqual("pre-migrating", migration.status)
-        self.assertEqual(0, len(self.tracker.tracked_migrations))
+        self.assertEqual(1, len(self.tracker.tracked_migrations))
 
         save_mock.assert_called_once_with()
         save_inst_mock.assert_called_once_with()


@@ -207,6 +207,7 @@ _MIGRATION_FIXTURES = {
         dest_node='other-node',
         old_instance_type_id=1,
         new_instance_type_id=2,
+        migration_type='resize',
         status='migrating'
     ),
     # A migration that has only this compute node as the dest host
@@ -219,6 +220,7 @@ _MIGRATION_FIXTURES = {
         dest_node='fake-node',
         old_instance_type_id=1,
         new_instance_type_id=2,
+        migration_type='resize',
         status='migrating'
     ),
     # A migration that has this compute node as both the source and dest host
@@ -231,8 +233,22 @@ _MIGRATION_FIXTURES = {
         dest_node='fake-node',
         old_instance_type_id=1,
         new_instance_type_id=2,
+        migration_type='resize',
         status='migrating'
     ),
+    # A migration that has this compute node as destination and is an evac
+    'dest-only-evac': objects.Migration(
+        id=4,
+        instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
+        source_compute='other-host',
+        dest_compute='fake-host',
+        source_node='other-node',
+        dest_node='fake-node',
+        old_instance_type_id=2,
+        new_instance_type_id=None,
+        migration_type='evacuation',
+        status='pre-migrating'
+    ),
 }
 
 _MIGRATION_INSTANCE_FIXTURES = {
@@ -302,6 +318,28 @@ _MIGRATION_INSTANCE_FIXTURES = {
         old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
         new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
     ),
+    # dest-only-evac
+    '077fb63a-bdc8-4330-90ef-f012082703dc': objects.Instance(
+        id=102,
+        host=None,  # prevent RT trying to lazy-load this
+        node=None,
+        uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
+        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
+        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
+        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
+        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
+        numa_topology=None,
+        instance_type_id=2,
+        vm_state=vm_states.ACTIVE,
+        power_state=power_state.RUNNING,
+        task_state=task_states.REBUILDING,
+        system_metadata={},
+        os_type='fake-os',
+        project_id='fake-project',
+        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
+        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
+        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
+    ),
 }
 
 _MIGRATION_CONTEXT_FIXTURES = {
@@ -325,6 +363,11 @@ _MIGRATION_CONTEXT_FIXTURES = {
         migration_id=2,
         new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
         old_numa_topology=None),
+    '077fb63a-bdc8-4330-90ef-f012082703dc': objects.MigrationContext(
+        instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
+        migration_id=2,
+        new_numa_topology=None,
+        old_numa_topology=None),
 }
@@ -735,6 +778,63 @@ class TestUpdateAvailableResources(BaseTestCase):
         self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                  self.rt.compute_node))
 
+    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
+    @mock.patch('nova.objects.Instance.get_by_uuid')
+    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+    def test_no_instances_dest_evacuation(self, get_mock, get_inst_mock,
+                                          migr_mock, get_cn_mock):
+        # We test the behavior of update_available_resource() when
+        # there is an active evacuation that involves this compute node
+        # as the destination host not the source host, and the resource
+        # tracker does not yet have any instances assigned to it. This is
+        # the case when a migration to this compute host from another host
+        # is in progress, but not finished yet.
+        self.flags(reserved_host_disk_mb=0,
+                   reserved_host_memory_mb=0)
+
+        self._setup_rt()
+
+        get_mock.return_value = []
+        migr_obj = _MIGRATION_FIXTURES['dest-only-evac']
+        migr_mock.return_value = [migr_obj]
+        inst_uuid = migr_obj.instance_uuid
+        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
+        get_inst_mock.return_value = instance
+        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
+        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
+
+        update_mock = self._update_available_resources()
+
+        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
+                                            'fake-node')
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
+            # host is added in update_available_resources()
+            # before calling _update()
+            'host': 'fake-host',
+            'host_ip': '1.1.1.1',
+            'numa_topology': None,
+            'metrics': '[]',
+            'cpu_info': '',
+            'hypervisor_hostname': 'fakehost',
+            'free_disk_gb': 1,
+            'hypervisor_version': 0,
+            'local_gb': 6,
+            'free_ram_mb': 256,  # 512 total - 256 for possible confirm of new
+            'memory_mb_used': 256,  # 256 possible confirmed amount
+            'pci_device_pools': objects.PciDevicePoolList(),
+            'vcpus_used': 0,  # See NOTE(jaypipes) above about why this is 0
+            'hypervisor_type': 'fake',
+            'local_gb_used': 5,
+            'memory_mb': 512,
+            'current_workload': 0,
+            'vcpus': 4,
+            'running_vms': 0
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
+                                                 self.rt.compute_node))
+
     @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
                 return_value=None)
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')