Add shelve/unshelve wrinkle to volume-backed disk func test

This adds a shelve/unshelve scenario to the functional test
which checks that root_gb from the flavor does not show up
in placement for volume-backed servers. Because the shelve
happens after we've cold migrated the server to a new host,
the fake virt driver's finish_migration() method needed to
be implemented to track the instance on the destination host.

Change-Id: Ica456f2512ebe7814c5d20f205ba89b49c42050a
Related-Bug: #1469179
Author: Matt Riedemann
Date: 2018-07-23 11:13:37 -04:00
Commit: dc5c69d0d1 (parent: b537148228)
2 changed files with 27 additions and 1 deletion

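For context, the invariant this test guards is that a boot-from-volume server's DISK_GB allocation in placement reflects only the flavor's ephemeral and swap sizes, never its root_gb, since the root disk lives in Cinder. A minimal sketch of that arithmetic, using hypothetical flavor values rather than the flavor the test actually builds:

    # Hypothetical flavor values, not the test's real flavor. Flavor swap is
    # expressed in MB, so it contributes roughly swap / 1024 GB of DISK_GB
    # (nova rounds up; 1024 MB is used here so the division is exact).
    flavor = {'root_gb': 10, 'ephemeral': 2, 'swap': 1024}

    # Volume-backed: root_gb is excluded, so local disk usage is 2 + 1 = 3 GB,
    # not 13 GB. This is the expected_usage the assertions below rely on.
    expected_usage = flavor['ephemeral'] + flavor['swap'] // 1024
    assert expected_usage == 3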
nova/tests/functional/test_servers.py

@@ -3693,6 +3693,7 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase):
         self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
         # Confirm the cold migration and check usage and the request spec.
         self.api.post_server_action(server['id'], {'confirmResize': None})
+        self._wait_for_state_change(self.api, server, 'ACTIVE')
         reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
         # Make sure it's set.
         self.assertTrue(reqspec.is_bfv)
@@ -3700,6 +3701,25 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase):
         resources = list(allocs.values())[0]['resources']
         self.assertEqual(expected_usage, resources['DISK_GB'])
+        # Now shelve and unshelve the server to make sure root_gb DISK_GB
+        # isn't reported for allocations after we unshelve the server.
+        fake_notifier.stub_notifier(self)
+        self.addCleanup(fake_notifier.reset)
+        self.api.post_server_action(server['id'], {'shelve': None})
+        self._wait_for_state_change(self.api, server, 'SHELVED_OFFLOADED')
+        fake_notifier.wait_for_versioned_notifications('shelve_offload.end')
+        # The server should not have any allocations since it's not currently
+        # hosted on any compute service.
+        allocs = self._get_allocations_by_server_uuid(server['id'])
+        self.assertDictEqual({}, allocs)
+        # Now unshelve the server and make sure there are still no DISK_GB
+        # allocations for the root disk.
+        self.api.post_server_action(server['id'], {'unshelve': None})
+        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        allocs = self._get_allocations_by_server_uuid(server['id'])
+        resources = list(allocs.values())[0]['resources']
+        self.assertEqual(expected_usage, resources['DISK_GB'])
 
 
 class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase):
     """Tests for requesting a server with required traits in Placement"""

nova/virt/fake.py

@@ -542,7 +542,13 @@ class FakeDriver(driver.ComputeDriver):
     def finish_migration(self, context, migration, instance, disk_info,
                          network_info, image_meta, resize_instance,
                          block_device_info=None, power_on=True):
-        return
+        injected_files = admin_password = allocations = None
+        # Finish migration is just like spawning the guest on a destination
+        # host during resize/cold migrate, so re-use the spawn() fake to
+        # claim resources and track the instance on this "hypervisor".
+        self.spawn(context, instance, image_meta, injected_files,
+                   admin_password, allocations,
+                   block_device_info=block_device_info)
 
     def confirm_migration(self, context, migration, instance, network_info):
         return
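Re-using spawn() works because the fake driver tracks its guests in a simple in-memory map keyed by instance UUID; once finish_migration() calls spawn(), the instance shows up as running on the destination host, which is what the shelve step in the functional test needs. A simplified sketch of that bookkeeping, with assumed names rather than the actual fake.py internals:

    class _FakeHypervisorSketch(object):
        def __init__(self):
            # instance uuid -> fake guest record
            self.instances = {}

        def spawn(self, context, instance, image_meta, injected_files,
                  admin_password, allocations, network_info=None,
                  block_device_info=None):
            # "Boot" the guest: just remember it on this host.
            self.instances[instance.uuid] = 'running'

        def instance_exists(self, instance):
            # Lookups like this are how the instance is seen on the
            # migration destination host after finish_migration().
            return instance.uuid in self.instances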