fixtures: Track volume attachments within CinderFixtureNewAttachFlow

Previously, volume attachment IDs were not tracked at all within the
fixture; only the instance_uuid and volume_id were stashed. This change
should allow future functional tests to exercise bugs where attachments
are deleted but not recreated, such as bug #1784353.

Change-Id: Ib30144596fe6a8d8ffbb4ebd695ebcf38ef828a4
Co-authored-by: Matthew Booth <mbooth@redhat.com>
Related-Bug: #1784353
(cherry picked from commit bfdc6a0d52)
(cherry picked from commit cb9996b45f)
Lee Yarwood 2018-07-31 11:09:45 +01:00 committed by Matt Riedemann
parent 53f9c8e510
commit 6fa5aee86b
4 changed files with 85 additions and 40 deletions
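
Note on intent: tracking attachment IDs (not just volume IDs) lets a
functional test assert that an attachment record survives, or is correctly
recreated by, an operation. A minimal sketch of the kind of check this
enables, with hypothetical helper names (_boot_server_with_volume and
_recreate_attachments are illustrative, not part of this change):

    # Hypothetical regression-style test enabled by this change: after an
    # operation that deletes and recreates attachments (see bug #1784353),
    # the fixture should still report the volume as attached.
    def test_attachment_recreated(self):
        server_id, volume_id = self._boot_server_with_volume()  # assumed
        self._recreate_attachments(server_id)  # assumed operation under test
        self.assertIn(volume_id,
                      self.cinder.volume_ids_for_instance(server_id))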


@@ -1329,6 +1329,9 @@ class CinderFixture(fixtures.Fixture):
             # This map gets updated on attach/detach operations.
             self.attachments = collections.defaultdict(list)
 
+    def volume_ids_for_instance(self, instance_uuid):
+        return self.attachments.get(instance_uuid)
+
     def setUp(self):
         super(CinderFixture, self).setUp()
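
The legacy CinderFixture keeps an instance-to-volume-IDs map and returns
the tracked list directly. One subtlety: dict.get() bypasses the
defaultdict factory, so an instance that never attached anything returns
None rather than an empty list. A standalone illustration of those
semantics, using made-up IDs:

    import collections

    attachments = collections.defaultdict(list)
    attachments['instance-uuid-1'].append('volume-id-1')

    # Mirrors CinderFixture.volume_ids_for_instance:
    assert attachments.get('instance-uuid-1') == ['volume-id-1']
    assert attachments.get('unknown-instance') is None  # None, not []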
@@ -1515,15 +1518,28 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
         self.swap_volume_instance_uuid = None
         self.swap_volume_instance_error_uuid = None
         self.attachment_error_id = None
-        # This is a map of instance UUIDs mapped to a list of volume IDs.
-        # This map gets updated on attach/detach operations.
-        self.attachments = collections.defaultdict(list)
+        # A map of volumes to a list of (attachment_id, instance_uuid).
+        # Note that a volume can have multiple attachments even without
+        # multi-attach, as some flows create a blank 'reservation' attachment
+        # before deleting another attachment.
+        self.volume_to_attachment = collections.defaultdict(list)
+
+    def volume_ids_for_instance(self, instance_uuid):
+        for volume_id, attachments in self.volume_to_attachment.items():
+            for _, _instance_uuid in attachments:
+                if _instance_uuid == instance_uuid:
+                    # we might have multiple volumes attached to this instance
+                    # so yield rather than return
+                    yield volume_id
+                    break
 
     def setUp(self):
         super(CinderFixtureNewAttachFlow, self).setUp()
 
         def fake_get(self_api, context, volume_id, microversion=None):
             # Check for the special swap volumes.
+            attachments = self.volume_to_attachment[volume_id]
             if volume_id in (CinderFixture.SWAP_OLD_VOL,
                              CinderFixture.SWAP_ERR_OLD_VOL):
                 volume = {
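
Unlike the legacy fixture, the new volume_ids_for_instance is a generator:
an instance may have several volumes attached, so volume IDs are yielded as
they are found, and the inner break reports each volume at most once even
when it carries multiple attachment records. A self-contained sketch of the
lookup, using illustrative IDs:

    import collections

    volume_to_attachment = collections.defaultdict(list)
    volume_to_attachment['vol-1'].append(('attach-1', 'inst-1'))
    volume_to_attachment['vol-2'].append(('attach-2', 'inst-1'))

    def volume_ids_for_instance(instance_uuid):
        for volume_id, attachments in volume_to_attachment.items():
            for _, _instance_uuid in attachments:
                if _instance_uuid == instance_uuid:
                    yield volume_id
                    break  # report each volume at most once

    # The 'in' operator and list() both drive the generator:
    assert 'vol-1' in volume_ids_for_instance('inst-1')
    assert sorted(volume_ids_for_instance('inst-1')) == ['vol-1', 'vol-2']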
@@ -1542,37 +1558,39 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
                                  if volume_id == CinderFixture.SWAP_OLD_VOL
                                  else self.swap_volume_instance_error_uuid)
 
-                volume.update({
-                    'status': 'in-use',
-                    'attachments': {
-                        instance_uuid: {
-                            'mountpoint': '/dev/vdb',
-                            'attachment_id': volume_id
-                        }
-                    },
-                    'attach_status': 'attached'
-                })
+                if attachments:
+                    attachment_id, instance_uuid = attachments[0]
+                    volume.update({
+                        'status': 'in-use',
+                        'attachments': {
+                            instance_uuid: {
+                                'mountpoint': '/dev/vdb',
+                                'attachment_id': attachment_id
+                            }
+                        },
+                        'attach_status': 'attached'
+                    })
                 return volume
 
             # Check to see if the volume is attached.
-            for instance_uuid, volumes in self.attachments.items():
-                if volume_id in volumes:
-                    # The volume is attached.
-                    volume = {
-                        'status': 'in-use',
-                        'display_name': volume_id,
-                        'attach_status': 'attached',
-                        'id': volume_id,
-                        'multiattach': volume_id == self.MULTIATTACH_VOL,
-                        'size': 1,
-                        'attachments': {
-                            instance_uuid: {
-                                'attachment_id': volume_id,
-                                'mountpoint': '/dev/vdb'
-                            }
-                        }
-                    }
-                    break
+            if attachments:
+                # The volume is attached.
+                attachment_id, instance_uuid = attachments[0]
+                volume = {
+                    'status': 'in-use',
+                    'display_name': volume_id,
+                    'attach_status': 'attached',
+                    'id': volume_id,
+                    'multiattach': volume_id == self.MULTIATTACH_VOL,
+                    'size': 1,
+                    'attachments': {
+                        instance_uuid: {
+                            'attachment_id': attachment_id,
+                            'mountpoint': '/dev/vdb'
+                        }
+                    }
+                }
             else:
                 # This is a test that does not care about the actual details.
                 volume = {
@@ -1600,26 +1618,45 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
                                               new_volume_id, error):
             return {'save_volume_id': new_volume_id}
 
+        def _find_attachment(attachment_id):
+            """Find attachment corresponding to ``attachment_id``.
+
+            Returns:
+                A tuple of the volume ID, an attachment-instance mapping tuple
+                for the given attachment ID, and a list of attachment-instance
+                mapping tuples for the volume.
+            """
+            for volume_id, attachments in self.volume_to_attachment.items():
+                for attachment in attachments:
+                    _attachment_id, instance_uuid = attachment
+                    if attachment_id == _attachment_id:
+                        return volume_id, attachment, attachments
+            raise exception.VolumeAttachmentNotFound(
+                attachment_id=attachment_id)
+
         def fake_attachment_create(_self, context, volume_id, instance_uuid,
                                    connector=None, mountpoint=None):
             attachment_id = uuidutils.generate_uuid()
             if self.attachment_error_id is not None:
                 attachment_id = self.attachment_error_id
             attachment = {'id': attachment_id, 'connection_info': {'data': {}}}
-            self.attachments['instance_uuid'].append(instance_uuid)
-            self.attachments[instance_uuid].append(volume_id)
+            self.volume_to_attachment[volume_id].append(
+                (attachment_id, instance_uuid))
 
             return attachment
 
         def fake_attachment_delete(_self, context, attachment_id):
-            instance_uuid = self.attachments['instance_uuid'][0]
-            del self.attachments[instance_uuid][0]
-            del self.attachments['instance_uuid'][0]
+            # 'attachment' is a tuple defining an attachment-instance mapping
+            _, attachment, attachments = _find_attachment(attachment_id)
+            attachments.remove(attachment)
+
             if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID:
                 self.swap_error = True
 
         def fake_attachment_update(_self, context, attachment_id, connector,
                                    mountpoint=None):
+            # Ensure the attachment exists
+            _find_attachment(attachment_id)
             attachment_ref = {'driver_volume_type': 'fake_type',
                               'id': attachment_id,
                               'connection_info': {'data':
@@ -1630,6 +1667,8 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
             return attachment_ref
 
         def fake_attachment_get(_self, context, attachment_id):
+            # Ensure the attachment exists
+            _find_attachment(attachment_id)
             attachment_ref = {'driver_volume_type': 'fake_type',
                               'id': attachment_id,
                               'connection_info': {'data':
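
Taken together, the stubs now share one source of truth: fake_attachment_create
appends an (attachment_id, instance_uuid) tuple under the volume, while the
delete/update/get stubs resolve the ID through _find_attachment, which raises
VolumeAttachmentNotFound for stale or unknown IDs. A simplified, self-contained
model of that bookkeeping (a plain dict and LookupError stand in for the
fixture's internals):

    volume_to_attachment = {'vol-1': [('attach-1', 'inst-1')]}

    def find_attachment(attachment_id):
        # The fixture raises exception.VolumeAttachmentNotFound here.
        for volume_id, attachments in volume_to_attachment.items():
            for attachment in attachments:
                if attachment[0] == attachment_id:
                    return volume_id, attachment, attachments
        raise LookupError(attachment_id)

    # Deleting removes the mapping tuple in place:
    _, attachment, attachments = find_attachment('attach-1')
    attachments.remove(attachment)
    assert volume_to_attachment == {'vol-1': []}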


@@ -96,7 +96,8 @@ class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
 
         # There should now exist an attachment to the volume as it was created
         # by Nova.
-        self.assertIn(volume_id, self.cinder.attachments[server_id])
+        self.assertIn(volume_id,
+                      self.cinder.volume_ids_for_instance(server_id))
 
         # Delete this server, which should delete BDMs and remove the
         # reservation on the instances.
@@ -104,4 +105,5 @@ class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
 
         # The volume should no longer have any attachments as instance delete
         # should have removed them.
-        self.assertNotIn(volume_id, self.cinder.attachments[server_id])
+        self.assertNotIn(volume_id,
+                         self.cinder.volume_ids_for_instance(server_id))
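
The assertion pattern is the same across all of the updated tests: each call
to volume_ids_for_instance builds a fresh generator over the fixture's
current state, and assertIn/assertNotIn drive it through the in operator, so
every assertion observes the tracking as it stands at call time:

    self.assertIn(volume_id,
                  self.cinder.volume_ids_for_instance(server_id))
    # ... the delete path runs, removing the attachment ...
    self.assertNotIn(volume_id,
                     self.cinder.volume_ids_for_instance(server_id))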


@@ -160,7 +160,8 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
         self._wait_for_volume_attach(server_id, volume_id)
         # Check to see that the fixture is tracking the server and volume
         # attachment.
-        self.assertIn(volume_id, self.cinder.attachments[server_id])
+        self.assertIn(volume_id,
+                      self.cinder.volume_ids_for_instance(server_id))
 
         # At this point the instance.host is no longer set, so deleting
         # the server will take the local delete path in the API.
@@ -172,7 +173,8 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
         LOG.info('Validating that volume %s was detached from server %s.',
                  volume_id, server_id)
         # Now that the bug is fixed, assert the volume was detached.
-        self.assertNotIn(volume_id, self.cinder.attachments[server_id])
+        self.assertNotIn(volume_id,
+                         self.cinder.volume_ids_for_instance(server_id))
 
     @mock.patch('nova.objects.Service.get_minimum_version',
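
The _wait_for_volume_attach helper used above belongs to the test class, not
to this change. For orientation, a hedged sketch of what such a poll loop
typically looks like in these functional tests (the body and the
get_server_volumes call are illustrative assumptions, not the actual helper):

    import time

    def _wait_for_volume_attach(self, server_id, volume_id, timeout=30):
        # Illustrative poll: wait until the API reports the volume attached,
        # failing the test on timeout.
        deadline = time.time() + timeout
        while time.time() < deadline:
            volumes = self.api.get_server_volumes(server_id)  # assumed API
            if volume_id in [v['volumeId'] for v in volumes]:
                return
            time.sleep(0.1)
        self.fail('Timed out waiting for volume %s to attach to server %s'
                  % (volume_id, server_id))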


@@ -308,7 +308,8 @@ class ServersPreSchedulingTestCase(test.TestCase,
 
         # Since _IntegratedTestBase uses the CastAsCall fixture, when we
         # get the server back we know all of the volume stuff should be done.
-        self.assertIn(volume_id, cinder.attachments[server['id']])
+        self.assertIn(volume_id,
+                      cinder.volume_ids_for_instance(server['id']))
 
         # Now delete the server, which should go through the "local delete"
         # code in the API, find the build request and delete it along with
@@ -317,7 +318,8 @@ class ServersPreSchedulingTestCase(test.TestCase,
 
         # The volume should no longer have any attachments as instance delete
         # should have removed them.
-        self.assertNotIn(volume_id, cinder.attachments[server['id']])
+        self.assertNotIn(volume_id,
+                         cinder.volume_ids_for_instance(server['id']))
 
     def test_instance_list_build_request_marker_ip_filter(self):
         """Tests listing instances with a marker that is in the build_requests