Ensure attachment_id always exists for block device mapping
If an instance is deleted before it is scheduled, the BDM clean-up code uses the mappings from the build request, as they don't exist in the database yet. When using the older attachment flow with reserve_volume, there is no attachment_id bound to the block device mapping and, because it is loaded from the build request rather than from the database, accessing the attachment_id field raises an exception: 'attachment_id not lazy-loadable'. If we did a new-style attach, _validate_bdm will add the attachment_id from Cinder. If we did not, then this patch will make sure to set it to 'None' to avoid raising an exception when checking if we have an attachment_id set in the BDM clean-up code. Conflicts: nova/tests/functional/wsgi/test_servers.py Change-Id: I3cc775fc7dafe691b97a15e50ae2e93c92f355be Closes-Bug: #1750666 (cherry picked from commit 16c2c8b3ee
) Detach volumes when deleting a BFV server pre-scheduling. If the user creates a volume-backed server from an existing volume, the API reserves the volume by creating an attachment against it. This puts the volume into 'attaching' status. If the user then deletes the server before it's created in a cell, by deleting the build request, the attached volume is orphaned and requires admin intervention in the block storage service. This change simply pulls the BDMs off the BuildRequest when we delete the server via the build request and does the same local cleanup of those volumes as we would in a "normal" local delete scenario where the instance was created in a cell but doesn't have a host. We don't have to worry about ports in this scenario since ports are created on the compute, in a cell, and if we're deleting a build request then we never got far enough to create ports. Conflicts: nova/tests/functional/wsgi/test_servers.py Change-Id: I1a576bdb16befabe06a9728d7adf008fc0667077 Partial-Bug: #1404867 (cherry picked from commit 0652e4ab3d
)
This commit is contained in:
parent
b61bf73b0f
commit
e5a055d2c6
|
@ -1352,6 +1352,13 @@ class API(base.Base):
|
|||
volume = self._check_attach(context, volume_id,
|
||||
instance)
|
||||
bdm.volume_size = volume.get('size')
|
||||
|
||||
# NOTE(mnaser): If we end up reserving the volume, it will
|
||||
# not have an attachment_id which is needed
|
||||
# for cleanups. This can be removed once
|
||||
# all calls to reserve_volume are gone.
|
||||
if 'attachment_id' not in bdm:
|
||||
bdm.attachment_id = None
|
||||
except (exception.CinderConnectionFailed,
|
||||
exception.InvalidVolume):
|
||||
raise
|
||||
|
@ -1780,6 +1787,11 @@ class API(base.Base):
|
|||
# instance is now in a cell and the delete needs to proceed
|
||||
# normally.
|
||||
return False
|
||||
|
||||
# We need to detach from any volumes so they aren't orphaned.
|
||||
self._local_cleanup_bdm_volumes(
|
||||
build_req.block_device_mappings, instance, context)
|
||||
|
||||
return True
|
||||
|
||||
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
|
||||
|
@ -2054,7 +2066,12 @@ class API(base.Base):
|
|||
except Exception as exc:
|
||||
LOG.warning("Ignoring volume cleanup failure due to %s",
|
||||
exc, instance=instance)
|
||||
bdm.destroy()
|
||||
# If we're cleaning up volumes from an instance that wasn't yet
|
||||
# created in a cell, i.e. the user deleted the server while
|
||||
# the BuildRequest still existed, then the BDM doesn't actually
|
||||
# exist in the DB to destroy it.
|
||||
if 'id' in bdm:
|
||||
bdm.destroy()
|
||||
|
||||
def _local_delete(self, context, instance, bdms, delete_type, cb):
|
||||
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
|
||||
|
|
|
@ -274,7 +274,4 @@ class ServersPreSchedulingTestCase(test.TestCase):
|
|||
|
||||
# The volume should no longer have any attachments as instance delete
|
||||
# should have removed them.
|
||||
# self.assertNotIn(volume_id, cinder.reserved_volumes)
|
||||
# FIXME(mnaser): This is part of bug 1750666 where the BDMs aren't
|
||||
# properly deleted because they don't exist in the database.
|
||||
self.assertIn(volume_id, cinder.reserved_volumes)
|
||||
self.assertNotIn(volume_id, cinder.reserved_volumes)
|
||||
|
|
|
@ -3601,6 +3601,46 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
self.context,
|
||||
bdms, legacy_bdm=True)
|
||||
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
                   return_value=17)
@mock.patch.object(objects.Service, 'get_minimum_version',
                   return_value=17)
@mock.patch.object(cinder.API, 'get')
@mock.patch.object(cinder.API, 'reserve_volume')
def test_validate_bdm_returns_attachment_id(self, mock_reserve_volume,
                                            mock_get, mock_get_min_ver,
                                            mock_get_min_ver_all):
    """Verify BDM validation always sets attachment_id, even to None.

    With the legacy reserve_volume flow there is no Cinder attachment
    created, so _validate_bdm must explicitly set attachment_id to None
    rather than leave the field unset (an unset field would raise
    'attachment_id not lazy-loadable' later during clean-up).
    """
    instance = self._create_instance_obj()
    flavor = self._create_flavor()
    volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
    # Fake Cinder volume in a state that forces the reserve_volume path.
    mock_get.return_value = {'status': 'available',
                             'attach_status': 'detached',
                             'id': volume_id,
                             'multiattach': False}

    # NOTE(mnaser): AnonFakeDbBlockDeviceDict is used so that the
    # attachment_id field gets no default value, which is exactly the
    # condition under test here.
    bdm_values = {
        'boot_index': 0,
        'volume_id': volume_id,
        'source_type': 'volume',
        'destination_type': 'volume',
        'device_name': 'vda',
    }
    bdms = [objects.BlockDeviceMapping(
        **fake_block_device.AnonFakeDbBlockDeviceDict(bdm_values))]

    self.compute_api._validate_bdm(self.context, instance, flavor, bdms)

    # The old-style attach must leave an explicit None, not an unset field.
    self.assertIsNone(bdms[0].attachment_id)

    mock_get.assert_called_once_with(self.context, volume_id)
    mock_reserve_volume.assert_called_once_with(
        self.context, volume_id)
|
||||
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
|
||||
return_value=17)
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
|
|
Loading…
Reference in New Issue