Detach volumes when VM creation fails

If the boot-volume creation fails, the data volume is left in the
"in-use" state, attached to the server, which is now in the "error"
state. The user cannot detach the volume because of the server's error
state.

The user can delete the server, but that leaves the volume apparently
attached to a server that no longer exists; that problem is being fixed
separately: https://review.openstack.org/#/c/340614/

The only way out is to ask an administrator to reset the state of the
data volume (an option the default policy does not grant to regular
users).
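
For reference, the admin-side workaround looks roughly like the
following python-cinderclient sketch; the client setup (admin_session)
and the stuck volume's ID (stuck_volume_id) are placeholders for
illustration, not part of this change:

    # Hypothetical admin-only workaround; admin_session and
    # stuck_volume_id are assumed names.
    from cinderclient import client

    cinder = client.Client('2', session=admin_session)
    volume = cinder.volumes.get(stuck_volume_id)
    # Force the volume back to a detached, usable state.
    cinder.volumes.reset_state(volume, 'available',
                               attach_status='detached')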

This change fixes the problem in the compute service: when instance
creation fails, the compute manager detaches the created volumes before
putting the VM into the error state. The instance can then be deleted
without worrying about attached volumes.
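
In outline, the new cleanup path behaves like the sketch below; this is
a condensed illustration of ComputeManager._cleanup_volumes as changed
in the diff that follows (warning-log details trimmed; sys and six are
imported as in the real module):

    def _cleanup_volumes(self, context, instance, bdms, raise_exc=True,
                         detach=True):
        exc_info = None
        for bdm in bdms:
            if detach and bdm.volume_id:
                try:
                    # Detach first so a failed boot cannot strand the
                    # volume "in-use" on a server headed for ERROR.
                    self._detach_volume(context, bdm, instance,
                                        destroy_bdm=bdm.delete_on_termination)
                except Exception:
                    exc_info = sys.exc_info()
            if bdm.volume_id and bdm.delete_on_termination:
                try:
                    self.volume_api.delete(context, bdm.volume_id)
                except Exception:
                    exc_info = sys.exc_info()
        if exc_info is not None and raise_exc:
            six.reraise(*exc_info)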

Conflicts:
      nova/compute/manager.py

NOTE(mriedem): The conflict in _delete_instance is due to restructuring
the method in I9269ffa2b80e48db96c622d0dc0817738854f602 in Pike. Also
note that _LW has to be used for the warning message since those
translation markers are still required in Ocata.
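
For example, the new warning in _cleanup_volumes keeps its Ocata-era
marker (imported from nova.i18n), matching the hunk below:

    from nova.i18n import _LW

    LOG.warning(_LW('Failed to detach volume: %(volume_id)s '
                    'due to %(exc)s'),
                {'volume_id': bdm.volume_id, 'exc': exc})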

Change-Id: I8b1c05317734e14ea73dc868941351bb31210bf0
Closes-bug: #1633249
(cherry picked from commit 61f6751a18)
(cherry picked from commit 22164d5118)
(cherry picked from commit 4dbe72f976)
Author:    Ameed Ashour
Committed: 2018-01-24 09:32:24 -05:00 by Matt Riedemann
Parent:    c407a69167
Commit:    b188492ca4
3 changed files with 77 additions and 21 deletions

nova/compute/manager.py

@@ -1796,7 +1796,7 @@ class ComputeManager(manager.Manager):
                           instance=instance)
             self._cleanup_allocated_networks(context, instance,
                     requested_networks)
-            self._cleanup_volumes(context, instance.uuid,
+            self._cleanup_volumes(context, instance,
                     block_device_mapping, raise_exc=False)
             compute_utils.add_instance_fault_from_exc(context,
                     instance, e, sys.exc_info(),
@@ -1849,7 +1849,7 @@ class ComputeManager(manager.Manager):
             LOG.exception(e.format_message(), instance=instance)
             self._cleanup_allocated_networks(context, instance,
                     requested_networks)
-            self._cleanup_volumes(context, instance.uuid,
+            self._cleanup_volumes(context, instance,
                     block_device_mapping, raise_exc=False)
             compute_utils.add_instance_fault_from_exc(context, instance,
                     e, sys.exc_info())
@@ -1863,7 +1863,7 @@ class ComputeManager(manager.Manager):
             LOG.exception(msg, instance=instance)
             self._cleanup_allocated_networks(context, instance,
                     requested_networks)
-            self._cleanup_volumes(context, instance.uuid,
+            self._cleanup_volumes(context, instance,
                     block_device_mapping, raise_exc=False)
             compute_utils.add_instance_fault_from_exc(context, instance,
                     e, sys.exc_info())
@@ -2312,14 +2312,27 @@ class ComputeManager(manager.Manager):
                 self.host, action=fields.NotificationAction.SHUTDOWN,
                 phase=fields.NotificationPhase.END)

-    def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
+    def _cleanup_volumes(self, context, instance, bdms, raise_exc=True,
+                         detach=True):
         exc_info = None

         for bdm in bdms:
-            LOG.debug("terminating bdm %s", bdm,
-                      instance_uuid=instance_uuid)
+            if detach and bdm.volume_id:
+                try:
+                    LOG.debug("Detaching volume: %s", bdm.volume_id,
+                              instance_uuid=instance.uuid)
+                    destroy = bdm.delete_on_termination
+                    self._detach_volume(context, bdm, instance,
+                                        destroy_bdm=destroy)
+                except Exception as exc:
+                    exc_info = sys.exc_info()
+                    LOG.warning(_LW('Failed to detach volume: %(volume_id)s '
+                                    'due to %(exc)s'),
+                                {'volume_id': bdm.volume_id, 'exc': exc})
             if bdm.volume_id and bdm.delete_on_termination:
                 try:
+                    LOG.debug("Deleting volume: %s", bdm.volume_id,
+                              instance_uuid=instance.uuid)
                     self.volume_api.delete(context, bdm.volume_id)
                 except Exception as exc:
                     exc_info = sys.exc_info()
@@ -2383,8 +2396,14 @@ class ComputeManager(manager.Manager):
                 # future to set an instance fault the first time
                 # and to only ignore the failure if the instance
                 # is already in ERROR.
-            self._cleanup_volumes(context, instance.uuid, bdms,
-                    raise_exc=False)
+            # NOTE(ameeda): The volumes already detached during the above
+            #               _shutdown_instance() call and this is why
+            #               detach is not requested from _cleanup_volumes()
+            #               in this case
+            self._cleanup_volumes(context, instance, bdms,
+                                  raise_exc=False, detach=False)
             # if a delete task succeeded, always update vm state and task
             # state without expecting task state to be DELETING
             instance.vm_state = vm_states.DELETED
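
To summarize the two call patterns this enables (illustrative,
condensed from the hunks above and below):

    # Build-failure path: volumes are still attached, so detach them
    # (detach defaults to True).
    self._cleanup_volumes(context, instance, block_device_mapping,
                          raise_exc=False)
    # Delete path: _shutdown_instance() has already detached everything,
    # so skip the redundant detach.
    self._cleanup_volumes(context, instance, bdms,
                          raise_exc=False, detach=False)
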
@@ -6715,7 +6734,7 @@ class ComputeManager(manager.Manager):
             try:
                 self._shutdown_instance(context, instance, bdms,
                                         notify=False)
-                self._cleanup_volumes(context, instance.uuid, bdms)
+                self._cleanup_volumes(context, instance, bdms)
             except Exception as e:
                 LOG.warning(_LW("Periodic cleanup failed to delete "
                                 "instance: %s"),

nova/tests/unit/compute/test_compute.py

@@ -6570,7 +6570,7 @@ class ComputeTestCase(BaseTestCase):
         mock_shutdown.assert_has_calls([
             mock.call(ctxt, inst1, bdms, notify=False),
             mock.call(ctxt, inst2, bdms, notify=False)])
-        mock_cleanup.assert_called_once_with(ctxt, inst2['uuid'], bdms)
+        mock_cleanup.assert_called_once_with(ctxt, inst2, bdms)
         mock_get_uuid.assert_has_calls([
             mock.call(ctxt, inst1.uuid, use_slave=True),
             mock.call(ctxt, inst2.uuid, use_slave=True)])

nova/tests/unit/compute/test_compute_mgr.py

@@ -242,7 +242,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         self.assertFalse(db_node.destroy.called)

     @mock.patch('nova.compute.utils.notify_about_instance_action')
-    def test_delete_instance_without_info_cache(self, mock_notify):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_delete_instance_without_info_cache(self, mock_detach,
+                                                mock_notify):
         instance = fake_instance.fake_instance_obj(
             self.context,
             uuid=uuids.instance,
@@ -2880,7 +2883,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
         set_error.assert_called_once_with(self.context, instance)

-    def test_cleanup_volumes(self):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_cleanup_volumes(self, mock_detach):
         instance = fake_instance.fake_instance_obj(self.context)
         bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
             {'volume_id': 'fake-id1', 'source_type': 'image',
@@ -2893,11 +2898,17 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         with mock.patch.object(self.compute.volume_api,
                                'delete') as volume_delete:
-            self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
+            self.compute._cleanup_volumes(self.context, instance, bdms)
+        calls = [mock.call(self.context, bdm, instance,
+                           destroy_bdm=bdm.delete_on_termination)
+                 for bdm in bdms]
+        self.assertEqual(calls, mock_detach.call_args_list)
         volume_delete.assert_called_once_with(self.context,
                                               bdms[1].volume_id)

-    def test_cleanup_volumes_exception_do_not_raise(self):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_cleanup_volumes_exception_do_not_raise(self, mock_detach):
         instance = fake_instance.fake_instance_obj(self.context)
         bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
             {'volume_id': 'fake-id1', 'source_type': 'image',
@@ -2911,12 +2922,17 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         with mock.patch.object(self.compute.volume_api,
                                'delete',
                                side_effect=[test.TestingException(), None]) as volume_delete:
-            self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
+            self.compute._cleanup_volumes(self.context, instance, bdms,
                                           raise_exc=False)
         calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
         self.assertEqual(calls, volume_delete.call_args_list)
+        calls = [mock.call(self.context, bdm, instance,
+                           destroy_bdm=True) for bdm in bdms]
+        self.assertEqual(calls, mock_detach.call_args_list)

-    def test_cleanup_volumes_exception_raise(self):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_cleanup_volumes_exception_raise(self, mock_detach):
         instance = fake_instance.fake_instance_obj(self.context)
         bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
             {'volume_id': 'fake-id1', 'source_type': 'image',
@@ -2931,10 +2947,31 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                                'delete',
                                side_effect=[test.TestingException(), None]) as volume_delete:
             self.assertRaises(test.TestingException,
-                self.compute._cleanup_volumes, self.context, instance.uuid,
+                self.compute._cleanup_volumes, self.context, instance,
                 bdms)
         calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
         self.assertEqual(calls, volume_delete.call_args_list)
+        calls = [mock.call(self.context, bdm, instance,
+                           destroy_bdm=bdm.delete_on_termination)
+                 for bdm in bdms]
+        self.assertEqual(calls, mock_detach.call_args_list)
+
+    @mock.patch('nova.compute.manager.ComputeManager._detach_volume',
+                side_effect=exception.CinderConnectionFailed(reason='idk'))
+    def test_cleanup_volumes_detach_fails_raise_exc(self, mock_detach):
+        instance = fake_instance.fake_instance_obj(self.context)
+        bdms = block_device_obj.block_device_make_list(
+            self.context,
+            [fake_block_device.FakeDbBlockDeviceDict(
+                {'volume_id': uuids.volume_id,
+                 'source_type': 'volume',
+                 'destination_type': 'volume',
+                 'delete_on_termination': False})])
+        self.assertRaises(exception.CinderConnectionFailed,
+                          self.compute._cleanup_volumes, self.context,
+                          instance, bdms)
+        mock_detach.assert_called_once_with(
+            self.context, bdms[0], instance, destroy_bdm=False)

     def test_stop_instance_task_state_none_power_state_shutdown(self):
         # Tests that stop_instance doesn't puke when the instance power_state
@@ -3544,7 +3581,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
         mock_clean_net.assert_called_once_with(self.context, self.instance,
                                                self.requested_networks)
         mock_clean_vol.assert_called_once_with(self.context,
-            self.instance.uuid, self.block_device_mapping, raise_exc=False)
+            self.instance, self.block_device_mapping, raise_exc=False)
         mock_add.assert_called_once_with(self.context, self.instance,
                                          mock.ANY, mock.ANY)
         mock_nil.assert_called_once_with(self.instance)
@@ -3777,7 +3814,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
         mock_clean_net.assert_called_once_with(self.context, self.instance,
                                                self.requested_networks)
         mock_clean_vol.assert_called_once_with(self.context,
-            self.instance.uuid, self.block_device_mapping,
+            self.instance, self.block_device_mapping,
             raise_exc=False)
         mock_add.assert_called_once_with(self.context, self.instance,
                                          mock.ANY, mock.ANY, fault_message=mock.ANY)
@@ -3922,7 +3959,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
         self._assert_build_instance_update(mock_save)
         if cleanup_volumes:
             mock_clean_vol.assert_called_once_with(self.context,
-                self.instance.uuid, self.block_device_mapping,
+                self.instance, self.block_device_mapping,
                 raise_exc=False)
         if nil_out_host_and_node:
             mock_nil.assert_called_once_with(self.instance)