Detach volume after deleting instance with no host

If an instance is booted from a volume, shelved, and goes into an error
state for some reason, the volume from which the instance was booted
remains in the 'in-use' state even after the instance is deleted,
because the instance has no host associated with it.

Call _local_delete() to detach volumes and destroy BDMs if the instance
is in the shelved_offloaded state or has no host associated with it.
This cleans up both volumes and networks.
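
For reference, the cleanup that _local_delete() performs is roughly the
following (a minimal sketch, not the actual implementation; the
volume_api/network_api attributes and the connector value mirror the
ones exercised by the new unit test below):

    # Sketch: local cleanup for an instance whose compute host is gone.
    def _local_cleanup(api, context, instance, bdms, connector):
        for bdm in bdms:
            if bdm.is_volume:
                # Close the volume connection and detach, so the volume
                # leaves the 'in-use' state.
                api.volume_api.terminate_connection(
                    context, bdm.volume_id, connector)
                api.volume_api.detach(context, bdm.volume_id)
            bdm.destroy()
        # Release network allocations, then remove the instance from db.
        api.network_api.deallocate_for_instance(context, instance)
        instance.destroy()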

Note:
I had submitted the same patch [1] earlier, which was reverted [2] due
to a race condition on Jenkins when an instance is deleted while it is
in the building state. In this patch I have fixed that race condition
by reverting the ObjectActionError exception handling in _delete.
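
The retained handling is roughly (a sketch; the instance.refresh() call
in the handler is an assumption based on the surrounding code, since
the first hunk below is truncated at the except line):

    try:
        # Delete locally: detach volumes, destroy bdms, clean networks.
        self._local_delete(context, instance, bdms, delete_type, cb)
        quotas.commit()
        return
    except exception.ObjectActionError:
        # Raced with a concurrent state change (e.g. the instance is
        # still building); refresh and fall through to the normal
        # delete path instead of failing.
        instance.refresh()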

[1] Ic630ae7d026a9697afec46ac9ea40aea0f5b5ffb
[2] Id4e405e7579530ed1c1f22ccc972d45b6d185f41

Closes-Bug: 1404867
Closes-Bug: 1408527
Closes-Bug: 1458308
Change-Id: Ic107d8edc7ee7a4ebb04eac58ef0cdbf506d6173
ankitagrawal 2015-09-23 03:58:19 -07:00
parent cb02486816
commit ecdf331baf
3 changed files with 89 additions and 39 deletions


@@ -1651,16 +1651,13 @@ class API(base.Base):
                 return
             shelved_offloaded = (instance.vm_state
                                  == vm_states.SHELVED_OFFLOADED)
-            if not instance.host and not shelved_offloaded:
+            if not instance.host or shelved_offloaded:
                 try:
-                    compute_utils.notify_about_instance_usage(
-                        self.notifier, context, instance,
-                        "%s.start" % delete_type)
-                    instance.destroy()
-                    compute_utils.notify_about_instance_usage(
-                        self.notifier, context, instance,
-                        "%s.end" % delete_type,
-                        system_metadata=instance.system_metadata)
+                    # If instance is in shelved_offloaded state or
+                    # instance.host is None, then delete instance
+                    # from db and clean bdms info and network info.
+                    self._local_delete(context, instance, bdms, delete_type,
+                                       cb)
                     quotas.commit()
                     return
                 except exception.ObjectActionError:
@@ -1670,42 +1667,43 @@ class API(base.Base):
                 self._confirm_resize_on_deleting(context, instance)
 
             is_local_delete = True
-            try:
-                if not shelved_offloaded:
+            if instance.host and not shelved_offloaded:
+                try:
+                    # Check if compute service is up.
                     service = objects.Service.get_by_compute_host(
                         context.elevated(), instance.host)
                     is_local_delete = not self.servicegroup_api.service_is_up(
                         service)
-                if not is_local_delete:
-                    if original_task_state in (task_states.DELETING,
-                                               task_states.SOFT_DELETING):
-                        LOG.info(_LI('Instance is already in deleting state, '
-                                     'ignoring this request'),
-                                 instance=instance)
-                        quotas.rollback()
-                        return
-                    self._record_action_start(context, instance,
-                                              instance_actions.DELETE)
-
-                    # NOTE(snikitin): If instance's vm_state is 'soft-delete',
-                    # we should not count reservations here, because instance
-                    # in soft-delete vm_state have already had quotas
-                    # decremented. More details:
-                    # https://bugs.launchpad.net/nova/+bug/1333145
-                    if instance.vm_state == vm_states.SOFT_DELETED:
-                        quotas.rollback()
-
-                    cb(context, instance, bdms,
-                       reservations=quotas.reservations)
-            except exception.ComputeHostNotFound:
-                pass
+                except exception.ComputeHostNotFound:
+                    pass
 
             if is_local_delete:
                 # If instance is in shelved_offloaded state or compute node
                 # isn't up, delete instance from db and clean bdms info and
-                # network info
+                # network info.
                 self._local_delete(context, instance, bdms, delete_type, cb)
                 quotas.commit()
+            else:
+                if original_task_state in (task_states.DELETING,
+                                           task_states.SOFT_DELETING):
+                    LOG.info(_LI('Instance is already in deleting state, '
+                                 'ignoring this request.'),
+                             instance=instance)
+                    quotas.rollback()
+                    return
+                self._record_action_start(context, instance,
+                                          instance_actions.DELETE)
+
+                # NOTE(snikitin): If instance's vm_state is 'soft-delete',
+                # we should not count reservations here, because instance
+                # in soft-delete vm_state have already had quotas
+                # decremented. More details:
+                # https://bugs.launchpad.net/nova/+bug/1333145
+                if instance.vm_state == vm_states.SOFT_DELETED:
+                    quotas.rollback()
+
+                cb(context, instance, bdms,
+                   reservations=quotas.reservations)
 
         except exception.InstanceNotFound:
             # NOTE(comstud): Race condition. Instance already gone.


@@ -10802,7 +10802,8 @@ class ComputePolicyTestCase(BaseTestCase):
         rules = {"compute:delete": []}
         self.policy.set_rules(rules)
 
-        self.compute_api.delete(self.context, instance)
+        with mock.patch.object(self.compute_api, '_local_delete'):
+            self.compute_api.delete(self.context, instance)
 
     def test_create_fail(self):
         rules = {"compute:create": [["false:false"]]}


@@ -997,6 +997,7 @@ class _ComputeAPIUnitTestMixIn(object):
                                         tzinfo=iso8601.iso8601.Utc())
         updates['deleted_at'] = delete_time
         updates['deleted'] = True
+        inst.save()
         fake_inst = fake_instance.fake_db_instance(**updates)
         db.instance_destroy(self.context, inst.uuid,
                             constraint='constraint').AndReturn(fake_inst)
@@ -1007,9 +1008,11 @@
 
         self.mox.ReplayAll()
 
-        self.compute_api.delete(self.context, inst)
-        for k, v in updates.items():
-            self.assertEqual(inst[k], v)
+        with mock.patch.object(self.compute_api.network_api,
+                               'deallocate_for_instance'):
+            self.compute_api.delete(self.context, inst)
+            for k, v in updates.items():
+                self.assertEqual(inst[k], v)
 
     def _fake_do_delete(context, instance, bdms,
                         rservations=None, local=False):
@@ -2972,6 +2975,54 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
         self.assertRaises(exception.CannotResizeToSameFlavor,
                           self._test_resize, same_flavor=True)
 
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+    @mock.patch('nova.context.RequestContext.elevated')
+    @mock.patch.object(objects.Instance, 'save')
+    @mock.patch.object(quota.QUOTAS, 'reserve')
+    @mock.patch.object(compute_utils, 'notify_about_instance_usage')
+    @mock.patch.object(objects.BlockDeviceMapping, 'destroy')
+    @mock.patch.object(objects.Instance, 'destroy')
+    def test_delete_volume_backed_instance_in_error_state(
+            self, mock_instance_destroy, bdm_destroy,
+            notify_about_instance_usage, mock_reserve,
+            mock_save, mock_elevated, bdm_get_by_instance_uuid):
+        volume_id = uuidutils.generate_uuid()
+        bdms = [objects.BlockDeviceMapping(
+                **fake_block_device.FakeDbBlockDeviceDict(
+                {'id': 42, 'volume_id': volume_id,
+                 'source_type': 'volume', 'destination_type': 'volume',
+                 'delete_on_termination': False}))]
+        reservations = ['fake-resv']
+        delete_time = datetime.datetime(1955, 11, 5, 9, 30,
+                                        tzinfo=iso8601.iso8601.Utc())
+        updates = {'deleted_at': delete_time,
+                   'deleted': True}
+
+        fake_inst = fake_instance.fake_instance_obj(self.context, **updates)
+        mock_instance_destroy.return_value = fake_inst
+        bdm_get_by_instance_uuid.return_value = bdms
+        mock_reserve.return_value = reservations
+        mock_elevated.return_value = self.context
+
+        params = {'host': '', 'vm_state': vm_states.ERROR}
+        inst = self._create_instance_obj(params=params)
+        inst._context = self.context
+        connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
+
+        with mock.patch.object(self.compute_api.network_api,
+                               'deallocate_for_instance') as mock_deallocate, \
+             mock.patch.object(self.compute_api.volume_api,
+                               'terminate_connection') as mock_terminate_conn, \
+             mock.patch.object(self.compute_api.volume_api,
+                               'detach') as mock_detach:
+            self.compute_api.delete(self.context, inst)
+
+            mock_deallocate.assert_called_once_with(self.context, inst)
+            mock_detach.assert_called_once_with(self.context, volume_id)
+            mock_terminate_conn.assert_called_once_with(self.context,
+                                                        volume_id, connector)
+            bdm_destroy.assert_called_once_with()
+
 
 class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
                                    test.NoDBTestCase):