nimble: fix for parent volume delete

Currently we put the volume into an error state if it has dependent
clones. Instead, attempt the delete; if the volume has dependent clones,
catch the exception, bring the volume back online, and keep it available.

Change-Id: I0ba8f43f14f5f4bf318f00b916b787ecc9481505
Closes-Bug: 1718814
This commit is contained in:
Raunak Kumar 2017-09-21 17:38:58 -07:00
parent 815c66d3a2
commit a4cf964d00
2 changed files with 49 additions and 3 deletions

View File

@ -171,6 +171,7 @@ FAKE_POSITIVE_GROUP_INFO_RESPONSE = {
'unused_reserve_bytes': 0}
FAKE_GENERIC_POSITIVE_RESPONSE = ""
FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE = "Object has a clone"
FAKE_TYPE_ID = fake.VOLUME_TYPE_ID
FAKE_POOL_ID = fake.GROUP_ID
@ -643,6 +644,27 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
self.mock_client_service.assert_has_calls(expected_calls)
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
# No other volumes reported on this host, so the driver's host scan is a no-op.
@mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
mock.Mock(return_value=[]))
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
'nimble', 'nimble_pass', '10.18.108.55', 'default', '*'))
# is_volume_backup_clone returns ['', ''] — presumably (parent_vol, snapshot)
# both empty, i.e. "not a backup clone"; TODO confirm against the driver.
@mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock(
return_value = ['', '']))
def test_delete_volume_with_clone(self):
# Deleting a volume that has a dependent clone: the backend raises
# NimbleAPIException ("Object has a clone"); the driver must surface
# this to Cinder as VolumeIsBusy instead of leaving the volume offline.
self.mock_client_service.delete_vol.side_effect = \
nimble.NimbleAPIException(FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE)
self.assertRaises(
exception.VolumeIsBusy,
self.driver.delete_volume,
{'name': 'testvolume'})
# Expected call order: take the volume offline, attempt the delete
# (which fails), then bring the volume back online so it stays usable.
expected_calls = [mock.call.online_vol(
'testvolume', False),
mock.call.delete_vol('testvolume'),
mock.call.online_vol('testvolume', True)]
self.mock_client_service.assert_has_calls(expected_calls)
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(

View File

@ -69,6 +69,7 @@ SM_SUBNET_MGMT_PLUS_DATA = 'mgmt-data'
SM_STATE_MSG = "is already in requested state"
SM_OBJ_EXIST_MSG = "Object exists"
SM_OBJ_ENOENT_MSG = "No such object"
SM_OBJ_HAS_CLONE = "has a clone"
IOPS_ERR_MSG = "Please set valid IOPS limit in the range"
LUN_ID = '0'
WARN_LEVEL = 80
@ -105,6 +106,10 @@ class NimbleAPIException(exception.VolumeBackendAPIException):
message = _("Unexpected response from Nimble API")
# Driver-specific "volume busy" error (e.g. delete refused because the
# volume still has dependent clones); subclasses Cinder's VolumeIsBusy so
# the manager's standard busy handling applies.
class NimbleVolumeBusyException(exception.VolumeIsBusy):
message = _("Nimble Cinder Driver: Volume Busy")
class NimbleBaseVolumeDriver(san.SanDriver):
"""OpenStack driver to enable Nimble Controller.
@ -211,7 +216,18 @@ class NimbleBaseVolumeDriver(san.SanDriver):
eventlet.sleep(DEFAULT_SLEEP)
self.APIExecutor.online_vol(volume['name'], False)
LOG.debug("Deleting volume %(vol)s", {'vol': volume['name']})
self.APIExecutor.delete_vol(volume['name'])
try:
self.APIExecutor.delete_vol(volume['name'])
except NimbleAPIException as ex:
LOG.debug("delete volume exception: %s", ex)
if SM_OBJ_HAS_CLONE in six.text_type(ex):
LOG.warning('Volume %(vol)s : %(state)s',
{'vol': volume['name'],
'state': SM_OBJ_HAS_CLONE})
# set the volume back to be online and raise busy exception
self.APIExecutor.online_vol(volume['name'], True)
raise exception.VolumeIsBusy(volume_name=volume['name'])
raise
# Nimble backend does not delete the snapshot from the parent volume
# if there is a dependent clone. So the deletes need to be in reverse
# order i.e.
@ -1663,10 +1679,18 @@ class NimbleRestAPIExecutor(object):
url = self.uri + api
r = requests.delete(url, headers=self.headers, verify=self.verify)
if r.status_code != 201 and r.status_code != 200:
msg = _("Failed to execute api %(api)s : %(msg)s %(code)s") % {
base = "Failed to execute api %(api)s: Error Code: %(code)s" % {
'api': api,
'msg': r.json()['messages'][1]['text'],
'code': r.status_code}
LOG.debug("Base error : %(base)s", {'base': base})
try:
msg = _("%(base)s Message: %(msg)s") % {
'base': base,
'msg': r.json()['messages'][1]['text']}
except IndexError:
msg = _("%(base)s Message: %(msg)s") % {
'base': base,
'msg': six.text_type(r.json())}
raise NimbleAPIException(msg)
return r.json()