From 3b7a2ba47a2b711ff0f356788e41446a0f58bdca Mon Sep 17 00:00:00 2001
From: John Griffith
Date: Thu, 18 Jan 2018 18:44:10 +0000
Subject: [PATCH] Use provider_id for SolidFire Volume lookups

Over the years, the various iterations of volume lookup in the
SolidFire driver have become a bit redundant. The biggest problem
shows up when we start dealing with things like managed volumes:
most of the methods we have in place expect the volume name on the
SolidFire backend to match the Cinder UUID. For manage, migrate and
some other operations, however, that isn't a valid assumption.

SolidFire doesn't allow renaming an existing volume; your only
choice in that case is to clone or copy to a new volume, which is
far too heavyweight here.

So, we harden the volume lookup a bit. First we attempt to use the
provider_id info that's stored with a volume; from there we fall
back to looking for the name:uuid mapping, and finally to checking
the volume attributes. This makes delete calls much more efficient
and fixes the current bug caused by imported volumes not having the
Cinder UUID as their name.

We still need some follow-up work to consolidate all of these get
calls, but for now this just fixes the recently reported bug.

Change-Id: I1a1d02a1407926c22fbadcc1049434e24b92549b
Closes-Bug: #1744005
---
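
Reviewer note (placed below the "---" so `git am` ignores it): the
lookup order described above boils down to a three-tier search. The
sketch below is illustrative only; `api` and its two list methods are
hypothetical stand-ins for the driver's _issue_api_request helpers,
not real SolidFire bindings.

    # Hypothetical outline of the three-tier lookup (not driver code).
    def find_backend_volume(api, vref):
        # Tier 1: provider_id stores "<volumeID> <accountID> <clusterID>"
        # space-separated, so a single ListVolumes call is the cheap path.
        provider_id = vref.get('provider_id')
        if provider_id:
            try:
                sf_vid, _sf_aid, _sf_cluster = provider_id.split(' ')
                vols = api.list_volumes(start_volume_id=sf_vid, limit=1)
                if vols:
                    return vols[0]
            except ValueError:
                pass  # malformed provider_id; fall through to the scans

        # Tier 2: driver-created volumes embed the cinder UUID in the name.
        vols = api.list_volumes_for_tenant(vref['project_id'])
        for v in vols:
            if vref['id'] in v['name']:
                return v

        # Tier 3: imported volumes may only carry the UUID in attributes.
        for v in vols:
            if v['attributes'].get('uuid') == vref['id']:
                return v
        return None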

 .../drivers/solidfire/test_solidfire.py |  26 ++---
 cinder/volume/drivers/solidfire.py      | 102 ++++++++++--------
 2 files changed, 73 insertions(+), 55 deletions(-)

diff --git a/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py b/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py
index 42ae4674a1b..6ad389afe95 100644
--- a/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py
+++ b/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py
@@ -529,17 +529,17 @@ class SolidFireVolumeTestCase(test.TestCase):
                            'targetSecret': 'shhhh',
                            'username': 'john-wayne'}]
 
-        get_vol_result = [{'volumeID': 5,
-                           'name': 'test_volume',
-                           'accountID': 25,
-                           'sliceCount': 1,
-                           'totalSize': 1 * units.Gi,
-                           'enable512e': True,
-                           'access': "readWrite",
-                           'status': "active",
-                           'attributes': {},
-                           'qos': None,
-                           'iqn': 'super_fake_iqn'}]
+        get_vol_result = {'volumeID': 5,
+                          'name': 'test_volume',
+                          'accountID': 25,
+                          'sliceCount': 1,
+                          'totalSize': 1 * units.Gi,
+                          'enable512e': True,
+                          'access': "readWrite",
+                          'status': "active",
+                          'attributes': {},
+                          'qos': None,
+                          'iqn': 'super_fake_iqn'}
 
         mod_conf = self.configuration
         mod_conf.sf_enable_vag = True
@@ -548,7 +548,7 @@ class SolidFireVolumeTestCase(test.TestCase):
                                '_get_sfaccounts_for_tenant',
                                return_value=fake_sfaccounts), \
                 mock.patch.object(sfv,
-                                  '_get_volumes_for_account',
+                                  '_get_sfvol_by_cinder_vref',
                                   return_value=get_vol_result), \
                 mock.patch.object(sfv,
                                   '_issue_api_request'), \
@@ -556,7 +556,7 @@ class SolidFireVolumeTestCase(test.TestCase):
                                   '_remove_volume_from_vags') as rem_vol:
 
             sfv.delete_volume(testvol)
-            rem_vol.assert_called_with(get_vol_result[0]['volumeID'])
+            rem_vol.assert_called_with(get_vol_result['volumeID'])
 
     def test_delete_volume_no_volume_on_backend(self):
         fake_sfaccounts = [{'accountID': 5,
diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py
index 0d9f6de90c1..159264e746c 100644
--- a/cinder/volume/drivers/solidfire.py
+++ b/cinder/volume/drivers/solidfire.py
@@ -160,10 +160,11 @@ class SolidFireDriver(san.SanISCSIDriver):
         2.0.9  - Always purge on delete volume
         2.0.10 - Add response to debug on retryable errors
         2.0.11 - Add ability to failback replicating volumes
+        2.0.12 - Fix bug #1744005
 
     """
 
-    VERSION = '2.0.11'
+    VERSION = '2.0.12'
 
     # ThirdPartySystems wiki page
     CI_WIKI_NAME = "NetApp_SolidFire_CI"
@@ -528,10 +529,6 @@ class SolidFireDriver(san.SanISCSIDriver):
 
         return response
 
-    def _get_active_volumes_by_sfaccount(self, account_id, endpoint=None):
-        return [v for v in self._get_volumes_by_sfaccount(account_id, endpoint)
-                if v['status'] == "active"]
-
     def _get_volumes_by_sfaccount(self, account_id, endpoint=None):
         """Get all volumes on cluster for specified account."""
         params = {'accountID': account_id}
@@ -540,6 +537,61 @@ class SolidFireDriver(san.SanISCSIDriver):
                                        params,
                                        endpoint=endpoint)['result']['volumes']
 
+    def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None):
+        # ListVolumesForAccount gives both Active and Deleted volumes;
+        # we require the SolidFire accountID, the cinder uuid is optional
+        vols = self._get_volumes_by_sfaccount(sf_account_id)
+        if cinder_uuid:
+            vlist = [v for v in vols if
+                     cinder_uuid in v['name']]
+        else:
+            vlist = [v for v in vols]
+        vlist = sorted(vlist, key=lambda k: k['volumeID'])
+        return vlist
+
+    def _get_sfvol_by_cinder_vref(self, vref):
+        sfvol = None
+        provider_id = vref.get('provider_id', None)
+        if provider_id:
+            try:
+                sf_vid, sf_aid, sf_cluster_id = provider_id.split(' ')
+            except ValueError:
+                LOG.warning("Invalid provider_id entry for volume: %s",
+                            vref.id)
+            else:
+                # There shouldn't be any clusters out in the field running
+                # Element < 8.0, but just in case; we'll do a try block
+                # here and fall back to the old methods just to be safe
+                try:
+                    sfvol = self._issue_api_request(
+                        'ListVolumes',
+                        {'startVolumeID': sf_vid,
+                         'limit': 1},
+                        version='8.0')['result']['volumes'][0]
+                except Exception:
+                    pass
+
+        sfvols = []
+        if not sfvol:
+            LOG.info("Failed to find volume by provider_id, "
+                     "attempting ListForAccount")
+            for account in self._get_sfaccounts_for_tenant(vref.project_id):
+                sfvols += self._issue_api_request(
+                    'ListVolumesForAccount',
+                    {'accountID': account['accountID']})['result']['volumes']
+            # Match on the name:uuid mapping that driver-created volumes
+            # carry (name_id covers migrated volumes as well)
+            for v in sfvols:
+                if vref.name_id in v['name']:
+                    sfvol = v
+                    break
+
+        if not sfvol:
+            # Hmmm, frankly if we get here there's a problem,
+            # but try one last trick
+            LOG.info("Failed to find volume by provider_id or account, "
+                     "attempting find by attributes.")
+            for v in sfvols:
+                if v['attributes'].get('uuid', None) in (vref.id,
+                                                         vref.name_id):
+                    sfvol = v
+                    break
+        return sfvol
+
     def _get_sfaccount_by_name(self, sf_account_name, endpoint=None):
         """Get SolidFire account object by name."""
         sfaccount = None
@@ -1077,19 +1129,6 @@ class SolidFireDriver(san.SanISCSIDriver):
             raise exception.SolidFireDriverException(msg)
         return sf_account
 
-    def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None):
-        # ListVolumesForAccount gives both Active and Deleted
-        # we require the solidfire accountID, uuid of volume
-        # is optional
-        vols = self._get_active_volumes_by_sfaccount(sf_account_id)
-        if cinder_uuid:
-            vlist = [v for v in vols if
-                     cinder_uuid in v['name']]
-        else:
-            vlist = [v for v in vols]
-        vlist = sorted(vlist, key=lambda k: k['volumeID'])
-        return vlist
-
     def _create_vag(self, iqn, vol_id=None):
         """Create a volume access group(vag).
 
@@ -1462,32 +1501,11 @@ class SolidFireDriver(san.SanISCSIDriver):
     def delete_volume(self, volume):
         """Delete SolidFire Volume from device.
 
-        SolidFire allows multiple volumes with same name,
-        volumeID is what's guaranteed unique.
-
+        SolidFire allows multiple volumes with the same name;
+        volumeID is what's guaranteed unique.
         """
-        sf_vol = None
-        accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
-        if accounts is None:
-            LOG.error("Account for Volume ID %s was not found on "
-                      "the SolidFire Cluster while attempting "
-                      "delete_volume operation!", volume['id'])
-            LOG.error("This usually means the volume was never "
-                      "successfully created.")
-            return
-
-        for acc in accounts:
-            vols = self._get_volumes_for_account(acc['accountID'],
-                                                 volume.name_id)
-            # Check for migration magic here
-            if (not vols and (volume.name_id != volume.id)):
-                vols = self._get_volumes_for_account(acc['accountID'],
-                                                     volume.id)
-
-            if vols:
-                sf_vol = vols[0]
-                break
-
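+        # Resolve the backend volume via provider_id when available,
+        # falling back to the name:uuid mapping and volume attributes
+        # (see _get_sfvol_by_cinder_vref above).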
+        sf_vol = self._get_sfvol_by_cinder_vref(volume)
         if sf_vol is not None:
             for vp in sf_vol.get('volumePairs', []):
                 LOG.debug("Deleting paired volume on remote cluster...")
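
Reviewer note (not part of the commit): a quick way to exercise the
new provider_id fast path in isolation, written in the style of
test_solidfire.py above; the vref dict and cluster id are made up
for illustration:

    sf_vol = {'volumeID': 5, 'name': 'test_volume'}
    vref = {'provider_id': '5 25 fake-cluster-id'}
    with mock.patch.object(sfv, '_issue_api_request',
                           return_value={'result':
                                         {'volumes': [sf_vol]}}) as req:
        self.assertEqual(sf_vol, sfv._get_sfvol_by_cinder_vref(vref))
    req.assert_called_once_with('ListVolumes',
                                {'startVolumeID': '5', 'limit': 1},
                                version='8.0')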