diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py index 4911aa3f8..7f75c6124 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py @@ -27,7 +27,11 @@ import six from cinder import context from cinder import exception from cinder.objects import fields +from cinder.objects import group +from cinder.objects import group_snapshot +from cinder.objects import volume_type from cinder import test +from cinder.tests.unit import fake_group from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.volume.drivers.dell_emc.vmax import common @@ -37,6 +41,7 @@ from cinder.volume.drivers.dell_emc.vmax import masking from cinder.volume.drivers.dell_emc.vmax import provision from cinder.volume.drivers.dell_emc.vmax import rest from cinder.volume.drivers.dell_emc.vmax import utils +from cinder.volume import utils as volume_utils from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils @@ -75,6 +80,11 @@ class VMAXCommonData(object): rdf_group_name = '23_24_007' rdf_group_no = '70' u4v_version = '84' + storagegroup_name_source = 'Grp_source_sg' + storagegroup_name_target = 'Grp_target_sg' + group_snapshot_name = 'Grp_snapshot' + target_group_name = 'Grp_target' + storagegroup_name_with_id = 'GrpId_group_name' # connector info wwpn1 = "123456789012345" @@ -194,6 +204,53 @@ class VMAXCommonData(object): rep_extra_specs['retries'] = 0 rep_extra_specs['srp'] = srp2 + test_volume_type_1 = volume_type.VolumeType( + id='abc', name='abc', + extra_specs=extra_specs + ) + test_volume_type_list = volume_type.VolumeTypeList( + objects=[test_volume_type_1]) + test_group_1 = group.Group( + context=None, name=storagegroup_name_source, + group_id='abc', size=1, + id='12345', status='available', + provider_auth=None, volume_type_ids=['abc'], + group_type_id='grptypeid', + volume_types=test_volume_type_list, + host=fake_host, provider_location=six.text_type(provider_location)) + + test_group_failed = group.Group( + context=None, name=failed_resource, + group_id='abc', size=1, + id='12345', status='available', + provider_auth=None, volume_type_ids=['abc'], + group_type_id='grptypeid', + volume_types=test_volume_type_list, + host=fake_host, provider_location=six.text_type(provider_location)) + + test_group = fake_group.fake_group_obj( + context=ctx, name=storagegroup_name_source, + id='12345', host=fake_host) + + test_group_without_name = fake_group.fake_group_obj( + context=ctx, name=None, + id='12345', host=fake_host) + + test_vol_grp_name = 'Grp_source_sg_12345' + test_vol_grp_name_id_only = '12345' + + test_group_snapshot_1 = group_snapshot.GroupSnapshot( + context=None, id='123456', + group_id='12345', name=group_snapshot_name, + group_type_id='grptypeid', status='available', + group=test_group_1) + + test_group_snapshot_failed = group_snapshot.GroupSnapshot( + context=None, id='123456', + group_id='12345', name=failed_resource, + group_type_id='grptypeid', status='available', + group=test_group_failed) + # masking view dict masking_view_dict = { 'array': array, @@ -325,9 +382,21 @@ class VMAXCommonData(object): "maskingview": [masking_view_name_i], } ] + sg_details_rep = [{"childNames": [], + "numDevicesNonGk": 2, + "isLinkTarget": False, + "rdf": False, + "capacityGB": 2.0, + "name": storagegroup_name_source, + "snapVXSnapshots": ['12345'], + "symmetrixId": array, + 
"numSnapVXSnapshots": 1}] + sg_list = {"storageGroupId": [storagegroup_name_f, defaultstoragegroup_name]} + sg_list_rep = [storagegroup_name_with_id] + srp_details = {"srpSloDemandId": ["Bronze", "Diamond", "Gold", "None", "Optimized", "Silver"], "srpId": srp, @@ -400,6 +469,39 @@ class VMAXCommonData(object): {"symmetrixId": array, "snapVxCapable": True, "rdfCapable": True}]} + group_snap_vx = {"generation": 0, + "isLinked": False, + "numUniqueTracks": 0, + "isRestored": False, + "name": group_snapshot_name, + "numStorageGroupVolumes": 1, + "state": ["Established"], + "timeToLiveExpiryDate": "N/A", + "isExpired": False, + "numSharedTracks": 0, + "timestamp": "00:30:50 Fri, 02 Jun 2017 IST +0100", + "numSourceVolumes": 1 + } + group_snap_vx_1 = {"generation": 0, + "isLinked": False, + "numUniqueTracks": 0, + "isRestored": False, + "name": group_snapshot_name, + "numStorageGroupVolumes": 1, + "state": ["Copied"], + "timeToLiveExpiryDate": "N/A", + "isExpired": False, + "numSharedTracks": 0, + "timestamp": "00:30:50 Fri, 02 Jun 2017 IST +0100", + "numSourceVolumes": 1, + "linkedStorageGroup": + {"name": target_group_name, + "percentageCopied": 100}, + } + grp_snapvx_links = [{"name": target_group_name, + "percentageCopied": 100}, + {"name": "another-target", + "percentageCopied": 90}] rdf_group_list = {"rdfGroupID": [{"rdfgNumber": rdf_group_no, "label": rdf_group_name}]} @@ -592,12 +694,22 @@ class FakeRequestsSession(object): return_object = self.data.rdf_group_details else: return_object = self.data.rdf_group_list + elif 'storagegroup' in url: + return_object = self._replication_sg(url) elif 'snapshot' in url: return_object = self.data.volume_snap_vx elif 'capabilities' in url: return_object = self.data.capabilities return return_object + def _replication_sg(self, url): + return_object = None + if 'generation' in url: + return_object = self.data.group_snap_vx + elif 'storagegroup' in url: + return_object = self.data.sg_details_rep[0] + return return_object + def _system(self, url): return_object = None if 'job' in url: @@ -1059,6 +1171,84 @@ class VMAXUtilsTest(test.TestCase): is_fo3 = self.utils.is_volume_failed_over(None) self.assertFalse(is_fo3) + def test_update_volume_group_name(self): + group = self.data.test_group_1 + ref_group_name = self.data.test_vol_grp_name + vol_grp_name = self.utils.update_volume_group_name(group) + self.assertEqual(ref_group_name, vol_grp_name) + + def test_update_volume_group_name_id_only(self): + group = self.data.test_group_without_name + ref_group_name = self.data.test_vol_grp_name_id_only + vol_grp_name = self.utils.update_volume_group_name(group) + self.assertEqual(ref_group_name, vol_grp_name) + + def test_update_admin_metadata(self): + admin_metadata = {'targetVolumeName': '123456'} + ref_model_update = [{'id': '12345', + 'admin_metadata': admin_metadata}] + volume_model_update = {'id': '12345'} + volumes_model_update = [volume_model_update] + key = 'targetVolumeName' + values = {} + values['12345'] = '123456' + self.utils.update_admin_metadata( + volumes_model_update, key, values) + self.assertEqual(ref_model_update, volumes_model_update) + + def test_get_volume_group_utils(self): + group = self.data.test_group_1 + array, extraspecs_dict = self.utils.get_volume_group_utils( + group, interval=1, retries=1) + ref_array = self.data.array + self.assertEqual(ref_array, array) + + def test_update_extra_specs_list(self): + extra_specs = self.data.extra_specs + volume_type_id = 'abc' + extraspecs_dict = self.utils._update_extra_specs_list( + extra_specs, 
volume_type_id, interval=1, retries=1) + self.assertEqual(extra_specs, extraspecs_dict['extra_specs']) + + def test_update_intervals_and_retries(self): + extra_specs = self.data.extra_specs + ref_interval = 1 + extraspecs = self.utils._update_intervals_and_retries( + extra_specs, interval=1, retries=1) + self.assertEqual(ref_interval, extraspecs['interval']) + + def test_get_intervals_retries_dict(self): + ref_value = {'interval': 1, 'retries': 1} + ret_dict = self.utils.get_intervals_retries_dict( + interval=1, retries=1) + self.assertEqual(ref_value, ret_dict) + + def test_update_volume_model_updates(self): + volume_model_updates = [{'id': '1', 'status': 'available'}] + volumes = [self.data.test_volume] + ref_val = {'id': self.data.test_volume.id, + 'status': 'error_deleting'} + ret_val = self.utils.update_volume_model_updates( + volume_model_updates, volumes, 'abc', status='error_deleting') + self.assertEqual(ref_val, ret_val[1]) + + def test_update_volume_model_updates_empty_update_list(self): + volume_model_updates = [] + volumes = [self.data.test_volume] + ref_val = [{'id': self.data.test_volume.id, + 'status': 'available'}] + ret_val = self.utils.update_volume_model_updates( + volume_model_updates, volumes, 'abc') + self.assertEqual(ref_val, ret_val) + + def test_update_volume_model_updates_empty_vol_list(self): + volume_model_updates = [] + volumes = [] + ref_val = [] + ret_val = self.utils.update_volume_model_updates( + volume_model_updates, volumes, 'abc') + self.assertEqual(ref_val, ret_val) + class VMAXRestTest(test.TestCase): def setUp(self): @@ -2250,6 +2440,36 @@ class VMAXRestTest(test.TestCase): failover_payload, resource_name=resource_name, private='/private') + def test_get_storage_group_rep(self): + array = self.data.array + source_group_name = self.data.storagegroup_name_source + ref_details = self.data.sg_details_rep[0] + volume_group = self.rest.get_storage_group_rep(array, + source_group_name) + self.assertEqual(volume_group, ref_details) + + def test_get_volumes_in_storage_group(self): + array = self.data.array + storagegroup_name = self.data.storagegroup_name_source + ref_volumes = [self.data.device_id, self.data.device_id2] + volume_list = self.rest.get_volumes_in_storage_group( + array, storagegroup_name) + self.assertEqual(ref_volumes, volume_list) + + def test_create_storagegroup_snap(self): + array = self.data.array + extra_specs = self.data.extra_specs + source_group = self.data.storagegroup_name_source + snap_name = self.data.group_snapshot_name + with mock.patch.object( + self.rest, "create_storagegroup_snap") as mock_create: + self.rest.create_storagegroup_snap( + array, source_group, snap_name, extra_specs) + mock_create.assert_called_once_with(array, + source_group, + snap_name, + extra_specs) + class VMAXProvisionTest(test.TestCase): def setUp(self): @@ -2267,6 +2487,7 @@ class VMAXProvisionTest(test.TestCase): self.common = self.driver.common self.provision = self.common.provision self.utils = self.common.utils + self.rest = self.common.rest def test_create_storage_group(self): array = self.data.array @@ -2573,6 +2794,74 @@ class VMAXProvisionTest(test.TestCase): array, device_id, rdf_group_name, extra_specs, split=False) + def test_create_volume_group_success(self): + array = self.data.array + group_name = self.data.storagegroup_name_source + extra_specs = self.data.extra_specs + ref_value = self.data.storagegroup_name_source + storagegroup = self.provision.create_volume_group(array, + group_name, + extra_specs) + self.assertEqual(ref_value, 
storagegroup) + + def test_create_group_replica(self): + array = self.data.array + source_group = self.data.storagegroup_name_source + snap_name = self.data.group_snapshot_name + extra_specs = self.data.extra_specs + with mock.patch.object( + self.provision, + 'create_group_replica') as mock_create_replica: + self.provision.create_group_replica( + array, source_group, snap_name, extra_specs) + mock_create_replica.assert_called_once_with( + array, source_group, snap_name, extra_specs) + + def test_delete_group_replica(self): + array = self.data.array + snap_name = self.data.group_snapshot_name + source_group_name = self.data.storagegroup_name_source + with mock.patch.object( + self.provision, + 'delete_group_replica') as mock_delete_replica: + self.provision.delete_group_replica(array, + snap_name, + source_group_name) + mock_delete_replica.assert_called_once_with( + array, snap_name, source_group_name) + + def test_link_and_break_replica(self): + array = self.data.array + source_group_name = self.data.storagegroup_name_source + target_group_name = self.data.target_group_name + snap_name = self.data.group_snapshot_name + extra_specs = self.data.extra_specs + deleteSnapshot = False + with mock.patch.object( + self.provision, + 'link_and_break_replica') as mock_link_and_break_replica: + self.provision.link_and_break_replica( + array, source_group_name, + target_group_name, snap_name, + extra_specs, deleteSnapshot) + mock_link_and_break_replica.assert_called_once_with( + array, source_group_name, + target_group_name, snap_name, + extra_specs, deleteSnapshot) + + def test_unlink_group(self): + with mock.patch.object(self.rest, + 'modify_storagegroup_snap') as mock_mod: + self.provision._unlink_group( + self.data.array, self.data.storagegroup_name_source, + self.data.target_group_name, + self.data.group_snapshot_name, self.data.extra_specs) + mock_mod.assert_called_once_with( + self.data.array, self.data.storagegroup_name_source, + self.data.target_group_name, + self.data.group_snapshot_name, self.data.extra_specs, + unlink=True) + class VMAXCommonTest(test.TestCase): def setUp(self): @@ -3707,6 +3996,269 @@ class VMAXCommonTest(test.TestCase): self.data.srp, volume_name, False) self.assertEqual(ref_return, return_val) + def test_find_volume_group_name_from_id(self): + array = self.data.array + group_id = 'GrpId' + group_name = None + ref_group_name = self.data.storagegroup_name_with_id + with mock.patch.object( + self.rest, 'get_storage_group_list', + return_value=self.data.sg_list_rep): + group_name = self.common._find_volume_group_name_from_id( + array, group_id) + self.assertEqual(ref_group_name, group_name) + + def test_find_volume_group_name_from_id_not_found(self): + array = self.data.array + group_id = 'GrpId' + group_name = None + group_name = self.common._find_volume_group_name_from_id( + array, group_id) + self.assertIsNone(group_name) + + def test_find_volume_group(self): + group = self.data.test_group_1 + array = self.data.array + volume_group = self.common._find_volume_group(array, group) + ref_group = self.data.sg_details_rep[0] + self.assertEqual(ref_group, volume_group) + + def test_get_volume_device_ids(self): + array = self.data.array + volumes = [self.data.test_volume] + ref_device_ids = [self.data.device_id] + device_ids = self.common._get_volume_device_ids(volumes, array) + self.assertEqual(ref_device_ids, device_ids) + + def test_get_members_of_volume_group(self): + array = self.data.array + group_name = self.data.storagegroup_name_source + ref_volumes = 
[self.data.device_id, self.data.device_id2] + member_device_ids = self.common._get_members_of_volume_group( + array, group_name) + self.assertEqual(ref_volumes, member_device_ids) + + def test_get_members_of_volume_group_empty(self): + array = self.data.array + group_name = self.data.storagegroup_name_source + with mock.patch.object( + self.rest, 'get_volumes_in_storage_group', + return_value=None): + member_device_ids = self.common._get_members_of_volume_group( + array, group_name + ) + self.assertIsNone(member_device_ids) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True) + def test_create_group_replica(self, mock_check): + source_group = self.data.test_group_1 + snap_name = self.data.group_snapshot_name + with mock.patch.object( + self.common, + '_create_group_replica') as mock_create_replica: + self.common._create_group_replica( + source_group, snap_name) + mock_create_replica.assert_called_once_with( + source_group, snap_name) + + def test_create_group_replica_exception(self): + source_group = self.data.test_group_failed + snap_name = self.data.group_snapshot_name + with mock.patch.object( + volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._create_group_replica, + source_group, + snap_name) + + def test_create_group_snapshot(self): + context = None + group_snapshot = self.data.test_group_snapshot_1 + snapshots = [] + ref_model_update = {'status': fields.GroupStatus.AVAILABLE} + with mock.patch.object( + volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + model_update, snapshots_model_update = ( + self.common.create_group_snapshot( + context, group_snapshot, snapshots)) + self.assertEqual(ref_model_update, model_update) + + def test_create_group_snapshot_exception(self): + context = None + group_snapshot = self.data.test_group_snapshot_failed + snapshots = [] + with mock.patch.object( + volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + self.assertRaises(exception.VolumeBackendAPIException, + self.common.create_group_snapshot, + context, + group_snapshot, + snapshots) + + def test_create_group(self): + ref_model_update = {'status': fields.GroupStatus.AVAILABLE} + context = None + group = self.data.test_group_1 + with mock.patch.object( + volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + model_update = self.common.create_group(context, group) + self.assertEqual(ref_model_update, model_update) + + def test_create_group_exception(self): + context = None + group = self.data.test_group_snapshot_failed + with mock.patch.object( + volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + self.assertRaises(exception.VolumeBackendAPIException, + self.common.create_group, + context, + group) + + def test_delete_group_snapshot(self): + group_snapshot = self.data.test_group_snapshot_1 + snapshots = [] + context = None + ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED} + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + model_update, snapshots_model_update = ( + self.common.delete_group_snapshot(context, + group_snapshot, snapshots)) + self.assertEqual(ref_model_update, model_update) + + def test_delete_group_snapshot_success(self): + group_snapshot = self.data.test_group_snapshot_1 + snapshots = [] + ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED} + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + 
return_value=True): + model_update, snapshots_model_update = ( + self.common._delete_group_snapshot(group_snapshot, + snapshots)) + self.assertEqual(ref_model_update, model_update) + + def test_delete_group_snapshot_failed(self): + group_snapshot = self.data.test_group_snapshot_failed + snapshots = [] + ref_model_update = ( + {'status': fields.GroupSnapshotStatus.ERROR_DELETING}) + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + model_update, snapshots_model_update = ( + self.common._delete_group_snapshot(group_snapshot, + snapshots)) + self.assertEqual(ref_model_update, model_update) + + def test_update_group(self): + group = self.data.test_group_1 + add_vols = [self.data.test_volume] + remove_vols = [] + ref_model_update = {'status': fields.GroupStatus.AVAILABLE} + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + model_update, __, __ = self.common.update_group(group, + add_vols, + remove_vols) + self.assertEqual(ref_model_update, model_update) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True) + def test_update_group_not_found(self, mock_check): + group = self.data.test_group_1 + add_vols = [] + remove_vols = [] + with mock.patch.object( + self.common, '_find_volume_group', + return_value=None): + self.assertRaises(exception.GroupNotFound, + self.common.update_group, + group, + add_vols, + remove_vols) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True) + def test_update_group_exception(self, mock_check): + group = self.data.test_group_1 + add_vols = [] + remove_vols = [] + with mock.patch.object( + self.common, '_find_volume_group', + side_effect=exception.VolumeBackendAPIException): + self.assertRaises(exception.VolumeBackendAPIException, + self.common.update_group, + group, add_vols, remove_vols) + + def test_delete_group(self): + group = self.data.test_group_1 + volumes = [self.data.test_volume] + context = None + ref_model_update = {'status': fields.GroupStatus.DELETED} + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True),\ + mock.patch.object(self.rest, 'get_volumes_in_storage_group', + return_value=[]): + model_update, __ = self.common.delete_group( + context, group, volumes) + self.assertEqual(ref_model_update, model_update) + + def test_delete_group_success(self): + group = self.data.test_group_1 + volumes = [] + ref_model_update = {'status': fields.GroupStatus.DELETED} + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True),\ + mock.patch.object(self.rest, 'get_volumes_in_storage_group', + return_value=[]): + model_update, __ = self.common._delete_group(group, volumes) + self.assertEqual(ref_model_update, model_update) + + def test_delete_group_already_deleted(self): + group = self.data.test_group_failed + ref_model_update = {'status': fields.GroupStatus.DELETED} + volumes = [] + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + model_update, __ = self.common._delete_group(group, volumes) + self.assertEqual(ref_model_update, model_update) + + @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True) + def test_delete_group_failed(self, mock_check): + group = self.data.test_group_1 + volumes = [] + ref_model_update = {'status': fields.GroupStatus.ERROR_DELETING} + with mock.patch.object( + self.rest, 'delete_storage_group', + side_effect=exception.VolumeBackendAPIException): + 
model_update, __ = self.common._delete_group( + group, volumes) + self.assertEqual(ref_model_update, model_update) + + def test_create_group_from_src_success(self): + context = None + group = self.data.test_group_1 + group_snapshot = self.data.test_group_snapshot_1 + snapshots = [] + volumes = [self.data.test_volume] + source_group = None + source_vols = [] + ref_model_update = {'status': fields.GroupStatus.AVAILABLE} + with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', + return_value=True): + model_update, volumes_model_update = ( + self.common.create_group_from_src( + context, group, volumes, + group_snapshot, snapshots, + source_group, source_vols)) + self.assertEqual(ref_model_update, model_update) + class VMAXFCTest(test.TestCase): def setUp(self): diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py index b52a5009f..4dd2e7d00 100644 --- a/cinder/volume/drivers/dell_emc/vmax/common.py +++ b/cinder/volume/drivers/dell_emc/vmax/common.py @@ -22,6 +22,7 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils import six +import uuid from cinder import exception from cinder.i18n import _ @@ -31,7 +32,7 @@ from cinder.volume.drivers.dell_emc.vmax import masking from cinder.volume.drivers.dell_emc.vmax import provision from cinder.volume.drivers.dell_emc.vmax import rest from cinder.volume.drivers.dell_emc.vmax import utils - +from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) @@ -195,7 +196,7 @@ class VMAXCommon(object): all available SLO & Workload combinations :param array_info: the array information :returns: finalarrayinfolist - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ try: array = array_info['SerialNumber'] @@ -250,6 +251,14 @@ class VMAXCommon(object): volume_dict = (self._create_volume( volume_name, volume_size, extra_specs)) + if volume.group_id is not None: + group_name = self._find_volume_group_name_from_id( + extra_specs[utils.ARRAY], volume.group_id) + if group_name is not None: + self.masking.add_volume_to_storage_group( + extra_specs[utils.ARRAY], volume_dict['device_id'], + group_name, volume_name, extra_specs) + # Set-up volume replication, if enabled if self.utils.is_replication_enabled(extra_specs): rep_update = self._replicate_volume(volume, volume_name, @@ -268,7 +277,7 @@ class VMAXCommon(object): :param volume: volume object :param snapshot: snapshot object :returns: model_update - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ LOG.debug("Entering create_volume_from_snapshot.") model_update = {} @@ -597,7 +606,7 @@ class VMAXCommon(object): :param volume: the volume Object :param new_size: the new size to increase the volume to :returns: dict -- modifiedVolumeDict - the extended volume Object - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ original_vol_size = volume.size volume_name = volume.name @@ -708,6 +717,7 @@ class VMAXCommon(object): 'location_info': temp_location_info, 'thin_provisioning_support': True, 'thick_provisioning_support': False, + 'consistent_group_snapshot_enabled': True, 'max_over_subscription_ratio': max_oversubscription_ratio, 'reserved_percentage': reserved_percentage, @@ -731,7 +741,7 @@ class VMAXCommon(object): 'consistencygroup_support': False, 'thin_provisioning_support': True, 'thick_provisioning_support': False, - 'consistent_group_snapshot_enabled': False, + 'consistent_group_snapshot_enabled': 
True, 'max_over_subscription_ratio': max_oversubscription_ratio, 'reserved_percentage': reserved_percentage, @@ -854,9 +864,16 @@ class VMAXCommon(object): device_id = name['keybindings']['DeviceID'] element_name = self.utils.get_volume_element_name( volume_name) - founddevice_id = self.rest.find_volume_device_id( - array, element_name) - + admin_metadata = {} + if 'admin_metadata' in volume: + admin_metadata = volume.admin_metadata + if 'targetVolumeName' in admin_metadata: + target_vol_name = admin_metadata['targetVolumeName'] + founddevice_id = self.rest.find_volume_device_id( + array, target_vol_name) + else: + founddevice_id = self.rest.find_volume_device_id( + array, element_name) # Allow for an external app to delete the volume. if device_id and device_id != founddevice_id: founddevice_id = None @@ -968,7 +985,7 @@ class VMAXCommon(object): :param config_group_name: the config group name :returns: string -- configurationFile - name of the configuration file - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ if config_group_name is None: return CINDER_EMC_CONFIG_FILE @@ -1012,7 +1029,7 @@ class VMAXCommon(object): :param volume: the volume object :param volume_type_id: optional override of volume.volume_type_id :returns: dict -- extra spec dict - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ try: extra_specs, config_file, qos_specs = ( @@ -1134,7 +1151,7 @@ class VMAXCommon(object): :param is_snapshot: boolean -- Defaults to False :param from_snapvx: bool -- Defaults to False :returns: dict -- cloneDict the cloned volume dictionary - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ clone_name = volume.name snap_name = None @@ -1281,7 +1298,7 @@ class VMAXCommon(object): :param extra_specs: extra specifications :returns: int -- return code :returns: dict -- volume_dict - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ array = extra_specs[utils.ARRAY] is_valid_slo, is_valid_workload = self.provision.verify_slo_workload( @@ -1431,7 +1448,7 @@ class VMAXCommon(object): :param device_id: the device id :param volume_name: the volume name :param extra_specs: the extra specifications - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException: """ try: LOG.debug("Delete Volume: %(name)s. device_id: %(device_id)s.", @@ -1706,7 +1723,7 @@ class VMAXCommon(object): :param device_id: the device id :param volume_id: the cinder volume id :param external_ref: the external reference - :raises ManageExistingInvalidReference, ManageExistingAlreadyManaged: + :raises: ManageExistingInvalidReference, ManageExistingAlreadyManaged: """ # Ensure the volume exists on the array volume_details = self.rest.get_volume(array, device_id) @@ -2630,3 +2647,535 @@ class VMAXCommon(object): if is_descendant: is_source_nf_sg = True return source_nf_sg, source_sg, source_parent_sg, is_source_nf_sg + + def create_group(self, context, group): + """Creates a generic volume group. 
+ + :param context: the context + :param group: the group object to be created + :returns: dict -- modelUpdate = {'status': 'available'} + :raises: VolumeBackendAPIException, NotImplementedError + """ + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + + model_update = {'status': fields.GroupStatus.AVAILABLE} + + LOG.info("Create generic volume group: %(group)s.", + {'group': group.id}) + + vol_grp_name = self.utils.update_volume_group_name(group) + + try: + array, __ = self.utils.get_volume_group_utils( + group, self.interval, self.retries) + interval_retries_dict = self.utils.get_intervals_retries_dict( + self.interval, self.retries) + self.provision.create_volume_group( + array, vol_grp_name, interval_retries_dict) + except Exception: + exception_message = (_("Failed to create generic volume group:" + " %(volGrpName)s.") + % {'volGrpName': vol_grp_name}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + return model_update + + def delete_group(self, context, group, volumes): + """Deletes a generic volume group. + + :param context: the context + :param group: the group object to be deleted + :param volumes: the list of volumes in the generic group to be deleted + :returns: dict -- modelUpdate + :returns: list -- list of volume model updates + :raises: NotImplementedError + """ + LOG.info("Delete generic volume group: %(group)s.", + {'group': group.id}) + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + model_update, volumes_model_update = self._delete_group( + group, volumes) + return model_update, volumes_model_update + + def _delete_group(self, group, volumes): + """Helper function to delete a volume group. + + :param group: the group object + :param volumes: the member volume objects + :returns: model_update, volumes_model_update + """ + volumes_model_update = [] + array, extraspecs_dict_list = self.utils.get_volume_group_utils( + group, self.interval, self.retries) + vol_grp_name = None + + volume_group = self._find_volume_group( + array, group) + + if volume_group is None: + LOG.error("Cannot find generic volume group %(volGrpName)s.", + {'volGrpName': group.id}) + model_update = {'status': fields.GroupStatus.DELETED} + + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, volumes, group.id, status='deleted') + return model_update, volumes_model_update + + if 'name' in volume_group: + vol_grp_name = volume_group['name'] + volume_device_ids = self._get_members_of_volume_group( + array, vol_grp_name) + intervals_retries_dict = self.utils.get_intervals_retries_dict( + self.interval, self.retries) + deleted_volume_device_ids = [] + try: + # If there are no volumes in sg then delete it + if not volume_device_ids: + self.rest.delete_storage_group(array, vol_grp_name) + model_update = {'status': fields.GroupStatus.DELETED} + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, volumes, group.id, status='deleted') + return model_update, volumes_model_update + # First remove all the volumes from the SG + self.masking.remove_volumes_from_storage_group( + array, volume_device_ids, vol_grp_name, intervals_retries_dict) + for vol in volumes: + for extraspecs_dict in extraspecs_dict_list: + if vol.volume_type_id in extraspecs_dict['volumeTypeId']: + extraspecs = extraspecs_dict.get(utils.EXTRA_SPECS) + device_id = self._find_device_on_array(vol, + extraspecs) + if device_id in volume_device_ids: + 
self._remove_vol_and_cleanup_replication( + array, device_id, + vol.name, extraspecs, vol) + self._delete_from_srp( + array, device_id, "group vol", extraspecs) + else: + LOG.debug("Volume not present in storage group.") + # Add the device id to the deleted list + deleted_volume_device_ids.append(device_id) + # Once all volumes are deleted then delete the SG + self.rest.delete_storage_group(array, vol_grp_name) + model_update = {'status': fields.GroupStatus.DELETED} + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, volumes, group.id, status='deleted') + except Exception as e: + LOG.error("Error deleting volume group." + "Error received: %(e)s", {'e': e}) + model_update = {'status': fields.GroupStatus.ERROR_DELETING} + # Update the volumes_model_update + volumes_not_deleted = [] + for vol in volume_device_ids: + if vol not in deleted_volume_device_ids: + volumes_not_deleted.append(vol) + if not deleted_volume_device_ids: + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, + deleted_volume_device_ids, + group.id, status='deleted') + if not volumes_not_deleted: + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, + volumes_not_deleted, + group.id, status='deleted') + # As a best effort try to add back the undeleted volumes to sg + # Dont throw any exception in case of failure + try: + if not volumes_not_deleted: + self.masking.add_volumes_to_storage_group( + array, volumes_not_deleted, + vol_grp_name, intervals_retries_dict) + except Exception as ex: + LOG.error("Error in rollback - %(ex)s. " + "Failed to add back volumes to sg %(sg_name)s", + {'ex': ex, 'sg_name': vol_grp_name}) + + return model_update, volumes_model_update + + def create_group_snapshot(self, context, group_snapshot, snapshots): + """Creates a generic volume group snapshot. + + :param context: the context + :param group_snapshot: the group snapshot to be created + :param snapshots: snapshots + :returns: dict -- modelUpdate + :returns: list -- list of snapshots + :raises: VolumeBackendAPIException, NotImplementedError + """ + grp_id = group_snapshot.group_id + source_group = group_snapshot.get('group') + if not volume_utils.is_group_a_cg_snapshot_type(source_group): + raise NotImplementedError() + snapshots_model_update = [] + LOG.info( + "Create snapshot for %(grpId)s " + "group Snapshot ID: %(group_snapshot)s.", + {'group_snapshot': group_snapshot.id, + 'grpId': grp_id}) + + try: + snap_name = self.utils.truncate_string(group_snapshot.id, 19) + self._create_group_replica(source_group, + snap_name) + + except Exception as e: + exception_message = (_("Failed to create snapshot for group: " + "%(volGrpName)s. Exception received: %(e)s") + % {'volGrpName': grp_id, + 'e': six.text_type(e)}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + for snapshot in snapshots: + snapshots_model_update.append( + {'id': snapshot.id, + 'status': fields.SnapshotStatus.AVAILABLE}) + model_update = {'status': fields.GroupStatus.AVAILABLE} + + return model_update, snapshots_model_update + + def _create_group_replica( + self, source_group, snap_name): + """Create a group replica. + + This can be a group snapshot or a cloned volume group. 
+ :param source_group: the group object + :param snap_name: the name of the snapshot + """ + array, __ = ( + self.utils.get_volume_group_utils( + source_group, self.interval, self.retries)) + vol_grp_name = None + volume_group = ( + self._find_volume_group(array, source_group)) + if volume_group: + if 'name' in volume_group: + vol_grp_name = volume_group['name'] + if vol_grp_name is None: + exception_message = ( + _("Cannot find generic volume group %(group_id)s.") % + {'group_id': source_group.id}) + raise exception.VolumeBackendAPIException( + data=exception_message) + interval_retries_dict = self.utils.get_intervals_retries_dict( + self.interval, self.retries) + self.provision.create_group_replica( + array, vol_grp_name, + snap_name, interval_retries_dict) + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + """Delete a volume group snapshot. + + :param context: the context + :param group_snapshot: the volume group snapshot to be deleted + :param snapshots: the snapshot objects + :returns: model_update, snapshots_model_update + """ + model_update, snapshots_model_update = self._delete_group_snapshot( + group_snapshot, snapshots) + return model_update, snapshots_model_update + + def _delete_group_snapshot(self, group_snapshot, snapshots): + """Helper function to delete a group snapshot. + + :param group_snapshot: the group snapshot object + :param snapshots: the snapshot objects + :returns: model_update, snapshots_model_update + :raises: VolumeBackendAPIException, NotImplementedError + """ + snapshots_model_update = [] + model_update = {} + source_group = group_snapshot.get('group') + grp_id = group_snapshot.group_id + if not volume_utils.is_group_a_cg_snapshot_type(source_group): + raise NotImplementedError() + + LOG.info("Delete snapshot grpSnapshotId: %(grpSnapshotId)s" + " for source group %(grpId)s", + {'grpSnapshotId': group_snapshot.id, + 'grpId': grp_id}) + + snap_name = self.utils.truncate_string(group_snapshot.id, 19) + vol_grp_name = None + try: + # Get the array serial + array, __ = ( + self.utils.get_volume_group_utils( + source_group, self.interval, self.retries)) + # Get the volume group dict for getting the group name + volume_group = ( + self._find_volume_group(array, source_group)) + if volume_group: + if 'name' in volume_group: + vol_grp_name = volume_group['name'] + if vol_grp_name is None: + exception_message = ( + _("Cannot find generic volume group %(group_id)s.") % + {'group_id': source_group.id}) + raise exception.VolumeBackendAPIException( + data=exception_message) + # Check if the snapshot exists + if 'snapVXSnapshots' in volume_group: + if snap_name in volume_group['snapVXSnapshots']: + self.provision.delete_group_replica(array, + snap_name, + vol_grp_name) + else: + # Snapshot has been already deleted, return successfully + LOG.error("Cannot find group snapshot %(snapId)s.", + {'snapId': group_snapshot.id}) + model_update = {'status': fields.GroupSnapshotStatus.DELETED} + for snapshot in snapshots: + snapshots_model_update.append( + {'id': snapshot.id, + 'status': fields.SnapshotStatus.DELETED}) + except Exception as e: + LOG.error("Error deleting volume group snapshot. "
+ "Error received: %(e)s", {'e': e}) + model_update = { + 'status': fields.GroupSnapshotStatus.ERROR_DELETING} + + return model_update, snapshots_model_update + + def _find_volume_group_name_from_id(self, array, group_id): + """Finds the volume group name given its id + + :param array: the array serial number + :param group_id: the group id + :returns: group_name: Name of the group + """ + group_name = None + sg_list = self.rest.get_storage_group_list(array) + for sg in sg_list: + if group_id in sg: + group_name = sg + return group_name + return group_name + + def _find_volume_group(self, array, group): + """Finds a volume group given the group. + + :param array: the array serial number + :param group: the group object + :returns: volume group dictionary + """ + group_name = self.utils.update_volume_group_name(group) + volume_group = self.rest.get_storage_group_rep(array, group_name) + if not volume_group: + LOG.warning("Volume group %(group_id)s cannot be found", + {'group_id': group_name}) + return None + return volume_group + + def _get_members_of_volume_group(self, array, group_name): + """Get the members of a volume group. + + :param array: the array serial number + :param group_name: the storage group name + :returns: list -- member_device_ids + """ + member_device_ids = self.rest.get_volumes_in_storage_group( + array, group_name) + if not member_device_ids: + LOG.info("No member volumes found in %(group_id)s", + {'group_id': group_name}) + return member_device_ids + + def update_group(self, group, add_volumes, remove_volumes): + """Updates LUNs in generic volume group. + + :param group: storage configuration service instance + :param add_volumes: the volumes uuids you want to add to the vol grp + :param remove_volumes: the volumes uuids you want to remove from + the CG + :returns: model_update + :raises: VolumeBackendAPIException, NotImplementedError + """ + LOG.info("Update generic volume Group: %(group)s. " + "This adds and/or removes volumes from " + "a generic volume group.", + {'group': group.id}) + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + + array, __ = self.utils.get_volume_group_utils( + group, self.interval, self.retries) + model_update = {'status': fields.GroupStatus.AVAILABLE} + add_vols = [vol for vol in add_volumes] if add_volumes else [] + add_device_ids = self._get_volume_device_ids(add_vols, array) + remove_vols = [vol for vol in remove_volumes] if remove_volumes else [] + remove_device_ids = self._get_volume_device_ids(remove_vols, array) + vol_grp_name = None + try: + volume_group = self._find_volume_group( + array, group) + if volume_group: + if 'name' in volume_group: + vol_grp_name = volume_group['name'] + if vol_grp_name is None: + raise exception.GroupNotFound( + group_id=group.id) + interval_retries_dict = self.utils.get_intervals_retries_dict( + self.interval, self.retries) + # Add volume(s) to the group + if add_device_ids: + self.masking.add_volumes_to_storage_group( + array, add_device_ids, vol_grp_name, interval_retries_dict) + # Remove volume(s) from the group + if remove_device_ids: + self.masking.remove_volumes_from_storage_group( + array, remove_device_ids, + vol_grp_name, interval_retries_dict) + except exception.GroupNotFound: + raise + except Exception as ex: + exception_message = (_("Failed to update volume group:" + " %(volGrpName)s. 
Exception: %(ex)s.") + % {'volGrpName': group.id, + 'ex': ex}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + return model_update, None, None + + def _get_volume_device_ids(self, volumes, array): + """Get volume device ids from volume. + + :param volumes: volume objects + :returns: device_ids + """ + device_ids = [] + for volume in volumes: + specs = {utils.ARRAY: array} + device_id = self._find_device_on_array(volume, specs) + if device_id is None: + LOG.error("Volume %(name)s not found on the array.", + {'name': volume['name']}) + else: + device_ids.append(device_id) + return device_ids + + def create_group_from_src(self, context, group, volumes, + group_snapshot, snapshots, source_group, + source_vols): + """Creates the volume group from source. + + :param context: the context + :param group: the volume group object to be created + :param volumes: volumes in the consistency group + :param group_snapshot: the source volume group snapshot + :param snapshots: snapshots of the source volumes + :param source_group: the source volume group + :param source_vols: the source vols + :returns: model_update, volumes_model_update + model_update is a dictionary of cg status + volumes_model_update is a list of dictionaries of volume + update + :raises: VolumeBackendAPIException, NotImplementedError + """ + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + # Check if we need to create a snapshot + create_snapshot = False + volumes_model_update = [] + if group_snapshot: + source_vols_or_snapshots = snapshots + source_id = group_snapshot.id + actual_source_grp = group_snapshot + elif source_group: + source_vols_or_snapshots = source_vols + source_id = source_group.id + actual_source_grp = source_group + create_snapshot = True + else: + exception_message = (_("Must supply either group snapshot or " + "a source group.")) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug("Enter VMAX create_volume group_from_src. 
Group to be " + "created: %(grpId)s, Source : %(SourceGrpId)s.", + {'grpId': group.id, + 'SourceGrpId': source_id}) + + tgt_name = self.utils.update_volume_group_name(group) + self.create_group(context, group) + model_update = {'status': fields.GroupStatus.AVAILABLE} + snap_name = None + try: + array, extraspecs_dict_list = ( + self.utils.get_volume_group_utils( + group, self.interval, self.retries)) + vol_grp_name = "" + # Create the target devices + dict_volume_dicts = {} + target_volume_names = {} + for volume, source_vol_or_snapshot in zip( + volumes, source_vols_or_snapshots): + if 'size' in source_vol_or_snapshot: + volume_size = source_vol_or_snapshot['size'] + else: + volume_size = source_vol_or_snapshot['volume_size'] + for extraspecs_dict in extraspecs_dict_list: + if volume.volume_type_id in ( + extraspecs_dict['volumeTypeId']): + extraspecs = extraspecs_dict.get(utils.EXTRA_SPECS) + # Create a random UUID and use it as volume name + target_volume_name = six.text_type(uuid.uuid4()) + volume_dict = self.provision.create_volume_from_sg( + array, target_volume_name, + tgt_name, volume_size, extraspecs) + dict_volume_dicts[volume.id] = volume_dict + target_volume_names[volume.id] = target_volume_name + + if create_snapshot is True: + # We have to create a snapshot of the source group + snap_name = self.utils.truncate_string(group.id, 19) + self._create_group_replica(actual_source_grp, snap_name) + vol_grp_name = self.utils.update_volume_group_name( + source_group) + else: + # We need to check if the snapshot exists + snap_name = self.utils.truncate_string(source_id, 19) + source_group = actual_source_grp.get('group') + volume_group = self._find_volume_group(array, source_group) + if volume_group is not None: + if 'snapVXSnapshots' in volume_group: + if snap_name in volume_group['snapVXSnapshots']: + LOG.info("Snapshot is present on the array") + if 'name' in volume_group: + vol_grp_name = volume_group['name'] + # Link and break the snapshot to the source group + interval_retries_dict = self.utils.get_intervals_retries_dict( + self.interval, self.retries) + self.provision.link_and_break_replica( + array, vol_grp_name, tgt_name, snap_name, + interval_retries_dict, delete_snapshot=create_snapshot) + + except Exception: + exception_message = (_("Failed to create vol grp %(volGrpName)s" + " from source %(grpSnapshot)s.") + % {'volGrpName': group.id, + 'grpSnapshot': source_id}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, volumes, group.id, model_update['status']) + + # Update the provider_location + for volume_model_update in volumes_model_update: + if volume_model_update['id'] in dict_volume_dicts: + volume_model_update.update( + {'provider_location': six.text_type( + dict_volume_dicts[volume_model_update['id']])}) + + # Update the volumes_model_update with admin_metadata + self.utils.update_admin_metadata(volumes_model_update, + key='targetVolumeName', + values=target_volume_names) + + return model_update, volumes_model_update diff --git a/cinder/volume/drivers/dell_emc/vmax/fc.py b/cinder/volume/drivers/dell_emc/vmax/fc.py index 72fe7fa27..fd173c635 100644 --- a/cinder/volume/drivers/dell_emc/vmax/fc.py +++ b/cinder/volume/drivers/dell_emc/vmax/fc.py @@ -81,6 +81,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): - Support for compression on All Flash - Support for volume replication - Support for live migration + - Support for Generic 
Volume Group """ VERSION = "3.0.0" @@ -456,3 +457,70 @@ class VMAXFCDriver(driver.FibreChannelDriver): :returns: secondary_id, volume_update_list, group_update_list """ return self.common.failover_host(volumes, secondary_id, groups) + + def create_group(self, context, group): + """Creates a generic volume group. + + :param context: the context + :param group: the group object + """ + self.common.create_group(context, group) + + def delete_group(self, context, group, volumes): + """Deletes a generic volume group. + + :param context: the context + :param group: the group object + :param volumes: the member volumes + """ + return self.common.delete_group( + context, group, volumes) + + def create_group_snapshot(self, context, group_snapshot, snapshots): + """Creates a group snapshot. + + :param context: the context + :param group_snapshot: the group snapshot + :param snapshots: snapshots list + """ + return self.common.create_group_snapshot(context, + group_snapshot, snapshots) + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + """Deletes a group snapshot. + + :param context: the context + :param group_snapshot: the group snapshot + :param snapshots: snapshots list + """ + return self.common.delete_group_snapshot(context, + group_snapshot, snapshots) + + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + """Updates LUNs in generic volume group. + + :param context: the context + :param group: the group object + :param add_volumes: the volumes to add to the group + :param remove_volumes: the volumes to remove from the group + """ + return self.common.update_group(group, add_volumes, + remove_volumes) + + def create_group_from_src( + self, context, group, volumes, group_snapshot=None, + snapshots=None, source_group=None, source_vols=None): + """Creates the volume group from source. + + :param context: the context + :param group: the group object to be created + :param volumes: volumes in the group + :param group_snapshot: the source volume group snapshot + :param snapshots: snapshots of the source volumes + :param source_group: the dictionary of a volume group as source. + :param source_vols: a list of volume dictionaries in the source_group. + """ + return self.common.create_group_from_src( + context, group, volumes, group_snapshot, snapshots, source_group, + source_vols) diff --git a/cinder/volume/drivers/dell_emc/vmax/iscsi.py b/cinder/volume/drivers/dell_emc/vmax/iscsi.py index 260da112e..14f83d82f 100644 --- a/cinder/volume/drivers/dell_emc/vmax/iscsi.py +++ b/cinder/volume/drivers/dell_emc/vmax/iscsi.py @@ -86,6 +86,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): - Support for compression on All Flash - Support for volume replication - Support for live migration + - Support for Generic Volume Group """ VERSION = "3.0.0" @@ -400,3 +401,70 @@ class VMAXISCSIDriver(driver.ISCSIDriver): :returns: secondary_id, volume_update_list, group_update_list """ return self.common.failover_host(volumes, secondary_id, groups) + + def create_group(self, context, group): + """Creates a generic volume group. + + :param context: the context + :param group: the group object + """ + self.common.create_group(context, group) + + def delete_group(self, context, group, volumes): + """Deletes a generic volume group.
+ + :param context: the context + :param group: the group object + :param volumes: the member volumes + """ + return self.common.delete_group( + context, group, volumes) + + def create_group_snapshot(self, context, group_snapshot, snapshots): + """Creates a group snapshot. + + :param context: the context + :param group_snapshot: the group snapshot + :param snapshots: snapshots list + """ + return self.common.create_group_snapshot(context, + group_snapshot, snapshots) + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + """Deletes a group snapshot. + + :param context: the context + :param group_snapshot: the group snapshot + :param snapshots: snapshots list + """ + return self.common.delete_group_snapshot(context, + group_snapshot, snapshots) + + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + """Updates LUNs in group. + + :param context: the context + :param group: the group object + :param add_volumes: the volumes to add to the group + :param remove_volumes: the volumes to remove from the group + """ + return self.common.update_group(group, add_volumes, + remove_volumes) + + def create_group_from_src( + self, context, group, volumes, group_snapshot=None, + snapshots=None, source_group=None, source_vols=None): + """Creates the volume group from source. + + :param context: the context + :param group: the group object to be created + :param volumes: volumes in the group + :param group_snapshot: the source volume group snapshot + :param snapshots: snapshots of the source volumes + :param source_group: the dictionary of a volume group as source. + :param source_vols: a list of volume dictionaries in the source_group. + """ + return self.common.create_group_from_src( + context, group, volumes, group_snapshot, snapshots, source_group, + source_vols) diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py index 1ad2e6ba0..fbf9aad2e 100644 --- a/cinder/volume/drivers/dell_emc/vmax/masking.py +++ b/cinder/volume/drivers/dell_emc/vmax/masking.py @@ -612,6 +612,46 @@ class VMAXMasking(object): LOG.info("Added volume: %(vol_name)s to storage group %(sg_name)s.", {'vol_name': volume_name, 'sg_name': storagegroup_name}) + def add_volumes_to_storage_group( + self, serial_number, list_device_id, storagegroup_name, + extra_specs): + """Add a list of volumes to a storage group.
+ + :param serial_number: array serial number + :param list_device_id: list of volume device id + :param storagegroup_name: storage group name + :param extra_specs: extra specifications + """ + if not list_device_id: + LOG.info("add_volumes_to_storage_group: No volumes to add") + return + start_time = time.time() + temp_device_id_list = list_device_id + + @coordination.synchronized("emc-sg-{sg_name}") + def do_add_volume_to_sg(sg_name): + # Check if another process has added any volume to the + # sg while this process was waiting for the lock + volume_list = self.rest.get_volumes_in_storage_group( + serial_number, storagegroup_name) + for volume in volume_list: + if volume in temp_device_id_list: + LOG.info("Volume: %(volume_name)s is already part " + "of storage group %(sg_name)s.", + {'volume_name': volume, + 'sg_name': storagegroup_name}) + # Remove this device id from the list + temp_device_id_list.remove(volume) + self.rest.add_vol_to_sg(serial_number, storagegroup_name, + temp_device_id_list, extra_specs) + do_add_volume_to_sg(storagegroup_name) + + LOG.debug("Add volumes to storagegroup took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, + time.time())}) + LOG.info("Added volumes to storage group %(sg_name)s.", + {'sg_name': storagegroup_name}) + def remove_vol_from_storage_group( self, serial_number, device_id, storagegroup_name, volume_name, extra_specs): @@ -643,6 +683,43 @@ class VMAXMasking(object): raise exception.VolumeBackendAPIException( data=exception_message) + def remove_volumes_from_storage_group( + self, serial_number, list_of_device_ids, + storagegroup_name, extra_specs): + """Remove multiple volumes from a storage group. + + :param serial_number: the array serial number + :param list_of_device_ids: list of device ids + :param storagegroup_name: the name of the storage group + :param extra_specs: the extra specifications + :raises: VolumeBackendAPIException + """ + start_time = time.time() + + @coordination.synchronized("emc-sg-{sg_name}") + def do_remove_volumes_from_storage_group(sg_name): + self.rest.remove_vol_from_sg( + serial_number, storagegroup_name, + list_of_device_ids, extra_specs) + + LOG.debug("Remove volumes from storagegroup " + "took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, + time.time())}) + volume_list = self.rest.get_volumes_in_storage_group( + serial_number, storagegroup_name) + + for device_id in list_of_device_ids: + if device_id in volume_list: + exception_message = (_( + "Failed to remove device " + "with id %(dev_id)s from SG: %(sg_name)s.") + % {'dev_id': device_id, 'sg_name': storagegroup_name}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + return do_remove_volumes_from_storage_group(storagegroup_name) + def find_initiator_names(self, connector): """Check the connector object for initiators(ISCSI) or wwpns(FC). 
diff --git a/cinder/volume/drivers/dell_emc/vmax/provision.py b/cinder/volume/drivers/dell_emc/vmax/provision.py index a84833905..ae4737ca1 100644 --- a/cinder/volume/drivers/dell_emc/vmax/provision.py +++ b/cinder/volume/drivers/dell_emc/vmax/provision.py @@ -16,6 +16,7 @@ import time from oslo_log import log as logging +from oslo_service import loopingcall from cinder import coordination from cinder import exception @@ -25,6 +26,8 @@ from cinder.volume.drivers.dell_emc.vmax import utils LOG = logging.getLogger(__name__) WRITE_DISABLED = "Write Disabled" +UNLINK_INTERVAL = 15 +UNLINK_RETRIES = 30 class VMAXProvision(object): @@ -448,3 +451,125 @@ class VMAXProvision(object): {'action': action, 'src': device_id}) self.rest.modify_rdf_device_pair( array, device_id, rdf_group, extra_specs, split=False) + + def create_volume_group(self, array, group_name, extra_specs): + """Create a generic volume group. + + :param array: the array serial number + :param group_name: the name of the group + :param extra_specs: the extra specifications + :returns: volume_group + """ + return self.create_storage_group(array, group_name, + None, None, None, extra_specs) + + def create_group_replica( + self, array, source_group, snap_name, extra_specs): + """Create a replica (snapVx) of a volume group. + + :param array: the array serial number + :param source_group: the source group name + :param snap_name: the name for the snap shot + :param extra_specs: extra specifications + """ + LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.", + {'srcGroup': source_group}) + + # Create snapshot + self.rest.create_storagegroup_snap( + array, source_group, snap_name, extra_specs) + + def delete_group_replica(self, array, snap_name, + source_group_name): + """Delete the snapshot. + + :param array: the array serial number + :param snap_name: the name for the snap shot + :param source_group_name: the source group name + """ + # Delete snapvx snapshot + LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s " + "snapshot: %(snap_name)s.", + {'srcGroup': source_group_name, + 'snap_name': snap_name}) + # The check for existence of snapshot has already happened + # So we just need to delete the snapshot + self.rest.delete_storagegroup_snap(array, snap_name, source_group_name) + + def link_and_break_replica(self, array, source_group_name, + target_group_name, snap_name, extra_specs, + delete_snapshot=False): + """Links a group snap and breaks the relationship. 
+
+        :param array: the array serial number
+        :param source_group_name: the source group name
+        :param target_group_name: the target group name
+        :param snap_name: the snapshot name
+        :param extra_specs: extra specifications
+        :param delete_snapshot: delete snapshot flag
+        """
+        LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s "
+                  "targetGroup: %(tgtGroup)s.",
+                  {'srcGroup': source_group_name,
+                   'tgtGroup': target_group_name})
+        # Link the snapshot
+        self.rest.modify_storagegroup_snap(
+            array, source_group_name, target_group_name, snap_name,
+            extra_specs, link=True)
+        # Unlink the snapshot
+        LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s "
+                  "targetGroup: %(tgtGroup)s.",
+                  {'srcGroup': source_group_name,
+                   'tgtGroup': target_group_name})
+        self._unlink_group(array, source_group_name,
+                           target_group_name, snap_name, extra_specs)
+        # Delete the snapshot if necessary
+        if delete_snapshot:
+            LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
+                      "snapshot: %(snap_name)s.",
+                      {'srcGroup': source_group_name,
+                       'snap_name': snap_name})
+            self.rest.delete_storagegroup_snap(array, snap_name,
+                                               source_group_name)
+
+    def _unlink_group(
+            self, array, source_group_name, target_group_name, snap_name,
+            extra_specs):
+        """Unlink a target group from its source group.
+
+        :param array: the array serial number
+        :param source_group_name: the source group name
+        :param target_group_name: the target group name
+        :param snap_name: the snap name
+        :param extra_specs: extra specifications
+        :returns: return code
+        """
+
+        def _unlink_grp():
+            """Called at an interval until the synchronization is finished.
+
+            :raises: loopingcall.LoopingCallDone
+            """
+            retries = kwargs['retries']
+            try:
+                kwargs['retries'] = retries + 1
+                if not kwargs['modify_grp_snap_success']:
+                    self.rest.modify_storagegroup_snap(
+                        array, source_group_name, target_group_name,
+                        snap_name, extra_specs, unlink=True)
+                    kwargs['modify_grp_snap_success'] = True
+            except exception.VolumeBackendAPIException:
+                pass
+
+            if kwargs['retries'] > UNLINK_RETRIES:
+                LOG.error("_unlink_grp failed after %(retries)d "
+                          "tries.", {'retries': retries})
+                raise loopingcall.LoopingCallDone(retvalue=30)
+            if kwargs['modify_grp_snap_success']:
+                raise loopingcall.LoopingCallDone()
+
+        kwargs = {'retries': 0,
+                  'modify_grp_snap_success': False}
+        timer = loopingcall.FixedIntervalLoopingCall(_unlink_grp)
+        rc = timer.start(interval=UNLINK_INTERVAL).wait()
+        return rc
diff --git a/cinder/volume/drivers/dell_emc/vmax/rest.py b/cinder/volume/drivers/dell_emc/vmax/rest.py
index 46f888ee2..1187c44db 100644
--- a/cinder/volume/drivers/dell_emc/vmax/rest.py
+++ b/cinder/volume/drivers/dell_emc/vmax/rest.py
@@ -1626,7 +1626,7 @@ class VMAXRest(object):
         kwargs = {'retries': 0,
                   'wait_for_sync_called': False}
         timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync)
-        rc = timer.start(interval=extra_specs[utils.INTERVAL]).wait()
+        rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait()
         return rc
 
     def _is_sync_complete(self, array, source_device_id, snap_name,
@@ -1890,3 +1890,91 @@ class VMAXRest(object):
                          % {'rdf_num': rdf_group, 'device_id': device_id})
         self.delete_resource(array, REPLICATION, 'rdf_group', resource_name,
                              private="/private", params=params)
+
+    def get_storage_group_rep(self, array, storage_group_name):
+        """Given a name, return storage group details for replication.
+
+        :param array: the array serial number
+        :param storage_group_name: the name of the storage group
+        :returns: storage group dict or None
+        """
+        return self.get_resource(
+            array, REPLICATION, 'storagegroup',
+            resource_name=storage_group_name)
+
+    def get_volumes_in_storage_group(self, array, storagegroup_name):
+        """Given a storage group name, get the list of volumes in it.
+
+        :param array: the array serial number
+        :param storagegroup_name: the storage group name
+        :returns: volume_list
+        """
+        volume_list = None
+        params = {"storageGroupId": storagegroup_name}
+
+        volume_list = self.get_volume_list(array, params)
+        if not volume_list:
+            LOG.debug("Cannot find record for storage group %(storageGrpId)s",
+                      {'storageGrpId': storagegroup_name})
+        return volume_list
+
+    def create_storagegroup_snap(self, array, source_group,
+                                 snap_name, extra_specs):
+        """Create a snapVx snapshot of a storage group.
+
+        :param array: the array serial number
+        :param source_group: the source group name
+        :param snap_name: the name of the snapshot
+        :param extra_specs: the extra specifications
+        """
+        payload = {"snapshotName": snap_name}
+        resource_type = ('storagegroup/%(sg_name)s/snapshot'
+                         % {'sg_name': source_group})
+        status_code, job = self.create_resource(
+            array, REPLICATION, resource_type, payload)
+        self.wait_for_job('Create storage group snapVx', status_code,
+                          job, extra_specs)
+
+    def modify_storagegroup_snap(
+            self, array, source_sg_id, target_sg_id, snap_name,
+            extra_specs, link=False, unlink=False):
+        """Link or unlink a snapVx to or from a target storagegroup.
+
+        :param array: the array serial number
+        :param source_sg_id: the source storage group id
+        :param target_sg_id: the target storage group id
+        :param snap_name: the snapshot name
+        :param extra_specs: extra specifications
+        :param link: Flag to indicate action = Link
+        :param unlink: Flag to indicate action = Unlink
+        """
+        payload = ''
+        if link:
+            payload = {"link": {"linkStorageGroupName": target_sg_id,
+                                "copy": "true"},
+                       "action": "Link"}
+        elif unlink:
+            payload = {"unlink": {"unlinkStorageGroupName": target_sg_id},
+                       "action": "Unlink"}
+
+        resource_name = ('%(sg_name)s/snapshot/%(snap_id)s/generation/0'
+                         % {'sg_name': source_sg_id, 'snap_id': snap_name})
+
+        status_code, job = self.modify_resource(
+            array, REPLICATION, 'storagegroup', payload,
+            resource_name=resource_name)
+
+        self.wait_for_job('Modify storagegroup snapVx relationship to target',
+                          status_code, job, extra_specs)
+
+    def delete_storagegroup_snap(self, array, snap_name, source_sg_id):
+        """Delete the snapshot of a storagegroup.
+
+        :param array: the array serial number
+        :param snap_name: the name of the snapshot
+        :param source_sg_id: the source storage group id
+        """
+        resource_name = ('%(sg_name)s/snapshot/%(snap_id)s/generation/0'
+                         % {'sg_name': source_sg_id, 'snap_id': snap_name})
+        return self.delete_resource(
+            array, REPLICATION, 'storagegroup', resource_name)
diff --git a/cinder/volume/drivers/dell_emc/vmax/utils.py b/cinder/volume/drivers/dell_emc/vmax/utils.py
index b95edc601..3bd603ace 100644
--- a/cinder/volume/drivers/dell_emc/vmax/utils.py
+++ b/cinder/volume/drivers/dell_emc/vmax/utils.py
@@ -19,6 +19,7 @@ import random
 import re
 from xml.dom import minidom
 
+from cinder.objects.group import Group
 from oslo_log import log as logging
 from oslo_utils import strutils
 import six
@@ -501,3 +502,162 @@ class VMAXUtils(object):
                 fields.ReplicationStatus.FAILED_OVER):
             return True
         return False
+
+    @staticmethod
+    def update_volume_model_updates(volume_model_updates,
+                                    volumes, group_id, status='available'):
+        """Update the volume model's status and return it.
+
+        :param volume_model_updates: list of volume model update dicts
+        :param volumes: list of volume objects
+        :param group_id: consistency group id
+        :param status: string value reflects the status of the member volume
+        :returns: volume_model_updates - updated volumes
+        """
+        LOG.info(
+            "Updating status for group: %(id)s.",
+            {'id': group_id})
+        if volumes:
+            for volume in volumes:
+                volume_model_updates.append({'id': volume.id,
+                                             'status': status})
+        else:
+            LOG.info("No volume found for group: %(cg)s.",
+                     {'cg': group_id})
+        return volume_model_updates
+
+    @staticmethod
+    def update_extra_specs(extraspecs):
+        """Update extra specs.
+
+        :param extraspecs: the additional info
+        :returns: extraspecs
+        """
+        try:
+            pool_details = extraspecs['pool_name'].split('+')
+            extraspecs[SLO] = pool_details[0]
+            extraspecs[WORKLOAD] = pool_details[1]
+            extraspecs[SRP] = pool_details[2]
+            extraspecs[ARRAY] = pool_details[3]
+        except KeyError:
+            LOG.error("Error parsing SLO, workload from"
+                      " the provided extra_specs.")
+        return extraspecs
+
+    @staticmethod
+    def get_intervals_retries_dict(interval, retries):
+        """Get the default intervals and retries.
+
+        :param interval: Interval in seconds between retries
+        :param retries: Retry count
+        :returns: default_dict
+        """
+        default_dict = {}
+        default_dict[INTERVAL] = interval
+        default_dict[RETRIES] = retries
+        return default_dict
+
+    @staticmethod
+    def update_admin_metadata(volumes_model_update, key, values):
+        """Update the volume_model_updates with admin metadata.
+
+        :param volumes_model_update: List of volume model updates
+        :param key: Key to be updated in the admin_metadata
+        :param values: Dictionary of values per volume id
+        """
+        for volume_model_update in volumes_model_update:
+            volume_id = volume_model_update['id']
+            if volume_id in values:
+                admin_metadata = {}
+                admin_metadata.update({key: values[volume_id]})
+                volume_model_update.update(
+                    {'admin_metadata': admin_metadata})
+
+    def get_volume_group_utils(self, group, interval, retries):
+        """Standard utility for generic volume groups.
+
+        :param group: the generic volume group object
+        :param interval: Interval in seconds between retries
+        :param retries: Retry count
+        :returns: array, extra specs dict list
+        :raises: VolumeBackendAPIException
+        """
+        arrays = set()
+        extraspecs_dict_list = []
+        # Check if it is a generic volume group instance
+        if isinstance(group, Group):
+            for volume_type in group.volume_types:
+                extraspecs_dict = (
+                    self._update_extra_specs_list(
+                        volume_type.extra_specs,
+                        volume_type.id, interval, retries))
+                extraspecs_dict_list.append(extraspecs_dict)
+                arrays.add(extraspecs_dict[EXTRA_SPECS][ARRAY])
+        else:
+            msg = (_("Unable to get volume type ids."))
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if len(arrays) != 1:
+            if not arrays:
+                msg = (_("Failed to get an array associated with "
+                         "volume group: %(groupid)s.")
+                       % {'groupid': group.id})
+            else:
+                msg = (_("There are multiple arrays "
+                         "associated with volume group: %(groupid)s.")
+                       % {'groupid': group.id})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        array = arrays.pop()
+        return array, extraspecs_dict_list
+
+    def _update_extra_specs_list(self, extraspecs, volumetype_id,
+                                 interval, retries):
+        """Update the extra specs list.
+
+        :param extraspecs: extraspecs
+        :param volumetype_id: volume type identifier
+        :param interval: Interval in seconds between retries
+        :param retries: Retry count
+        :returns: extraspecs_dict
+        """
+        extraspecs_dict = {}
+        extraspecs = self.update_extra_specs(extraspecs)
+        extraspecs = self._update_intervals_and_retries(
+            extraspecs, interval, retries)
+        extraspecs_dict["volumeTypeId"] = volumetype_id
+        extraspecs_dict[EXTRA_SPECS] = extraspecs
+        return extraspecs_dict
+
+    def update_volume_group_name(self, group):
+        """Format the name and id of a volume group consistently.
+
+        :param group: the generic volume group object
+        :returns: group_name -- formatted name + id
+        """
+        group_name = ""
+        if group.name is not None:
+            group_name = (
+                self.truncate_string(
+                    group.name, TRUNCATE_27) + "_")
+
+        group_name += group.id
+        return group_name
+
+    @staticmethod
+    def _update_intervals_and_retries(extra_specs, interval, retries):
+        """Update the extra_specs with interval and retries values.
+
+        :param extra_specs: extra specifications
+        :param interval: Interval in seconds between retries
+        :param retries: Retry count
+        :returns: Updated extra_specs
+        """
+        extra_specs[INTERVAL] = interval
+        LOG.debug("The interval is set at: %(intervalInSecs)s.",
+                  {'intervalInSecs': interval})
+        extra_specs[RETRIES] = retries
+        LOG.debug("Retries are set at: %(retries)s.",
+                  {'retries': retries})
+        return extra_specs
diff --git a/releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml b/releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml
new file mode 100644
index 000000000..65bc8532b
--- /dev/null
+++ b/releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add consistent group snapshot support to generic volume groups in
+    the VMAX driver version 3.0.
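Reviewer note (illustrative only, not part of the patch): the unlink loop in provision.py retries modify_storagegroup_snap every UNLINK_INTERVAL (15 s) and gives up after UNLINK_RETRIES (30) attempts, roughly 450 seconds worst case, with the looping call's return value of 30 signalling failure. The stand-alone sketch below mimics that bounded-retry termination logic without oslo.service or a live array; the names unlink_with_retries, fake_unlink and TransientError are hypothetical stand-ins, not driver code.

import time

UNLINK_INTERVAL = 15
UNLINK_RETRIES = 30


class TransientError(Exception):
    """Stand-in for exception.VolumeBackendAPIException."""


def unlink_with_retries(unlink_call, interval=UNLINK_INTERVAL,
                        retries=UNLINK_RETRIES, sleep=time.sleep):
    """Retry unlink_call until it succeeds or the retry budget is spent."""
    attempts = 0
    while True:
        attempts += 1
        try:
            unlink_call()
            return 0      # success; mirrors LoopingCallDone()
        except TransientError:
            pass          # swallow and retry, as _unlink_grp does
        if attempts > retries:
            return 30     # gave up; mirrors LoopingCallDone(retvalue=30)
        sleep(interval)


# Example: fail twice, then succeed; sleeping is stubbed out for the demo.
calls = {'count': 0}


def fake_unlink():
    calls['count'] += 1
    if calls['count'] < 3:
        raise TransientError()


print(unlink_with_retries(fake_unlink, sleep=lambda _s: None))  # prints 0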