diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py
index 50e3f076b75..760fd708a89 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py
@@ -1484,6 +1484,11 @@ class VMAXUtilsTest(test.TestCase):
 
         self.assertEqual(expected_snap_name, updated_name)
 
+    def test_change_replication(self):
+        new_type = {'extra_specs': self.data.extra_specs_rep_enabled}
+        self.assertFalse(self.utils.change_replication(True, new_type))
+        self.assertTrue(self.utils.change_replication(False, new_type))
+
 
 class VMAXRestTest(test.TestCase):
     def setUp(self):
@@ -4564,7 +4569,7 @@ class VMAXCommonTest(test.TestCase):
             migrate_status = self.common._migrate_volume(
                 self.data.array, volume, device_id, self.data.srp,
                 self.data.slo, self.data.workload, volume_name,
-                new_type, extra_specs)
+                new_type, extra_specs)[0]
             self.assertTrue(migrate_status)
         target_extra_specs = {
             'array': self.data.array, 'interval': 3,
@@ -4580,7 +4585,7 @@
             migrate_status = self.common._migrate_volume(
                 self.data.array, volume, device_id, self.data.srp,
                 self.data.slo, self.data.workload, volume_name,
-                new_type, extra_specs)
+                new_type, extra_specs)[0]
             self.assertTrue(migrate_status)
             mock_remove.assert_not_called()
@@ -4610,7 +4615,7 @@
         migrate_status = self.common._migrate_volume(
             self.data.array, self.data.test_volume, device_id,
             self.data.srp, self.data.slo,
-            self.data.workload, volume_name, new_type, extra_specs)
+            self.data.workload, volume_name, new_type, extra_specs)[0]
         self.assertFalse(migrate_status)
 
     def test_is_valid_for_storage_assisted_migration_true(self):
@@ -4620,20 +4625,20 @@
         device_id = self.data.device_id
         host = {'host': self.data.new_host}
         volume_name = self.data.test_volume.name
         ref_return = (True, 'Silver', 'OLTP')
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host, self.data.array,
-            self.data.srp, volume_name, False)
+            self.data.srp, volume_name, False, False)
         self.assertEqual(ref_return, return_val)
         # No current sgs found
         with mock.patch.object(self.rest, 'get_storage_groups_from_volume',
                                return_value=None):
             return_val = self.common._is_valid_for_storage_assisted_migration(
                 device_id, host, self.data.array, self.data.srp,
-                volume_name, False)
+                volume_name, False, False)
             self.assertEqual(ref_return, return_val)
         host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123'}
         ref_return = (True, 'Silver', 'NONE')
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host, self.data.array,
-            self.data.srp, volume_name, False)
+            self.data.srp, volume_name, False, False)
         self.assertEqual(ref_return, return_val)
 
     def test_is_valid_for_storage_assisted_migration_false(self):
@@ -4644,36 +4649,25 @@
         host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123+dummy+data'}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host, self.data.array,
-            self.data.srp, volume_name, False)
+            self.data.srp, volume_name, False, False)
         self.assertEqual(ref_return, return_val)
         # Wrong array
         host2 = {'host': 'HostX@Backend#Silver+OLTP+SRP_1+00012345678'}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host2, self.data.array,
-            self.data.srp, volume_name, False)
+            self.data.srp, volume_name, False, False)
         self.assertEqual(ref_return, return_val)
         # Wrong srp
         host3 = {'host': 'HostX@Backend#Silver+OLTP+SRP_2+000197800123'}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host3, self.data.array,
-            self.data.srp, volume_name, False)
+            self.data.srp, volume_name, False, False)
         self.assertEqual(ref_return, return_val)
         # Already in correct sg
         host4 = {'host': self.data.fake_host}
         return_val = self.common._is_valid_for_storage_assisted_migration(
             device_id, host4, self.data.array,
-            self.data.srp, volume_name, False)
-        self.assertEqual(ref_return, return_val)
-
-    def test_is_valid_for_storage_assisted_migration_none(self):
-        device_id = self.data.device_id
-        host = {'host': self.data.none_host}
-        volume_name = self.data.test_volume.name
-        # Testing for 'NONE' Workload
-        ref_return = (True, 'Diamond', 'NONE')
-        return_val = self.common._is_valid_for_storage_assisted_migration(
-            device_id, host, self.data.array,
-            self.data.srp, volume_name, False)
+            self.data.srp, volume_name, False, False)
         self.assertEqual(ref_return, return_val)
 
     def test_find_volume_group(self):
@@ -6333,12 +6327,13 @@
             self.data.array, self.data.masking_view_name_i)
         mock_delete_mv.assert_called_once()
 
+    @mock.patch.object(masking.VMAXMasking, 'return_volume_to_volume_group')
     @mock.patch.object(rest.VMAXRest, 'move_volume_between_storage_groups')
     @mock.patch.object(masking.VMAXMasking,
                        'get_or_create_default_storage_group')
     @mock.patch.object(masking.VMAXMasking, 'add_volume_to_storage_group')
     def test_add_volume_to_default_storage_group(
-            self, mock_add_sg, mock_get_sg, mock_move):
+            self, mock_add_sg, mock_get_sg, mock_move, mock_return):
         self.mask.add_volume_to_default_storage_group(
             self.data.array, self.device_id, self.volume_name,
             self.extra_specs)
@@ -6347,14 +6342,12 @@
             self.data.array, self.device_id, self.volume_name,
             self.extra_specs, src_sg=self.data.storagegroup_name_i)
         mock_move.assert_called_once()
-        mock_add_sg.reset_mock()
         vol_grp_member = deepcopy(self.data.test_volume)
         vol_grp_member.group_id = self.data.test_vol_grp_name_id_only
-        vol_grp_member.group = self.data.test_group
         self.mask.add_volume_to_default_storage_group(
             self.data.array, self.device_id, self.volume_name,
             self.extra_specs, volume=vol_grp_member)
-        self.assertEqual(2, mock_add_sg.call_count)
+        mock_return.assert_called_once()
 
     @mock.patch.object(provision.VMAXProvision, 'create_storage_group')
     def test_get_or_create_default_storage_group(self, mock_create_sg):
@@ -6570,6 +6563,32 @@
             mock_remove_volume.assert_not_called()
             mock_remove_child_sg.assert_called_once()
 
+    @mock.patch.object(masking.VMAXMasking,
+                       'add_volumes_to_storage_group')
+    def test_add_remote_vols_to_volume_group(self, mock_add):
+        self.mask.add_remote_vols_to_volume_group(
+            [self.data.test_volume], self.data.test_rep_group,
+            self.data.rep_extra_specs)
+        mock_add.assert_called_once()
+
+    @mock.patch.object(masking.VMAXMasking, 'add_remote_vols_to_volume_group')
+    @mock.patch.object(masking.VMAXMasking,
+                       '_check_adding_volume_to_storage_group')
+    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
+                       return_value=True)
+    @mock.patch.object(volume_utils, 'is_group_a_type',
+                       side_effect=[False, False, True, True])
+    def test_return_volume_to_volume_group(self, mock_type, mock_cg,
+                                           mock_check, mock_add):
+        vol_grp_member = deepcopy(self.data.test_volume)
+        vol_grp_member.group_id = self.data.test_vol_grp_name_id_only
+        vol_grp_member.group = self.data.test_group
+        for x in range(0, 2):
+            self.mask.return_volume_to_volume_group(
+                self.data.array, vol_grp_member, self.data.device_id,
+                self.data.test_volume.name, self.data.extra_specs)
+        mock_add.assert_called_once()
+
 
 class VMAXCommonReplicationTest(test.TestCase):
     def setUp(self):
@@ -7400,7 +7419,7 @@
     @mock.patch.object(utils.VMAXUtils, 'check_rep_status_enabled')
     @mock.patch.object(common.VMAXCommon,
                        '_remove_remote_vols_from_volume_group')
-    @mock.patch.object(common.VMAXCommon, '_add_remote_vols_to_volume_group')
+    @mock.patch.object(masking.VMAXMasking, 'add_remote_vols_to_volume_group')
    @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True)
     @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
                        return_value=True)
@@ -7414,14 +7433,6 @@
         mock_add.assert_called_once()
         mock_remove.assert_called_once()
 
-    @mock.patch.object(masking.VMAXMasking,
-                       'add_volumes_to_storage_group')
-    def test_add_remote_vols_to_volume_group(self, mock_add):
-        self.common._add_remote_vols_to_volume_group(
-            self.data.remote_array, [self.data.test_volume],
-            self.data.test_rep_group, self.data.rep_extra_specs)
-        mock_add.assert_called_once()
-
     @mock.patch.object(masking.VMAXMasking,
                        'remove_volumes_from_storage_group')
     def test_remove_remote_vols_from_volume_group(self, mock_rm):
@@ -7485,3 +7496,43 @@
             self.async_driver.common.failover_host(volumes, None, [])
             mock_fv.assert_not_called()
             mock_fg.assert_called_once()
+
+    @mock.patch.object(common.VMAXCommon, '_retype_volume', return_value=True)
+    @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group')
+    @mock.patch.object(common.VMAXCommon, '_retype_remote_volume',
+                       return_value=True)
+    @mock.patch.object(common.VMAXCommon, 'setup_volume_replication',
+                       return_value=VMAXCommonData.provider_location2)
+    @mock.patch.object(common.VMAXCommon,
+                       '_remove_vol_and_cleanup_replication')
+    @mock.patch.object(utils.VMAXUtils, 'is_replication_enabled',
+                       side_effect=[False, True, True, False, True, True])
+    def test_migrate_volume_replication(self, mock_re, mock_rm_rep,
+                                        mock_setup, mock_retype,
+                                        mock_rm, mock_rt):
+        new_type = {'extra_specs': {}}
+        for x in range(0, 3):
+            success, model_update = self.common._migrate_volume(
+                self.data.array, self.data.test_volume, self.data.device_id,
+                self.data.srp, 'OLTP', 'Silver', self.data.test_volume.name,
+                new_type, self.data.extra_specs)
+            self.assertTrue(success)
+        mock_rm_rep.assert_called_once()
+        mock_setup.assert_called_once()
+        mock_retype.assert_called_once()
+
+    @mock.patch.object(
+        common.VMAXCommon, '_get_replication_extra_specs',
+        return_value=VMAXCommonData.extra_specs_rep_enabled)
+    @mock.patch.object(
+        rest.VMAXRest, 'get_storage_groups_from_volume',
+        side_effect=[
+            VMAXCommonData.storagegroup_list, ['OS-SRP_1-Diamond-DSS-RE-SG']])
+    @mock.patch.object(common.VMAXCommon, '_retype_volume', return_value=True)
+    def test_retype_volume_replication(self, mock_retype, mock_sg, mock_es):
+        for x in range(0, 2):
+            self.common._retype_remote_volume(
+                self.data.array, self.data.test_volume, self.data.device_id,
+                self.data.test_volume.name, utils.REP_SYNC,
+                True, self.data.extra_specs)
+        mock_retype.assert_called_once()
diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py
index 23e9136cc26..b63334c3add 100644
--- a/cinder/volume/drivers/dell_emc/vmax/common.py
+++ b/cinder/volume/drivers/dell_emc/vmax/common.py
@@ -302,9 +302,8 @@ class VMAXCommon(object):
                 group_name, volume_name, extra_specs)
             # Add remote volume to remote group, if required
             if volume.group.is_replicated:
-                self._add_remote_vols_to_volume_group(
-                    extra_specs[utils.ARRAY],
-                    [volume], volume.group, extra_specs, rep_driver_data)
+                self.masking.add_remote_vols_to_volume_group(
+                    volume, volume.group, extra_specs, rep_driver_data)
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Creates a volume from a snapshot.
@@ -2220,14 +2219,6 @@
                       {'name': volume_name})
             return False
 
-        if self.utils.is_replication_enabled(extra_specs):
-            LOG.error("Volume %(name)s is replicated - "
-                      "Replicated volumes are not eligible for "
-                      "storage assisted retype. Host assisted "
-                      "retype is supported.",
-                      {'name': volume_name})
-            return False
-
         return self._slo_workload_migration(device_id, volume, host,
                                             volume_name, new_type,
                                             extra_specs)
@@ -2243,6 +2234,10 @@
        :param extra_specs: extra specifications
        :returns: boolean -- True if migration succeeded, False if error.
        """
+        vol_is_replicated = self.utils.is_replication_enabled(extra_specs)
+        # Check if old type and new type have different replication types
+        do_change_replication = self.utils.change_replication(
+            vol_is_replicated, new_type)
         is_compression_disabled = self.utils.is_compression_disabled(
             extra_specs)
         # Check if old type and new type have different compression types
@@ -2252,7 +2247,7 @@
             self._is_valid_for_storage_assisted_migration(
                 device_id, host, extra_specs[utils.ARRAY],
                 extra_specs[utils.SRP], volume_name,
-                do_change_compression))
+                do_change_compression, do_change_replication))
 
         if not is_valid:
             LOG.error(
                "assisted migration using retype.",
                {'name': volume_name})
             return False
 
-        if volume.host != host['host'] or do_change_compression:
+        if (volume.host != host['host'] or do_change_compression
+                or do_change_replication):
             LOG.debug(
                 "Retype Volume %(name)s from source host %(sourceHost)s "
-                "to target host %(targetHost)s. Compression change is %(cc)r.",
-                {'name': volume_name,
-                 'sourceHost': volume.host,
+                "to target host %(targetHost)s. Compression change is %(cc)r. "
+                "Replication change is %(rc)s.",
+                {'name': volume_name, 'sourceHost': volume.host,
                  'targetHost': host['host'],
-                 'cc': do_change_compression})
+                 'cc': do_change_compression, 'rc': do_change_replication})
             return self._migrate_volume(
                 extra_specs[utils.ARRAY], volume, device_id,
                 extra_specs[utils.SRP], target_slo,
@@ -2293,6 +2289,7 @@
         :param extra_specs: the extra specifications
         :returns: bool
         """
+        model_update, rep_mode, move_target = None, None, False
         target_extra_specs = new_type['extra_specs']
         target_extra_specs[utils.SRP] = srp
         target_extra_specs[utils.ARRAY] = array
@@ -2302,28 +2299,82 @@
         target_extra_specs[utils.RETRIES] = extra_specs[utils.RETRIES]
         is_compression_disabled = self.utils.is_compression_disabled(
             target_extra_specs)
+        if self.rep_config and self.rep_config.get('mode'):
+            rep_mode = self.rep_config['mode']
+            target_extra_specs[utils.REP_MODE] = rep_mode
+        was_rep_enabled = self.utils.is_replication_enabled(extra_specs)
+        is_rep_enabled = self.utils.is_replication_enabled(target_extra_specs)
+        if was_rep_enabled:
+            if not is_rep_enabled:
+                # Replication is being disabled by this retype
+                self._remove_vol_and_cleanup_replication(
+                    array, device_id, volume_name, extra_specs, volume)
+                model_update = {'replication_status': REPLICATION_DISABLED,
+                                'replication_driver_data': None}
+            else:
+                # Ensure both source and target volumes are retyped
+                move_target = True
+        else:
+            if is_rep_enabled:
+                # setup_volume_replication will put the volume in correct sg
+                rep_status, rdf_dict = self.setup_volume_replication(
+                    array, volume, device_id, target_extra_specs)
+                model_update = {
+                    'replication_status': rep_status,
+                    'replication_driver_data': six.text_type(rdf_dict)}
+                return True, model_update
 
         try:
             target_sg_name = self.masking.get_or_create_default_storage_group(
                 array, srp, target_slo, target_workload, extra_specs,
-                is_compression_disabled)
+                is_compression_disabled, is_rep_enabled, rep_mode)
         except Exception as e:
             LOG.error("Failed to get or create storage group. "
                       "Exception received was %(e)s.", {'e': e})
             return False
 
+        success = self._retype_volume(
+            array, device_id, volume_name, target_sg_name,
+            volume, target_extra_specs)
+        if success and move_target:
+            success = self._retype_remote_volume(
+                array, volume, device_id, volume_name,
+                rep_mode, is_rep_enabled, target_extra_specs)
+
+        return success, model_update
+
+    def _retype_volume(self, array, device_id, volume_name, target_sg_name,
+                       volume, extra_specs):
+        """Move the volume to the correct storage group.
+
+        Add the volume to the target storage group, or to the correct default
+        storage group, and check if it is there.
+
+        :param array: the array serial
+        :param device_id: the device id
+        :param volume_name: the volume name
+        :param target_sg_name: the target storage group name
+        :param volume: the volume object
+        :param extra_specs: the target extra specifications
+        :returns: bool
+        """
         storagegroups = self.rest.get_storage_groups_from_volume(
             array, device_id)
         if not storagegroups:
             LOG.warning("Volume : %(volume_name)s does not currently "
                         "belong to any storage groups.",
                         {'volume_name': volume_name})
+            # Add the volume to the target storage group
             self.masking.add_volume_to_storage_group(
                 array, device_id, target_sg_name, volume_name, extra_specs)
+            # Check if the volume should be returned to a generic volume group
+            self.masking.return_volume_to_volume_group(
+                array, volume, device_id, volume_name, extra_specs)
         else:
+            # Move the volume to the correct default storage group for
+            # its volume type
             self.masking.remove_and_reset_members(
-                array, volume, device_id, volume_name, target_extra_specs,
-                reset=True)
+                array, volume, device_id, volume_name,
+                extra_specs, reset=True)
 
         # Check that it has been added.
         vol_check = self.rest.is_volume_in_storagegroup(
@@ -2338,9 +2389,48 @@
 
         return True
 
+    def _retype_remote_volume(self, array, volume, device_id,
+                              volume_name, rep_mode, is_re, extra_specs):
+        """Retype the remote volume.
+
+        :param array: the array serial number
+        :param volume: the volume object
+        :param device_id: the device id
+        :param volume_name: the volume name
+        :param rep_mode: the replication mode
+        :param is_re: replication enabled
+        :param extra_specs: the target extra specs
+        :returns: bool
+        """
+        success = True
+        (target_device, remote_array, _, _, _) = (
+            self.get_remote_target_device(array, volume, device_id))
+        rep_extra_specs = self._get_replication_extra_specs(
+            extra_specs, self.rep_config)
+        rep_compr_disabled = self.utils.is_compression_disabled(
+            rep_extra_specs)
+        remote_sg_name = self.masking.get_or_create_default_storage_group(
+            remote_array, rep_extra_specs[utils.SRP],
+            rep_extra_specs[utils.SLO], rep_extra_specs[utils.WORKLOAD],
+            rep_extra_specs, rep_compr_disabled,
+            is_re=is_re, rep_mode=rep_mode)
+        found_storage_group_list = self.rest.get_storage_groups_from_volume(
+            remote_array, target_device)
+        move_rqd = True
+        for found_storage_group_name in found_storage_group_list:
+            # Check if the remote volume is already in the correct sg
+            if found_storage_group_name == remote_sg_name:
+                move_rqd = False
+                break
+        if move_rqd:
+            success = self._retype_volume(
+                remote_array, target_device, volume_name, remote_sg_name,
+                volume, rep_extra_specs)
+        return success
+
     def _is_valid_for_storage_assisted_migration(
-            self, device_id, host, source_array,
-            source_srp, volume_name, do_change_compression):
+            self, device_id, host, source_array, source_srp, volume_name,
+            do_change_compression, do_change_replication):
         """Check if volume is suitable for storage assisted (pool) migration.
 
         :param device_id: the volume device id
         :param host: the host object
         :param source_array: the volume's current array serial number
         :param source_srp: the volume's current pool name
         :param volume_name: the name of the volume to be migrated
         :param do_change_compression: do change compression
+        :param do_change_replication: flag indicating replication change
         :returns: boolean -- True/False
         :returns: string -- targetSlo
         :returns: string -- targetWorkload
         """
@@ -2375,6 +2466,8 @@
                 target_workload = 'NONE'
             else:
                 raise IndexError
+            if target_slo.lower() == 'none':
+                target_slo = None
         except IndexError:
             LOG.error("Error parsing array, pool, SLO and workload.")
             return false_ret
@@ -2415,9 +2508,11 @@
               % {'targetSlo': target_slo, 'targetWorkload': target_workload})
         if target_combination == emc_fast_setting:
-            # Check if migration is from compression to non compression
-            # or vice versa
-            if not do_change_compression:
+            # Check if migration is to change compression
+            # or replication types
+            action_rqd = (do_change_compression or do_change_replication)
+            if not action_rqd:
                 LOG.warning(
                     "No action required. Volume: %(volume_name)s is "
                     "already part of slo/workload combination: "
@@ -3632,8 +3727,8 @@
                 array, add_device_ids, vol_grp_name, interval_retries_dict)
             if group.is_replicated:
                 # Add remote volumes to remote storage group
-                self._add_remote_vols_to_volume_group(
-                    array, add_vols, group, interval_retries_dict)
+                self.masking.add_remote_vols_to_volume_group(
+                    add_vols, group, interval_retries_dict)
         # Remove volume(s) from the group
         if remove_device_ids:
             self.masking.remove_volumes_from_storage_group(
@@ -3655,34 +3750,6 @@
 
         return model_update, None, None
 
-    def _add_remote_vols_to_volume_group(
-            self, array, volumes, group,
-            extra_specs, rep_driver_data=None):
-        """Add the remote volumes to their volume group.
-
-        :param array: the array serial number
-        :param volumes: list of volumes
-        :param group: the id of the group
-        :param extra_specs: the extra specifications
-        :param rep_driver_data: replication driver data, optional
-        """
-        remote_device_list = []
-        __, remote_array = self.get_rdf_details(array)
-        for vol in volumes:
-            try:
-                remote_loc = ast.literal_eval(vol.replication_driver_data)
-            except (ValueError, KeyError):
-                remote_loc = ast.literal_eval(rep_driver_data)
-            founddevice_id = self.rest.check_volume_device_id(
-                remote_array, remote_loc['device_id'], vol.id)
-            if founddevice_id is not None:
-                remote_device_list.append(founddevice_id)
-        group_name = self.provision.get_or_create_volume_group(
-            remote_array, group, extra_specs)
-        self.masking.add_volumes_to_storage_group(
-            remote_array, remote_device_list, group_name, extra_specs)
-        LOG.info("Added volumes to remote volume group.")
-
     def _remove_remote_vols_from_volume_group(
             self, array, volumes, group, extra_specs):
         """Remove the remote volumes from their volume group.
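Note on the retype flow above: the replication handling added to
_migrate_volume reduces to a four-way decision on the source and target
replication flags. The sketch below summarises that branch structure; it is
illustrative only — decide_replication_action and its return labels are
invented for this note and are not part of the driver.

    # Hypothetical summary of the branches added to
    # VMAXCommon._migrate_volume; not driver code.
    def decide_replication_action(was_rep_enabled, is_rep_enabled):
        if was_rep_enabled and not is_rep_enabled:
            # Break the RDF pairing and clean up the remote volume,
            # then report replication as disabled in the model update.
            return 'disable_replication'
        if was_rep_enabled and is_rep_enabled:
            # Retype the local volume, then the remote one
            # (_retype_remote_volume).
            return 'retype_local_and_remote'
        if is_rep_enabled:
            # setup_volume_replication places the volume in the correct
            # storage group, so the retype can return early.
            return 'enable_replication'
        # Neither side is replicated: plain storage-group retype.
        return 'retype_local_only'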
diff --git a/cinder/volume/drivers/dell_emc/vmax/fc.py b/cinder/volume/drivers/dell_emc/vmax/fc.py
index 2813864b69d..51aac85652e 100644
--- a/cinder/volume/drivers/dell_emc/vmax/fc.py
+++ b/cinder/volume/drivers/dell_emc/vmax/fc.py
@@ -90,9 +90,11 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
               - Support for manage/unmanage snapshots
                 (vmax-manage-unmanage-snapshot)
               - Support for revert to volume snapshot
+        3.2.0 - Support for retyping replicated volumes (bp
+                vmax-retype-replicated-volumes)
     """
 
-    VERSION = "3.1.0"
+    VERSION = "3.2.0"
 
     # ThirdPartySystems wiki
     CI_WIKI_NAME = "EMC_VMAX_CI"
diff --git a/cinder/volume/drivers/dell_emc/vmax/iscsi.py b/cinder/volume/drivers/dell_emc/vmax/iscsi.py
index a9c1921c8fc..96410cab538 100644
--- a/cinder/volume/drivers/dell_emc/vmax/iscsi.py
+++ b/cinder/volume/drivers/dell_emc/vmax/iscsi.py
@@ -95,9 +95,11 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
               - Support for manage/unmanage snapshots
                 (vmax-manage-unmanage-snapshot)
              - Support for revert to volume snapshot
+        3.2.0 - Support for retyping replicated volumes (bp
+                vmax-retype-replicated-volumes)
     """
 
-    VERSION = "3.1.0"
+    VERSION = "3.2.0"
 
     # ThirdPartySystems wiki
     CI_WIKI_NAME = "EMC_VMAX_CI"
diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py
index 69d03a0a0a7..97dff7095a2 100644
--- a/cinder/volume/drivers/dell_emc/vmax/masking.py
+++ b/cinder/volume/drivers/dell_emc/vmax/masking.py
@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import ast
 import time
 
 from oslo_log import log as logging
@@ -23,6 +24,7 @@
 from cinder import exception
 from cinder.i18n import _
 from cinder.volume.drivers.dell_emc.vmax import provision
 from cinder.volume.drivers.dell_emc.vmax import utils
+from cinder.volume import utils as volume_utils
 
 LOG = logging.getLogger(__name__)
@@ -1454,12 +1456,59 @@
         # Need to check if the volume needs to be returned to a
         # generic volume group. This may be necessary in a force-detach
         # situation.
-        if volume.group_id is not None:
-            vol_grp_name = self.provision.get_or_create_volume_group(
-                serial_number, volume.group, extra_specs)
-            self._check_adding_volume_to_storage_group(
-                serial_number, device_id,
-                vol_grp_name, volume_name, extra_specs)
+        self.return_volume_to_volume_group(
+            serial_number, volume, device_id, volume_name, extra_specs)
+
+    def return_volume_to_volume_group(self, serial_number, volume,
+                                      device_id, volume_name, extra_specs):
+        """Return a volume to its volume group, if required.
+
+        :param serial_number: the array serial number
+        :param volume: the volume object
+        :param device_id: the device id
+        :param volume_name: the volume name
+        :param extra_specs: the extra specifications
+        """
+        if (volume.group_id is not None and
+                (volume_utils.is_group_a_cg_snapshot_type(volume.group)
+                 or volume.group.is_replicated)):
+            vol_grp_name = self.provision.get_or_create_volume_group(
+                serial_number, volume.group, extra_specs)
+            self._check_adding_volume_to_storage_group(
+                serial_number, device_id,
+                vol_grp_name, volume_name, extra_specs)
+            if volume.group.is_replicated:
+                self.add_remote_vols_to_volume_group(
+                    volume, volume.group, extra_specs)
+
+    def add_remote_vols_to_volume_group(
+            self, volumes, group, extra_specs, rep_driver_data=None):
+        """Add the remote volumes to their volume group.
+
+        :param volumes: list of volumes (a single volume is also accepted)
+        :param group: the group object
+        :param extra_specs: the extra specifications
+        :param rep_driver_data: replication driver data, optional
+        """
+        remote_device_list = []
+        remote_array = None
+        if not isinstance(volumes, list):
+            volumes = [volumes]
+        for vol in volumes:
+            try:
+                remote_loc = ast.literal_eval(vol.replication_driver_data)
+            except (ValueError, KeyError):
+                remote_loc = ast.literal_eval(rep_driver_data)
+            remote_array = remote_loc['array']
+            founddevice_id = self.rest.check_volume_device_id(
+                remote_array, remote_loc['device_id'], vol.id)
+            if founddevice_id is not None:
+                remote_device_list.append(founddevice_id)
+        group_name = self.provision.get_or_create_volume_group(
+            remote_array, group, extra_specs)
+        self.add_volumes_to_storage_group(
+            remote_array, remote_device_list, group_name, extra_specs)
+        LOG.info("Added volumes to remote volume group.")
 
     def get_or_create_default_storage_group(
             self, serial_number, srp, slo, workload, extra_specs,
diff --git a/cinder/volume/drivers/dell_emc/vmax/utils.py b/cinder/volume/drivers/dell_emc/vmax/utils.py
index dc4f1a4757a..6c379062d64 100644
--- a/cinder/volume/drivers/dell_emc/vmax/utils.py
+++ b/cinder/volume/drivers/dell_emc/vmax/utils.py
@@ -495,6 +495,16 @@
         else:
             return True
 
+    def change_replication(self, vol_is_replicated, new_type):
+        """Check if volume types have different replication status.
+
+        :param vol_is_replicated: True if the source volume is replicated
+        :param new_type: the target volume type
+        :returns: bool -- True if the replication status differs
+        """
+        is_tgt_rep = self.is_replication_enabled(new_type['extra_specs'])
+        return vol_is_replicated != is_tgt_rep
+
     @staticmethod
     def is_replication_enabled(extra_specs):
         """Check if replication is to be enabled.
diff --git a/releasenotes/notes/vmax-retype-replicated-volumes-325be6e5fd626819.yaml b/releasenotes/notes/vmax-retype-replicated-volumes-325be6e5fd626819.yaml
new file mode 100644
index 00000000000..863ecacd05c
--- /dev/null
+++ b/releasenotes/notes/vmax-retype-replicated-volumes-325be6e5fd626819.yaml
@@ -0,0 +1,3 @@
+---
+features:
+  - Support for retype (storage-assisted migration) of replicated volumes on the VMAX cinder driver.
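For reference, the new VMAXUtils.change_replication check is an inequality
(effectively an XOR) of the source and target replication flags: replication
work is needed only when exactly one side is replicated. A minimal,
driver-free illustration follows — the 'replication_enabled' extra-spec
layout used here is simplified for the example and is not the exact
convention the driver parses.

    # Standalone sketch of the change_replication truth table.
    def is_replication_enabled(extra_specs):
        # Simplified stand-in for VMAXUtils.is_replication_enabled.
        return bool(extra_specs.get('replication_enabled'))

    def change_replication(vol_is_replicated, new_type):
        is_tgt_rep = is_replication_enabled(new_type['extra_specs'])
        return vol_is_replicated != is_tgt_rep

    # Replicated -> unreplicated: change required.
    assert change_replication(True, {'extra_specs': {}})
    # Replicated -> replicated: no change required.
    assert not change_replication(
        True, {'extra_specs': {'replication_enabled': True}})
    # Unreplicated -> replicated: change required.
    assert change_replication(
        False, {'extra_specs': {'replication_enabled': True}})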