diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py index 84f1913627f..3f50238ff62 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax.py @@ -974,15 +974,15 @@ class PowerMaxCommonData(object): vmax_model = u'VMAX250F' version_dict = { - 'unisphere_version': unisphere_version, + 'unisphere_for_powermax_version': unisphere_version, 'openstack_release': openstack_release, 'openstack_version': openstack_version, 'python_version': python_version, - 'vmax_driver_version': vmax_driver_version, - 'platform': platform, - 'vmax_firmware_version': vmax_firmware_version, + 'powermax_cinder_driver_version': vmax_driver_version, + 'openstack_platform': platform, + 'storage_firmware_version': vmax_firmware_version, 'serial_number': array, - 'vmax_model': vmax_model} + 'storage_model': vmax_model} u4p_failover_config = { 'u4p_failover_backoff_factor': '2', @@ -1039,7 +1039,7 @@ class FakeResponse(object): else: raise ValueError - def status_code(self): + def get_status_code(self): return self.status_code() def raise_for_status(self): @@ -1358,18 +1358,17 @@ class PowerMaxUtilsTest(test.TestCase): self.data.test_volume.volume_type_id = ( self.data.test_volume_type.id) self.utils.get_volumetype_extra_specs(self.data.test_volume) - volume_types.get_volume_type_extra_specs.assert_called_once_with( + type_mock.assert_called_once_with( self.data.test_volume_type.id) type_mock.reset_mock() # path 2: volume_type_id passed in self.utils.get_volumetype_extra_specs(self.data.test_volume, '123') - volume_types.get_volume_type_extra_specs.assert_called_once_with( + type_mock.assert_called_once_with( '123') type_mock.reset_mock() # path 3: no type_id self.utils.get_volumetype_extra_specs(self.data.test_clone_volume) - (volume_types.get_volume_type_extra_specs. 
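The dominant change throughout this test module is binding the replacement mock with "as" and asserting on that local name, rather than reaching back through the patched attribute (e.g. self.rest.create_resource.assert_called_once_with(...)) after the call. Below is a minimal, self-contained sketch of the pattern; Rest, create_resource and create_storage_group here are illustrative stand-ins, not the driver classes. It also shows the reset_mock() idiom used above to clear call history between the separate paths of a single test.

import unittest
from unittest import mock


class Rest(object):
    """Illustrative stand-in for the driver's REST client."""

    def create_resource(self, array, payload):
        raise RuntimeError('should never run in the unit test')

    def create_storage_group(self, array, payload):
        # Production-style method that delegates to create_resource().
        return self.create_resource(array, payload)


class PatchObjectAsTest(unittest.TestCase):
    def test_create_storage_group(self):
        rest = Rest()
        # 'as mock_create' captures the MagicMock that patch.object
        # installed; assertions on it are unambiguous and keep working
        # after the with-block restores the original attribute.
        with mock.patch.object(rest, 'create_resource') as mock_create:
            rest.create_storage_group('000123456789', {'someKey': 'someValue'})
            mock_create.assert_called_once_with(
                '000123456789', {'someKey': 'someValue'})

            # reset_mock() clears recorded calls so a later
            # assert_called_once_with() checks only the next path.
            mock_create.reset_mock()
            rest.create_storage_group('000123456789', {'otherKey': 'v'})
            mock_create.assert_called_once_with(
                '000123456789', {'otherKey': 'v'})


if __name__ == '__main__':
    unittest.main()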
- assert_not_called()) + type_mock.assert_not_called() def test_get_volumetype_extra_specs_exception(self): extra_specs = self.utils.get_volumetype_extra_specs( @@ -1847,13 +1846,13 @@ class PowerMaxRestTest(test.TestCase): def test_rest_request_handle_failover(self): response = FakeResponse(200, 'Success') - with mock.patch.object(self.rest, '_handle_u4p_failover'): + with mock.patch.object(self.rest, '_handle_u4p_failover')as mock_fail: with mock.patch.object(self.rest.session, 'request', side_effect=[requests.ConnectionError, response]): self.rest.u4p_failover_enabled = True self.rest.request('/fake_uri', 'GET') - self.rest._handle_u4p_failover.assert_called_once() + mock_fail.assert_called_once() def test_wait_for_job_complete(self): rc, job, status, task = self.rest.wait_for_job_complete( @@ -2065,10 +2064,10 @@ class PowerMaxRestTest(test.TestCase): self.assertEqual(self.data.sg_list, sg_list) def test_create_storage_group(self): - with mock.patch.object(self.rest, 'create_resource'): + with mock.patch.object(self.rest, 'create_resource') as mock_create: payload = {'someKey': 'someValue'} self.rest._create_storagegroup(self.data.array, payload) - self.rest.create_resource.assert_called_once_with( + mock_create.assert_called_once_with( self.data.array, 'sloprovisioning', 'storagegroup', payload) def test_create_storage_group_success(self): @@ -2080,8 +2079,9 @@ class PowerMaxRestTest(test.TestCase): def test_create_storage_group_next_gen(self): with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): - with mock.patch.object(self.rest, '_create_storagegroup', - return_value=(200, self.data.job_list[0])): + with mock.patch.object( + self.rest, '_create_storagegroup', + return_value=(200, self.data.job_list[0])) as mock_sg: self.rest.create_storage_group( self.data.array, self.data.storagegroup_name_f, self.data.srp, self.data.slo, self.data.workload, @@ -2096,7 +2096,7 @@ class PowerMaxRestTest(test.TestCase): "volumeAttribute": { "volume_size": "0", "capacityUnit": "GB"}}]} - self.rest._create_storagegroup.assert_called_once_with( + mock_sg.assert_called_once_with( self.data.array, payload) def test_create_storage_group_failed(self): @@ -2113,8 +2113,9 @@ class PowerMaxRestTest(test.TestCase): self.assertEqual(self.data.default_sg_no_slo, sg_name) def test_create_storage_group_compression_disabled(self): - with mock.patch.object(self.rest, '_create_storagegroup', - return_value=(200, self.data.job_list[0])): + with mock.patch.object( + self.rest, '_create_storagegroup', + return_value=(200, self.data.job_list[0]))as mock_sg: self.rest.create_storage_group( self.data.array, self.data.default_sg_compr_disabled, self.data.srp, self.data.slo, self.data.workload, @@ -2130,17 +2131,16 @@ class PowerMaxRestTest(test.TestCase): "volume_size": "0", "capacityUnit": "GB"}, "noCompression": "true"}]} - self.rest._create_storagegroup.assert_called_once_with( - self.data.array, payload) + mock_sg.assert_called_once_with(self.data.array, payload) def test_modify_storage_group(self): array = self.data.array storagegroup = self.data.defaultstoragegroup_name payload = {'someKey': 'someValue'} version = self.data.u4v_version - with mock.patch.object(self.rest, 'modify_resource'): + with mock.patch.object(self.rest, 'modify_resource') as mock_modify: self.rest.modify_storage_group(array, storagegroup, payload) - self.rest.modify_resource.assert_called_once_with( + mock_modify.assert_called_once_with( self.data.array, 'sloprovisioning', 'storagegroup', payload, version, 
resource_name=storagegroup) @@ -2174,12 +2174,12 @@ class PowerMaxRestTest(test.TestCase): operation = 'Add volume to sg' status_code = 202 message = self.data.job_list[0] - with mock.patch.object(self.rest, 'wait_for_job'): + with mock.patch.object(self.rest, 'wait_for_job') as mock_wait: device_id = self.data.device_id self.rest.add_vol_to_sg( self.data.array, self.data.storagegroup_name_f, device_id, self.data.extra_specs) - self.rest.wait_for_job.assert_called_with( + mock_wait.assert_called_with( operation, status_code, message, self.data.extra_specs) def test_add_vol_to_sg_failed(self): @@ -2194,12 +2194,12 @@ class PowerMaxRestTest(test.TestCase): operation = 'Remove vol from sg' status_code = 202 message = self.data.job_list[0] - with mock.patch.object(self.rest, 'wait_for_job'): + with mock.patch.object(self.rest, 'wait_for_job') as mock_wait: device_id = self.data.device_id self.rest.remove_vol_from_sg( self.data.array, self.data.storagegroup_name_f, device_id, self.data.extra_specs) - self.rest.wait_for_job.assert_called_with( + mock_wait.assert_called_with( operation, status_code, message, self.data.extra_specs) @mock.patch.object(time, 'sleep') @@ -2233,10 +2233,11 @@ class PowerMaxRestTest(test.TestCase): operation = 'delete storagegroup resource' status_code = 204 message = None - with mock.patch.object(self.rest, 'check_status_code_success'): + with mock.patch.object( + self.rest, 'check_status_code_success') as mock_check: self.rest.delete_storage_group( self.data.array, self.data.storagegroup_name_f) - self.rest.check_status_code_success.assert_called_with( + mock_check.assert_called_with( operation, status_code, message) def test_is_child_sg_in_parent_sg(self): @@ -2254,12 +2255,13 @@ class PowerMaxRestTest(test.TestCase): "expandStorageGroupParam": { "addExistingStorageGroupParam": { "storageGroupId": [self.data.storagegroup_name_f]}}}} - with mock.patch.object(self.rest, 'modify_storage_group', - return_value=(202, self.data.job_list[0])): + with mock.patch.object( + self.rest, 'modify_storage_group', + return_value=(202, self.data.job_list[0])) as mock_modify: self.rest.add_child_sg_to_parent_sg( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) - self.rest.modify_storage_group.assert_called_once_with( + mock_modify.assert_called_once_with( self.data.array, self.data.parent_sg_f, payload) def test_remove_child_sg_from_parent_sg(self): @@ -2267,12 +2269,13 @@ class PowerMaxRestTest(test.TestCase): "removeStorageGroupParam": { "storageGroupId": [self.data.storagegroup_name_f], "force": 'true'}}} - with mock.patch.object(self.rest, 'modify_storage_group', - return_value=(202, self.data.job_list[0])): + with mock.patch.object( + self.rest, 'modify_storage_group', + return_value=(202, self.data.job_list[0])) as mock_modify: self.rest.remove_child_sg_from_parent_sg( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) - self.rest.modify_storage_group.assert_called_once_with( + mock_modify.assert_called_once_with( self.data.array, self.data.parent_sg_f, payload) def test_get_volume_list(self): @@ -2304,9 +2307,9 @@ class PowerMaxRestTest(test.TestCase): array = self.data.array device_id = self.data.device_id payload = {'someKey': 'someValue'} - with mock.patch.object(self.rest, 'modify_resource'): + with mock.patch.object(self.rest, 'modify_resource') as mock_modify: self.rest._modify_volume(array, device_id, payload) - self.rest.modify_resource.assert_called_once_with( + 
mock_modify.assert_called_once_with( self.data.array, 'sloprovisioning', 'volume', payload, resource_name=device_id) @@ -2327,24 +2330,26 @@ class PowerMaxRestTest(test.TestCase): "volumeAttribute": { "volume_size": new_size, "capacityUnit": "GB"}}}} - with mock.patch.object(self.rest, '_modify_volume', - return_value=(202, self.data.job_list[0])): + with mock.patch.object( + self.rest, '_modify_volume', + return_value=(202, self.data.job_list[0])) as mock_modify: self.rest.extend_volume(self.data.array, device_id, new_size, self.data.extra_specs) - self.rest._modify_volume.assert_called_once_with( + mock_modify.assert_called_once_with( self.data.array, device_id, extend_vol_payload) def test_delete_volume(self): device_id = self.data.device_id - with mock.patch.object(self.rest, 'delete_resource'),\ - mock.patch.object( - self.rest, '_modify_volume', side_effect=[ - None, None, None, exception.VolumeBackendAPIException]): + vb_except = exception.VolumeBackendAPIException + with mock.patch.object(self.rest, 'delete_resource') as mock_delete,\ + mock.patch.object( + self.rest, '_modify_volume', + side_effect=[None, None, None, vb_except]) as mock_modify: for x in range(0, 2): self.rest.delete_volume(self.data.array, device_id) - mod_call_count = self.rest._modify_volume.call_count + mod_call_count = mock_modify.call_count self.assertEqual(4, mod_call_count) - self.rest.delete_resource.assert_called_once_with( + mock_delete.assert_called_once_with( self.data.array, 'sloprovisioning', 'volume', device_id) def test_rename_volume(self): @@ -2362,7 +2367,7 @@ class PowerMaxRestTest(test.TestCase): self.data.array, device_id, payload) mock_mod.reset_mock() self.rest.rename_volume(self.data.array, device_id, None) - self.rest._modify_volume.assert_called_once_with( + mock_mod.assert_called_once_with( self.data.array, device_id, payload2) def test_check_volume_device_id(self): @@ -2610,20 +2615,21 @@ class PowerMaxRestTest(test.TestCase): init_group_name = self.data.initiatorgroup_name_f init_list = [self.data.wwpn1] extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'create_resource', - return_value=(202, self.data.job_list[0])): + with mock.patch.object( + self.rest, 'create_resource', + return_value=(202, self.data.job_list[0])) as mock_create: payload = ({"executionOption": "ASYNCHRONOUS", "hostId": init_group_name, "initiatorId": init_list}) self.rest.create_initiator_group( self.data.array, init_group_name, init_list, extra_specs) - self.rest.create_resource.assert_called_once_with( + mock_create.assert_called_once_with( self.data.array, 'sloprovisioning', 'host', payload) def test_delete_initiator_group(self): - with mock.patch.object(self.rest, 'delete_resource'): + with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_initiator_group( self.data.array, self.data.initiatorgroup_name_f) - self.rest.delete_resource.assert_called_once_with( + mock_delete.assert_called_once_with( self.data.array, 'sloprovisioning', 'host', self.data.initiatorgroup_name_f) @@ -2722,8 +2728,9 @@ class PowerMaxRestTest(test.TestCase): port_group_name = self.data.port_group_name_f init_group_name = self.data.initiatorgroup_name_f extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'create_resource', - return_value=(202, self.data.job_list[0])): + with mock.patch.object( + self.rest, 'create_resource', + return_value=(202, self.data.job_list[0])) as mock_create: payload = ({"executionOption": "ASYNCHRONOUS", "portGroupSelection": { 
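The reworked test_delete_volume above leans on two unittest.mock behaviours: several patches can be stacked in one with statement, each bound to its own name, and a list passed as side_effect is consumed one item per call, with an exception class in the sequence (here exception.VolumeBackendAPIException) raised instead of returned. A small sketch of the side_effect behaviour, using a builtin exception so it runs standalone:

from unittest import mock

# Four queued results: three normal returns, then an exception class.
modify = mock.Mock(side_effect=[None, None, None, ValueError])

modify('vol-1')   # 1st call -> None
modify('vol-2')   # 2nd call -> None
modify('vol-3')   # 3rd call -> None
try:
    modify('vol-4')   # 4th call: the class in the sequence is raised
except ValueError:
    pass

assert modify.call_count == 4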
"useExistingPortGroupParam": { @@ -2738,14 +2745,14 @@ class PowerMaxRestTest(test.TestCase): self.rest.create_masking_view( self.data.array, maskingview_name, storagegroup_name, port_group_name, init_group_name, extra_specs) - self.rest.create_resource.assert_called_once_with( + mock_create.assert_called_once_with( self.data.array, 'sloprovisioning', 'maskingview', payload) def test_delete_masking_view(self): - with mock.patch.object(self.rest, 'delete_resource'): + with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_masking_view( self.data.array, self.data.masking_view_name_f) - self.rest.delete_resource.assert_called_once_with( + mock_delete.assert_called_once_with( self.data.array, 'sloprovisioning', 'maskingview', self.data.masking_view_name_f) @@ -2780,11 +2787,12 @@ class PowerMaxRestTest(test.TestCase): "bothSides": 'false', "star": 'false', "force": 'false'} resource_type = 'snapshot/%(snap)s' % {'snap': snap_name} - with mock.patch.object(self.rest, 'create_resource', - return_value=(202, self.data.job_list[0])): + with mock.patch.object( + self.rest, 'create_resource', + return_value=(202, self.data.job_list[0])) as mock_create: self.rest.create_volume_snap( self.data.array, snap_name, device_id, extra_specs) - self.rest.create_resource.assert_called_once_with( + mock_create.assert_called_once_with( self.data.array, 'replication', resource_type, payload, private='/private') ttl = 1 @@ -2792,11 +2800,12 @@ class PowerMaxRestTest(test.TestCase): "bothSides": 'false', "star": 'false', "force": 'false', "timeToLive": ttl, "timeInHours": "true"} - with mock.patch.object(self.rest, 'create_resource', - return_value=(202, self.data.job_list[0])): + with mock.patch.object( + self.rest, 'create_resource', + return_value=(202, self.data.job_list[0])) as mock_create: self.rest.create_volume_snap( self.data.array, snap_name, device_id, extra_specs, ttl) - self.rest.create_resource.assert_called_once_with( + mock_create.assert_called_once_with( self.data.array, 'replication', resource_type, payload, private='/private') @@ -2827,7 +2836,7 @@ class PowerMaxRestTest(test.TestCase): payload["action"] = "Link" self.rest.modify_volume_snap( array, source_id, target_id, snap_name, extra_specs, link=True) - self.rest.modify_resource.assert_called_once_with( + mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload, resource_name=snap_name, private='/private') # unlink @@ -2836,7 +2845,7 @@ class PowerMaxRestTest(test.TestCase): self.rest.modify_volume_snap( array, source_id, target_id, snap_name, extra_specs, unlink=True) - self.rest.modify_resource.assert_called_once_with( + mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload, resource_name=snap_name, private='/private') # restore @@ -2845,7 +2854,7 @@ class PowerMaxRestTest(test.TestCase): self.rest.modify_volume_snap( array, source_id, "", snap_name, extra_specs, unlink=False, restore=True) - self.rest.modify_resource.assert_called_once_with( + mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload_restore, resource_name=snap_name, private='/private') # link or unlink, list of volumes @@ -2855,7 +2864,7 @@ class PowerMaxRestTest(test.TestCase): array, "", "", snap_name, extra_specs, unlink=False, link=True, list_volume_pairs=[(source_id, target_id)]) - self.rest.modify_resource.assert_called_once_with( + mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload, resource_name=snap_name, private='/private') # none 
selected @@ -2863,7 +2872,7 @@ class PowerMaxRestTest(test.TestCase): self.rest.modify_volume_snap( array, source_id, target_id, snap_name, extra_specs) - self.rest.modify_resource.assert_not_called() + mock_modify.assert_not_called() def test_delete_volume_snap(self): array = self.data.array @@ -2873,10 +2882,10 @@ class PowerMaxRestTest(test.TestCase): payload = {"deviceNameListSource": [{"name": source_device_id}], "generation": 0} generation = 0 - with mock.patch.object(self.rest, 'delete_resource'): + with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_volume_snap( array, snap_name, source_device_id, generation) - self.rest.delete_resource.assert_called_once_with( + mock_delete.assert_called_once_with( array, 'replication', 'snapshot', snap_name, payload=payload, private='/private') @@ -2887,10 +2896,10 @@ class PowerMaxRestTest(test.TestCase): source_device_id = self.data.device_id payload = {"deviceNameListSource": [{"name": source_device_id}], "restore": True, "generation": 0} - with mock.patch.object(self.rest, 'delete_resource'): + with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_volume_snap( array, snap_name, source_device_id, restored=True) - self.rest.delete_resource.assert_called_once_with( + mock_delete.assert_called_once_with( array, 'replication', 'snapshot', snap_name, payload=payload, private='/private') @@ -3502,15 +3511,17 @@ class PowerMaxProvisionTest(test.TestCase): extra_specs = self.data.extra_specs # TTL of 1 hours ttl = 1 - with mock.patch.object(self.provision, 'create_volume_snapvx'): - with mock.patch.object(self.provision.rest, 'modify_volume_snap'): + with mock.patch.object( + self.provision, 'create_volume_snapvx') as mock_create_snapvx: + with mock.patch.object( + self.provision.rest, 'modify_volume_snap') as mock_modify: self.provision.create_volume_replica( array, source_device_id, target_device_id, snap_name, extra_specs, create_snap=True) - self.provision.rest.modify_volume_snap.assert_called_once_with( + mock_modify.assert_called_once_with( array, source_device_id, target_device_id, snap_name, extra_specs, link=True) - self.provision.create_volume_snapvx.assert_called_once_with( + mock_create_snapvx.assert_called_once_with( array, source_device_id, snap_name, extra_specs, ttl=ttl) def test_create_volume_replica_create_snap_false(self): @@ -3519,15 +3530,17 @@ class PowerMaxProvisionTest(test.TestCase): target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs - with mock.patch.object(self.provision, 'create_volume_snapvx'): - with mock.patch.object(self.provision.rest, 'modify_volume_snap'): + with mock.patch.object( + self.provision, 'create_volume_snapvx') as mock_create_snapvx: + with mock.patch.object( + self.provision.rest, 'modify_volume_snap') as mock_modify: self.provision.create_volume_replica( array, source_device_id, target_device_id, snap_name, extra_specs, create_snap=False) - self.provision.rest.modify_volume_snap.assert_called_once_with( + mock_modify.assert_called_once_with( array, source_device_id, target_device_id, snap_name, extra_specs, link=True) - self.provision.create_volume_snapvx.assert_not_called() + mock_create_snapvx.assert_not_called() def test_break_replication_relationship(self): array = self.data.array @@ -3535,15 +3548,15 @@ class PowerMaxProvisionTest(test.TestCase): target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = 
self.data.extra_specs - with mock.patch.object(self.provision.rest, 'modify_volume_snap'): + with mock.patch.object( + self.provision.rest, 'modify_volume_snap') as mock_modify: self.provision.break_replication_relationship( array, target_device_id, source_device_id, snap_name, extra_specs) - (self.provision.rest.modify_volume_snap. - assert_called_once_with( - array, source_device_id, target_device_id, - snap_name, extra_specs, - list_volume_pairs=None, unlink=True, generation=0)) + mock_modify.assert_called_once_with( + array, source_device_id, target_device_id, + snap_name, extra_specs, list_volume_pairs=None, + unlink=True, generation=0) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=test_utils.ZeroIntervalLoopingCall) @@ -3831,14 +3844,15 @@ class PowerMaxProvisionTest(test.TestCase): array = self.data.array snap_name = self.data.group_snapshot_name source_group_name = self.data.storagegroup_name_source + extra_specs = self.data.extra_specs + src_dev_ids = [self.data.device_id] with mock.patch.object( self.provision, 'delete_group_replica') as mock_delete_replica: - self.provision.delete_group_replica(array, - snap_name, - source_group_name) + self.provision.delete_group_replica( + array, snap_name, source_group_name, src_dev_ids, extra_specs) mock_delete_replica.assert_called_once_with( - array, snap_name, source_group_name) + array, snap_name, source_group_name, src_dev_ids, extra_specs) def test_link_and_break_replica(self): array = self.data.array @@ -4084,10 +4098,9 @@ class PowerMaxCommonTest(test.TestCase): self.assertEqual(ref_model_update, model_update) def test_delete_volume(self): - with mock.patch.object(self.common, '_delete_volume'): + with mock.patch.object(self.common, '_delete_volume') as mock_delete: self.common.delete_volume(self.data.test_volume) - self.common._delete_volume.assert_called_once_with( - self.data.test_volume) + mock_delete.assert_called_once_with(self.data.test_volume) def test_create_snapshot(self): ref_model_update = ( @@ -4101,20 +4114,22 @@ class PowerMaxCommonTest(test.TestCase): snap_name = self.data.snap_location['snap_name'] sourcedevice_id = self.data.snap_location['source_id'] generation = 0 - with mock.patch.object(self.provision, 'delete_volume_snap'): + with mock.patch.object( + self.provision, 'delete_volume_snap') as mock_delete_snap: self.common.delete_snapshot(self.data.test_snapshot, self.data.test_volume) - self.provision.delete_volume_snap.assert_called_once_with( + mock_delete_snap.assert_called_once_with( self.data.array, snap_name, [sourcedevice_id], restored=False, generation=generation) def test_delete_snapshot_not_found(self): with mock.patch.object(self.common, '_parse_snap_info', return_value=(None, 'Something')): - with mock.patch.object(self.provision, 'delete_volume_snap'): + with mock.patch.object( + self.provision, 'delete_volume_snap') as mock_delete_snap: self.common.delete_snapshot(self.data.test_snapshot, self.data.test_volume) - self.provision.delete_volume_snap.assert_not_called() + mock_delete_snap.assert_not_called() def test_delete_legacy_snap(self): with mock.patch.object(self.common, '_delete_volume') as mock_del: @@ -4160,9 +4175,9 @@ class PowerMaxCommonTest(test.TestCase): extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f connector = self.data.connector - with mock.patch.object(self.common, '_remove_members'): + with mock.patch.object(self.common, '_remove_members') as mock_remove: 
self.common._unmap_lun(volume, connector) - self.common._remove_members.assert_called_once_with( + mock_remove.assert_called_once_with( array, volume, device_id, extra_specs, connector, False, async_grp=None) @@ -4189,11 +4204,11 @@ class PowerMaxCommonTest(test.TestCase): extra_specs['qos'] = { 'total_iops_sec': '4000', 'DistributionType': 'Always'} connector = self.data.connector - with mock.patch.object(self.common, '_remove_members'): + with mock.patch.object(self.common, '_remove_members') as mock_remove: with mock.patch.object(self.utils, 'get_volumetype_extra_specs', return_value=extra_specs): self.common._unmap_lun(volume, connector) - self.common._remove_members.assert_called_once_with( + mock_remove.assert_called_once_with( array, volume, device_id, extra_specs, connector, False, async_grp=None) @@ -4202,9 +4217,10 @@ class PowerMaxCommonTest(test.TestCase): connector = self.data.connector with mock.patch.object(self.common, 'find_host_lun_id', return_value=({}, False)): - with mock.patch.object(self.common, '_remove_members'): + with mock.patch.object( + self.common, '_remove_members') as mock_remove: self.common._unmap_lun(volume, connector) - self.common._remove_members.assert_not_called() + mock_remove.assert_not_called() def test_unmap_lun_connector_is_none(self): array = self.data.array @@ -4213,9 +4229,9 @@ class PowerMaxCommonTest(test.TestCase): extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs['storagetype:portgroupname'] = ( self.data.port_group_name_f) - with mock.patch.object(self.common, '_remove_members'): + with mock.patch.object(self.common, '_remove_members') as mock_remove: self.common._unmap_lun(volume, None) - self.common._remove_members.assert_called_once_with( + mock_remove.assert_called_once_with( array, volume, device_id, extra_specs, None, False, async_grp=None) @@ -4338,9 +4354,9 @@ class PowerMaxCommonTest(test.TestCase): def test_terminate_connection(self): volume = self.data.test_volume connector = self.data.connector - with mock.patch.object(self.common, '_unmap_lun'): + with mock.patch.object(self.common, '_unmap_lun') as mock_unmap: self.common.terminate_connection(volume, connector) - self.common._unmap_lun.assert_called_once_with( + mock_unmap.assert_called_once_with( volume, connector) @mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array', @@ -4691,18 +4707,20 @@ class PowerMaxCommonTest(test.TestCase): ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f volume = self.data.test_volume with mock.patch.object(self.common, '_sync_check'): - with mock.patch.object(self.common, '_delete_from_srp'): + with mock.patch.object( + self.common, '_delete_from_srp') as mock_delete: self.common._delete_volume(volume) - self.common._delete_from_srp.assert_called_once_with( + mock_delete.assert_called_once_with( array, device_id, volume_name, ref_extra_specs) def test_delete_volume_not_found(self): volume = self.data.test_volume with mock.patch.object(self.common, '_find_device_on_array', return_value=None): - with mock.patch.object(self.common, '_delete_from_srp'): + with mock.patch.object( + self.common, '_delete_from_srp') as mock_delete: self.common._delete_volume(volume) - self.common._delete_from_srp.assert_not_called() + mock_delete.assert_not_called() def test_create_volume_success(self): volume_name = '1' @@ -4722,15 +4740,14 @@ class PowerMaxCommonTest(test.TestCase): return_value=True): with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): - with mock.patch.object(self.masking, - 
'get_or_create_default_storage_group'): + with mock.patch.object( + self.masking, + 'get_or_create_default_storage_group') as mock_get: self.common._create_volume( volume_name, volume_size, extra_specs) - (self.masking.get_or_create_default_storage_group - .assert_called_once_with(extra_specs['array'], - extra_specs[utils.SRP], - extra_specs[utils.SLO], - 'NONE', extra_specs, True)) + mock_get.assert_called_once_with( + extra_specs['array'], extra_specs[utils.SRP], + extra_specs[utils.SLO], 'NONE', extra_specs, True) def test_create_volume_failed(self): volume_name = self.data.test_volume.name @@ -4739,23 +4756,23 @@ class PowerMaxCommonTest(test.TestCase): with mock.patch.object(self.masking, 'get_or_create_default_storage_group', return_value=self.data.failed_resource): - with mock.patch.object(self.rest, 'delete_storage_group'): + with mock.patch.object( + self.rest, 'delete_storage_group') as mock_delete: # path 1: not last vol in sg with mock.patch.object(self.rest, 'get_num_vols_in_sg', return_value=2): self.assertRaises(exception.VolumeBackendAPIException, self.common._create_volume, volume_name, volume_size, extra_specs) - self.rest.delete_storage_group.assert_not_called() + mock_delete.assert_not_called() # path 2: last vol in sg, delete sg with mock.patch.object(self.rest, 'get_num_vols_in_sg', return_value=0): self.assertRaises(exception.VolumeBackendAPIException, self.common._create_volume, volume_name, volume_size, extra_specs) - (self.rest.delete_storage_group. - assert_called_once_with(self.data.array, - self.data.failed_resource)) + mock_delete.assert_called_once_with( + self.data.array, self.data.failed_resource) def test_create_volume_incorrect_slo(self): volume_name = self.data.test_volume.name @@ -4847,14 +4864,14 @@ class PowerMaxCommonTest(test.TestCase): device_id = self.data.failed_resource volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs - with mock.patch.object(self.masking, - 'add_volume_to_default_storage_group'): + with mock.patch.object( + self.masking, + 'add_volume_to_default_storage_group') as mock_add: self.assertRaises(exception.VolumeBackendAPIException, self.common._delete_from_srp, array, device_id, volume_name, extra_specs) - (self.masking.add_volume_to_default_storage_group. 
- assert_called_once_with( - array, device_id, volume_name, extra_specs)) + mock_add.assert_called_once_with( + array, device_id, volume_name, extra_specs) @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', side_effect=[False, True]) @@ -4916,31 +4933,32 @@ class PowerMaxCommonTest(test.TestCase): def test_get_port_group_from_masking_view(self): array = self.data.array maskingview_name = self.data.masking_view_name_f - with mock.patch.object(self.rest, - 'get_element_from_masking_view'): + with mock.patch.object( + self.rest, 'get_element_from_masking_view') as mock_get: self.common.get_port_group_from_masking_view( array, maskingview_name) - self.rest.get_element_from_masking_view.assert_called_once_with( + mock_get.assert_called_once_with( array, maskingview_name, portgroup=True) def test_get_initiator_group_from_masking_view(self): array = self.data.array maskingview_name = self.data.masking_view_name_f - with mock.patch.object(self.rest, - 'get_element_from_masking_view'): + with mock.patch.object( + self.rest, 'get_element_from_masking_view') as mock_get: self.common.get_initiator_group_from_masking_view( array, maskingview_name) - self.rest.get_element_from_masking_view.assert_called_once_with( + mock_get.assert_called_once_with( array, maskingview_name, host=True) def test_get_common_masking_views(self): array = self.data.array portgroup_name = self.data.port_group_name_f initiator_group_name = self.data.initiatorgroup_name_f - with mock.patch.object(self.rest, 'get_common_masking_views'): + with mock.patch.object( + self.rest, 'get_common_masking_views') as mock_get: self.common.get_common_masking_views( array, portgroup_name, initiator_group_name) - self.rest.get_common_masking_views.assert_called_once_with( + mock_get.assert_called_once_with( array, portgroup_name, initiator_group_name) def test_get_ip_and_iqn(self): @@ -4974,14 +4992,14 @@ class PowerMaxCommonTest(test.TestCase): source_device_id = self.data.device_id snap_name = "temp-" + source_device_id + "-snapshot_for_clone" ref_dict = self.data.provider_location - with mock.patch.object(self.utils, 'get_temp_snap_name', - return_value=snap_name): + with mock.patch.object( + self.utils, 'get_temp_snap_name', + return_value=snap_name) as mock_get_snap: clone_dict = self.common._create_replica( array, clone_volume, source_device_id, self.data.extra_specs) self.assertEqual(ref_dict, clone_dict) - self.utils.get_temp_snap_name.assert_called_once_with( - source_device_id) + mock_get_snap.assert_called_once_with(source_device_id) def test_create_replica_failed_cleanup_target(self): array = self.data.array @@ -4990,12 +5008,13 @@ class PowerMaxCommonTest(test.TestCase): snap_name = self.data.failed_resource clone_name = 'OS-' + clone_volume.id extra_specs = self.data.extra_specs - with mock.patch.object(self.common, '_cleanup_target'): + with mock.patch.object( + self.common, '_cleanup_target') as mock_cleanup: self.assertRaises( exception.VolumeBackendAPIException, self.common._create_replica, array, clone_volume, device_id, self.data.extra_specs, snap_name) - self.common._cleanup_target.assert_called_once_with( + mock_cleanup.assert_called_once_with( array, device_id, device_id, clone_name, snap_name, extra_specs) @@ -5006,12 +5025,13 @@ class PowerMaxCommonTest(test.TestCase): snap_name = self.data.failed_resource with mock.patch.object(self.common, '_create_volume', return_value={'device_id': None}): - with mock.patch.object(self.common, '_cleanup_target'): + with mock.patch.object( + self.common, 
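Several tests above use mock.patch.object as a decorator with a list side_effect (for example side_effect=[False, True] on utils.PowerMaxUtils.is_replication_enabled). A self-contained sketch of that decorator form follows; Utils is an invented class for illustration, not the driver's utils module.

import unittest
from unittest import mock


class Utils(object):
    """Illustrative stand-in; not the PowerMax utils class."""

    def is_replication_enabled(self, extra_specs):
        return False


class PatchDecoratorTest(unittest.TestCase):
    # The decorator injects the replacement mock as an extra argument
    # after self; a list side_effect returns one item per call.
    @mock.patch.object(Utils, 'is_replication_enabled',
                       side_effect=[False, True])
    def test_toggles_between_calls(self, mock_enabled):
        utils = Utils()
        self.assertFalse(utils.is_replication_enabled({}))
        self.assertTrue(utils.is_replication_enabled({}))
        self.assertEqual(2, mock_enabled.call_count)


if __name__ == '__main__':
    unittest.main()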
'_cleanup_target') as mock_cleanup: self.assertRaises( exception.VolumeBackendAPIException, self.common._create_replica, array, clone_volume, source_device_id, self.data.extra_specs, snap_name) - self.common._cleanup_target.assert_not_called() + mock_cleanup.assert_not_called() @mock.patch.object( masking.PowerMaxMasking, @@ -5027,15 +5047,15 @@ class PowerMaxCommonTest(test.TestCase): generation = 0 with mock.patch.object(self.rest, 'get_sync_session', return_value='session'): - with mock.patch.object(self.provision, - 'break_replication_relationship'): + with mock.patch.object( + self.provision, + 'break_replication_relationship') as mock_break: self.common._cleanup_target( array, target_device_id, source_device_id, clone_name, snap_name, extra_specs) - (self.provision.break_replication_relationship. - assert_called_with( - array, target_device_id, source_device_id, - snap_name, extra_specs, generation)) + mock_break.assert_called_with( + array, target_device_id, source_device_id, + snap_name, extra_specs, generation) def test_cleanup_target_no_sync(self): array = self.data.array @@ -5047,12 +5067,12 @@ class PowerMaxCommonTest(test.TestCase): extra_specs = self.data.extra_specs with mock.patch.object(self.rest, 'get_sync_session', return_value=None): - with mock.patch.object(self.common, - '_delete_from_srp'): + with mock.patch.object( + self.common, '_delete_from_srp') as mock_delete: self.common._cleanup_target( array, target_device_id, source_device_id, clone_name, snap_name, extra_specs) - self.common._delete_from_srp.assert_called_once_with( + mock_delete.assert_called_once_with( array, target_device_id, clone_name, extra_specs) @@ -5066,14 +5086,12 @@ class PowerMaxCommonTest(test.TestCase): array = self.data.array device_id = self.data.device_id target = self.data.volume_details[1]['volumeId'] - volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs snap_name = 'temp-1' generation = '0' with mock.patch.object(self.rest, 'get_volume_snap', return_value=snap_name): - self.common._sync_check(array, device_id, volume_name, - extra_specs) + self.common._sync_check(array, device_id, extra_specs) mock_break.assert_called_with( array, target, device_id, snap_name, extra_specs, generation) mock_delete.assert_called_with(array, snap_name, @@ -5089,8 +5107,7 @@ class PowerMaxCommonTest(test.TestCase): return_value=sessions): with mock.patch.object(self.rest, 'get_volume_snap', return_value=snap_name2): - self.common._sync_check(array, device_id, volume_name, - extra_specs) + self.common._sync_check(array, device_id, extra_specs) mock_delete.assert_called_once_with( array, snap_name2, device_id, restored=False, generation=0) @@ -5104,7 +5121,6 @@ class PowerMaxCommonTest(test.TestCase): array = self.data.array device_id = self.data.device_id target = self.data.volume_details[1]['volumeId'] - volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs snap_name = 'OS-1' sessions = [{'source_vol': device_id, @@ -5112,8 +5128,7 @@ class PowerMaxCommonTest(test.TestCase): 'target_vol_list': [(target, "Copied")]}] with mock.patch.object(self.rest, 'find_snap_vx_sessions', return_value=sessions): - self.common._sync_check(array, device_id, volume_name, - extra_specs) + self.common._sync_check(array, device_id, extra_specs) mock_break.assert_called_with( array, target, device_id, snap_name, extra_specs, 0) mock_delete.assert_not_called() @@ -5124,12 +5139,10 @@ class PowerMaxCommonTest(test.TestCase): def test_sync_check_no_sessions(self, mock_break): array = 
self.data.array device_id = self.data.device_id - volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs with mock.patch.object(self.rest, 'find_snap_vx_sessions', return_value=None): - self.common._sync_check(array, device_id, volume_name, - extra_specs) + self.common._sync_check(array, device_id, extra_specs) mock_break.assert_not_called() @mock.patch.object( @@ -5294,18 +5307,18 @@ class PowerMaxCommonTest(test.TestCase): '_remove_vol_and_cleanup_replication') def test_unmanage_success(self, mock_rm): volume = self.data.test_volume - with mock.patch.object(self.rest, 'rename_volume'): + with mock.patch.object(self.rest, 'rename_volume') as mock_rename: self.common.unmanage(volume) - self.rest.rename_volume.assert_called_once_with( + mock_rename.assert_called_once_with( self.data.array, self.data.device_id, self.data.test_volume.id) # Test for success when create storage group fails - with mock.patch.object(self.rest, 'rename_volume'): + with mock.patch.object(self.rest, 'rename_volume') as mock_rename: with mock.patch.object( self.provision, 'create_storage_group', side_effect=exception.VolumeBackendAPIException): self.common.unmanage(volume) - self.rest.rename_volume.assert_called_once_with( + mock_rename.assert_called_once_with( self.data.array, self.data.device_id, self.data.test_volume.id) @@ -5313,9 +5326,9 @@ class PowerMaxCommonTest(test.TestCase): volume = self.data.test_volume with mock.patch.object(self.common, '_find_device_on_array', return_value=None): - with mock.patch.object(self.rest, 'rename_volume'): + with mock.patch.object(self.rest, 'rename_volume') as mock_rename: self.common.unmanage(volume) - self.rest.rename_volume.assert_not_called() + mock_rename.assert_not_called() @mock.patch.object(common.PowerMaxCommon, '_slo_workload_migration') @@ -5347,10 +5360,11 @@ class PowerMaxCommonTest(test.TestCase): new_type = {'extra_specs': {}} volume = self.data.test_volume host = {'host': self.data.new_host} - with mock.patch.object(self.common, '_migrate_volume'): + with mock.patch.object( + self.common, '_migrate_volume') as mock_migrate: self.common._slo_workload_migration( device_id, volume, host, volume_name, new_type, extra_specs) - self.common._migrate_volume.assert_called_once_with( + mock_migrate.assert_called_once_with( extra_specs[utils.ARRAY], volume, device_id, extra_specs[utils.SRP], 'Silver', 'OLTP', volume_name, new_type, extra_specs) @@ -5390,12 +5404,13 @@ class PowerMaxCommonTest(test.TestCase): with mock.patch.object( self.common, '_is_valid_for_storage_assisted_migration', return_value=(True, self.data.slo, self.data.workload)): - with mock.patch.object(self.common, '_migrate_volume'): + with mock.patch.object( + self.common, '_migrate_volume') as mock_migrate: migrate_status = self.common._slo_workload_migration( device_id, volume, host, volume_name, new_type, extra_specs) self.assertTrue(bool(migrate_status)) - self.common._migrate_volume.assert_called_once_with( + mock_migrate.assert_called_once_with( extra_specs[utils.ARRAY], volume, device_id, extra_specs[utils.SRP], self.data.slo, self.data.workload, volume_name, new_type, extra_specs) @@ -5917,7 +5932,7 @@ class PowerMaxCommonTest(test.TestCase): 'snap_name': 'OS-%s' % existing_ref['source-name']} updates = { - 'display_name': self.data.test_snapshot_manage.display_name, + 'display_name': 'my_snap', 'provider_location': six.text_type(prov_loc)} self.assertEqual(updates_response, updates) @@ -6182,15 +6197,17 @@ class PowerMaxCommonTest(test.TestCase): def 
test_update_vol_stats_retest_u4p(self): self.rest.u4p_in_failover = True self.rest.u4p_failover_autofailback = True - with mock.patch.object(self.common, 'retest_primary_u4p'): + with mock.patch.object( + self.common, 'retest_primary_u4p') as mock_retest: self.common.update_volume_stats() - self.common.retest_primary_u4p.assert_called_once() + mock_retest.assert_called_once() self.rest.u4p_in_failover = True self.rest.u4p_failover_autofailback = False - with mock.patch.object(self.common, 'retest_primary_u4p'): + with mock.patch.object( + self.common, 'retest_primary_u4p') as mock_retest: self.common.update_volume_stats() - self.common.retest_primary_u4p.assert_not_called() + mock_retest.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'request', return_value=[200, None]) @@ -6223,60 +6240,60 @@ class PowerMaxFCTest(test.TestCase): mock.Mock(return_value=self.data.vol_type_extra_specs)) def test_create_volume(self): - with mock.patch.object(self.common, 'create_volume'): + with mock.patch.object(self.common, 'create_volume') as mock_create: self.driver.create_volume(self.data.test_volume) - self.common.create_volume.assert_called_once_with( - self.data.test_volume) + mock_create.assert_called_once_with(self.data.test_volume) def test_create_volume_from_snapshot(self): volume = self.data.test_clone_volume snapshot = self.data.test_snapshot - with mock.patch.object(self.common, 'create_volume_from_snapshot'): + with mock.patch.object( + self.common, 'create_volume_from_snapshot') as mock_create: self.driver.create_volume_from_snapshot(volume, snapshot) - self.common.create_volume_from_snapshot.assert_called_once_with( - volume, snapshot) + mock_create.assert_called_once_with(volume, snapshot) def test_create_cloned_volume(self): volume = self.data.test_clone_volume src_volume = self.data.test_volume - with mock.patch.object(self.common, 'create_cloned_volume'): + with mock.patch.object( + self.common, 'create_cloned_volume') as mock_create: self.driver.create_cloned_volume(volume, src_volume) - self.common.create_cloned_volume.assert_called_once_with( - volume, src_volume) + mock_create.assert_called_once_with(volume, src_volume) def test_delete_volume(self): - with mock.patch.object(self.common, 'delete_volume'): + with mock.patch.object(self.common, 'delete_volume') as mock_delete: self.driver.delete_volume(self.data.test_volume) - self.common.delete_volume.assert_called_once_with( - self.data.test_volume) + mock_delete.assert_called_once_with(self.data.test_volume) def test_create_snapshot(self): - with mock.patch.object(self.common, 'create_snapshot'): + with mock.patch.object(self.common, 'create_snapshot') as mock_create: self.driver.create_snapshot(self.data.test_snapshot) - self.common.create_snapshot.assert_called_once_with( + mock_create.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_delete_snapshot(self): - with mock.patch.object(self.common, 'delete_snapshot'): + with mock.patch.object(self.common, 'delete_snapshot') as mock_delete: self.driver.delete_snapshot(self.data.test_snapshot) - self.common.delete_snapshot.assert_called_once_with( + mock_delete.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_initialize_connection(self): - with mock.patch.object(self.common, 'initialize_connection', - return_value=self.data.fc_device_info): - with mock.patch.object(self.driver, 'populate_data'): + with mock.patch.object( + self.common, 'initialize_connection', + 
return_value=self.data.fc_device_info) as mock_initialize: + with mock.patch.object( + self.driver, 'populate_data') as mock_populate: self.driver.initialize_connection(self.data.test_volume, self.data.connector) - self.common.initialize_connection.assert_called_once_with( + mock_initialize.assert_called_once_with( self.data.test_volume, self.data.connector) - self.driver.populate_data.assert_called_once_with( + mock_populate.assert_called_once_with( self.data.fc_device_info, self.data.test_volume, self.data.connector) def test_populate_data(self): with mock.patch.object(self.driver, '_build_initiator_target_map', - return_value=([], {})): + return_value=([], {})) as mock_build: ref_data = { 'driver_volume_type': 'fibre_channel', 'data': {'target_lun': self.data.fc_device_info['hostlunid'], @@ -6287,23 +6304,25 @@ class PowerMaxFCTest(test.TestCase): self.data.test_volume, self.data.connector) self.assertEqual(ref_data, data) - self.driver._build_initiator_target_map.assert_called_once_with( + mock_build.assert_called_once_with( self.data.test_volume, self.data.connector) def test_terminate_connection(self): - with mock.patch.object(self.common, 'terminate_connection'): + with mock.patch.object( + self.common, 'terminate_connection') as mock_terminate: self.driver.terminate_connection(self.data.test_volume, self.data.connector) - self.common.terminate_connection.assert_called_once_with( + mock_terminate.assert_called_once_with( self.data.test_volume, self.data.connector) def test_terminate_connection_no_zoning_mappings(self): with mock.patch.object(self.driver, '_get_zoning_mappings', return_value=None): - with mock.patch.object(self.common, 'terminate_connection'): + with mock.patch.object( + self.common, 'terminate_connection') as mock_terminate: self.driver.terminate_connection(self.data.test_volume, self.data.connector) - self.common.terminate_connection.assert_not_called() + mock_terminate.assert_not_called() def test_get_zoning_mappings(self): ref_mappings = self.data.zoning_mappings @@ -6362,25 +6381,25 @@ class PowerMaxFCTest(test.TestCase): self.assertEqual(ref_target_map, target_map) def test_extend_volume(self): - with mock.patch.object(self.common, 'extend_volume'): + with mock.patch.object(self.common, 'extend_volume') as mock_extend: self.driver.extend_volume(self.data.test_volume, '3') - self.common.extend_volume.assert_called_once_with( - self.data.test_volume, '3') + mock_extend.assert_called_once_with(self.data.test_volume, '3') def test_get_volume_stats(self): - with mock.patch.object(self.driver, 'update_volume_stats'): + with mock.patch.object( + self.driver, 'update_volume_stats') as mock_update: # no refresh self.driver.get_volume_stats() - self.driver.update_volume_stats.assert_not_called() + mock_update.assert_not_called() # with refresh self.driver.get_volume_stats(True) - self.driver.update_volume_stats.assert_called_once_with() + mock_update.assert_called_once_with() def test_update_volume_stats(self): with mock.patch.object(self.common, 'update_volume_stats', - return_value={}): + return_value={}) as mock_update: self.driver.update_volume_stats() - self.common.update_volume_stats.assert_called_once_with() + mock_update.assert_called_once_with() def test_check_for_setup_error(self): self.driver.check_for_setup_error() @@ -6399,35 +6418,35 @@ class PowerMaxFCTest(test.TestCase): def test_manage_existing(self): with mock.patch.object(self.common, 'manage_existing', - return_value={}): + return_value={}) as mock_manage: external_ref = {u'source-name': u'00002'} 
self.driver.manage_existing(self.data.test_volume, external_ref) - self.common.manage_existing.assert_called_once_with( + mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_manage_existing_get_size(self): with mock.patch.object(self.common, 'manage_existing_get_size', - return_value='1'): + return_value='1') as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing_get_size( self.data.test_volume, external_ref) - self.common.manage_existing_get_size.assert_called_once_with( + mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_unmanage_volume(self): with mock.patch.object(self.common, 'unmanage', - return_value={}): + return_value={}) as mock_unmanage: self.driver.unmanage(self.data.test_volume) - self.common.unmanage.assert_called_once_with( + mock_unmanage.assert_called_once_with( self.data.test_volume) def test_retype(self): host = {'host': self.data.new_host} new_type = {'extra_specs': {}} with mock.patch.object(self.common, 'retype', - return_value=True): + return_value=True) as mock_retype: self.driver.retype({}, self.data.test_volume, new_type, '', host) - self.common.retype.assert_called_once_with( + mock_retype.assert_called_once_with( self.data.test_volume, new_type, host) def test_failover_host(self): @@ -6480,43 +6499,44 @@ class PowerMaxISCSITest(test.TestCase): mock.Mock(return_value=self.data.vol_type_extra_specs)) def test_create_volume(self): - with mock.patch.object(self.common, 'create_volume'): + with mock.patch.object(self.common, 'create_volume') as mock_create: self.driver.create_volume(self.data.test_volume) - self.common.create_volume.assert_called_once_with( + mock_create.assert_called_once_with( self.data.test_volume) def test_create_volume_from_snapshot(self): volume = self.data.test_clone_volume snapshot = self.data.test_snapshot - with mock.patch.object(self.common, 'create_volume_from_snapshot'): + with mock.patch.object( + self.common, 'create_volume_from_snapshot') as mock_create: self.driver.create_volume_from_snapshot(volume, snapshot) - self.common.create_volume_from_snapshot.assert_called_once_with( + mock_create.assert_called_once_with( volume, snapshot) def test_create_cloned_volume(self): volume = self.data.test_clone_volume src_volume = self.data.test_volume - with mock.patch.object(self.common, 'create_cloned_volume'): + with mock.patch.object( + self.common, 'create_cloned_volume') as mock_create: self.driver.create_cloned_volume(volume, src_volume) - self.common.create_cloned_volume.assert_called_once_with( - volume, src_volume) + mock_create.assert_called_once_with(volume, src_volume) def test_delete_volume(self): - with mock.patch.object(self.common, 'delete_volume'): + with mock.patch.object(self.common, 'delete_volume') as mock_delete: self.driver.delete_volume(self.data.test_volume) - self.common.delete_volume.assert_called_once_with( + mock_delete.assert_called_once_with( self.data.test_volume) def test_create_snapshot(self): - with mock.patch.object(self.common, 'create_snapshot'): + with mock.patch.object(self.common, 'create_snapshot') as mock_create: self.driver.create_snapshot(self.data.test_snapshot) - self.common.create_snapshot.assert_called_once_with( + mock_create.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_delete_snapshot(self): - with mock.patch.object(self.common, 'delete_snapshot'): + with mock.patch.object(self.common, 'delete_snapshot') as mock_delete: 
self.driver.delete_snapshot(self.data.test_snapshot) - self.common.delete_snapshot.assert_called_once_with( + mock_delete.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_initialize_connection(self): @@ -6527,13 +6547,13 @@ class PowerMaxISCSITest(test.TestCase): 'ip_and_iqn': [{'ip': self.data.ip, 'iqn': self.data.initiator}], 'is_multipath': False} - with mock.patch.object(self.driver, 'get_iscsi_dict'): + with mock.patch.object(self.driver, 'get_iscsi_dict') as mock_get: with mock.patch.object( self.common, 'get_port_group_from_masking_view', return_value=self.data.port_group_name_i): self.driver.initialize_connection(self.data.test_volume, self.data.connector) - self.driver.get_iscsi_dict.assert_called_once_with( + mock_get.assert_called_once_with( ref_dict, self.data.test_volume) def test_get_iscsi_dict_success(self): @@ -6544,10 +6564,12 @@ class PowerMaxISCSITest(test.TestCase): device_info = self.data.iscsi_device_info ref_data = {'driver_volume_type': 'iscsi', 'data': {}} with mock.patch.object( - self.driver, 'vmax_get_iscsi_properties', return_value={}): + self.driver, + 'vmax_get_iscsi_properties', + return_value={}) as mock_get: data = self.driver.get_iscsi_dict(device_info, volume) self.assertEqual(ref_data, data) - self.driver.vmax_get_iscsi_properties.assert_called_once_with( + mock_get.assert_called_once_with( volume, ip_and_iqn, True, host_lun_id, None, None) def test_get_iscsi_dict_exception(self): @@ -6564,10 +6586,12 @@ class PowerMaxISCSITest(test.TestCase): device_info = self.data.iscsi_device_info_metro ref_data = {'driver_volume_type': 'iscsi', 'data': {}} with mock.patch.object( - self.driver, 'vmax_get_iscsi_properties', return_value={}): + self.driver, + 'vmax_get_iscsi_properties', + return_value={}) as mock_get: data = self.driver.get_iscsi_dict(device_info, volume) self.assertEqual(ref_data, data) - self.driver.vmax_get_iscsi_properties.assert_called_once_with( + mock_get.assert_called_once_with( volume, ip_and_iqn, True, host_lun_id, self.data.iscsi_device_info_metro['metro_ip_and_iqn'], self.data.iscsi_device_info_metro['metro_hostlunid']) @@ -6663,32 +6687,35 @@ class PowerMaxISCSITest(test.TestCase): self.assertEqual(ref_properties, iscsi_properties) def test_terminate_connection(self): - with mock.patch.object(self.common, 'terminate_connection'): + with mock.patch.object( + self.common, 'terminate_connection') as mock_terminate: self.driver.terminate_connection(self.data.test_volume, self.data.connector) - self.common.terminate_connection.assert_called_once_with( + mock_terminate.assert_called_once_with( self.data.test_volume, self.data.connector) def test_extend_volume(self): - with mock.patch.object(self.common, 'extend_volume'): + with mock.patch.object( + self.common, 'extend_volume') as mock_extend: self.driver.extend_volume(self.data.test_volume, '3') - self.common.extend_volume.assert_called_once_with( + mock_extend.assert_called_once_with( self.data.test_volume, '3') def test_get_volume_stats(self): - with mock.patch.object(self.driver, 'update_volume_stats'): + with mock.patch.object( + self.driver, 'update_volume_stats') as mock_update: # no refresh self.driver.get_volume_stats() - self.driver.update_volume_stats.assert_not_called() + mock_update.assert_not_called() # with refresh self.driver.get_volume_stats(True) - self.driver.update_volume_stats.assert_called_once_with() + mock_update.assert_called_once_with() def test_update_volume_stats(self): with mock.patch.object(self.common, 
'update_volume_stats', - return_value={}): + return_value={}) as mock_update: self.driver.update_volume_stats() - self.common.update_volume_stats.assert_called_once_with() + mock_update.assert_called_once_with() def test_check_for_setup_error(self): self.driver.check_for_setup_error() @@ -6707,35 +6734,34 @@ class PowerMaxISCSITest(test.TestCase): def test_manage_existing(self): with mock.patch.object(self.common, 'manage_existing', - return_value={}): + return_value={}) as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing(self.data.test_volume, external_ref) - self.common.manage_existing.assert_called_once_with( + mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_manage_existing_get_size(self): with mock.patch.object(self.common, 'manage_existing_get_size', - return_value='1'): + return_value='1') as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing_get_size( self.data.test_volume, external_ref) - self.common.manage_existing_get_size.assert_called_once_with( + mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_unmanage_volume(self): with mock.patch.object(self.common, 'unmanage', - return_value={}): + return_value={}) as mock_unmanage: self.driver.unmanage(self.data.test_volume) - self.common.unmanage.assert_called_once_with( - self.data.test_volume) + mock_unmanage.assert_called_once_with(self.data.test_volume) def test_retype(self): host = {'host': self.data.new_host} new_type = {'extra_specs': {}} with mock.patch.object(self.common, 'retype', - return_value=True): + return_value=True) as mock_retype: self.driver.retype({}, self.data.test_volume, new_type, '', host) - self.common.retype.assert_called_once_with( + mock_retype.assert_called_once_with( self.data.test_volume, new_type, host) def test_failover_host(self): @@ -7516,15 +7542,15 @@ class PowerMaxMaskingTest(test.TestCase): with mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array', return_value=True): with mock.patch.object( - self.mask, 'get_or_create_default_storage_group'): + self.mask, + 'get_or_create_default_storage_group') as mock_get: self.mask.add_volume_to_default_storage_group( self.data.array, self.device_id, self.volume_name, self.extra_specs) - (self.mask.get_or_create_default_storage_group - .assert_called_once_with(self.data.array, self.data.srp, - self.extra_specs[utils.SLO], - 'NONE', self.extra_specs, False, - False, None)) + mock_get.assert_called_once_with( + self.data.array, self.data.srp, + self.extra_specs[utils.SLO], + 'NONE', self.extra_specs, False, False, None) @mock.patch.object(provision.PowerMaxProvision, 'create_storage_group') def test_get_or_create_default_storage_group(self, mock_create_sg): @@ -7615,17 +7641,18 @@ class PowerMaxMaskingTest(test.TestCase): return_value=PowerMaxCommonData.initiatorgroup_name_f): with mock.patch.object( self.mask, '_verify_initiator_group_from_masking_view', - return_value=(True, self.data.initiatorgroup_name_f)): + return_value=( + True, + self.data.initiatorgroup_name_f)) as mock_verify: self.mask._check_existing_initiator_group( self.data.array, self.data.masking_view_name_f, mv_dict, self.data.storagegroup_name_f, self.data.port_group_name_f, self.data.extra_specs) - (self.mask._verify_initiator_group_from_masking_view. 
- assert_called_once_with( - self.data.array, self.data.masking_view_name_f, - mv_dict, self.data.initiatorgroup_name_f, - self.data.storagegroup_name_f, - self.data.port_group_name_f, self.data.extra_specs)) + mock_verify.assert_called_once_with( + self.data.array, self.data.masking_view_name_f, + mv_dict, self.data.initiatorgroup_name_f, + self.data.storagegroup_name_f, + self.data.port_group_name_f, self.data.extra_specs) @mock.patch.object(masking.PowerMaxMasking, 'add_child_sg_to_parent_sg', side_effect=[ @@ -7887,7 +7914,7 @@ class PowerMaxCommonReplicationTest(test.TestCase): @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) - @mock.patch.object(objects.Group, 'get_by_id', + @mock.patch.object(objects.group.Group, 'get_by_id', return_value=PowerMaxCommonData.test_rep_group) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) @mock.patch.object(utils.PowerMaxUtils, 'check_replication_matched', @@ -8821,7 +8848,7 @@ class PowerMaxVolumeMetadataDebugTest(test.TestCase): volume_metadata = self.volume_metadata.update_volume_info_metadata( self.data.data_dict, self.data.version_dict) self.assertEqual('2.7.12', volume_metadata['python_version']) - self.assertEqual('VMAX250F', volume_metadata['vmax_model']) + self.assertEqual('VMAX250F', volume_metadata['storage_model']) self.assertEqual('DSS', volume_metadata['workload']) self.assertEqual('OS-fibre-PG', volume_metadata['port_group']) @@ -8829,7 +8856,7 @@ class PowerMaxVolumeMetadataDebugTest(test.TestCase): datadict = {} volume_trace_dict = {} volume_key_value = {} - result_dict = {'operation': 'create', + result_dict = {'successful_operation': 'create', 'volume_id': self.data.test_volume.id} volume_metadata = self.volume_metadata._fill_volume_trace_dict( self.data.test_volume.id, 'create', False, target_name=None, @@ -8846,7 +8873,7 @@ class PowerMaxVolumeMetadataDebugTest(test.TestCase): result_dict = {'masking_view_1': 'mv1', 'masking_view_2': 'mv2', 'masking_view_3': 'mv3', - 'operation': 'attach', + 'successful_operation': 'attach', 'storage_group_1': 'sg1', 'storage_group_2': 'sg2', 'storage_group_3': 'sg3', diff --git a/cinder/volume/drivers/dell_emc/powermax/common.py b/cinder/volume/drivers/dell_emc/powermax/common.py index ba4c457ed4d..95762cfd1a8 100644 --- a/cinder/volume/drivers/dell_emc/powermax/common.py +++ b/cinder/volume/drivers/dell_emc/powermax/common.py @@ -951,7 +951,7 @@ class PowerMaxCommon(object): device_id = self._find_device_on_array(volume, extra_specs) array = extra_specs[utils.ARRAY] # Check if volume is part of an on-going clone operation - self._sync_check(array, device_id, volume_name, extra_specs) + self._sync_check(array, device_id, extra_specs) if device_id is None: exception_message = (_("Cannot find Volume: %(volume_name)s. " "Extend operation. 
Exiting....") @@ -1046,46 +1046,9 @@ class PowerMaxCommon(object): if already_queried: # The dictionary will only have one key per PowerMax/VMAX # Construct the location info - try: - temp_location_info = ( - ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" - % {'arrayName': array_info['SerialNumber'], - 'srpName': array_info['srpName'], - 'slo': array_info['SLO'], - 'workload': array_info['Workload']})) - except KeyError: - temp_location_info = ( - ("%(arrayName)s#%(srpName)s#%(slo)s" - % {'arrayName': array_info['SerialNumber'], - 'srpName': array_info['srpName'], - 'slo': array_info['SLO']})) - - pool = {'pool_name': pool_name, - 'total_capacity_gb': - arrays[array_info['SerialNumber']][0], - 'free_capacity_gb': - arrays[array_info['SerialNumber']][1], - 'provisioned_capacity_gb': - arrays[array_info['SerialNumber']][2], - 'QoS_support': False, - 'location_info': temp_location_info, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'consistent_group_snapshot_enabled': True, - 'max_over_subscription_ratio': - max_oversubscription_ratio, - 'reserved_percentage': reserved_percentage, - 'replication_enabled': self.replication_enabled, - 'multiattach': True} - if arrays[array_info['SerialNumber']][3]: - if reserved_percentage: - if (arrays[array_info['SerialNumber']][3] > - reserved_percentage): - pool['reserved_percentage'] = ( - arrays[array_info['SerialNumber']][3]) - else: - pool['reserved_percentage'] = ( - arrays[array_info['SerialNumber']][3]) + pool = self._construct_location_info_and_pool( + array_info, pool_name, arrays, max_oversubscription_ratio, + reserved_percentage) else: pool = {'pool_name': pool_name, 'total_capacity_gb': total_capacity_gb, @@ -1130,6 +1093,61 @@ class PowerMaxCommon(object): return data + def _construct_location_info_and_pool( + self, array_info, pool_name, arrays, max_oversubscription_ratio, + reserved_percentage): + """Construct the location info string and the pool dict + + :param array_info: array information dict + :param pool_name: pool name + :param arrays: arrays dict + :param max_oversubscription_ratio: max oversubscription ratio + :param reserved_percentage: reserved percentage + + :returns: pool - dict + """ + try: + temp_location_info = ( + ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" + % {'arrayName': array_info['SerialNumber'], + 'srpName': array_info['srpName'], + 'slo': array_info['SLO'], + 'workload': array_info['Workload']})) + except KeyError: + temp_location_info = ( + ("%(arrayName)s#%(srpName)s#%(slo)s" + % {'arrayName': array_info['SerialNumber'], + 'srpName': array_info['srpName'], + 'slo': array_info['SLO']})) + + pool = {'pool_name': pool_name, + 'total_capacity_gb': + arrays[array_info['SerialNumber']][0], + 'free_capacity_gb': + arrays[array_info['SerialNumber']][1], + 'provisioned_capacity_gb': + arrays[array_info['SerialNumber']][2], + 'QoS_support': False, + 'location_info': temp_location_info, + 'thin_provisioning_support': True, + 'thick_provisioning_support': False, + 'consistent_group_snapshot_enabled': True, + 'max_over_subscription_ratio': + max_oversubscription_ratio, + 'reserved_percentage': reserved_percentage, + 'replication_enabled': self.replication_enabled, + 'multiattach': True} + if arrays[array_info['SerialNumber']][3]: + if reserved_percentage: + if (arrays[array_info['SerialNumber']][3] > + reserved_percentage): + pool['reserved_percentage'] = ( + arrays[array_info['SerialNumber']][3]) + else: + pool['reserved_percentage'] = ( + arrays[array_info['SerialNumber']][3]) + return 
pool + def _update_srp_stats(self, array_info): """Update SRP stats. @@ -1306,7 +1324,7 @@ class PowerMaxCommon(object): else: exception_message = (_("Cannot retrieve volume %(vol)s " "from the array.") % {'vol': volume_name}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) return maskedvols, is_multiattach @@ -1424,7 +1442,7 @@ class PowerMaxCommon(object): if not device_id: exception_message = (_("Cannot retrieve volume %(vol)s " "from the array. ") % {'vol': volume_name}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) protocol = self.utils.get_short_protocol_type(self.protocol) @@ -1614,7 +1632,7 @@ class PowerMaxCommon(object): array = extra_specs[utils.ARRAY] # Check if the volume being deleted is a # source or target for copy session - self._sync_check(array, device_id, volume_name, extra_specs) + self._sync_check(array, device_id, extra_specs) # Remove from any storage groups and cleanup replication self._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) @@ -1711,7 +1729,7 @@ class PowerMaxCommon(object): "the volume type, or enter a list of " "portgroups in the cinder.conf associated with " "this backend.")) - LOG.exception(error_message) + LOG.error(error_message) raise exception.VolumeBackendAPIException(message=error_message) extra_specs[utils.INTERVAL] = self.interval @@ -1815,7 +1833,7 @@ class PowerMaxCommon(object): "Exception received: %(e)s") % {'volume_name': volume_name, 'e': six.text_type(e)}) - LOG.exception(error_message) + LOG.error(error_message) raise exception.VolumeBackendAPIException(message=error_message) def _remove_vol_and_cleanup_replication( @@ -2017,13 +2035,12 @@ class PowerMaxCommon(object): self._delete_from_srp( array, target_device_id, clone_name, extra_specs) - def _sync_check(self, array, device_id, volume_name, extra_specs, + def _sync_check(self, array, device_id, extra_specs, tgt_only=False): """Check if volume is part of a SnapVx sync process. :param array: the array serial number :param device_id: volume instance - :param volume_name: volume name :param tgt_only: Flag - return only sessions where device is target :param extra_specs: extra specifications :param is_clone: Flag to specify if it is a clone operation @@ -2079,54 +2096,80 @@ class PowerMaxCommon(object): if snap_vx_sessions: snap_vx_sessions.sort( key=lambda k: k['generation'], reverse=True) - count = 0 - for session in snap_vx_sessions: - source = session['source_vol'] - snap_name = session['snap_name'] - targets = session['target_vol_list'] - generation = session['generation'] - # Only unlink a set number of targets - if count == self.snapvx_unlink_limit: - break - is_temp = False - is_temp = 'temp' in snap_name or 'EMC_SMI' in snap_name - if utils.CLONE_SNAPSHOT_NAME in snap_name: - is_temp = False - for target in targets: - if snapvx_src: - if not is_temp and target[1] == "Copied": - # Break the replication relationship - LOG.debug("Unlinking source from " - "target. Source: %(volume)s, " - "Target: %(target)s.", - {'volume': source, - 'target': target[0]}) - self.provision.break_replication_relationship( - array, target[0], source, snap_name, - extra_specs, generation) - count = count + 1 - elif snapvx_tgt: - # If our device is a target, we need to wait - # and then unlink - LOG.debug("Unlinking source from " - "target. 
Source: %(volume)s, " - "Target: %(target)s.", - {'volume': source, - 'target': target[0]}) - self.provision.break_replication_relationship( - array, target[0], source, snap_name, - extra_specs, generation) - # For older styled temp snapshots for clone - # do a delete as well - if is_temp: + self._break_relationship( + snap_vx_sessions, snapvx_tgt, snapvx_src, array, + extra_specs) - @coordination.synchronized( - "emc-source-{source}") - def do_delete_temp_volume_snap(source): - self.provision.delete_temp_volume_snap( - array, snap_name, source, generation) + def _break_relationship( + self, snap_vx_sessions, snapvx_tgt, snapvx_src, array, + extra_specs): + """Break relationship and cleanup - do_delete_temp_volume_snap(source) + :param snap_vx_sessions: the snapvx sessions + :param snapvx_tgt: the snapvx target + :param snapvx_src: the snapvx source + :param array: the serialnumber of the array + :param extra_specs: extra specifications + """ + count = 0 + for session in snap_vx_sessions: + snap_name = session['snap_name'] + targets = session['target_vol_list'] + # Only unlink a set number of targets + if count == self.snapvx_unlink_limit: + break + is_temp = False + is_temp = 'temp' in snap_name or 'EMC_SMI' in snap_name + if utils.CLONE_SNAPSHOT_NAME in snap_name: + is_temp = False + for target in targets: + if snapvx_src: + if not is_temp and target[1] == "Copied": + # Break the replication relationship + LOG.debug("Unlinking source from " + "target. Source: %(volume)s, " + "Target: %(target)s.", + {'volume': session['source_vol'], + 'target': target[0]}) + self.provision.break_replication_relationship( + array, target[0], session['source_vol'], + snap_name, extra_specs, + session['generation']) + count = count + 1 + elif snapvx_tgt: + # If our device is a target, we need to wait + # and then unlink + self._break_relationship_snapvx_tgt( + session, target, is_temp, array, extra_specs) + + def _break_relationship_snapvx_tgt( + self, session, target, is_temp, array, extra_specs): + """Break relationship of the snapvx target and cleanup + + :param session: the snapvx session + :param target: the snapvx target + :param is_temp: is the snapshot temporary + :param array: the serialnumber of the array + :param extra_specs: extra specifications + """ + LOG.debug("Unlinking source from " + "target. Source: %(volume)s, " + "Target: %(target)s.", + {'volume': session['source_vol'], + 'target': target[0]}) + self.provision.break_replication_relationship( + array, target[0], session['source_vol'], session['snap_name'], + extra_specs, session['generation']) + # For older styled temp snapshots for clone + # do a delete as well + if is_temp: + @coordination.synchronized( + "emc-source-{source}") + def do_delete_temp_volume_snap(source): + self.provision.delete_temp_volume_snap( + array, session['snap_name'], source, session['generation']) + + do_delete_temp_volume_snap(session['source_vol']) def manage_existing(self, volume, external_ref): """Manages an existing PowerMax/VMAX Volume (import to Cinder). @@ -2280,7 +2323,7 @@ class PowerMaxCommon(object): "sizes are supported. 
Please extend the " "volume to the nearest GB value before importing.") % {'device_id': device_id, 'vol_size': size, }) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=exception_message) @@ -2307,8 +2350,7 @@ class PowerMaxCommon(object): {'id': volume_id}) else: # Check if volume is snap source - self._sync_check(extra_specs['array'], device_id, - volume_name, extra_specs) + self._sync_check(extra_specs['array'], device_id, extra_specs) # Remove volume from any openstack storage groups # and remove any replication self._remove_vol_and_cleanup_replication( @@ -2375,7 +2417,7 @@ class PowerMaxCommon(object): (_("Volume %(name)s is failed over from the source volume, " "it is not possible to manage a snapshot of a failed over " "volume.") % {'name': volume.id})) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -2386,7 +2428,7 @@ class PowerMaxCommon(object): "snapshot that is not associated with the specified " "volume.") % {'device_id': device_id, 'snap_name': snap_name}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -2403,7 +2445,7 @@ class PowerMaxCommon(object): _("There was an issue managing %(snap_name)s, it was not " "possible to add the OS- prefix. Error Message: %(e)s.") % {'snap_name': snap_name, 'e': six.text_type(e)}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -2452,7 +2494,7 @@ class PowerMaxCommon(object): "PowerMax/VMAX to unmanage snapshot %(snap_name)s") % {'snap_name': snap_name}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -2469,11 +2511,11 @@ class PowerMaxCommon(object): "was not possible to remove the OS- prefix. 
Error " "message is: %(e)s.") % {'snap_name': snap_name, 'e': six.text_type(e)}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) - self._sync_check(array, device_id, volume.name, extra_specs) + self._sync_check(array, device_id, extra_specs) LOG.info("Snapshot %(snap_name)s is no longer managed in " "OpenStack but still remains on PowerMax/VMAX source " @@ -2528,40 +2570,63 @@ class PowerMaxCommon(object): if manageable_vols: # If sort keys selected, determine if by size or reference, and # direction of sort - if sort_keys: - reverse = False - if sort_dirs: - if 'desc' in sort_dirs[0]: - reverse = True - if sort_keys[0] == 'size': - manageable_vols = sorted(manageable_vols, - key=lambda k: k['size'], - reverse=reverse) - if sort_keys[0] == 'reference': - manageable_vols = sorted(manageable_vols, - key=lambda k: k['reference'][ - 'source-id'], - reverse=reverse) + manageable_vols = self._sort_manageable_volumes( + manageable_vols, marker, limit, offset, sort_keys, sort_dirs) - # If marker provided, return only manageable volumes after marker - if marker: - vol_index = None - for vol in manageable_vols: - if vol['reference']['source-id'] == marker: - vol_index = manageable_vols.index(vol) - if vol_index: - manageable_vols = manageable_vols[vol_index:] - else: - msg = _("Volume marker not found, please check supplied " - "device ID and try again.") - raise exception.VolumeBackendAPIException(msg) + return manageable_vols - # If offset or limit provided, offset or limit result list - if offset: - manageable_vols = manageable_vols[offset:] - if limit: - manageable_vols = manageable_vols[:limit] + def _sort_manageable_volumes( + self, manageable_vols, marker, limit, offset, sort_keys, + sort_dirs): + """Sort manageable volumes. + :param manageable_vols: Unsorted list of dicts + :param marker: Begin returning volumes that appear later in the volume + list than that represented by this reference. This + reference should be json like. Default=None. + :param limit: Maximum number of volumes to return. Default=None. + :param offset: Number of volumes to skip after marker. Default=None. + :param sort_keys: Key to sort by, sort by size or reference. Valid + keys: size, reference. Default=None. + :param sort_dirs: Direction to sort by. Valid dirs: asc, desc. + Default=None. 
+ :return: manageable_vols -Sorted list of dicts + """ + # If sort keys selected, determine if by size or reference, and + # direction of sort + if sort_keys: + reverse = False + if sort_dirs: + if 'desc' in sort_dirs[0]: + reverse = True + if sort_keys[0] == 'size': + manageable_vols = sorted(manageable_vols, + key=lambda k: k['size'], + reverse=reverse) + if sort_keys[0] == 'reference': + manageable_vols = sorted(manageable_vols, + key=lambda k: k['reference'][ + 'source-id'], + reverse=reverse) + + # If marker provided, return only manageable volumes after marker + if marker: + vol_index = None + for vol in manageable_vols: + if vol['reference']['source-id'] == marker: + vol_index = manageable_vols.index(vol) + if vol_index: + manageable_vols = manageable_vols[vol_index:] + else: + msg = _("Volume marker not found, please check supplied " + "device ID and try again.") + raise exception.VolumeBackendAPIException(msg) + + # If offset or limit provided, offset or limit result list + if offset: + manageable_vols = manageable_vols[offset:] + if limit: + manageable_vols = manageable_vols[:limit] return manageable_vols def get_manageable_snapshots(self, marker, limit, offset, sort_keys, @@ -2577,7 +2642,7 @@ class PowerMaxCommon(object): Valid keys: size, reference. Default=None. :param sort_dirs: Direction to sort by. Valid dirs: asc, desc. Default=None. - :return: List of dicts containing all volumes valid for management + :return: List of dicts containing all snapshots valid for management """ manageable_snaps = [] array = self.pool_info['arrays_info'][0]["SerialNumber"] @@ -2594,85 +2659,118 @@ class PowerMaxCommon(object): for device in volumes: # Determine if volume is valid for management - if self.utils.is_snapshot_manageable(device): - # Snapshot valid, extract relevant snap info - snap_info = device['timeFinderInfo']['snapVXSession'][0][ - 'srcSnapshotGenInfo'][0]['snapshotHeader'] - # Convert timestamp to human readable format - human_timestamp = time.strftime( - "%Y/%m/%d, %H:%M:%S", time.localtime( - float(six.text_type( - snap_info['timestamp'])[:-3]))) - # If TTL is set, convert value to human readable format - if int(snap_info['timeToLive']) > 0: - human_ttl_timestamp = time.strftime( - "%Y/%m/%d, %H:%M:%S", time.localtime( - float(six.text_type( - snap_info['timeToLive'])))) - else: - human_ttl_timestamp = 'N/A' - - # For all valid snaps, extract relevant data for Cinder - # response - snap_dict = { - 'reference': { - 'source-name': snap_info['snapshotName']}, - 'safe_to_manage': True, - 'size': int( - math.ceil(device['volumeHeader']['capGB'])), - 'reason_not_safe': None, 'cinder_id': None, - 'extra_info': { - 'generation': snap_info['generation'], - 'secured': snap_info['secured'], - 'timeToLive': human_ttl_timestamp, - 'timestamp': human_timestamp}, - 'source_reference': {'source-id': snap_info['device']}} - manageable_snaps.append(snap_dict) + manageable_snaps = self._is_snapshot_valid_for_management( + manageable_snaps, device) # If snapshot list is populated, perform filtering on user params if len(manageable_snaps) > 0: # Order snapshots by source deviceID and not snapshot name - manageable_snaps = sorted( - manageable_snaps, - key=lambda k: k['source_reference']['source-id']) - # If sort keys selected, determine if by size or reference, and - # direction of sort - if sort_keys: - reverse = False - if sort_dirs: - if 'desc' in sort_dirs[0]: - reverse = True - if sort_keys[0] == 'size': - manageable_snaps = sorted(manageable_snaps, - key=lambda k: k['size'], - 
reverse=reverse) - if sort_keys[0] == 'reference': - manageable_snaps = sorted(manageable_snaps, - key=lambda k: k['reference'][ - 'source-name'], - reverse=reverse) + manageable_snaps = self._sort_manageable_snapshots( + manageable_snaps, marker, limit, offset, sort_keys, sort_dirs) - # If marker provided, return only manageable volumes after marker - if marker: - snap_index = None - for snap in manageable_snaps: - if snap['reference']['source-name'] == marker: - snap_index = manageable_snaps.index(snap) - if snap_index: - manageable_snaps = manageable_snaps[snap_index:] - else: - msg = (_("Snapshot marker %(marker)s not found, marker " - "provided must be a valid PowerMax/VMAX " - "snapshot ID") % - {'marker': marker}) - raise exception.VolumeBackendAPIException(msg) + return manageable_snaps - # If offset or limit provided, offset or limit result list - if offset: - manageable_snaps = manageable_snaps[offset:] - if limit: - manageable_snaps = manageable_snaps[:limit] + def _sort_manageable_snapshots( + self, manageable_snaps, marker, limit, offset, sort_keys, + sort_dirs): + """Sorts manageable snapshots list. + :param manageable_snaps: unsorted manageable snapshot list + :param marker: Begin returning volumes that appear later in the volume + list than that represented by this reference. This + reference should be json like. Default=None. + :param limit: Maximum number of volumes to return. Default=None. + :param offset: Number of volumes to skip after marker. Default=None. + :param sort_keys: Key to sort by, sort by size or reference. + Valid keys: size, reference. Default=None. + :param sort_dirs: Direction to sort by. Valid dirs: asc, desc. + Default=None. + :return: List of dicts containing all snapshots valid for management + """ + manageable_snaps = sorted( + manageable_snaps, + key=lambda k: k['source_reference']['source-id']) + # If sort keys selected, determine if by size or reference, and + # direction of sort + if sort_keys: + reverse = False + if sort_dirs: + if 'desc' in sort_dirs[0]: + reverse = True + if sort_keys[0] == 'size': + manageable_snaps = sorted(manageable_snaps, + key=lambda k: k['size'], + reverse=reverse) + if sort_keys[0] == 'reference': + manageable_snaps = sorted(manageable_snaps, + key=lambda k: k['reference'][ + 'source-name'], + reverse=reverse) + + # If marker provided, return only manageable volumes after marker + if marker: + snap_index = None + for snap in manageable_snaps: + if snap['reference']['source-name'] == marker: + snap_index = manageable_snaps.index(snap) + if snap_index: + manageable_snaps = manageable_snaps[snap_index:] + else: + msg = (_("Snapshot marker %(marker)s not found, marker " + "provided must be a valid PowerMax/VMAX " + "snapshot ID") % + {'marker': marker}) + raise exception.VolumeBackendAPIException(msg) + + # If offset or limit provided, offset or limit result list + if offset: + manageable_snaps = manageable_snaps[offset:] + if limit: + manageable_snaps = manageable_snaps[:limit] + return manageable_snaps + + def _is_snapshot_valid_for_management(self, manageable_snaps, device): + """check if snapshot is valid for management + + :param manageable_snaps: list of manageable snapshots + :param device: the source device + + :return: List of dicts containing all snapshots valid for management + """ + if self.utils.is_snapshot_manageable(device): + # Snapshot valid, extract relevant snap info + snap_info = device['timeFinderInfo']['snapVXSession'][0][ + 'srcSnapshotGenInfo'][0]['snapshotHeader'] + # Convert timestamp to human 
readable format + human_timestamp = time.strftime( + "%Y/%m/%d, %H:%M:%S", time.localtime( + float(six.text_type( + snap_info['timestamp'])[:-3]))) + # If TTL is set, convert value to human readable format + if int(snap_info['timeToLive']) > 0: + human_ttl_timestamp = time.strftime( + "%Y/%m/%d, %H:%M:%S", time.localtime( + float(six.text_type( + snap_info['timeToLive'])))) + else: + human_ttl_timestamp = 'N/A' + + # For all valid snaps, extract relevant data for Cinder + # response + snap_dict = { + 'reference': { + 'source-name': snap_info['snapshotName']}, + 'safe_to_manage': True, + 'size': int( + math.ceil(device['volumeHeader']['capGB'])), + 'reason_not_safe': None, 'cinder_id': None, + 'extra_info': { + 'generation': snap_info['generation'], + 'secured': snap_info['secured'], + 'timeToLive': human_ttl_timestamp, + 'timestamp': human_timestamp}, + 'source_reference': {'source-id': snap_info['device']}} + manageable_snaps.append(snap_dict) return manageable_snaps def retype(self, volume, new_type, host): @@ -3086,6 +3184,7 @@ class PowerMaxCommon(object): replication_status = REPLICATION_ENABLED replication_driver_data = rdf_dict rep_info_dict = self.volume_metadata.gather_replication_info( + volume.id, 'replication', False, rdf_group_no=rdf_group_no, target_name=target_name, remote_array=remote_array, target_device_id=target_device_id, @@ -3289,7 +3388,7 @@ class PowerMaxCommon(object): {'sourceName': volume_name}) device_id = volume_dict['device_id'] # Check if volume is snap target (e.g. if clone volume) - self._sync_check(array, device_id, volume_name, extra_specs) + self._sync_check(array, device_id, extra_specs) # Remove from any storage groups and cleanup replication self._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) @@ -3307,7 +3406,7 @@ class PowerMaxCommon(object): "backend: %(backend)s.") % {'backend': self.configuration.safe_get( 'volume_backend_name')}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -3321,7 +3420,7 @@ class PowerMaxCommon(object): "%(RDFGroup)s. 
Please check the name " "and the array") % {'RDFGroup': rdf_group_label}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -3342,8 +3441,6 @@ class PowerMaxCommon(object): :returns: secondary_id, volume_update_list, group_update_list :raises: VolumeBackendAPIException """ - volume_update_list = [] - group_update_list = [] group_fo = None if secondary_id != 'default': if not self.failover: @@ -3374,6 +3471,25 @@ class PowerMaxCommon(object): LOG.error(exception_message) return + volume_update_list, group_update_list = ( + self._populate_volume_and_group_update_lists( + volumes, groups, group_fo)) + + LOG.info("Failover host complete.") + return secondary_id, volume_update_list, group_update_list + + def _populate_volume_and_group_update_lists( + self, volumes, groups, group_fo): + """Populate volume and group update lists + + :param volumes: the list of volumes to be failed over + :param groups: replication groups + :param group_fo: group fail over + :returns: volume_update_list, group_update_list + """ + volume_update_list = [] + group_update_list = [] + rep_mode = self.rep_config['mode'] if groups: for group in groups: vol_list = [] @@ -3387,9 +3503,6 @@ class PowerMaxCommon(object): group_update_list.append({'group_id': group.id, 'updates': grp_update}) volume_update_list += vol_updates - - rep_mode = self.rep_config['mode'] - sync_vol_list, non_rep_vol_list, async_vol_list, metro_list = ( [], [], [], []) for volume in volumes: @@ -3400,7 +3513,7 @@ class PowerMaxCommon(object): device_id = self._find_device_on_array( volume, extra_specs) self._sync_check( - array, device_id, volume.name, extra_specs) + array, device_id, extra_specs) if rep_mode == utils.REP_SYNC: sync_vol_list.append(volume) elif rep_mode == utils.REP_ASYNC: @@ -3420,23 +3533,8 @@ class PowerMaxCommon(object): volume_update_list += vol_updates if len(sync_vol_list) > 0: - extra_specs = self._initial_setup(sync_vol_list[0]) - array = ast.literal_eval( - sync_vol_list[0].provider_location)['array'] - extra_specs[utils.ARRAY] = array - temp_grp_name = self.utils.get_temp_failover_grp_name( - self.rep_config) - self.provision.create_volume_group( - array, temp_grp_name, extra_specs) - device_ids = self._get_volume_device_ids(sync_vol_list, array) - self.masking.add_volumes_to_storage_group( - array, device_ids, temp_grp_name, extra_specs) - __, vol_updates = ( - self._failover_replication( - sync_vol_list, None, temp_grp_name, - secondary_backend_id=group_fo, host=True)) - volume_update_list += vol_updates - self.rest.delete_storage_group(array, temp_grp_name) + volume_update_list = self. 
_update_volume_list_from_sync_vol_list( + sync_vol_list, volume_update_list, group_fo) if len(metro_list) > 0: __, vol_updates = ( @@ -3453,9 +3551,35 @@ class PowerMaxCommon(object): volume_update_list.append({ 'volume_id': vol.id, 'updates': {'status': 'error'}}) + return volume_update_list, group_update_list - LOG.info("Failover host complete.") - return secondary_id, volume_update_list, group_update_list + def _update_volume_list_from_sync_vol_list( + self, sync_vol_list, volume_update_list, group_fo): + """Update the volume update list from the synced volume list + + :param sync_vol_list: synced volume list + :param volume_update_list: volume update list + :param group_fo: group fail over + :returns: volume_update_list + """ + extra_specs = self._initial_setup(sync_vol_list[0]) + array = ast.literal_eval( + sync_vol_list[0].provider_location)['array'] + extra_specs[utils.ARRAY] = array + temp_grp_name = self.utils.get_temp_failover_grp_name( + self.rep_config) + self.provision.create_volume_group( + array, temp_grp_name, extra_specs) + device_ids = self._get_volume_device_ids(sync_vol_list, array) + self.masking.add_volumes_to_storage_group( + array, device_ids, temp_grp_name, extra_specs) + __, vol_updates = ( + self._failover_replication( + sync_vol_list, None, temp_grp_name, + secondary_backend_id=group_fo, host=True)) + volume_update_list += vol_updates + self.rest.delete_storage_group(array, temp_grp_name) + return volume_update_list def get_remote_target_device(self, array, volume, device_id): """Get the remote target for a given volume. @@ -3513,68 +3637,9 @@ class PowerMaxCommon(object): if self.utils.is_metro_device(self.rep_config, extra_specs): allow_extend = False if allow_extend is True or ode_replication is True: - try: - (target_device, remote_array, rdf_group, - local_vol_state, pair_state) = ( - self.get_remote_target_device( - array, volume, device_id)) - rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) - lock_rdf_group = rdf_group - if not ode_replication: - # Volume must be removed from replication (storage) group - # before the replication relationship can be ended (cannot - # have a mix of replicated and non-replicated volumes as - # the SRDF groups become unmanageable) - lock_rdf_group = None - self.masking.remove_and_reset_members( - array, volume, device_id, volume_name, - extra_specs, False) - - # Repeat on target side - self.masking.remove_and_reset_members( - remote_array, volume, target_device, volume_name, - rep_extra_specs, False) - - LOG.info("Breaking replication relationship...") - self.provision.break_rdf_relationship( - array, device_id, target_device, rdf_group, - rep_extra_specs, pair_state) - - # Extend the target volume - LOG.info("Extending target volume...") - # Check to make sure the R2 device requires extending first... 
- r2_size = self.rest.get_size_of_device_on_array( - remote_array, target_device) - if int(r2_size) < int(new_size): - self.provision.extend_volume( - remote_array, target_device, new_size, - rep_extra_specs, lock_rdf_group) - - # Extend the source volume - LOG.info("Extending source volume...") - self.provision.extend_volume( - array, device_id, new_size, extra_specs, lock_rdf_group) - - if not ode_replication: - # Re-create replication relationship - LOG.info("Recreating replication relationship...") - self.setup_volume_replication( - array, volume, device_id, extra_specs, target_device) - - # Check if volume needs to be returned to volume group - if volume.group_id: - self._add_new_volume_to_volume_group( - volume, device_id, volume_name, extra_specs) - - except Exception as e: - exception_message = (_("Error extending volume. " - "Error received was %(e)s") % - {'e': e}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - message=exception_message) - + self._extend_with_or_without_ode_replication( + array, volume, device_id, ode_replication, volume_name, + new_size, extra_specs) else: exception_message = (_( "Extending a replicated volume is not permitted on this " @@ -3584,6 +3649,81 @@ class PowerMaxCommon(object): raise exception.VolumeBackendAPIException( message=exception_message) + def _extend_with_or_without_ode_replication( + self, array, volume, device_id, ode_replication, volume_name, + new_size, extra_specs): + """Extend a volume with or without Online Device Expansion + + :param array: the array serial number + :param volume: the volume object + :param device_id: the volume device id + :param ode_replication: Online device expansion + :param volume_name: the volume name + :param new_size: the new size the volume should be + :param extra_specs: extra specifications + """ + try: + (target_device, remote_array, rdf_group, + local_vol_state, pair_state) = ( + self.get_remote_target_device( + array, volume, device_id)) + rep_extra_specs = self._get_replication_extra_specs( + extra_specs, self.rep_config) + lock_rdf_group = rdf_group + if not ode_replication: + # Volume must be removed from replication (storage) group + # before the replication relationship can be ended (cannot + # have a mix of replicated and non-replicated volumes as + # the SRDF groups become unmanageable) + lock_rdf_group = None + self.masking.remove_and_reset_members( + array, volume, device_id, volume_name, + extra_specs, False) + + # Repeat on target side + self.masking.remove_and_reset_members( + remote_array, volume, target_device, volume_name, + rep_extra_specs, False) + + LOG.info("Breaking replication relationship...") + self.provision.break_rdf_relationship( + array, device_id, target_device, rdf_group, + rep_extra_specs, pair_state) + + # Extend the target volume + LOG.info("Extending target volume...") + # Check to make sure the R2 device requires extending first... 
+ r2_size = self.rest.get_size_of_device_on_array( + remote_array, target_device) + if int(r2_size) < int(new_size): + self.provision.extend_volume( + remote_array, target_device, new_size, + rep_extra_specs, lock_rdf_group) + + # Extend the source volume + LOG.info("Extending source volume...") + self.provision.extend_volume( + array, device_id, new_size, extra_specs, lock_rdf_group) + + if not ode_replication: + # Re-create replication relationship + LOG.info("Recreating replication relationship...") + self.setup_volume_replication( + array, volume, device_id, extra_specs, target_device) + + # Check if volume needs to be returned to volume group + if volume.group_id: + self._add_new_volume_to_volume_group( + volume, device_id, volume_name, extra_specs) + + except Exception as e: + exception_message = (_("Error extending volume. " + "Error received was %(e)s") % + {'e': e}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + message=exception_message) + def enable_rdf(self, array, volume, device_id, rdf_group_no, rep_config, target_name, remote_array, target_device, extra_specs): """Create a replication relationship with a target volume. @@ -3612,8 +3752,7 @@ class PowerMaxCommon(object): rep_extra_specs, False) # Check if volume is a copy session target - self._sync_check(array, device_id, target_name, - extra_specs, tgt_only=True) + self._sync_check(array, device_id, extra_specs, tgt_only=True) # Establish replication relationship rdf_dict = self.rest.create_rdf_device_pair( array, device_id, rdf_group_no, target_device, remote_array, @@ -3640,7 +3779,7 @@ class PowerMaxCommon(object): exception_message = (_("Remote replication failed with exception:" " %(e)s") % {'e': six.text_type(e)}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -3671,7 +3810,7 @@ class PowerMaxCommon(object): exception_message = (_("Failed to get or create replication " "group. Exception received: %(e)s") % {'e': six.text_type(e)}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -3783,7 +3922,7 @@ class PowerMaxCommon(object): exception_message = (_("Failed to create generic volume group:" " %(volGrpName)s.") % {'volGrpName': vol_grp_name}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -3872,36 +4011,55 @@ class PowerMaxCommon(object): LOG.error("Error deleting volume group." 
"Error received: %(e)s", {'e': e}) model_update = {'status': fields.GroupStatus.ERROR_DELETING} - # Update the volumes_model_update - if len(deleted_volume_device_ids) is not 0: - LOG.debug("Device ids: %(dev)s are deleted.", - {'dev': deleted_volume_device_ids}) - volumes_not_deleted = [] - for vol in volume_device_ids: - if vol not in deleted_volume_device_ids: - volumes_not_deleted.append(vol) - if not deleted_volume_device_ids: - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, deleted_volume_device_ids, - group.id, status='deleted') - if not volumes_not_deleted: - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, volumes_not_deleted, - group.id, status='error_deleting') - # As a best effort try to add back the undeleted volumes to sg - # Don't throw any exception in case of failure - try: - if not volumes_not_deleted: - self.masking.add_volumes_to_storage_group( - array, volumes_not_deleted, - vol_grp_name, interval_retries_dict) - except Exception as ex: - LOG.error("Error in rollback - %(ex)s. " - "Failed to add back volumes to sg %(sg_name)s", - {'ex': ex, 'sg_name': vol_grp_name}) + volumes_model_update = self._handle_delete_group_exception( + deleted_volume_device_ids, volume_device_ids, group.id, array, + vol_grp_name, interval_retries_dict, volumes_model_update) return model_update, volumes_model_update + def _handle_delete_group_exception( + self, deleted_volume_device_ids, volume_device_ids, group_id, + array, vol_grp_name, interval_retries_dict, volumes_model_update): + """Handle delete group exception and update volume model + + :param deleted_volume_device_ids: deleted volume device ids + :param volume_device_ids: volume device ids + :param group_id: group id + :param array: array serial number + :param vol_grp_name: volume group name + :param interval_retries_dict: intervals and retries dict + :param volumes_model_update: volume model update dict + :returns: volumes_model_update + """ + # Update the volumes_model_update + if deleted_volume_device_ids: + LOG.debug("Device ids: %(dev)s are deleted.", + {'dev': deleted_volume_device_ids}) + volumes_not_deleted = [] + for vol in volume_device_ids: + if vol not in deleted_volume_device_ids: + volumes_not_deleted.append(vol) + if not deleted_volume_device_ids: + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, deleted_volume_device_ids, + group_id, status='deleted') + if not volumes_not_deleted: + volumes_model_update = self.utils.update_volume_model_updates( + volumes_model_update, volumes_not_deleted, + group_id, status='error_deleting') + # As a best effort try to add back the undeleted volumes to sg + # Don't throw any exception in case of failure + try: + if not volumes_not_deleted: + self.masking.add_volumes_to_storage_group( + array, volumes_not_deleted, + vol_grp_name, interval_retries_dict) + except Exception as ex: + LOG.error("Error in rollback - %(ex)s. " + "Failed to add back volumes to sg %(sg_name)s", + {'ex': ex, 'sg_name': vol_grp_name}) + return volumes_model_update + def _cleanup_group_replication( self, array, vol_grp_name, volume_device_ids, extra_specs): """Cleanup remote replication. @@ -3964,7 +4122,7 @@ class PowerMaxCommon(object): "%(volGrpName)s. 
Exception received: %(e)s") % {'volGrpName': grp_id, 'e': six.text_type(e)}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -4189,7 +4347,7 @@ class PowerMaxCommon(object): " %(volGrpName)s. Exception: %(ex)s.") % {'volGrpName': group.id, 'ex': ex}) - LOG.exception(exception_message) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) @@ -4304,51 +4462,23 @@ class PowerMaxCommon(object): # Create the target devices list_volume_pairs = [] for volume in volumes: - src_dev_id, extra_specs, vol_size, tgt_vol_name = ( - self._get_clone_vol_info( - volume, source_vols, snapshots)) - volume_dict = self._create_volume( - tgt_vol_name, vol_size, extra_specs) - device_id = volume_dict['device_id'] - # Add the volume to the volume group SG - self.masking.add_volume_to_storage_group( - extra_specs[utils.ARRAY], device_id, tgt_name, - tgt_vol_name, extra_specs) - # Record relevant information - list_volume_pairs.append((src_dev_id, device_id)) - # Add details to rollback dict - rollback_dict['device_ids'].append(device_id) - rollback_dict['list_volume_pairs'].append( - (src_dev_id, device_id)) - rollback_dict['volumes'].append( - (device_id, extra_specs, volume)) - volumes_model_update.append( - self.utils.get_grp_volume_model_update( - volume, volume_dict, group.id)) + volumes_model_update, rollback_dict, list_volume_pairs = ( + self. _create_vol_and_add_to_group( + volume, group.id, tgt_name, rollback_dict, + source_vols, snapshots, list_volume_pairs, + volumes_model_update)) + + snap_name, rollback_dict = ( + self._create_group_replica_and_get_snap_name( + group.id, actual_source_grp, source_id, source_sg, + rollback_dict, create_snapshot)) - if create_snapshot is True: - # We have to create a snapshot of the source group - snap_name = self.utils.truncate_string(group.id, 19) - self._create_group_replica(actual_source_grp, snap_name) - rollback_dict['snap_name'] = snap_name - else: - # We need to check if the snapshot exists - snap_name = self.utils.truncate_string(source_id, 19) - if ('snapVXSnapshots' in source_sg and - snap_name in source_sg['snapVXSnapshots']): - LOG.info("Snapshot is present on the array") - else: - error_msg = ( - _("Cannot retrieve source snapshot %(snap_id)s " - "from the array.") % {'snap_id': source_id}) - LOG.error(error_msg) - raise exception.VolumeBackendAPIException( - message=error_msg) # Link and break the snapshot to the source group self.provision.link_and_break_replica( array, src_grp_name, tgt_name, snap_name, interval_retries_dict, list_volume_pairs, delete_snapshot=create_snapshot) + # Update the replication status if group.is_replicated: volumes_model_update = self._replicate_group( @@ -4370,6 +4500,77 @@ class PowerMaxCommon(object): return model_update, volumes_model_update + def _create_group_replica_and_get_snap_name( + self, group_id, actual_source_grp, source_id, source_sg, + rollback_dict, create_snapshot): + """Create group replica and get snap name + + :param group_id: the group id + :param actual_source_grp: the source group + :param source_id: source id + :param source_sg: source storage group + :param rollback_dict: rollback dict + :param create_snapshot: boolean + :returns: snap_name, rollback_dict + """ + if create_snapshot is True: + # We have to create a snapshot of the source group + snap_name = self.utils.truncate_string(group_id, 19) + self._create_group_replica(actual_source_grp, snap_name) + 
rollback_dict['snap_name'] = snap_name + else: + # We need to check if the snapshot exists + snap_name = self.utils.truncate_string(source_id, 19) + if ('snapVXSnapshots' in source_sg and + snap_name in source_sg['snapVXSnapshots']): + LOG.info("Snapshot is present on the array") + else: + error_msg = (_("Cannot retrieve source snapshot %(snap_id)s " + "from the array.") % {'snap_id': source_id}) + LOG.error(error_msg) + raise exception.VolumeBackendAPIException( + message=error_msg) + return snap_name, rollback_dict + + def _create_vol_and_add_to_group( + self, volume, group_id, tgt_name, rollback_dict, source_vols, + snapshots, list_volume_pairs, volumes_model_update): + """Create a volume and add it to the volume group. + + :param volume: volume object + :param group_id: the group id + :param tgt_name: target name + :param rollback_dict: rollback dict + :param source_vols: source volumes + :param snapshots: snapshot objects + :param list_volume_pairs: volume pairs list + :param volumes_model_update: volume model update + :returns: volumes_model_update, rollback_dict, list_volume_pairs + """ + + src_dev_id, extra_specs, vol_size, tgt_vol_name = ( + self._get_clone_vol_info( + volume, source_vols, snapshots)) + volume_dict = self._create_volume( + tgt_vol_name, vol_size, extra_specs) + device_id = volume_dict['device_id'] + # Add the volume to the volume group SG + self.masking.add_volume_to_storage_group( + extra_specs[utils.ARRAY], device_id, tgt_name, + tgt_vol_name, extra_specs) + # Record relevant information + list_volume_pairs.append((src_dev_id, device_id)) + # Add details to rollback dict + rollback_dict['device_ids'].append(device_id) + rollback_dict['list_volume_pairs'].append( + (src_dev_id, device_id)) + rollback_dict['volumes'].append( + (device_id, extra_specs, volume)) + volumes_model_update.append( + self.utils.get_grp_volume_model_update( + volume, volume_dict, group_id)) + return volumes_model_update, rollback_dict, list_volume_pairs + def _get_clone_vol_info(self, volume, source_vols, snapshots): """Get the clone volume info. @@ -4768,7 +4969,7 @@ class PowerMaxCommon(object): exception_message = (_( "Failed to revert the volume to the snapshot")) raise exception.VolumeDriverException(message=exception_message) - self._sync_check(array, sourcedevice_id, volume.name, extra_specs) + self._sync_check(array, sourcedevice_id, extra_specs) try: LOG.info("Reverting device: %(deviceid)s " "to snapshot: %(snapname)s.", diff --git a/cinder/volume/drivers/dell_emc/powermax/metadata.py b/cinder/volume/drivers/dell_emc/powermax/metadata.py index 9818a015551..0a013352262 100644 --- a/cinder/volume/drivers/dell_emc/powermax/metadata.py +++ b/cinder/volume/drivers/dell_emc/powermax/metadata.py @@ -58,12 +58,12 @@ class PowerMaxVolumeMetadata(object): self.utils = utils.PowerMaxUtils() self.volume_trace_list = [] self.is_debug = is_debug - self.vmax_driver_version = version + self.powermax_driver_version = version def _update_platform(self): """Update the platform.""" try: - self.version_dict['platform'] = platform.platform() + self.version_dict['openstack_platform'] = platform.platform() except Exception as ex: LOG.warning("Unable to determine the platform. " "Exception is %s.", ex) @@ -102,14 +102,6 @@ class PowerMaxVolumeMetadata(object): """ return version.version_info.version - @staticmethod - def _get_version_info_release(): - """Gets the release. 
- - :returns: string -- release - """ - return version.version_info.release - def _update_info_from_version_info(self): """Update class variables from version info.""" try: @@ -119,13 +111,6 @@ class PowerMaxVolumeMetadata(object): except Exception as ex: LOG.warning("Unable to get version info. " "Exception is %s.", ex) - try: - rel = self._get_version_info_release() - if rel: - self.version_dict['openstack_release'] = rel - except Exception as ex: - LOG.warning("Unable to get release info. " - "Exception is %s.", ex) def _update_openstack_info(self): """Update openstack info.""" @@ -136,20 +121,22 @@ class PowerMaxVolumeMetadata(object): # Some distributions override with more meaningful information self._update_info_from_version_info() - def _update_vmax_info(self, serial_number): + def _update_array_info(self, serial_number): """Update PowerMax/VMAX info. :param serial_number: the serial number of the array """ - vmax_u4v_version_dict = ( + u4p_version_dict = ( self.rest.get_unisphere_version()) - self.version_dict['unisphere_version'] = ( - vmax_u4v_version_dict['version']) + self.version_dict['unisphere_for_powermax_version'] = ( + u4p_version_dict['version']) self.version_dict['serial_number'] = serial_number - vmax_info_dict = self.rest.get_array_serial(serial_number) - self.version_dict['vmax_firmware_version'] = vmax_info_dict['ucode'] - self.version_dict['vmax_model'] = vmax_info_dict['model'] - self.version_dict['vmax_driver_version'] = self.vmax_driver_version + array_info_dict = self.rest.get_array_serial(serial_number) + self.version_dict['storage_firmware_version'] = ( + array_info_dict['ucode']) + self.version_dict['storage_model'] = array_info_dict['model'] + self.version_dict['powermax_cinder_driver_version'] = ( + self.powermax_driver_version) @debug_required def gather_version_info(self, serial_number): @@ -160,7 +147,7 @@ class PowerMaxVolumeMetadata(object): """ try: self._update_openstack_info() - self._update_vmax_info(serial_number) + self._update_array_info(serial_number) self.print_pretty_table(self.version_dict) except Exception as ex: LOG.warning("Unable to gather version info. " @@ -169,11 +156,11 @@ class PowerMaxVolumeMetadata(object): @debug_required def gather_volume_info( - self, volume_id, operation, append, **kwargs): + self, volume_id, successful_operation, append, **kwargs): """Gather volume information. 
:param volume_id: the unique volume id key - :param operation: the operation e.g "create" + :param successful_operation: the operation e.g "create" :param append: append flag :param kwargs: variable length argument list :returns: datadict @@ -183,7 +170,7 @@ class PowerMaxVolumeMetadata(object): datadict = {} try: volume_trace_dict = self._fill_volume_trace_dict( - volume_id, operation, append, **kwargs) + volume_id, successful_operation, append, **kwargs) volume_trace_dict['volume_updated_time'] = ( datetime.datetime.fromtimestamp( int(time.time())).strftime('%Y-%m-%d %H:%M:%S')) @@ -204,11 +191,11 @@ class PowerMaxVolumeMetadata(object): return datadict def _fill_volume_trace_dict( - self, volume_id, operation, append, **kwargs): + self, volume_id, successful_operation, append, **kwargs): """Populates a dictionary with key value pairs :param volume_id: the unique volume id key - :param operation: the operation e.g "create" + :param successful_operation: the operation e.g "create" :param append: append flag :param kwargs: variable length argument list :returns: my_volume_trace_dict @@ -309,18 +296,18 @@ class PowerMaxVolumeMetadata(object): initiator_group, port_group = None, None if is_multiattach: - operation = 'multi_attach' + successful_operation = 'multi_attach' mv_list = masking_view_dict['mv_list'] sg_list = masking_view_dict['sg_list'] else: - operation = 'attach' + successful_operation = 'attach' child_storage_group = masking_view_dict[utils.SG_NAME] parent_storage_group = masking_view_dict[utils.PARENT_SG_NAME] initiator_group = masking_view_dict[utils.IG_NAME] port_group = masking_view_dict[utils.PORTGROUPNAME] datadict = self.gather_volume_info( - volume.id, operation, False, + volume.id, successful_operation, False, serial_number=extra_specs[utils.ARRAY], service_level=extra_specs[utils.SLO], workload=extra_specs[utils.WORKLOAD], srp=extra_specs[utils.SRP], @@ -394,23 +381,23 @@ class PowerMaxVolumeMetadata(object): @debug_required def capture_snapshot_info( - self, source, extra_specs, operation, last_ss_name): + self, source, extra_specs, successful_operation, last_ss_name): """Captures snapshot info in volume metadata :param source: the source volume object :param extra_specs: extra specifications - :param operation: snapshot operation + :param successful_operation: snapshot operation :param last_ss_name: the last snapshot name """ if isinstance(source, volume.Volume): - if 'create' in operation: + if 'create' in successful_operation: snapshot_count = six.text_type(len(source.snapshots)) else: snapshot_count = six.text_type(len(source.snapshots) - 1) default_sg = ( self.utils.derive_default_sg_from_extra_specs(extra_specs)) datadict = self.gather_volume_info( - source.id, operation, False, + source.id, successful_operation, False, volume_size=source.size, default_sg_name=default_sg, serial_number=extra_specs[utils.ARRAY], @@ -459,7 +446,7 @@ class PowerMaxVolumeMetadata(object): @debug_required def capture_create_volume( self, device_id, volume, group_name, group_id, extra_specs, - rep_info_dict, operation, source_snapshot_id): + rep_info_dict, successful_operation, source_snapshot_id): """Captures create volume info in volume metadata :param device_id: device id @@ -468,7 +455,7 @@ class PowerMaxVolumeMetadata(object): :param group_id: group id :param extra_specs: additional info :param rep_info_dict: information gathered from replication - :param operation: the type of create operation + :param successful_operation: the type of create operation :param source_snapshot_id: 
the source snapshot id :returns: volume_metadata dict """ @@ -490,7 +477,7 @@ class PowerMaxVolumeMetadata(object): default_sg = self.utils.derive_default_sg_from_extra_specs( extra_specs, rep_mode) datadict = self.gather_volume_info( - volume.id, operation, True, volume_size=volume.size, + volume.id, successful_operation, True, volume_size=volume.size, device_id=device_id, default_sg_name=default_sg, serial_number=extra_specs[utils.ARRAY], @@ -517,23 +504,17 @@ class PowerMaxVolumeMetadata(object): @debug_required def gather_replication_info( - self, rdf_group_no=None, - target_name=None, remote_array=None, - target_device_id=None, replication_status=None, - rep_mode=None, rdf_group_label=None): + self, volume_id, successful_operation, append, **kwargs): """Gathers replication information - :param rdf_group_no: RDF group number - :param target_name: target volume name - :param remote_array: remote array serialnumber - :param target_device_id: target device id - :param replication_status: replication status - :param rep_mode: replication mode, sync, async, metro - :param rdf_group_label: rdf group label + :param volume_id: volume id + :param successful_operation: the successful operation type + :param append: boolean + :param **kwargs: variable length of arguments :returns: rep_dict """ - param_dict = locals() - return self._fill_volume_trace_dict(param_dict) + return self._fill_volume_trace_dict( + volume_id, successful_operation, append, **kwargs) @debug_required def capture_failover_volume( @@ -586,7 +567,7 @@ class PowerMaxVolumeMetadata(object): :param device_id: the PowerMax/VMAX device id :param extra_specs: the extra specs """ - operation = "manage_existing_volume" + successful_operation = "manage_existing_volume" rdf_group_no, target_name, remote_array, target_device_id = ( None, None, None, None) rep_mode, replication_status, rdf_group_label = ( @@ -603,7 +584,7 @@ class PowerMaxVolumeMetadata(object): default_sg = self.utils.derive_default_sg_from_extra_specs( extra_specs, rep_mode) datadict = self.gather_volume_info( - volume.id, operation, True, volume_size=volume.size, + volume.id, successful_operation, True, volume_size=volume.size, device_id=device_id, default_sg_name=default_sg, serial_number=extra_specs[utils.ARRAY], @@ -641,9 +622,9 @@ class PowerMaxVolumeMetadata(object): :param rep_mode: replication mode :param is_compression_disabled: compression disabled flag """ - operation = "retype" + successful_operation = "retype" datadict = self.gather_volume_info( - volume.id, operation, False, volume_size=volume.size, + volume.id, successful_operation, False, volume_size=volume.size, device_id=device_id, target_sg_name=target_sg_name, serial_number=array, diff --git a/cinder/volume/drivers/dell_emc/powermax/rest.py b/cinder/volume/drivers/dell_emc/powermax/rest.py index bea6249038a..86bc65bb8dd 100644 --- a/cinder/volume/drivers/dell_emc/powermax/rest.py +++ b/cinder/volume/drivers/dell_emc/powermax/rest.py @@ -23,8 +23,9 @@ from oslo_utils import units import requests import requests.auth import requests.exceptions as r_exc -import requests.packages.urllib3.exceptions as urllib_exp +# pylint: disable=E0401 import requests.packages.urllib3.util.retry as requests_retry + import six from cinder import coordination @@ -33,8 +34,6 @@ from cinder.i18n import _ from cinder.utils import retry from cinder.volume.drivers.dell_emc.powermax import utils -requests.packages.urllib3.disable_warnings(urllib_exp.InsecureRequestWarning) - LOG = logging.getLogger(__name__) SLOPROVISIONING = 
'sloprovisioning' REPLICATION = 'replication' @@ -896,9 +895,9 @@ class PowerMaxRest(object): if vol_details: vol_identifier = vol_details.get('volume_identifier', None) LOG.debug('Element name = %(en)s, Vol identifier = %(vi)s, ' - 'Device id = %(di)s, vol details = %(vd)s', + 'Device id = %(di)s', {'en': element_name, 'vi': vol_identifier, - 'di': device_id, 'vd': vol_details}) + 'di': device_id}) if vol_identifier == element_name: found_device_id = device_id elif name_id: