diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py index 6bde6bf5a5c..03a0c475789 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py @@ -76,6 +76,7 @@ LUN_NAME = 'fake-lun-name' DEST_LUN_NAME = 'new-fake-lun-name' FILE_NAME = 'fake-file-name' DEST_FILE_NAME = 'new-fake-file-name' +FAKE_UUID = 'b32bab78-82be-11ec-a8a3-0242ac120002' FAKE_QUERY = {'volume-attributes': None} @@ -646,6 +647,7 @@ AGGR_INFO_SSC = { AGGR_SIZE_TOTAL = 107374182400 AGGR_SIZE_AVAILABLE = 59055800320 AGGR_USED_PERCENT = 45 +AGGR_SIZE_USED = 58888957952 AGGR_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" @@ -720,7 +722,6 @@ VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" 'volume': VOLUME_GET_ITER_CAPACITY_ATTR_STR, }) - VOLUME_GET_ITER_STYLE_RESPONSE = etree.XML(""" 3 @@ -1349,6 +1350,8 @@ SM_SOURCE_VSERVER = 'fake_source_vserver' SM_SOURCE_VOLUME = 'fake_source_volume' SM_DEST_VSERVER = 'fake_destination_vserver' SM_DEST_VOLUME = 'fake_destination_volume' +IGROUP_NAME = 'openstack-d9b4194f-5f65-4952-fake-26c911f1e4b2' +LUN_NAME_PATH = '/vol/volume-fake/lun-path-fake-1234' CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML(""" @@ -1582,6 +1585,136 @@ DESTROY_FILE_COPY_RESPONSE = etree.XML(""" """) +VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST = { + "uuid": "2407b637-119c-11ec-a4fb", + "language": "c.utf_8", + "name": VOLUME_NAMES[0], + "style": "flexgroup", + "is_svm_root": False, + "type": "rw", + "aggregates": [ + { + "name": VOLUME_AGGREGATE_NAMES[0] + } + ], + "error_state": { + "is_inconsistent": False + }, + "nas": { + "path": '/' + VOLUME_NAMES[0] + }, + "snapshot_policy": { + "name": "default", + "uuid": "e7b0f455-fc15-11ea-b64a" + }, + "svm": { + "name": VOLUME_VSERVER_NAME + }, + "space": { + "size": 12345, + "snapshot": { + "reserve_percent": 5 + } + }, + "qos": { + "policy": { + "name": "fake_qos_policy_group_name" + } + }, + "guarantee": { + "type": "none", + "honored": True + }, + "_links": { + "self": { + "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb" + } + } +} + +VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP_REST = { + "records": [ + VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST, + ], + "num_records": 1, + "_links": { + "self": { + "href": "/api/storage/volumes" + } + } +} + +VOLUME_GET_ITER_SSC_RESPONSE_STR_REST = { + "uuid": "2407b637-119c-11ec-a4fb", + "language": "c.utf_8", + "name": VOLUME_NAMES[0], + "style": "flexvol", + "is_svm_root": False, + "type": "rw", + "aggregates": [ + { + "name": VOLUME_AGGREGATE_NAMES[0] + } + ], + "error_state": { + "is_inconsistent": False + }, + "nas": { + "path": '/' + VOLUME_NAMES[0] + }, + "snapshot_policy": { + "name": "default", + "uuid": "e7b0f455-fc15-11ea-b64a" + }, + "svm": { + "name": VOLUME_VSERVER_NAME + }, + "space": { + "size": 12345, + "snapshot": { + "reserve_percent": 5 + } + }, + "qos": { + "policy": { + "name": "fake_qos_policy_group_name" + } + }, + "guarantee": { + "type": "none", + "honored": True + }, + "efficiency": { + "compression": "none", + "dedupe": "none", + "cross_volume_dedupe": "none", + "compaction": "none", + "schedule": "-", + "volume_path": "/vol/" + VOLUME_NAMES[0], + "state": "disabled", + "policy": { + "name": "-" + } + }, + "_links": { + "self": { + "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb" + } + } +} + +VOLUME_GET_ITER_SSC_RESPONSE_REST = { + "records": [ + VOLUME_GET_ITER_SSC_RESPONSE_STR_REST, + ], + "num_records": 1, + 
"_links": { + "self": { + "href": "/api/storage/volumes" + } + } +} + VOLUME_GET_ITER_RESPONSE_LIST_REST = [ { "uuid": "2407b637-119c-11ec-a4fb-00a0b89c9a78", @@ -1617,6 +1750,47 @@ VOLUME_GET_ITER_RESPONSE_LIST_REST = [ } ] +VOLUME_GET_ITER_LIST_RESPONSE_REST = { + "records": [ + VOLUME_GET_ITER_RESPONSE_LIST_REST[0], + VOLUME_GET_ITER_RESPONSE_LIST_REST[1], + ], + "num_records": 2, + "_links": { + "self": { + "href": "/api/storage/volumes" + } + } +} + +VOLUME_ITEM_SIMPLE_RESPONSE_REST = { + "uuid": "2407b637-119c-11ec-a4fb-00a0b89c9a78", + "name": VOLUME_NAMES[0], + "style": 'flexvol', + "_links": { + "self": { + "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb-00a0b89c9a78" + } + } +} + +VOLUME_LIST_SIMPLE_RESPONSE_REST = { + "records": [ + VOLUME_ITEM_SIMPLE_RESPONSE_REST + ], + "num_records": 1, + "_links": { + "self": { + "href": "/api/storage/volumes" + } + } +} + +NO_RECORDS_RESPONSE_REST = { + "records": [], + "num_records": 0, +} + VOLUME_GET_ITER_RESPONSE_REST_PAGE = { "records": [ VOLUME_GET_ITER_RESPONSE_LIST_REST[0], @@ -1665,16 +1839,680 @@ INVALID_GET_ITER_RESPONSE_NO_NUM_RECORDS_REST = { "records": [], } -NO_RECORDS_RESPONSE_REST = { - "records": [], - "num_records": 0, +VOLUME_GET_ITER_STYLE_RESPONSE_REST = { + "records": [ + { + "style": "flexgroup", + }, + ], + "num_records": 1, +} + +VOLUME_FLEXGROUP_STYLE_REST = \ + VOLUME_GET_ITER_STYLE_RESPONSE_REST["records"][0] + +VOLUME_GET_ITER_SAME_STYLE_RESPONSE_REST = { + "records": [ + { + "style": "flexvol", + }, + { + "style": "flexvol", + }, + { + "style": "flexvol", + }, + ], + "num_records": 3, +} + +GET_NUM_RECORDS_RESPONSE_REST = { + "num_records": 1, +} + +AGGR_GET_ITER_RESPONSE_REST = { + "records": [ + { + "uuid": "6aad2b76-a069-47e9-93ee-e501ebf2cdd2", + "name": VOLUME_AGGREGATE_NAMES[1], + "node": { + "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", + "name": NODE_NAME + }, + "home_node": { + "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", + "name": NODE_NAME + }, + "snapshot": { + "files_total": 0, + "files_used": 0, + "max_files_available": 0, + "max_files_used": 0 + }, + "space": { + "footprint": 58491584512, + "footprint_percent": 4, + "block_storage": { + "size": AGGR_SIZE_TOTAL, + "available": AGGR_SIZE_AVAILABLE, + "used": AGGR_SIZE_USED, + "inactive_user_data": 0, + "inactive_user_data_percent": 0, + "full_threshold_percent": 98, + "physical_used": 7706808320, + "physical_used_percent": 1, + "aggregate_metadata": 397373440, + "aggregate_metadata_percent": 0, + "used_including_snapshot_reserve": 58888957952, + "used_including_snapshot_reserve_percent": 4, + "data_compacted_count": 0, + "data_compaction_space_saved": 0, + "data_compaction_space_saved_percent": 0, + "volume_deduplication_shared_count": 0, + "volume_deduplication_space_saved": 0, + "volume_deduplication_space_saved_percent": 0 + }, + "snapshot": { + "used_percent": 0, + "available": 0, + "total": 0, + "used": 0, + "reserve_percent": 0 + }, + "cloud_storage": { + "used": 0 + }, + "efficiency": { + "savings": 0, + "ratio": 1, + "logical_used": 117510144 + }, + "efficiency_without_snapshots": { + "savings": 0, + "ratio": 1, + "logical_used": 9617408 + }, + "efficiency_without_snapshots_flexclones": { + "savings": 0, + "ratio": 1, + "logical_used": 9617408 + } + }, + "state": "online", + "snaplock_type": "non_snaplock", + "create_time": "2020-09-21T14:45:11+00:00", + "data_encryption": { + "software_encryption_enabled": False, + "drive_protection_enabled": False + }, + "block_storage": { + "primary": { + "disk_count": 1, + "disk_class": 
"virtual", + "raid_type": "raid0", + "raid_size": 8, + "checksum_style": "advanced_zoned", + "disk_type": "vm_disk" + }, + "hybrid_cache": { + "enabled": False + }, + "mirror": { + "enabled": False, + "state": "unmirrored" + }, + "plexes": [ + { + "name": "plex0" + } + ], + "storage_type": "hdd" + }, + "cloud_storage": { + "attach_eligible": True + }, + "inactive_data_reporting": { + "enabled": False + }, + "metric": { + "timestamp": "2021-12-21T13:25:15Z", + "duration": "PT15S", + "status": "ok", + "throughput": { + "read": 0, + "write": 13107, + "other": 0, + "total": 13107 + }, + "latency": { + "read": 0, + "write": 2659, + "other": 0, + "total": 2659 + }, + "iops": { + "read": 0, + "write": 0, + "other": 0, + "total": 0 + } + }, + "statistics": { + "timestamp": "2021-12-21T13:25:21Z", + "status": "ok", + "throughput_raw": { + "read": 3699994624, + "write": 111813349376, + "other": 0, + "total": 115513344000 + }, + "latency_raw": { + "read": 1884163936, + "write": 9308463160, + "other": 0, + "total": 11192627096 + }, + "iops_raw": { + "read": 242498, + "write": 4871034, + "other": 0, + "total": 5113532 + } + } + }, + { + "uuid": "ad20dafb-1dcb-483a-b457-012ae9225062", + "name": VOLUME_AGGREGATE_NAMES[0], + "node": { + "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", + "name": NODE_NAME + }, + "home_node": { + "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", + "name": NODE_NAME + }, + "snapshot": { + "files_total": 0, + "files_used": 0, + "max_files_available": 0, + "max_files_used": 0 + }, + "space": { + "footprint": 172316893184, + "footprint_percent": 14, + "block_storage": { + "size": 1271819509760, + "available": 1099709939712, + "used": 172109570048, + "inactive_user_data": 0, + "inactive_user_data_percent": 0, + "full_threshold_percent": 98, + "physical_used": 27038863360, + "physical_used_percent": 2, + "aggregate_metadata": 0, + "aggregate_metadata_percent": 0, + "used_including_snapshot_reserve": 172109570048, + "used_including_snapshot_reserve_percent": 14, + "data_compacted_count": 0, + "data_compaction_space_saved": 0, + "data_compaction_space_saved_percent": 0, + "volume_deduplication_shared_count": 0, + "volume_deduplication_space_saved": 0, + "volume_deduplication_space_saved_percent": 0 + }, + "snapshot": { + "used_percent": 0, + "available": 0, + "total": 0, + "used": 0, + "reserve_percent": 0 + }, + "cloud_storage": { + "used": 0 + }, + "efficiency": { + "savings": 74937720832, + "ratio": 9.238858947247071, + "logical_used": 84033363968 + }, + "efficiency_without_snapshots": { + "savings": 0, + "ratio": 1, + "logical_used": 7005036544 + }, + "efficiency_without_snapshots_flexclones": { + "savings": 0, + "ratio": 1, + "logical_used": 7005036544 + } + }, + "state": "online", + "snaplock_type": "non_snaplock", + "create_time": "2020-09-21T14:44:51+00:00", + "data_encryption": { + "software_encryption_enabled": False, + "drive_protection_enabled": False + }, + "block_storage": { + "primary": { + "disk_count": 1, + "disk_class": "virtual", + "raid_type": "raid0", + "raid_size": 8, + "checksum_style": "advanced_zoned", + "disk_type": "vm_disk" + }, + "hybrid_cache": { + "enabled": False + }, + "mirror": { + "enabled": False, + "state": "unmirrored" + }, + "plexes": [ + { + "name": "plex0" + } + ], + "storage_type": "hdd" + }, + "cloud_storage": { + "attach_eligible": True + }, + "inactive_data_reporting": { + "enabled": False + }, + "metric": { + "timestamp": "2021-12-21T13:25:15Z", + "duration": "PT15S", + "status": "ok", + "throughput": { + "read": 0, + "write": 27033, + 
"other": 0, + "total": 27033 + }, + "latency": { + "read": 0, + "write": 1173, + "other": 0, + "total": 1173 + }, + "iops": { + "read": 0, + "write": 0, + "other": 0, + "total": 0 + } + }, + "statistics": { + "timestamp": "2021-12-21T13:25:21Z", + "status": "ok", + "throughput_raw": { + "read": 5740912640, + "write": 132358234112, + "other": 0, + "total": 138099146752 + }, + "latency_raw": { + "read": 15095876198, + "write": 12140289450, + "other": 0, + "total": 27236165648 + }, + "iops_raw": { + "read": 535930, + "write": 6011240, + "other": 0, + "total": 6547170 + } + } + } + ], + "num_records": 2 +} + +LUN_GET_ITER_REST = { + "records": [ + { + "uuid": "bd6baab3-4842-45b6-b627-45b305ed2e84", + "svm": { + "uuid": "fake-uuid", + "name": "vserver-name", + }, + "name": "/vol/nahim_dev_vol01/volume-fake-uuid", + "location": { + "logical_unit": "volume-fake-uuid", + "node": { + "name": "node-name", + "uuid": "fake-uuid", + }, + "volume": { + "uuid": "fake-uuid", + "name": "nahim_dev_vol01", + } + }, + "auto_delete": False, + "class": "regular", + "create_time": "2021-12-09T14:07:31+00:00", + "enabled": True, + "lun_maps": [ + { + "logical_unit_number": 0, + "igroup": { + "uuid": "fake-uuid", + "name": "openstack-fake-uuid", + }, + } + ], + "os_type": "linux", + "serial_number": "ZlAFA?QMnBdX", + "space": { + "scsi_thin_provisioning_support_enabled": False, + "size": 10737418240, + "used": 3474366464, + "guarantee": { + "requested": False, + "reserved": False + } + }, + "status": { + "container_state": "online", + "mapped": True, + "read_only": False, + "state": "online" + }, + "vvol": { + "is_bound": False + }, + "metric": { + "timestamp": "2021-12-23T20:36:00Z", + "duration": "PT15S", + "status": "ok", + "throughput": { + "read": 0, + "write": 0, + "other": 0, + "total": 0 + }, + "iops": { + "read": 0, + "write": 0, + "other": 0, + "total": 0 + }, + "latency": { + "read": 0, + "write": 0, + "other": 0, + "total": 0 + } + }, + "statistics": { + "timestamp": "2021-12-23T20:36:02Z", + "status": "ok", + "throughput_raw": { + "read": 1078230528, + "write": 3294724096, + "other": 0, + "total": 4372954624 + }, + "iops_raw": { + "read": 16641, + "write": 51257, + "other": 59, + "total": 67957 + }, + "latency_raw": { + "read": 2011655, + "write": 1235068755, + "other": 1402, + "total": 1237081812 + } + }, + }, + { + "uuid": "dff549b8-fabe-466b-8608-871a6493b492", + "svm": { + "uuid": "fake-uuid", + "name": "vserver-name", + "_links": { + "self": { + "href": "/api/svm/svms/fake-uuid" + } + } + }, + "name": "/vol/nahim_dev_vol01/volume-fake-uuid", + "location": { + "logical_unit": "volume-fake-uuid", + "node": { + "name": "node-name", + "uuid": "fake-uuid", + "_links": { + "self": { + "href": "/api/cluster/nodes/fake-uuid" + } + } + }, + "volume": { + "uuid": "fake-uuid", + "name": "nahim_dev_vol01", + "_links": { + "self": { + "href": "/api/storage/volumes/fake-uuid" + } + } + } + }, + "auto_delete": False, + "class": "regular", + "create_time": "2021-12-14T18:12:38+00:00", + "enabled": True, + "os_type": "linux", + "serial_number": "ZlAFA?QMnBdf", + "space": { + "scsi_thin_provisioning_support_enabled": False, + "size": 5368709120, + "used": 0, + "guarantee": { + "requested": False, + "reserved": False + } + }, + "status": { + "container_state": "online", + "mapped": False, + "read_only": False, + "state": "online" + }, + "vvol": { + "is_bound": False + }, + } + ], + "num_records": 2, +} + +LUN_GET_ITER_RESULT = [ + { + 'Vserver': LUN_GET_ITER_REST['records'][0]['svm']['name'], + 'Volume': + 
LUN_GET_ITER_REST['records'][0]['location']['volume']['name'], + 'Size': LUN_GET_ITER_REST['records'][0]['space']['size'], + 'Qtree': (LUN_GET_ITER_REST['records'][0]['location'] + .get('qtree', {}).get('name', '')), + 'Path': LUN_GET_ITER_REST['records'][0]['name'], + 'OsType': LUN_GET_ITER_REST['records'][0]['os_type'], + 'SpaceReserved': + LUN_GET_ITER_REST['records'][0]['space']['guarantee']['requested'], + 'UUID': LUN_GET_ITER_REST['records'][0]['uuid'], + }, + { + 'Vserver': LUN_GET_ITER_REST['records'][1]['svm']['name'], + 'Volume': + LUN_GET_ITER_REST['records'][1]['location']['volume']['name'], + 'Size': LUN_GET_ITER_REST['records'][1]['space']['size'], + 'Qtree': (LUN_GET_ITER_REST['records'][1]['location'] + .get('qtree', {}).get('name', '')), + 'Path': LUN_GET_ITER_REST['records'][1]['name'], + 'OsType': LUN_GET_ITER_REST['records'][1]['os_type'], + 'SpaceReserved': + LUN_GET_ITER_REST['records'][1]['space']['guarantee']['requested'], + 'UUID': LUN_GET_ITER_REST['records'][1]['uuid'], + }, +] + +FILE_DIRECTORY_GET_ITER_REST = { + "_links": { + "next": { + "href": "/api/resourcelink" + }, + "self": { + "href": "/api/resourcelink" + } + }, + "num_records": 2, + "records": [ + { + "_links": { + "metadata": { + "href": "/api/resourcelink" + }, + "self": { + "href": "/api/resourcelink" + } + }, + "name": "test_file", + "path": "d1/d2/d3", + "size": 200, + "type": "file" + }, + { + "_links": { + "metadata": { + "href": "/api/resourcelink" + }, + "self": { + "href": "/api/resourcelink" + } + }, + "name": "test_file_2", + "path": "d1/d2/d3", + "size": 250, + "type": "file" + } + ] +} + +FILE_DIRECTORY_GET_ITER_RESULT_REST = [ + { + 'name': FILE_DIRECTORY_GET_ITER_REST['records'][0]['name'], + 'file-size': float(FILE_DIRECTORY_GET_ITER_REST['records'][0]['size']) + }, + { + 'name': FILE_DIRECTORY_GET_ITER_REST['records'][1]['name'], + 'file-size': float(FILE_DIRECTORY_GET_ITER_REST['records'][1]['size']) + } +] + +LUN_GET_MOVEMENT_REST = { "_links": { "self": { - "href": "/api/cluster/nodes" + "href": "/api/resourcelink" + } + }, + "name": "/vol/volume1/qtree1/lun1", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", + "movement": { + "progress": { + "elapsed": 0, + "failure": { + "arguments": [ + { + "code": "string", + "message": "string" + } + ], + "code": "4", + "message": "entry doesn't exist", + "target": "uuid" + }, + "percent_complete": 0, + "state": "preparing", + "volume_snapshot_blocked": True } } } +LUN_GET_COPY_REST = { + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + "name": "/vol/volume1/qtree1/lun1", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", + "copy": { + "source": { + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + "progress": { + "elapsed": 0, + "failure": { + "arguments": [ + { + "code": "string", + "message": "string" + } + ], + "code": "4", + "message": "entry doesn't exist", + "target": "uuid" + }, + "percent_complete": 0, + "state": "preparing", + "volume_snapshot_blocked": True + }, + } + }, +} + +VOLUME_GET_ITER_STATE_RESPONSE_REST = { + "records": [ + { + "uuid": "c19aef05-ac60-4211-9fe4-3ef8c8816c83", + "name": "fake_volume", + "state": VOLUME_STATE_ONLINE, + "style": "flexvol", + "nas": { + "path": "/fake/vol" + }, + } + ], + "num_records": 1, +} + +GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE_REST = { + 'records': [ + { + 'uuid': 'fake_uuid_1', + 'name': 'vserver_name', + 'ip': {'address': '1.2.3.4'}, + 'state': 'up' + }, + { + 'uuid': 'fake_uuid_2', + 'name': 'vserver_name', + 'ip': {'address': '99.98.97.96'}, + 'state': 
'up' + } + ], + 'num_records': 2 +} + ERROR_RESPONSE_REST = { "error": { "code": 1100, @@ -1699,3 +2537,387 @@ JOB_RESPONSE_REST = { } } } + +VSERVER_DATA_LIST_RESPONSE_REST = { + 'records': [ + { + 'name': VSERVER_NAME + }, + { + 'name': VSERVER_NAME_2 + } + ], + 'num_records': 2, +} + +PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST = { + 'name': 'wafl', + 'counter_schemas': [ + { + 'name': 'cp_phase_times', + 'description': 'Array of percentage time spent in different phases' + + ' of Consistency Point (CP).', + 'type': 'percent', + 'unit': 'percent', + 'denominator': { + 'name': 'total_cp_msecs' + } + } + ], +} + +PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST = [ + 'cp_setup', 'cp_pre_p0', 'cp_p0_snap_del', 'cp_p1_clean', 'cp_p1_quota', + 'cp_ipu_disk_add', 'cp_p2v_inofile', 'cp_p2v_ino_pub', 'cp_p2v_ino_pri', + 'cp_p2v_fsinfo', 'cp_p2v_dlog1', 'cp_p2v_dlog2', 'cp_p2v_refcount', + 'cp_p2v_topaa', 'cp_p2v_df_scores_sub', 'cp_p2v_bm', 'cp_p2v_snap', + 'cp_p2v_df_scores', 'cp_p2v_volinfo', 'cp_p2v_cont', 'cp_p2a_inofile', + 'cp_p2a_ino', 'cp_p2a_dlog1', 'cp_p2a_hya', 'cp_p2a_dlog2', + 'cp_p2a_fsinfo', 'cp_p2a_ipu_bitmap_grow', 'cp_p2a_refcount', + 'cp_p2a_topaa', 'cp_p2a_hyabc', 'cp_p2a_bm', 'cp_p2a_snap', + 'cp_p2a_volinfo', 'cp_p2_flush', 'cp_p2_finish', 'cp_p3_wait', + 'cp_p3v_volinfo', 'cp_p3a_volinfo', 'cp_p3_finish', 'cp_p4_finish', + 'cp_p5_finish', +] + +PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT = [ + label[3:] for label in PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST +] + +PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST = [ + 0, 3112, 3, 0, 0, 3, 757, 0, 99, 0, 26, 0, 22, 1, 0, 194, 4, 224, 359, 222, + 0, 0, 0, 0, 0, 0, 82, 0, 0, 0, 0, 0, 0, 62, 0, 133, 16, 35, 334219, 43, + 2218, 20, 0, +] + +PERF_COUNTER_TABLE_ROWS_WAFL = { + 'records': [ + { + 'id': NODE_NAME + ':wafl', + 'counters': [ + { + 'name': 'cp_phase_times', + 'values': PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST, + 'labels': PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST + } + ], + } + ], + 'num_records': 1, +} + +PERF_COUNTER_DOMAIN_BUSY_LABELS = [ + 'exempt', 'ha', 'host_os', 'idle', 'kahuna', 'kahuna_legacy', 'none', + 'nwk_exempt', 'network', 'protocol', 'raid', 'raid_exempt', 'sm_exempt', + 'ssan_exempt', 'storage', 'target', 'unclassified', 'wafl_exempt', + 'wafl_mpcleaner', 'xor_exempt', 'ssan_exempt2', 'exempt_ise', 'zombie', +] + +PERF_COUNTER_DOMAIN_BUSY_VALUES_1 = [ + 83071627197, 1334877, 19459898, 588539096, 11516887, 14878622, 18, + 647698, 20, 229232646, 4310322, 441035, 12946782, 57837913, 38765442, + 1111004351701, 1497335, 949657, 109890, 768027, 21, 14, 13 +] + +PERF_COUNTER_DOMAIN_BUSY_VALUES_2 = [ + 1191129018056, 135991, 22842513, 591213798, 9449562, 15345460, 0, + 751656, 0, 162605694, 3927323, 511160, 7644403, 29696759, 21787992, + 3585552592, 1058902, 957296, 87811, 499766, 0, 0, 0 +] + +PERF_COUNTER_ELAPSED_TIME_1 = 1199265469753 +PERF_COUNTER_ELAPSED_TIME_2 = 1199265469755 + +PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST = { + 'records': [ + { + 'counter_table': { + 'name': 'processor' + }, + 'id': NODE_NAME + ':processor0', + 'counters': [ + { + 'name': 'domain_busy_percent', + 'values': PERF_COUNTER_DOMAIN_BUSY_VALUES_1, + 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS + }, + { + 'name': 'elapsed_time', + 'value': PERF_COUNTER_ELAPSED_TIME_1, + } + ], + }, + { + 'counter_table': { + 'name': 'processor' + }, + 'id': NODE_NAME + ':processor1', + 'counters': [ + { + 'name': 'domain_busy_percent', + 'values': PERF_COUNTER_DOMAIN_BUSY_VALUES_2, + 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS + }, + { + 'name': 'elapsed_time', + 'value': 
PERF_COUNTER_ELAPSED_TIME_2, + } + ], + } + ], + 'num_records': 2, +} + +PERF_COUNTERS_PROCESSOR_EXPECTED = [ + { + 'instance-name': 'processor', + 'instance-uuid': NODE_NAME + ':processor0', + 'node-name': NODE_NAME, + 'timestamp': mock.ANY, + 'domain_busy': + ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_1]) + }, + { + 'instance-name': 'processor', + 'instance-uuid': NODE_NAME + ':processor0', + 'node-name': NODE_NAME, + 'timestamp': mock.ANY, + 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_1 + }, + { + 'instance-name': 'processor', + 'instance-uuid': NODE_NAME + ':processor1', + 'node-name': NODE_NAME, + 'timestamp': mock.ANY, + 'domain_busy': + ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_2]) + }, + { + 'instance-name': 'processor', + 'instance-uuid': NODE_NAME + ':processor1', + 'node-name': NODE_NAME, + 'timestamp': mock.ANY, + 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_2 + }, +] + +SINGLE_IGROUP_REST = { + "svm": { + "uuid": FAKE_UUID, + "name": VOLUME_VSERVER_NAME, + }, + "uuid": FAKE_UUID, + "name": "openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53", + "protocol": "iscsi", + "os_type": "linux", + "initiators": [ + { + "name": "iqn.1993-08.org.fake:01:5b67769f5c5e", + } + ], +} + +IGROUP_GET_ITER_REST = { + "records": [ + SINGLE_IGROUP_REST + ], + "num_records": 1, +} + +IGROUP_GET_ITER_MULT_REST = { + "records": [ + SINGLE_IGROUP_REST, + SINGLE_IGROUP_REST + ], + "num_records": 2, +} + +IGROUP_GET_ITER_INITS_REST = { + "records": [ + { + "svm": { + "uuid": FAKE_UUID, + "name": VOLUME_VSERVER_NAME, + }, + "uuid": FAKE_UUID, + "name": "openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53", + "protocol": "iscsi", + "os_type": "linux", + "initiators": [ + { + "name": "iqn.1993-08.org.fake:01:5b67769f5c5e", + }, + { + "name": "iqn.1993-08.org.fake:02:5b67769f5c5e", + } + ], + } + ], + "num_records": 1, +} + +GET_LUN_MAP_REST = { + "records": [ + { + "svm": { + "uuid": FAKE_UUID, + "name": VSERVER_NAME, + }, + "lun": { + "uuid": "6c2969dc-b022-434c-b7cd-9240bs975187", + "name": LUN_NAME_PATH, + }, + "igroup": { + "uuid": "08088517-a6f5-11ec-82cc-00a0b89c9a78", + "name": IGROUP_NAME, + }, + "logical_unit_number": 0, + } + ], + "num_records": 1, +} + +FC_INTERFACE_REST = { + "records": [ + { + "data_protocol": "fcp", + "location": { + "port": { + "name": "0a", + "uuid": FAKE_UUID, + "node": { + "name": "node1" + } + }, + "node": { + "name": "node1", + "uuid": FAKE_UUID, + } + }, + "wwpn": "20:00:00:50:56:b4:13:a8", + "name": "lif1", + "uuid": FAKE_UUID, + "state": "up", + "port_address": "5060F", + "wwnn": "20:00:00:50:56:b4:13:01", + "comment": "string", + "svm": { + "name": VOLUME_VSERVER_NAME, + "uuid": FAKE_UUID, + }, + "enabled": True + } + ], + "num_records": 1 +} + +GET_LUN_MAPS = { + "records": [ + { + "svm": { + "uuid": "77deec3a-38ea-11ec-aca8-00a0b89c9a78", + "name": VOLUME_NAME, + }, + "uuid": "99809170-a92c-11ec-82cc-0aa0b89c9a78", + "name": "openstack-626d20dc-c420-4a5a-929c-59178d64f2c5", + "initiators": [ + { + "name": "iqn.2005-03.org.open-iscsi:49ebe8a87d1", + } + ], + "lun_maps": [ + { + "logical_unit_number": 0, + "lun": { + "name": LUN_NAME_PATH, + "uuid": "91e83a0a-72c3-4278-9a24-f2f8135aa5db", + "node": { + "name": CLUSTER_NAME, + "uuid": "9eff6c76-fc13-11ea-8799-525a0006bba9", + }, + }, + } + ], + } + ], + "num_records": 1, +} + +GET_LUN_MAPS_NO_MAPS = { + "records": [ + { + "svm": { + "uuid": "77deec3a-38ea-11ec-aca8-00a0b89c9a78", + "name": VOLUME_NAME, + }, + "uuid": "99809170-a92c-11ec-82cc-0aa0b89c9a78", + "name": 
"openstack-626d20dc-c420-4a5a-929c-59178d64f2c5", + "initiators": [ + { + "name": "iqn.2005-03.org.open-iscsi:49ebe8a87d1", + } + ], + } + ], + "num_records": 1, +} + +GET_ISCSI_SERVICE_DETAILS_REST = { + "records": [ + { + "svm": { + "uuid": FAKE_UUID, + "name": VOLUME_VSERVER_NAME, + }, + "target": { + "name": INITIATOR_IQN + }, + } + ], + "num_records": 1, +} + +CHECK_ISCSI_INITIATOR_REST = { + "records": [ + { + "svm": { + "uuid": FAKE_UUID, + "name": VOLUME_VSERVER_NAME, + }, + "initiator": INITIATOR_IQN, + } + ], + "num_records": 1, +} + +GET_ISCSI_TARGET_DETAILS_REST = { + "records": [ + { + "uuid": FAKE_UUID, + "name": VOLUME_VSERVER_NAME, + "ip": { + "address": "192.168.1.254" + }, + "enabled": True, + "services": [ + "data_core", + "data_iscsi" + ], + } + ], + "num_records": 1, +} + +VOLUME_GET_ITER_CAPACITY_RESPONSE_REST = { + "records": [ + { + "uuid": FAKE_UUID, + "name": VOLUME_NAME, + "space": { + "available": VOLUME_SIZE_AVAILABLE, + "afs_total": VOLUME_SIZE_TOTAL + }, + } + ], + "num_records": 1, +} diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py index b42ddf10ad9..e5ed24de6fb 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py @@ -1441,6 +1441,7 @@ class NetAppCmodeClientTestCase(test.TestCase): 1 + fake_vserver """)) @@ -4494,3 +4495,46 @@ class NetAppCmodeClientTestCase(test.TestCase): } self.client.connection.send_request.assert_called_once_with( 'file-rename-file', api_args) + + def test_check_api_permissions(self): + + mock_log = self.mock_object(client_cmode.LOG, 'warning') + self.mock_object(self.client, 'check_cluster_api', return_value=True) + + self.client.check_api_permissions() + + self.client.check_cluster_api.assert_has_calls( + [mock.call(*key) for key in client_cmode.SSC_API_MAP.keys()]) + self.assertEqual(0, mock_log.call_count) + + def test_check_api_permissions_failed_ssc_apis(self): + + def check_cluster_api(object_name, operation_name, api): + if api != 'volume-get-iter': + return False + return True + + self.mock_object(self.client, 'check_cluster_api', + side_effect=check_cluster_api) + + mock_log = self.mock_object(client_cmode.LOG, 'warning') + + self.client.check_api_permissions() + + self.assertEqual(1, mock_log.call_count) + + def test_check_api_permissions_failed_volume_api(self): + + def check_cluster_api(object_name, operation_name, api): + if api == 'volume-get-iter': + return False + return True + + self.mock_object(self.client, 'check_cluster_api', + side_effect=check_cluster_api) + mock_log = self.mock_object(client_cmode.LOG, 'warning') + + self.assertRaises(exception.VolumeBackendAPIException, + self.client.check_api_permissions) + + self.assertEqual(0, mock_log.call_count) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py index b914c493235..97be72a1b4d 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py @@ -22,13 +22,16 @@ import uuid import ddt import six +from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) from 
cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api +from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest +from cinder.volume.drivers.netapp import utils as netapp_utils CONNECTION_INFO = {'hostname': 'hostname', @@ -306,3 +309,2197 @@ class NetAppRestCmodeClientTestCase(test.TestCase): mock.call(next_url_1, 'get', query=None, enable_tunneling=True), mock.call(next_url_2, 'get', query=None, enable_tunneling=True), ]) + + def test__get_unique_volume(self): + api_response = fake_client.VOLUME_GET_ITER_STYLE_RESPONSE_REST + + result = self.client._get_unique_volume(api_response["records"]) + + expected = fake_client.VOLUME_FLEXGROUP_STYLE_REST + self.assertEqual(expected, result) + + def test__get_unique_volume_raise_exception(self): + api_response = fake_client.VOLUME_GET_ITER_SAME_STYLE_RESPONSE_REST + + self.assertRaises(exception.VolumeBackendAPIException, + self.client._get_unique_volume, + api_response["records"]) + + @ddt.data(fake.REST_FIELDS, None) + def test__get_volume_by_args(self, fields): + mock_get_unique_vol = self.mock_object( + self.client, '_get_unique_volume', + return_value=fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST) + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST) + + volume = self.client._get_volume_by_args( + vol_name=fake.VOLUME_NAME, vol_path=fake.VOLUME_PATH, + vserver=fake.VSERVER_NAME, fields=fields) + + self.assertEqual(fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST, + volume) + mock_get_unique_vol.assert_called_once_with( + fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST['records']) + expected_query = { + 'type': 'rw', + 'style': 'flex*', + 'is_svm_root': 'false', + 'error_state.is_inconsistent': 'false', + 'state': 'online', + 'name': fake.VOLUME_NAME, + 'nas.path': fake.VOLUME_PATH, + 'svm.name': fake.VSERVER_NAME, + 'fields': 'name,style' if not fields else fields, + } + mock_send_request.assert_called_once_with('/storage/volumes/', 'get', + query=expected_query) + + @ddt.data(False, True) + def test_get_flexvol(self, is_flexgroup): + + if is_flexgroup: + api_response = \ + fake_client.VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP_REST + volume_response = \ + fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST + else: + api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST + volume_response = \ + fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST + + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + mock_get_unique_vol = self.mock_object( + self.client, '_get_volume_by_args', return_value=volume_response) + + result = self.client.get_flexvol( + flexvol_name=fake_client.VOLUME_NAMES[0], + flexvol_path='/%s' % fake_client.VOLUME_NAMES[0]) + + fields = ('aggregates.name,name,svm.name,nas.path,' + 'type,guarantee.honored,guarantee.type,' + 'space.snapshot.reserve_percent,space.size,' + 'qos.policy.name,snapshot_policy,language,style') + mock_get_unique_vol.assert_called_once_with( + vol_name=fake_client.VOLUME_NAMES[0], + vol_path='/%s' % fake_client.VOLUME_NAMES[0], fields=fields) + + if is_flexgroup: + self.assertEqual(fake_client.VOLUME_INFO_SSC_FLEXGROUP, result) + else: + self.assertEqual(fake_client.VOLUME_INFO_SSC, result) + + def test_list_flexvols(self): + api_response = 
fake_client.VOLUME_GET_ITER_LIST_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.list_flexvols() + + query = { + 'type': 'rw', + 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' + 'is_svm_root': 'false', + 'error_state.is_inconsistent': 'false', + # 'is-invalid': 'false', + 'state': 'online', + 'fields': 'name' + } + + self.client.send_request.assert_called_once_with( + '/storage/volumes/', 'get', query=query) + self.assertEqual(list(fake_client.VOLUME_NAMES), result) + + def test_list_flexvols_not_found(self): + api_response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.list_flexvols() + self.assertEqual([], result) + + def test_is_flexvol_mirrored(self): + + api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.is_flexvol_mirrored( + fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) + + query = { + 'source.path': fake_client.VOLUME_VSERVER_NAME + + ':' + fake_client.VOLUME_NAMES[0], + 'state': 'snapmirrored', + 'return_records': 'false', + } + + self.client.send_request.assert_called_once_with( + '/snapmirror/relationships/', 'get', query=query) + self.assertTrue(result) + + def test_is_flexvol_mirrored_not_mirrored(self): + + api_response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.is_flexvol_mirrored( + fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) + + self.assertFalse(result) + + def test_is_flexvol_mirrored_api_error(self): + + self.mock_object(self.client, + 'send_request', + side_effect=self._mock_api_error()) + + result = self.client.is_flexvol_mirrored( + fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) + + self.assertFalse(result) + + def test_is_flexvol_encrypted(self): + + api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST + self.client.features.add_feature('FLEXVOL_ENCRYPTION') + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.is_flexvol_encrypted( + fake_client.VOLUME_NAME, fake_client.VOLUME_VSERVER_NAME) + + query = { + 'encryption.enabled': 'true', + 'name': fake_client.VOLUME_NAME, + 'svm.name': fake_client.VOLUME_VSERVER_NAME, + 'return_records': 'false', + } + + self.client.send_request.assert_called_once_with( + '/storage/volumes/', 'get', query=query) + + self.assertTrue(result) + + def test_is_flexvol_encrypted_unsupported_version(self): + + self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=False) + result = self.client.is_flexvol_encrypted( + fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) + + self.assertFalse(result) + + def test_is_flexvol_encrypted_no_records_found(self): + + api_response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.is_flexvol_encrypted( + fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) + + self.assertFalse(result) + + def test_is_flexvol_encrypted_api_error(self): + + self.mock_object(self.client, + 'send_request', + side_effect=self._mock_api_error()) + + result = self.client.is_flexvol_encrypted( + fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) + + self.assertFalse(result) + + @ddt.data({'types': {'FCAL'}, 
'expected': ['FCAL']}, + {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},) + @ddt.unpack + def test_get_aggregate_disk_types(self, types, expected): + + mock_get_aggregate_disk_types = self.mock_object( + self.client, '_get_aggregate_disk_types', return_value=types) + + result = self.client.get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertCountEqual(expected, result) + mock_get_aggregate_disk_types.assert_called_once_with( + fake_client.VOLUME_AGGREGATE_NAME) + + def test_get_aggregate_disk_types_not_found(self): + + mock_get_aggregate_disk_types = self.mock_object( + self.client, '_get_aggregate_disk_types', return_value=set()) + + result = self.client.get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertIsNone(result) + mock_get_aggregate_disk_types.assert_called_once_with( + fake_client.VOLUME_AGGREGATE_NAME) + + def test_get_aggregate_disk_types_api_not_found(self): + + api_error = netapp_api.NaApiError() + self.mock_object(self.client, + 'send_request', + side_effect=api_error) + + result = self.client.get_aggregate_disk_types( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertIsNone(result) + + def test__get_aggregates(self): + + api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST + mock_send_request = self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client._get_aggregates() + + mock_send_request.assert_has_calls( + [mock.call('/storage/aggregates', 'get', query={}, + enable_tunneling=False)]) + self.assertEqual(result, api_response['records']) + + def test__get_aggregates_with_filters(self): + + api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST + mock_send_request = self.mock_object(self.client, + 'send_request', + return_value=api_response) + query = { + 'fields': 'space.block_storage.size,space.block_storage.available', + 'name': ','.join(fake_client.VOLUME_AGGREGATE_NAMES), + } + + result = self.client._get_aggregates( + aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES, + fields=query['fields']) + + mock_send_request.assert_has_calls([ + mock.call('/storage/aggregates', 'get', query=query, + enable_tunneling=False)]) + self.assertEqual(result, api_response['records']) + + def test__get_aggregates_not_found(self): + + api_response = fake_client.NO_RECORDS_RESPONSE_REST + mock_send_request = self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client._get_aggregates() + + mock_send_request.assert_has_calls([ + mock.call('/storage/aggregates', 'get', query={}, + enable_tunneling=False)]) + self.assertEqual([], result) + + def test_get_aggregate_none_specified(self): + + result = self.client.get_aggregate('') + + self.assertEqual({}, result) + + def test_get_aggregate(self): + + api_response = [fake_client.AGGR_GET_ITER_RESPONSE_REST['records'][1]] + + mock__get_aggregates = self.mock_object(self.client, + '_get_aggregates', + return_value=api_response) + + response = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) + + fields = ('name,block_storage.primary.raid_type,' + 'block_storage.storage_type,home_node.name') + mock__get_aggregates.assert_has_calls([ + mock.call( + aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], + fields=fields)]) + + expected = { + 'name': fake_client.VOLUME_AGGREGATE_NAME, + 'raid-type': 'raid0', + 'is-hybrid': False, + 'node-name': fake_client.NODE_NAME, + } + self.assertEqual(expected, response) + + def test_get_aggregate_not_found(self): + + api_response = 
fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual({}, result) + + def test_get_aggregate_api_error(self): + + self.mock_object(self.client, + 'send_request', + side_effect=self._mock_api_error()) + + result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual({}, result) + + def test_get_aggregate_api_not_found(self): + + api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND) + + self.mock_object(self.client, + 'send_request', + side_effect=api_error) + + result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual({}, result) + + @ddt.data(True, False) + def test_is_qos_min_supported(self, supported): + self.client.features.add_feature('test', supported=supported) + mock_name = self.mock_object(netapp_utils, + 'qos_min_feature_name', + return_value='test') + result = self.client.is_qos_min_supported(True, 'node') + + mock_name.assert_called_once_with(True, 'node') + self.assertEqual(result, supported) + + def test_is_qos_min_supported_invalid_node(self): + mock_name = self.mock_object(netapp_utils, + 'qos_min_feature_name', + return_value='invalid_feature') + result = self.client.is_qos_min_supported(True, 'node') + + mock_name.assert_called_once_with(True, 'node') + self.assertFalse(result) + + def test_is_qos_min_supported_none_node(self): + result = self.client.is_qos_min_supported(True, None) + + self.assertFalse(result) + + def test_get_flexvol_dedupe_info(self): + + api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST + mock_send_request = self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.get_flexvol_dedupe_info( + fake_client.VOLUME_NAMES[0]) + + query = { + 'efficiency.volume_path': '/vol/%s' % fake_client.VOLUME_NAMES[0], + 'fields': 'efficiency.state,efficiency.compression' + } + + mock_send_request.assert_called_once_with( + '/storage/volumes', 'get', query=query) + self.assertEqual( + fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) + + def test_get_flexvol_dedupe_info_no_logical_data_values(self): + + api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.get_flexvol_dedupe_info( + fake_client.VOLUME_NAMES[0]) + + self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, + result) + + def test_get_flexvol_dedupe_info_not_found(self): + + api_response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.get_flexvol_dedupe_info( + fake_client.VOLUME_NAMES[0]) + + self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, + result) + + def test_get_flexvol_dedupe_info_api_error(self): + + self.mock_object(self.client, + 'send_request', + side_effect=self._mock_api_error()) + + result = self.client.get_flexvol_dedupe_info( + fake_client.VOLUME_NAMES[0]) + + self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, + result) + + def test_get_flexvol_dedupe_info_api_insufficient_privileges(self): + + api_error = netapp_api.NaApiError(code=netapp_api.EAPIPRIVILEGE) + self.mock_object(self.client, + 'send_request', + side_effect=api_error) + + result = self.client.get_flexvol_dedupe_info( + fake_client.VOLUME_NAMES[0]) + + 
self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, + result) + + def test_get_lun_list(self): + response = fake_client.LUN_GET_ITER_REST + self.mock_object(self.client, + 'send_request', + return_value=response) + + expected_result = fake_client.LUN_GET_ITER_RESULT + luns = self.client.get_lun_list() + + self.assertEqual(expected_result, luns) + self.assertEqual(2, len(luns)) + + def test_get_lun_list_no_records(self): + response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=response) + + luns = self.client.get_lun_list() + + self.assertEqual([], luns) + + def test_get_lun_sizes_by_volume(self): + volume_name = fake_client.VOLUME_NAME + query = { + 'location.volume.name': volume_name, + 'fields': 'space.size,name' + } + response = fake_client.LUN_GET_ITER_REST + expected_result = [] + for lun in fake_client.LUN_GET_ITER_RESULT: + expected_result.append({ + 'size': lun['Size'], + 'path': lun['Path'], + }) + + self.mock_object(self.client, + 'send_request', + return_value=response) + + luns = self.client.get_lun_sizes_by_volume(volume_name) + + self.assertEqual(expected_result, luns) + self.assertEqual(2, len(luns)) + self.client.send_request.assert_called_once_with( + '/storage/luns/', 'get', query=query) + + def test_get_lun_sizes_by_volume_no_records(self): + volume_name = fake_client.VOLUME_NAME + query = { + 'location.volume.name': volume_name, + 'fields': 'space.size,name' + } + response = fake_client.NO_RECORDS_RESPONSE_REST + + self.mock_object(self.client, + 'send_request', + return_value=response) + + luns = self.client.get_lun_sizes_by_volume(volume_name) + + self.assertEqual([], luns) + self.client.send_request.assert_called_once_with( + '/storage/luns/', 'get', query=query) + + def test_get_lun_by_args(self): + response = fake_client.LUN_GET_ITER_REST + mock_send_request = self.mock_object( + self.client, 'send_request', return_value=response) + + lun_info_args = { + 'vserver': fake.VSERVER_NAME, + 'path': fake.LUN_PATH, + 'uuid': fake.UUID1, + } + + luns = self.client.get_lun_by_args(**lun_info_args) + + query = { + 'svm.name': fake.VSERVER_NAME, + 'name': fake.LUN_PATH, + 'uuid': fake.UUID1, + 'fields': 'svm.name,location.volume.name,space.size,' + 'location.qtree.name,name,os_type,' + 'space.guarantee.requested,uuid' + } + + mock_send_request.assert_called_once_with( + '/storage/luns/', 'get', query=query) + + self.assertEqual(2, len(luns)) + + def test_get_lun_by_args_no_lun_found(self): + response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=response) + + luns = self.client.get_lun_by_args() + + self.assertEqual([], luns) + + def test_get_lun_by_args_with_one_arg(self): + path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) + response = fake_client.LUN_GET_ITER_REST + mock_send_request = self.mock_object( + self.client, 'send_request', return_value=response) + + luns = self.client.get_lun_by_args(path=path) + + query = { + 'name': path, + 'fields': 'svm.name,location.volume.name,space.size,' + 'location.qtree.name,name,os_type,' + 'space.guarantee.requested,uuid' + } + + mock_send_request.assert_called_once_with( + '/storage/luns/', 'get', query=query) + + self.assertEqual(2, len(luns)) + + def test_get_file_sizes_by_dir(self): + volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST + query = { + 'type': 'file', + 'fields': 'size,name' + } + response = fake_client.FILE_DIRECTORY_GET_ITER_REST + expected_result = 
fake_client.FILE_DIRECTORY_GET_ITER_RESULT_REST + + self.mock_object(self.client, + '_get_volume_by_args', + return_value=volume) + self.mock_object(self.client, + 'send_request', + return_value=response) + + files = self.client.get_file_sizes_by_dir(volume['name']) + + self.assertEqual(expected_result, files) + self.assertEqual(2, len(files)) + self.client.send_request.assert_called_once_with( + f'/storage/volumes/{volume["uuid"]}/files', + 'get', query=query) + + def test_get_file_sizes_by_dir_no_records(self): + volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST + query = { + 'type': 'file', + 'fields': 'size,name' + } + + api_error = netapp_api.NaApiError(code=netapp_api.REST_NO_SUCH_FILE) + + self.mock_object(self.client, + '_get_volume_by_args', + return_value=volume) + self.mock_object(self.client, + 'send_request', + side_effect=api_error) + + files = self.client.get_file_sizes_by_dir(volume['name']) + + self.assertEqual([], files) + self.assertEqual(0, len(files)) + self.client.send_request.assert_called_once_with( + f'/storage/volumes/{volume["uuid"]}/files', + 'get', query=query) + + def test_get_file_sizes_by_dir_exception(self): + volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST + api_error = netapp_api.NaApiError(code=0) + + self.mock_object(self.client, + '_get_volume_by_args', + return_value=volume) + self.mock_object(self.client, + 'send_request', + side_effect=api_error) + self.assertRaises(netapp_api.NaApiError, + self.client.get_file_sizes_by_dir, + volume['name']) + + @ddt.data({'junction_path': '/fake/vol'}, + {'name': 'fake_volume'}, + {'junction_path': '/fake/vol', 'name': 'fake_volume'}) + def test_get_volume_state(self, kwargs): + query_args = {} + query_args['fields'] = 'state' + + if 'name' in kwargs: + query_args['name'] = kwargs['name'] + if 'junction_path' in kwargs: + query_args['nas.path'] = kwargs['junction_path'] + + response = fake_client.VOLUME_GET_ITER_STATE_RESPONSE_REST + mock_send_request = self.mock_object( + self.client, 'send_request', return_value=response) + + state = self.client.get_volume_state(**kwargs) + + mock_send_request.assert_called_once_with( + '/storage/volumes/', 'get', query=query_args) + + self.assertEqual(fake_client.VOLUME_STATE_ONLINE, state) + + def test_delete_snapshot(self): + volume = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST + self.mock_object( + self.client, '_get_volume_by_args', + return_value=volume) + snap_name = fake.SNAPSHOT["name"] + self.mock_object(self.client, 'send_request') + + self.client.delete_snapshot(volume["name"], snap_name) + + self.client._get_volume_by_args.assert_called_once_with( + vol_name=volume["name"]) + self.client.send_request.assert_called_once_with( + f'/storage/volumes/{volume["uuid"]}/snapshots' + f'?name={snap_name}', 'delete') + + def test_get_operational_lif_addresses(self): + expected_result = ['1.2.3.4', '99.98.97.96'] + api_response = fake_client.GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE_REST + mock_send_request = self.mock_object(self.client, + 'send_request', + return_value=api_response) + + address_list = self.client.get_operational_lif_addresses() + + query = { + 'state': 'up', + 'fields': 'ip.address', + } + + mock_send_request.assert_called_once_with( + '/network/ip/interfaces/', 'get', query=query) + + self.assertEqual(expected_result, address_list) + + def test__list_vservers(self): + api_response = fake_client.VSERVER_DATA_LIST_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + result = self.client._list_vservers() + 
query = { + 'fields': 'name', + } + self.client.send_request.assert_has_calls([ + mock.call('/svm/svms', 'get', query=query, + enable_tunneling=False)]) + self.assertListEqual( + [fake_client.VSERVER_NAME, fake_client.VSERVER_NAME_2], result) + + def test_list_vservers_not_found(self): + api_response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + result = self.client._list_vservers() + self.assertListEqual([], result) + + def test_get_ems_log_destination_vserver(self): + mock_list_vservers = self.mock_object( + self.client, + '_list_vservers', + return_value=[fake_client.VSERVER_NAME]) + result = self.client._get_ems_log_destination_vserver() + mock_list_vservers.assert_called_once_with() + self.assertEqual(fake_client.VSERVER_NAME, result) + + def test_get_ems_log_destination_vserver_not_found(self): + mock_list_vservers = self.mock_object( + self.client, + '_list_vservers', + return_value=[]) + + self.assertRaises(exception.NotFound, + self.client._get_ems_log_destination_vserver) + + mock_list_vservers.assert_called_once_with() + + def test_send_ems_log_message(self): + + message_dict = { + 'computer-name': '25-dev-vm', + 'event-source': 'Cinder driver NetApp_iSCSI_Cluster_direct', + 'app-version': '20.1.0.dev|vendor|Linux-5.4.0-120-generic-x86_64', + 'category': 'provisioning', + 'log-level': '5', + 'auto-support': 'false', + 'event-id': '1', + 'event-description': + '{"pools": {"vserver": "vserver_name",' + + '"aggregates": [], "flexvols": ["flexvol_01"]}}' + } + + body = { + 'computer_name': message_dict['computer-name'], + 'event_source': message_dict['event-source'], + 'app_version': message_dict['app-version'], + 'category': message_dict['category'], + 'severity': 'notice', + 'autosupport_required': message_dict['auto-support'] == 'true', + 'event_id': message_dict['event-id'], + 'event_description': message_dict['event-description'], + } + + self.mock_object(self.client, '_get_ems_log_destination_vserver', + return_value='vserver_name') + self.mock_object(self.client, 'send_request') + + self.client.send_ems_log_message(message_dict) + + self.client.send_request.assert_called_once_with( + '/support/ems/application-logs', 'post', body=body) + + @ddt.data('cp_phase_times', 'domain_busy') + def test_get_performance_counter_info(self, counter_name): + + response1 = fake_client.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST + response2 = fake_client.PERF_COUNTER_TABLE_ROWS_WAFL + + object_name = 'wafl' + + mock_send_request = self.mock_object( + self.client, 'send_request', + side_effect=[response1, response2]) + + result = self.client.get_performance_counter_info(object_name, + counter_name) + + expected = { + 'name': 'cp_phase_times', + 'base-counter': 'total_cp_msecs', + 'labels': fake_client.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT, + } + + query1 = { + 'counter_schemas.name': counter_name, + 'fields': 'counter_schemas.*' + } + + query2 = { + 'counters.name': counter_name, + 'fields': 'counters.*' + } + + if counter_name == 'domain_busy': + expected['name'] = 'domain_busy' + expected['labels'] = ( + fake_client.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST) + query1['counter_schemas.name'] = 'domain_busy_percent' + query2['counters.name'] = 'domain_busy_percent' + + self.assertEqual(expected, result) + + mock_send_request.assert_has_calls([ + mock.call(f'/cluster/counter/tables/{object_name}', + 'get', query=query1, enable_tunneling=False), + mock.call(f'/cluster/counter/tables/{object_name}/rows', + 'get', 
query=query2, enable_tunneling=False), + ]) + + def test_get_performance_counter_info_not_found_rows(self): + response1 = fake_client.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST + response2 = fake_client.NO_RECORDS_RESPONSE_REST + + object_name = 'wafl' + counter_name = 'cp_phase_times' + + self.mock_object( + self.client, 'send_request', + side_effect=[response1, response2]) + + result = self.client.get_performance_counter_info(object_name, + counter_name) + + expected = { + 'name': 'cp_phase_times', + 'base-counter': 'total_cp_msecs', + 'labels': [], + } + self.assertEqual(expected, result) + + def test_get_performance_instance_uuids(self): + response = fake_client.PERF_COUNTER_TABLE_ROWS_WAFL + + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=response) + + object_name = 'wafl' + result = self.client.get_performance_instance_uuids( + object_name, fake_client.NODE_NAME) + + expected = [fake_client.NODE_NAME + ':wafl'] + self.assertEqual(expected, result) + + query = { + 'id': fake_client.NODE_NAME + ':*', + } + mock_send_request.assert_called_once_with( + f'/cluster/counter/tables/{object_name}/rows', + 'get', query=query, enable_tunneling=False) + + def test_get_performance_counters(self): + response = fake_client.PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST + + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=response) + + instance_uuids = [ + fake_client.NODE_NAME + ':processor0', + fake_client.NODE_NAME + ':processor1', + ] + object_name = 'processor' + counter_names = ['domain_busy', 'processor_elapsed_time'] + rest_counter_names = ['domain_busy_percent', 'elapsed_time'] + result = self.client.get_performance_counters(object_name, + instance_uuids, + counter_names) + + expected = fake_client.PERF_COUNTERS_PROCESSOR_EXPECTED + self.assertEqual(expected, result) + + query = { + 'id': '|'.join(instance_uuids), + 'counters.name': '|'.join(rest_counter_names), + 'fields': 'id,counter_table.name,counters.*', + } + + mock_send_request.assert_called_once_with( + f'/cluster/counter/tables/{object_name}/rows', + 'get', query=query, enable_tunneling=False) + + def test_get_aggregate_capacities(self): + aggr1_capacities = { + 'percent-used': 50, + 'size-available': 100.0, + 'size-total': 200.0, + } + aggr2_capacities = { + 'percent-used': 75, + 'size-available': 125.0, + 'size-total': 500.0, + } + mock_get_aggregate_capacity = self.mock_object( + self.client, '_get_aggregate_capacity', + side_effect=[aggr1_capacities, aggr2_capacities]) + + result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) + + expected = { + 'aggr1': aggr1_capacities, + 'aggr2': aggr2_capacities, + } + self.assertEqual(expected, result) + mock_get_aggregate_capacity.assert_has_calls([ + mock.call('aggr1'), + mock.call('aggr2'), + ]) + + def test_get_aggregate_capacities_not_found(self): + mock_get_aggregate_capacity = self.mock_object( + self.client, '_get_aggregate_capacity', side_effect=[{}, {}]) + + result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) + + expected = { + 'aggr1': {}, + 'aggr2': {}, + } + self.assertEqual(expected, result) + mock_get_aggregate_capacity.assert_has_calls([ + mock.call('aggr1'), + mock.call('aggr2'), + ]) + + def test_get_aggregate_capacities_not_list(self): + result = self.client.get_aggregate_capacities('aggr1') + self.assertEqual({}, result) + + def test__get_aggregate_capacity(self): + api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST['records'] + mock_get_aggregates = 
self.mock_object(self.client, + '_get_aggregates', + return_value=api_response) + + result = self.client._get_aggregate_capacity( + fake_client.VOLUME_AGGREGATE_NAME) + + fields = ('space.block_storage.available,space.block_storage.size,' + 'space.block_storage.used') + mock_get_aggregates.assert_has_calls([ + mock.call(aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], + fields=fields)]) + + available = float(fake_client.AGGR_SIZE_AVAILABLE) + total = float(fake_client.AGGR_SIZE_TOTAL) + used = float(fake_client.AGGR_SIZE_USED) + percent_used = int((used * 100) // total) + + expected = { + 'percent-used': percent_used, + 'size-available': available, + 'size-total': total, + } + self.assertEqual(expected, result) + + def test__get_aggregate_capacity_not_found(self): + + api_response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client._get_aggregate_capacity( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual({}, result) + + def test__get_aggregate_capacity_api_error(self): + + self.mock_object(self.client, + 'send_request', + side_effect=self._mock_api_error()) + + result = self.client._get_aggregate_capacity( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual({}, result) + + def test__get_aggregate_capacity_api_not_found(self): + + api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND) + self.mock_object( + self.client, 'send_request', side_effect=api_error) + + result = self.client._get_aggregate_capacity( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertEqual({}, result) + + def test_get_node_for_aggregate(self): + + api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST['records'] + mock_get_aggregates = self.mock_object(self.client, + '_get_aggregates', + return_value=api_response) + + result = self.client.get_node_for_aggregate( + fake_client.VOLUME_AGGREGATE_NAME) + + fields = 'home_node.name' + mock_get_aggregates.assert_has_calls([ + mock.call( + aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], + fields=fields)]) + + self.assertEqual(fake_client.NODE_NAME, result) + + def test_get_node_for_aggregate_none_requested(self): + result = self.client.get_node_for_aggregate(None) + self.assertIsNone(result) + + def test_get_node_for_aggregate_api_not_found(self): + api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND) + self.mock_object(self.client, + 'send_request', + side_effect=api_error) + + result = self.client.get_node_for_aggregate( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertIsNone(result) + + def test_get_node_for_aggregate_api_error(self): + + self.mock_object(self.client, + 'send_request', + self._mock_api_error()) + + self.assertRaises(netapp_api.NaApiError, + self.client.get_node_for_aggregate, + fake_client.VOLUME_AGGREGATE_NAME) + + def test_get_node_for_aggregate_not_found(self): + + api_response = fake_client.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, + 'send_request', + return_value=api_response) + + result = self.client.get_node_for_aggregate( + fake_client.VOLUME_AGGREGATE_NAME) + + self.assertIsNone(result) + + @ddt.data(None, {'legacy': 'fake'}, {}) + def test_provision_qos_policy_group_invalid_policy_info(self, policy_info): + self.mock_object(self.client, '_validate_qos_policy_group') + self.mock_object(self.client, '_get_qos_first_policy_group_by_name') + self.mock_object(self.client, '_create_qos_policy_group') + self.mock_object(self.client, '_modify_qos_policy_group') + + 
self.client.provision_qos_policy_group(policy_info, False) + + self.client._validate_qos_policy_group.assert_not_called() + self.client._get_qos_first_policy_group_by_name.assert_not_called() + self.client._create_qos_policy_group.assert_not_called() + self.client._modify_qos_policy_group.assert_not_called() + + @ddt.data(True, False) + def test_provision_qos_policy_group_qos_policy_create(self, is_adaptive): + policy_info = fake.QOS_POLICY_GROUP_INFO + policy_spec = fake.QOS_POLICY_GROUP_SPEC + if is_adaptive: + policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO + policy_spec = fake.ADAPTIVE_QOS_SPEC + + self.mock_object(self.client, '_validate_qos_policy_group') + self.mock_object(self.client, '_get_qos_first_policy_group_by_name', + return_value=None) + self.mock_object(self.client, '_create_qos_policy_group') + self.mock_object(self.client, '_modify_qos_policy_group') + + self.client.provision_qos_policy_group(policy_info, True) + + self.client._validate_qos_policy_group.assert_called_once_with( + is_adaptive, spec=policy_spec, qos_min_support=True) + (self.client._get_qos_first_policy_group_by_name. + assert_called_once_with(policy_spec['policy_name'])) + self.client._create_qos_policy_group.assert_called_once_with( + policy_spec, is_adaptive) + self.client._modify_qos_policy_group.assert_not_called() + + @ddt.data(True, False) + def test_provision_qos_policy_group_qos_policy_modify(self, is_adaptive): + policy_rest_item = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0] + policy_info = fake.QOS_POLICY_GROUP_INFO + policy_spec = fake.QOS_POLICY_GROUP_SPEC + if is_adaptive: + policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO + policy_spec = fake.ADAPTIVE_QOS_SPEC + + self.mock_object(self.client, '_validate_qos_policy_group') + self.mock_object(self.client, '_get_qos_first_policy_group_by_name', + return_value=policy_rest_item) + self.mock_object(self.client, '_create_qos_policy_group') + self.mock_object(self.client, '_modify_qos_policy_group') + + self.client.provision_qos_policy_group(policy_info, True) + + self.client._validate_qos_policy_group.assert_called_once_with( + is_adaptive, spec=policy_spec, qos_min_support=True) + (self.client._get_qos_first_policy_group_by_name. 
+ assert_called_once_with(policy_spec['policy_name'])) + self.client._create_qos_policy_group.assert_not_called() + self.client._modify_qos_policy_group.assert_called_once_with( + policy_spec, is_adaptive, policy_rest_item) + + @ddt.data(True, False) + def test__get_qos_first_policy_group_by_name(self, is_empty): + qos_rest_records = [] + qos_item = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0] + if not is_empty: + qos_rest_records = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'] + + self.mock_object(self.client, '_get_qos_policy_group_by_name', + return_value=qos_rest_records) + + result = self.client._get_qos_first_policy_group_by_name( + qos_item['name']) + + self.client._get_qos_policy_group_by_name.assert_called_once_with( + qos_item['name'] + ) + if not is_empty: + self.assertEqual(qos_item, result) + else: + self.assertTrue(result is None) + + @ddt.data(True, False) + def test__get_qos_policy_group_by_name(self, is_empty): + qos_rest_response = {} + qos_rest_records = [] + qos_name = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0]['name'] + if not is_empty: + qos_rest_response = fake.QOS_POLICY_BY_NAME_RESPONSE_REST + qos_rest_records = qos_rest_response['records'] + + self.mock_object(self.client, 'send_request', + return_value=qos_rest_response) + + result = self.client._get_qos_policy_group_by_name(qos_name) + + self.client.send_request.assert_called_once_with( + '/storage/qos/policies/', 'get', query={'name': qos_name}) + self.assertEqual(qos_rest_records, result) + + @ddt.data(True, False) + def test__qos_spec_to_api_args(self, is_adaptive): + policy_spec = copy.deepcopy(fake.QOS_POLICY_GROUP_SPEC) + expected_args = fake.QOS_POLICY_GROUP_API_ARGS_REST + if is_adaptive: + policy_spec = fake.ADAPTIVE_QOS_SPEC + expected_args = fake.ADAPTIVE_QOS_API_ARGS_REST + + result = self.client._qos_spec_to_api_args( + policy_spec, is_adaptive, vserver=fake.VSERVER_NAME) + + self.assertEqual(expected_args, result) + + def test__qos_spec_to_api_args_bps(self): + policy_spec = copy.deepcopy(fake.QOS_POLICY_GROUP_SPEC_BPS) + expected_args = fake.QOS_POLICY_GROUP_API_ARGS_REST_BPS + + result = self.client._qos_spec_to_api_args( + policy_spec, False, vserver=fake.VSERVER_NAME) + + self.assertEqual(expected_args, result) + + @ddt.data('100IOPS', '100iops', '100B/s', '100b/s') + def test__sanitize_qos_spec_value(self, value): + result = self.client._sanitize_qos_spec_value(value) + + self.assertEqual(100, result) + + @ddt.data(True, False) + def test__create_qos_policy_group(self, is_adaptive): + self.client.vserver = fake.VSERVER_NAME + policy_spec = fake.QOS_POLICY_GROUP_SPEC + body_args = fake.QOS_POLICY_GROUP_API_ARGS_REST + if is_adaptive: + policy_spec = fake.ADAPTIVE_QOS_SPEC + body_args = fake.ADAPTIVE_QOS_API_ARGS_REST + + self.mock_object(self.client, '_qos_spec_to_api_args', + return_value=body_args) + self.mock_object(self.client, 'send_request') + + self.client._create_qos_policy_group(policy_spec, is_adaptive) + + self.client._qos_spec_to_api_args.assert_called_once_with( + policy_spec, is_adaptive, vserver=fake.VSERVER_NAME) + self.client.send_request.assert_called_once_with( + '/storage/qos/policies/', 'post', body=body_args, + enable_tunneling=False) + + @ddt.data((False, False), (False, True), (True, False), (True, True)) + @ddt.unpack + def test__modify_qos_policy_group(self, is_adaptive, same_name): + self.client.vserver = fake.VSERVER_NAME + policy_spec = fake.QOS_POLICY_GROUP_SPEC + body_args = copy.deepcopy(fake.QOS_POLICY_GROUP_API_ARGS_REST) + if is_adaptive: + 
policy_spec = fake.ADAPTIVE_QOS_SPEC + body_args = copy.deepcopy(fake.ADAPTIVE_QOS_API_ARGS_REST) + + expected_body_args = copy.deepcopy(body_args) + qos_group_item = copy.deepcopy( + fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0]) + if same_name: + qos_group_item['name'] = policy_spec['policy_name'] + expected_body_args.pop('name') + + self.mock_object(self.client, '_qos_spec_to_api_args', + return_value=body_args) + self.mock_object(self.client, 'send_request') + + self.client._modify_qos_policy_group( + policy_spec, is_adaptive, qos_group_item) + + self.client._qos_spec_to_api_args.assert_called_once_with( + policy_spec, is_adaptive) + self.client.send_request.assert_called_once_with( + f'/storage/qos/policies/{qos_group_item["uuid"]}', 'patch', + body=expected_body_args, enable_tunneling=False) + + def test_get_vol_by_junc_vserver(self): + api_response = fake_client.VOLUME_LIST_SIMPLE_RESPONSE_REST + volume_response = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST + file_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-vol' + + self.mock_object(self.client, 'send_request', + return_value=api_response) + self.mock_object(self.client, '_get_unique_volume', + return_value=volume_response) + + result = self.client.get_vol_by_junc_vserver( + fake_client.VOLUME_VSERVER_NAME, file_path) + + query = { + 'type': 'rw', + 'style': 'flex*', + 'is_svm_root': 'false', + 'error_state.is_inconsistent': 'false', + 'state': 'online', + 'nas.path': file_path, + 'svm.name': fake_client.VOLUME_VSERVER_NAME, + 'fields': 'name,style' + } + + self.client.send_request.assert_called_once_with( + '/storage/volumes/', 'get', query=query) + self.client._get_unique_volume.assert_called_once_with( + api_response["records"]) + + self.assertEqual(volume_response['name'], result) + + def test_file_assign_qos(self): + volume = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST + self.mock_object( + self.client, '_get_volume_by_args', + return_value=volume) + self.mock_object(self.client, 'send_request') + + self.client.file_assign_qos( + volume['name'], fake.QOS_POLICY_GROUP_NAME, True, fake.VOLUME_NAME) + + self.client._get_volume_by_args.assert_called_once_with(volume['name']) + body = {'qos_policy.name': fake.QOS_POLICY_GROUP_NAME} + self.client.send_request.assert_called_once_with( + f'/storage/volumes/{volume["uuid"]}/files/{fake.VOLUME_NAME}', + 'patch', body=body, enable_tunneling=False) + + @ddt.data(None, {}) + def test_mark_qos_policy_group_for_deletion_invalid_policy(self, + policy_info): + self.mock_object(self.client, '_rename_qos_policy_group') + self.mock_object(self.client, 'remove_unused_qos_policy_groups') + + self.client.mark_qos_policy_group_for_deletion(policy_info, False) + + self.client._rename_qos_policy_group.assert_not_called() + if policy_info is None: + self.client.remove_unused_qos_policy_groups.assert_not_called() + else: + (self.client.remove_unused_qos_policy_groups + .assert_called_once_with()) + + @ddt.data((False, False), (False, True), (True, False), (True, True)) + @ddt.unpack + def test_mark_qos_policy_group_for_deletion(self, is_adaptive, has_error): + policy_info = fake.QOS_POLICY_GROUP_INFO + if is_adaptive: + policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO + current_name = policy_info['spec']['policy_name'] + deleted_name = client_base.DELETED_PREFIX + current_name + + self.mock_object(self.client, 'remove_unused_qos_policy_groups') + if has_error: + self.mock_object(self.client, '_rename_qos_policy_group', + side_effect=self._mock_api_error()) + else: + 
self.mock_object(self.client, '_rename_qos_policy_group') + + self.client.mark_qos_policy_group_for_deletion( + policy_info, is_adaptive) + + self.client._rename_qos_policy_group.assert_called_once_with( + current_name, deleted_name) + self.client.remove_unused_qos_policy_groups.assert_called_once_with() + + def test__rename_qos_policy_group(self): + self.mock_object(self.client, 'send_request') + new_policy_name = 'fake_new_policy' + + self.client._rename_qos_policy_group(fake.QOS_POLICY_GROUP_NAME, + new_policy_name) + + body = {'name': new_policy_name} + query = {'name': fake.QOS_POLICY_GROUP_NAME} + self.client.send_request.assert_called_once_with( + '/storage/qos/policies/', 'patch', body=body, query=query, + enable_tunneling=False) + + def test_remove_unused_qos_policy_groups(self): + deleted_preffix = f'{client_base.DELETED_PREFIX}*' + + self.mock_object(self.client, 'send_request') + + self.client.remove_unused_qos_policy_groups() + + query = {'name': deleted_preffix} + self.client.send_request.assert_called_once_with( + '/storage/qos/policies', 'delete', query=query) + + def test_create_lun(self): + metadata = copy.deepcopy(fake_client.LUN_GET_ITER_RESULT[0]) + path = f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}' + size = 2048 + initial_size = size + qos_policy_group_is_adaptive = False + + self.mock_object(self.client, '_validate_qos_policy_group') + self.mock_object(self.client, 'send_request') + + body = { + 'name': path, + 'space.size': str(initial_size), + 'os_type': metadata['OsType'], + 'space.guarantee.requested': metadata['SpaceReserved'], + 'qos_policy.name': fake.QOS_POLICY_GROUP_NAME + } + + self.client.create_lun( + fake.VOLUME_NAME, fake.LUN_NAME, size, metadata, + qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME, + qos_policy_group_is_adaptive=qos_policy_group_is_adaptive) + + self.client._validate_qos_policy_group.assert_called_once_with( + qos_policy_group_is_adaptive) + self.client.send_request.assert_called_once_with( + '/storage/luns', 'post', body=body) + + def test_do_direct_resize(self): + lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' + new_size_bytes = '1073741824' + body = {'name': lun_path, 'space.size': new_size_bytes} + + self.mock_object(self.client, '_lun_update_by_path') + + self.client.do_direct_resize(lun_path, new_size_bytes) + + self.client._lun_update_by_path.assert_called_once_with(lun_path, body) + + @ddt.data(True, False) + def test__get_lun_by_path(self, is_empty): + lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' + lun_response = fake_client.LUN_GET_ITER_REST + lun_records = fake_client.LUN_GET_ITER_REST['records'] + if is_empty: + lun_response = {} + lun_records = [] + + self.mock_object(self.client, 'send_request', + return_value=lun_response) + + result = self.client._get_lun_by_path(lun_path) + + query = {'name': lun_path} + self.client.send_request.assert_called_once_with( + '/storage/luns', 'get', query=query) + self.assertEqual(result, lun_records) + + @ddt.data(True, False) + def test__get_first_lun_by_path(self, is_empty): + lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' + lun_records = fake_client.LUN_GET_ITER_REST['records'] + lun_item = lun_records[0] + if is_empty: + lun_records = [] + + self.mock_object(self.client, '_get_lun_by_path', + return_value=lun_records) + + result = self.client._get_first_lun_by_path(lun_path) + + self.client._get_lun_by_path.assert_called_once_with(lun_path) + if is_empty: + self.assertTrue(result is None) + else: + self.assertEqual(result, lun_item) + + def 
test__lun_update_by_path(self): + lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' + lun_item = fake_client.LUN_GET_ITER_REST['records'][0] + new_size_bytes = '1073741824' + body = { + 'name': lun_path, + 'space.guarantee.requested': 'True', + 'space.size': new_size_bytes + } + + self.mock_object(self.client, '_get_first_lun_by_path', + return_value=lun_item) + self.mock_object(self.client, 'send_request') + + self.client._lun_update_by_path(lun_path, body) + + self.client._get_first_lun_by_path.assert_called_once_with(lun_path) + self.client.send_request.assert_called_once_with( + f'/storage/luns/{lun_item["uuid"]}', 'patch', body=body) + + def test__lun_update_by_path_not_found(self): + lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' + lun_item = None + new_size_bytes = '1073741824' + body = { + 'name': lun_path, + 'space.guarantee.requested': 'True', + 'space.size': new_size_bytes + } + + self.mock_object(self.client, '_get_first_lun_by_path', + return_value=lun_item) + self.mock_object(self.client, 'send_request') + + self.assertRaises( + netapp_api.NaApiError, + self.client._lun_update_by_path, + lun_path, + body + ) + + self.client._get_first_lun_by_path.assert_called_once_with(lun_path) + self.client.send_request.assert_not_called() + + def test__validate_qos_policy_group_unsupported_qos(self): + is_adaptive = True + self.client.features.ADAPTIVE_QOS = False + + self.assertRaises( + netapp_utils.NetAppDriverException, + self.client._validate_qos_policy_group, + is_adaptive + ) + + def test__validate_qos_policy_group_no_spec(self): + is_adaptive = True + self.client.features.ADAPTIVE_QOS = True + + result = self.client._validate_qos_policy_group(is_adaptive) + + self.assertTrue(result is None) + + def test__validate_qos_policy_group_unsupported_feature(self): + is_adaptive = True + self.client.features.ADAPTIVE_QOS = True + spec = { + 'min_throughput': fake.MIN_IOPS_REST + } + + self.assertRaises( + netapp_utils.NetAppDriverException, + self.client._validate_qos_policy_group, + is_adaptive, + spec=spec, + qos_min_support=False + ) + + @ddt.data(True, False) + def test__validate_qos_policy_group(self, is_adaptive): + self.client.features.ADAPTIVE_QOS = True + spec = { + 'max_throughput': fake.MAX_IOPS_REST, + 'min_throughput': fake.MIN_IOPS_REST + } + + self.client._validate_qos_policy_group( + is_adaptive, spec=spec, qos_min_support=True) + + def test_delete_file(self): + """Delete file at path.""" + path_to_file = fake.VOLUME_PATH + volume_response = fake_client.VOLUME_LIST_SIMPLE_RESPONSE_REST + volume_item = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST + + volume_name = path_to_file.split('/')[2] + relative_path = '/'.join(path_to_file.split('/')[3:]) + + query = { + 'type': 'rw', + 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' + 'is_svm_root': 'false', + 'error_state.is_inconsistent': 'false', + 'state': 'online', + 'name': volume_name, + 'fields': 'name,style' + } + self.mock_object(self.client, 'send_request', + return_value=volume_response) + self.mock_object(self.client, '_get_unique_volume', + return_value=volume_item) + self.client.delete_file(path_to_file) + + relative_path = relative_path.replace('/', '%2F').replace('.', '%2E') + + self.client.send_request.assert_has_calls([ + mock.call('/storage/volumes/', 'get', query=query), + mock.call(f'/storage/volumes/{volume_item["uuid"]}' + + f'/files/{relative_path}', 'delete') + ]) + + self.client._get_unique_volume.assert_called_once_with( + volume_response['records']) + + def 
test_get_igroup_by_initiators_none_found(self): + initiator = 'initiator' + expected_response = fake_client.NO_RECORDS_RESPONSE_REST + + self.mock_object(self.client, 'send_request', + return_value=expected_response) + + igroup_list = self.client.get_igroup_by_initiators([initiator]) + + self.assertEqual([], igroup_list) + + def test_get_igroup_by_initiators(self): + initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e'] + expected_igroup = [{ + 'initiator-group-os-type': 'linux', + 'initiator-group-type': 'iscsi', + 'initiator-group-name': + 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53' + }] + + expected_query = { + 'svm.name': fake_client.VOLUME_VSERVER_NAME, + 'initiators.name': ' '.join(initiators), + 'fields': 'name,protocol,os_type' + } + + self.mock_object(self.client, 'send_request', + return_value=fake_client.IGROUP_GET_ITER_REST) + + igroup_list = self.client.get_igroup_by_initiators(initiators) + self.client.send_request.assert_called_once_with( + '/protocols/san/igroups', 'get', query=expected_query) + self.assertEqual(expected_igroup, igroup_list) + + def test_get_igroup_by_initiators_multiple(self): + initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e', + 'iqn.1993-08.org.fake:02:5b67769f5c5e'] + + expected_igroup = [{ + 'initiator-group-os-type': 'linux', + 'initiator-group-type': 'iscsi', + 'initiator-group-name': + 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53' + }] + + expected_query = { + 'svm.name': fake_client.VOLUME_VSERVER_NAME, + 'initiators.name': ' '.join(initiators), + 'fields': 'name,protocol,os_type' + } + + self.mock_object(self.client, 'send_request', + return_value=fake_client.IGROUP_GET_ITER_INITS_REST) + + igroup_list = self.client.get_igroup_by_initiators(initiators) + self.client.send_request.assert_called_once_with( + '/protocols/san/igroups', 'get', query=expected_query) + self.assertEqual(expected_igroup, igroup_list) + + def test_get_igroup_by_initiators_multiple_records(self): + initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e'] + expected_element = { + 'initiator-group-os-type': 'linux', + 'initiator-group-type': 'iscsi', + 'initiator-group-name': + 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53' + } + expected_igroup = [expected_element, expected_element] + + self.mock_object(self.client, 'send_request', + return_value=fake_client.IGROUP_GET_ITER_MULT_REST) + + igroup_list = self.client.get_igroup_by_initiators(initiators) + self.assertEqual(expected_igroup, igroup_list) + + def test_add_igroup_initiator(self): + igroup = 'fake_igroup' + initiator = 'fake_initator' + + mock_return = fake_client.IGROUP_GET_ITER_REST + expected_uuid = fake_client.IGROUP_GET_ITER_REST['records'][0]['uuid'] + mock_send_request = self.mock_object(self.client, 'send_request', + return_value = mock_return) + + self.client.add_igroup_initiator(igroup, initiator) + + expected_body = { + 'name': initiator + } + mock_send_request.assert_has_calls([ + mock.call('/protocols/san/igroups/' + + expected_uuid + '/initiators', + 'post', body=expected_body)]) + + def test_create_igroup(self): + igroup = 'fake_igroup' + igroup_type = 'fake_type' + os_type = 'fake_os' + + body = { + 'name': igroup, + 'protocol': igroup_type, + 'os_type': os_type, + } + + self.mock_object(self.client, 'send_request') + self.client.create_igroup(igroup, igroup_type, os_type) + self.client.send_request.assert_called_once_with( + '/protocols/san/igroups', 'post', body=body) + + @ddt.data(None, 0, 4095) + def test_map_lun(self, lun_id): + fake_record = fake_client.GET_LUN_MAP_REST['records'][0] + 
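+        # A lun_id of None is simply left out of the request body, while
+        # explicit IDs (0 or 4095 in the ddt data) must be sent through as
+        # 'logical_unit_number'.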
path = fake_record['lun']['name'] + igroup_name = fake_record['igroup']['name'] + + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.GET_LUN_MAP_REST) + + result = self.client.map_lun(path, igroup_name, lun_id) + + self.assertEqual(0, result) + expected_body = { + 'lun.name': path, + 'igroup.name': igroup_name, + } + if lun_id is not None: + expected_body['logical_unit_number'] = lun_id + + mock_send_request.assert_has_calls([ + mock.call('/protocols/san/lun-maps', 'post', + body=expected_body, query={'return_records': 'true'})]) + + def test_get_lun_map(self): + fake_record = fake_client.GET_LUN_MAP_REST['records'][0] + path = fake_record['lun']['name'] + + expected_lun_map = [{ + 'initiator-group': fake_record['igroup']['name'], + 'lun-id': fake_record['logical_unit_number'], + 'vserver': fake_record['svm']['name'], + }] + + expected_query = { + 'lun.name': path, + 'fields': 'igroup.name,logical_unit_number,svm.name', + } + + self.mock_object(self.client, 'send_request', + return_value=fake_client.GET_LUN_MAP_REST) + + lun_map = self.client.get_lun_map(path) + self.assertEqual(observed=lun_map, expected=expected_lun_map) + self.client.send_request.assert_called_once_with( + '/protocols/san/lun-maps', 'get', query=expected_query) + + def test_get_lun_map_no_luns_mapped(self): + fake_record = fake_client.GET_LUN_MAP_REST['records'][0] + path = fake_record['lun']['name'] + + expected_lun_map = [] + expected_query = { + 'lun.name': path, + 'fields': 'igroup.name,logical_unit_number,svm.name', + } + + self.mock_object(self.client, 'send_request', + return_value = fake_client.NO_RECORDS_RESPONSE_REST) + + lun_map = self.client.get_lun_map(path) + self.assertEqual(observed=lun_map, expected=expected_lun_map) + self.client.send_request.assert_called_once_with( + '/protocols/san/lun-maps', 'get', query=expected_query) + + def test_get_fc_target_wwpns(self): + fake_record = fake_client.FC_INTERFACE_REST['records'][0] + expected_wwpns = [fake_record['wwpn']] + expected_query = { + 'fields': 'wwpn' + } + self.mock_object(self.client, 'send_request', + return_value = fake_client.FC_INTERFACE_REST) + wwpns = self.client.get_fc_target_wwpns() + self.assertEqual(observed=wwpns, expected=expected_wwpns) + self.client.send_request.assert_called_once_with( + '/network/fc/interfaces', 'get', query=expected_query) + + def test_get_fc_target_wwpns_not_found(self): + expected_wwpns = [] + expected_query = { + 'fields': 'wwpn' + } + self.mock_object(self.client, 'send_request', + return_value = fake_client.NO_RECORDS_RESPONSE_REST) + wwpns = self.client.get_fc_target_wwpns() + self.assertEqual(observed=wwpns, expected=expected_wwpns) + self.client.send_request.assert_called_once_with( + '/network/fc/interfaces', 'get', query=expected_query) + + def test_unmap_lun(self): + get_uuid_response = fake_client.GET_LUN_MAP_REST + mock_send_request = self.mock_object( + self.client, 'send_request', + side_effect=[get_uuid_response, None]) + + self.client.unmap_lun(fake_client.LUN_NAME_PATH, + fake_client.IGROUP_NAME) + + query_uuid = { + 'igroup.name': fake_client.IGROUP_NAME, + 'lun.name': fake_client.LUN_NAME_PATH, + 'fields': 'lun.uuid,igroup.uuid' + } + + lun_uuid = get_uuid_response['records'][0]['lun']['uuid'] + igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid'] + + mock_send_request.assert_has_calls([ + mock.call('/protocols/san/lun-maps', 'get', query=query_uuid), + mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', + 'delete'), + ]) + + def 
test_unmap_lun_with_api_error(self): + get_uuid_response = fake_client.GET_LUN_MAP_REST + mock_send_request = self.mock_object( + self.client, 'send_request', + side_effect=[get_uuid_response, netapp_api.NaApiError()]) + + self.assertRaises(netapp_api.NaApiError, + self.client.unmap_lun, + fake_client.LUN_NAME_PATH, + fake_client.IGROUP_NAME) + + query_uuid = { + 'igroup.name': fake_client.IGROUP_NAME, + 'lun.name': fake_client.LUN_NAME_PATH, + 'fields': 'lun.uuid,igroup.uuid' + } + + lun_uuid = get_uuid_response['records'][0]['lun']['uuid'] + igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid'] + + mock_send_request.assert_has_calls([ + mock.call('/protocols/san/lun-maps', 'get', query=query_uuid), + mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', + 'delete'), + ]) + + def test_unmap_lun_invalid_input(self): + get_uuid_response = fake_client.NO_RECORDS_RESPONSE_REST + mock_send_request = self.mock_object( + self.client, 'send_request', + side_effect=[get_uuid_response, + None]) + + self.client.unmap_lun(fake_client.LUN_NAME_PATH, + fake_client.IGROUP_NAME) + + query_uuid = { + 'igroup.name': fake_client.IGROUP_NAME, + 'lun.name': fake_client.LUN_NAME_PATH, + 'fields': 'lun.uuid,igroup.uuid' + } + + mock_send_request.assert_called_once_with( + '/protocols/san/lun-maps', 'get', query=query_uuid) + + def test_unmap_lun_not_mapped_in_group(self): + get_uuid_response = fake_client.GET_LUN_MAP_REST + + # Exception REST_NO_SUCH_LUN_MAP is handled inside the function + # and should not be re-raised + mock_send_request = self.mock_object( + self.client, 'send_request', + side_effect=[ + get_uuid_response, + netapp_api.NaApiError( + code=netapp_api.REST_NO_SUCH_LUN_MAP)]) + + self.client.unmap_lun(fake_client.LUN_NAME_PATH, + fake_client.IGROUP_NAME) + + query_uuid = { + 'igroup.name': fake_client.IGROUP_NAME, + 'lun.name': fake_client.LUN_NAME_PATH, + 'fields': 'lun.uuid,igroup.uuid' + } + + lun_uuid = get_uuid_response['records'][0]['lun']['uuid'] + igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid'] + + mock_send_request.assert_has_calls([ + mock.call('/protocols/san/lun-maps', 'get', query=query_uuid), + mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', + 'delete'), + ]) + + def test_has_luns_mapped_to_initiators(self): + initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1'] + api_response = fake_client.GET_LUN_MAPS + mock_send_request = self.mock_object( + self.client, 'send_request', return_value=api_response) + + self.assertTrue(self.client.has_luns_mapped_to_initiators(initiators)) + + query = { + 'initiators.name': ' '.join(initiators), + 'fields': 'lun_maps' + } + + mock_send_request.assert_called_once_with( + '/protocols/san/igroups', 'get', query=query) + + def test_has_luns_mapped_to_initiators_no_records(self): + initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1'] + api_response = fake_client.NO_RECORDS_RESPONSE_REST + mock_send_request = self.mock_object( + self.client, 'send_request', return_value=api_response) + + self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators)) + + query = { + 'initiators.name': ' '.join(initiators), + 'fields': 'lun_maps' + } + + mock_send_request.assert_called_once_with( + '/protocols/san/igroups', 'get', query=query) + + def test_has_luns_mapped_to_initiators_not_mapped(self): + initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1'] + api_response = fake_client.GET_LUN_MAPS_NO_MAPS + mock_send_request = self.mock_object( + self.client, 'send_request', return_value=api_response) + + 
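+        # GET_LUN_MAPS_NO_MAPS returns igroups without any 'lun_maps'
+        # entries, so the initiators are reported as having no mapped LUNs.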
self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators)) + + query = { + 'initiators.name': ' '.join(initiators), + 'fields': 'lun_maps' + } + + mock_send_request.assert_called_once_with( + '/protocols/san/igroups', 'get', query=query) + + def test_iscsi_service_details(self): + fake_record = fake_client.GET_ISCSI_SERVICE_DETAILS_REST['records'][0] + expected_iqn = fake_record['target']['name'] + expected_query = { + 'fields': 'target.name' + } + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.GET_ISCSI_SERVICE_DETAILS_REST) + iqn = self.client.get_iscsi_service_details() + self.assertEqual(expected_iqn, iqn) + mock_send_request.assert_called_once_with( + '/protocols/san/iscsi/services', 'get', query=expected_query) + + def test_iscsi_service_details_not_found(self): + expected_iqn = None + expected_query = { + 'fields': 'target.name' + } + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.NO_RECORDS_RESPONSE_REST) + iqn = self.client.get_iscsi_service_details() + self.assertEqual(expected_iqn, iqn) + mock_send_request.assert_called_once_with( + '/protocols/san/iscsi/services', 'get', query=expected_query) + + def test_check_iscsi_initiator_exists(self): + fake_record = fake_client.CHECK_ISCSI_INITIATOR_REST['records'][0] + iqn = fake_record['initiator'] + expected_query = { + 'initiator': iqn + } + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.CHECK_ISCSI_INITIATOR_REST) + initiator_exists = self.client.check_iscsi_initiator_exists(iqn) + self.assertEqual(expected=True, observed=initiator_exists) + mock_send_request.assert_called_once_with( + '/protocols/san/iscsi/credentials', 'get', + query=expected_query) + + def test_check_iscsi_initiator_exists_not_found(self): + fake_record = fake_client.CHECK_ISCSI_INITIATOR_REST['records'][0] + iqn = fake_record['initiator'] + expected_query = { + 'initiator': iqn + } + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.NO_RECORDS_RESPONSE_REST) + initiator_exists = self.client.check_iscsi_initiator_exists(iqn) + self.assertEqual(expected=False, observed=initiator_exists) + mock_send_request.assert_called_once_with( + '/protocols/san/iscsi/credentials', 'get', + query=expected_query) + + def test_get_iscsi_target_details(self): + fake_record = fake_client.GET_ISCSI_TARGET_DETAILS_REST['records'][0] + expected_details = [{ + 'address': fake_record['ip']['address'], + 'port': 3260, + 'tpgroup-tag': None, + 'interface-enabled': fake_record['enabled'], + }] + expected_query = { + 'services': 'data_iscsi', + 'fields': 'ip.address,enabled' + } + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.GET_ISCSI_TARGET_DETAILS_REST) + details = self.client.get_iscsi_target_details() + self.assertEqual(expected_details, details) + mock_send_request.assert_called_once_with('/network/ip/interfaces', + 'get', query=expected_query) + + def test_get_iscsi_target_details_no_details(self): + expected_details = [] + expected_query = { + 'services': 'data_iscsi', + 'fields': 'ip.address,enabled' + } + mock_send_request = self.mock_object( + self.client, 'send_request', + return_value=fake_client.NO_RECORDS_RESPONSE_REST) + details = self.client.get_iscsi_target_details() + self.assertEqual(expected_details, details) + mock_send_request.assert_called_once_with('/network/ip/interfaces', + 'get', query=expected_query) + + def 
test_move_lun(self): + fake_cur_path = '/vol/fake_vol/fake_lun_cur' + fake_new_path = '/vol/fake_vol/fake_lun_new' + expected_query = { + 'svm.name': self.vserver, + 'name': fake_cur_path, + } + expected_body = { + 'name': fake_new_path, + } + mock_send_request = self.mock_object(self.client, 'send_request') + self.client.move_lun(fake_cur_path, fake_new_path) + mock_send_request.assert_called_once_with( + '/storage/luns/', 'patch', query=expected_query, + body=expected_body) + + @ddt.data(True, False) + def test_clone_file_snapshot(self, overwrite_dest): + fake_volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST + self.client.features.BACKUP_CLONE_PARAM = True + + fake_name = fake.NFS_VOLUME['name'] + fake_new_name = fake.SNAPSHOT_NAME + api_version = (1, 19) + + expected_body = { + 'volume': { + 'uuid': fake_volume['uuid'], + 'name': fake_volume['name'] + }, + 'source_path': fake_name, + 'destination_path': fake_new_name, + 'is_backup': True + } + if overwrite_dest: + api_version = (1, 20) + expected_body['overwrite_destination'] = True + + self.mock_object(self.client, 'send_request') + self.mock_object(self.client, '_get_volume_by_args', + return_value=fake_volume) + self.mock_object(self.client.connection, 'get_api_version', + return_value=api_version) + + self.client.clone_file( + fake_volume['name'], fake_name, fake_new_name, fake.VSERVER_NAME, + is_snapshot=True, dest_exists=overwrite_dest) + + self.client.send_request.assert_has_calls([ + mock.call('/storage/file/clone', 'post', body=expected_body), + ]) + + def test_clone_lun(self): + self.client.vserver = fake.VSERVER_NAME + + expected_body = { + 'svm': { + 'name': fake.VSERVER_NAME + }, + 'name': f'/vol/{fake.VOLUME_NAME}/{fake.SNAPSHOT_NAME}', + 'clone': { + 'source': { + 'name': f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}', + } + }, + 'space': { + 'guarantee': { + 'requested': True, + } + }, + 'qos_policy': { + 'name': fake.QOS_POLICY_GROUP_NAME, + } + } + + mock_send_request = self.mock_object( + self.client, 'send_request', return_value=None) + mock_validate_policy = self.mock_object( + self.client, '_validate_qos_policy_group') + + self.client.clone_lun( + volume=fake.VOLUME_NAME, name=fake.LUN_NAME, + new_name=fake.SNAPSHOT_NAME, + qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME, + is_snapshot=True) + + mock_validate_policy.assert_called_once_with(False) + mock_send_request.assert_called_once_with( + '/storage/luns', 'post', body=expected_body) + + def test_destroy_lun(self, force=True): + path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.FILE_NAME}' + + query = {} + query['name'] = path + query['svm'] = fake_client.VOLUME_VSERVER_NAME + if force: + query['allow_delete_while_mapped'] = 'true' + + self.mock_object(self.client, 'send_request') + + self.client.destroy_lun(path) + + self.client.send_request.assert_called_once_with('/storage/luns/', + 'delete', query=query) + + def test_get_flexvol_capacity(self, ): + + api_response = fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE_REST + volume_response = api_response['records'][0] + mock_get_unique_vol = self.mock_object( + self.client, '_get_volume_by_args', return_value=volume_response) + + capacity = self.client.get_flexvol_capacity( + flexvol_path=fake.VOLUME_PATH, flexvol_name=fake.VOLUME_NAME) + + mock_get_unique_vol.assert_called_once_with( + vol_name=fake.VOLUME_NAME, vol_path=fake.VOLUME_PATH, + fields='name,space.available,space.afs_total') + self.assertEqual(float(fake_client.VOLUME_SIZE_TOTAL), + capacity['size-total']) + 
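+        # Both capacity figures come back as floats derived from the
+        # 'space' fields requested from the REST volume endpoint.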
self.assertEqual(float(fake_client.VOLUME_SIZE_AVAILABLE), + capacity['size-available']) + + def test_get_flexvol_capacity_not_found(self): + + self.mock_object( + self.client, '_get_volume_by_args', + side_effect=exception.VolumeBackendAPIException(data="fake")) + + self.assertRaises(netapp_utils.NetAppDriverException, + self.client.get_flexvol_capacity, + flexvol_path='fake_path') + + def test_check_api_permissions(self): + + mock_log = self.mock_object(client_cmode_rest.LOG, 'warning') + self.mock_object(self.client, 'check_cluster_api', return_value=True) + + self.client.check_api_permissions() + + self.client.check_cluster_api.assert_has_calls( + [mock.call(key) for key in client_cmode_rest.SSC_API_MAP.keys()]) + self.assertEqual(0, mock_log.call_count) + + def test_check_api_permissions_failed_ssc_apis(self): + + def check_cluster_api(api): + if api != '/storage/volumes': + return False + return True + + self.mock_object(self.client, 'check_cluster_api', + side_effect=check_cluster_api) + + mock_log = self.mock_object(client_cmode_rest.LOG, 'warning') + + self.client.check_api_permissions() + + self.assertEqual(1, mock_log.call_count) + + def test_check_api_permissions_failed_volume_api(self): + + def check_cluster_api(api): + if api == '/storage/volumes': + return False + return True + + self.mock_object(self.client, 'check_cluster_api', + side_effect=check_cluster_api) + + mock_log = self.mock_object(client_cmode_rest.LOG, 'warning') + + self.assertRaises(exception.VolumeBackendAPIException, + self.client.check_api_permissions) + + self.assertEqual(0, mock_log.call_count) + + def test_check_cluster_api(self): + + endpoint_api = '/storage/volumes' + endpoint_request = '/storage/volumes?return_records=false' + mock_send_request = self.mock_object(self.client, + 'send_request', + return_value=True) + + result = self.client.check_cluster_api(endpoint_api) + + mock_send_request.assert_has_calls([mock.call(endpoint_request, 'get', + enable_tunneling=False)]) + self.assertTrue(result) + + def test_check_cluster_api_error(self): + + endpoint_api = '/storage/volumes' + api_error = netapp_api.NaApiError(code=netapp_api.REST_UNAUTHORIZED) + + self.mock_object(self.client, 'send_request', + side_effect=[api_error]) + + result = self.client.check_cluster_api(endpoint_api) + + self.assertFalse(result) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py index 950e92283ee..2b3d0726553 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py @@ -275,8 +275,9 @@ IGROUP1 = {'initiator-group-os-type': 'linux', QOS_SPECS = {} EXTRA_SPECS = {} MAX_THROUGHPUT = '21734278B/s' -MIN_IOPS = '256IOPS' -MAX_IOPS = '512IOPS' +MIN_IOPS = '256iops' +MAX_IOPS = '512iops' +MAX_BPS = '1000000B/s' QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' QOS_POLICY_GROUP_INFO_LEGACY = { @@ -290,6 +291,11 @@ QOS_POLICY_GROUP_SPEC = { 'policy_name': QOS_POLICY_GROUP_NAME, } +QOS_POLICY_GROUP_SPEC_BPS = { + 'max_throughput': MAX_BPS, + 'policy_name': QOS_POLICY_GROUP_NAME, +} + QOS_POLICY_GROUP_SPEC_MAX = { 'max_throughput': MAX_THROUGHPUT, 'policy_name': QOS_POLICY_GROUP_NAME, @@ -417,6 +423,19 @@ FAKE_LUN = netapp_api.NaElement.create_node_with_children( 'volume': 'fakeLUN', 'vserver': 'fake_vserver'}) +FAKE_LUN_GET_ITER_RESULT = [ + { + 'Vserver': 'fake_vserver', + 'Volume': 'fake_volume', + 'Size': 123, + 'Qtree': 'fake_qtree', + 'Path': 'fake_path', + 'OsType': 
'fake_os', + 'SpaceReserved': 'true', + 'UUID': 'fake-uuid', + }, +] + CG_VOLUME_NAME = 'fake_cg_volume' CG_GROUP_NAME = 'fake_consistency_group' CG_POOL_NAME = 'cdot' @@ -740,12 +759,219 @@ def get_fake_net_interface_get_iter_response(): def get_fake_ifs(): - list_of_ifs = [ - etree.XML(""" -
<address>FAKE_IP</address></net-interface-info>"""),
-        etree.XML("""<net-interface-info>
-        <address>FAKE_IP2</address></net-interface-info>"""),
-        etree.XML("""<net-interface-info>
-        <address>FAKE_IP3</address></net-interface-info>
"""), - ] - return [netapp_api.NaElement(el) for el in list_of_ifs] + return [{'vserver': VSERVER_NAME}] + + +AFF_SYSTEM_NODE_GET_ITER_RESPONSE_REST = { + "records": [ + { + "uuid": "9eff6c76-fc13-11ea-8799-525400", + "name": "aff-node1", + "model": "AFFA400", + "is_all_flash_optimized": True, + "is_all_flash_select_optimized": False, + "_links": { + "self": { + "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" + } + } + }, + { + "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", + "name": "aff-node2", + "model": "AFFA400", + "is_all_flash_optimized": True, + "is_all_flash_select_optimized": False, + "_links": { + "self": { + "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" + } + } + } + ], + "num_records": 2, + "_links": { + "self": { + "href": "/api/cluster/nodes?fields=model,name," + "is_all_flash_optimized,is_all_flash_select_optimized" + } + } +} + +FAS_SYSTEM_NODE_GET_ITER_RESPONSE_REST = { + "records": [ + { + "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", + "name": "fas-node1", + "model": "FAS2554", + "is_all_flash_optimized": False, + "is_all_flash_select_optimized": False, + "_links": { + "self": { + "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" + } + } + }, + { + "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", + "name": "fas-node2", + "model": "FAS2554", + "is_all_flash_optimized": False, + "is_all_flash_select_optimized": False, + "_links": { + "self": { + "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" + } + } + } + ], + "num_records": 2, + "_links": { + "self": { + "href": "/api/cluster/nodes?fields=model,name," + "is_all_flash_optimized,is_all_flash_select_optimized" + } + } +} + +HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE_REST = { + "records": [ + { + "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", + "name": "select-node", + "model": "FDvM300", + "is_all_flash_optimized": False, + "is_all_flash_select_optimized": True, + "_links": { + "self": { + "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" + } + } + }, + { + "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", + "name": "c190-node", + "model": "AFF-C190", + "is_all_flash_optimized": True, + "is_all_flash_select_optimized": False, + "_links": { + "self": { + "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" + } + } + } + ], + "num_records": 2, + "_links": { + "self": { + "href": "/api/cluster/nodes?fields=model,name," + "is_all_flash_optimized,is_all_flash_select_optimized" + } + } +} + +QOS_POLICY_BY_NAME_RESPONSE_REST = { + "records": [ + { + "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", + "name": "openstack-cd-uuid", + "_links": { + "self": { + "href": "/api/storage/qos/policies/" + "9eff6c76-fc13-11ea-8799-52540006bba9" + } + } + } + ], + "num_records": 1, + "_links": { + "self": { + "href": "/api/storage/qos/policies?fields=name" + } + } +} + +QOS_SPECS_REST = {} +MAX_THROUGHPUT_REST = '21734278' +MIN_IOPS_REST = '256' +MAX_IOPS_REST = '512' +MAX_BPS_REST = '1' + +QOS_POLICY_GROUP_INFO_LEGACY_REST = { + 'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME, + 'spec': None, +} + +QOS_POLICY_GROUP_SPEC_REST = { + 'min_throughput': MIN_IOPS_REST, + 'max_throughput': MAX_IOPS_REST, + 'policy_name': QOS_POLICY_GROUP_NAME, +} + +QOS_POLICY_GROUP_API_ARGS_REST = { + 'name': QOS_POLICY_GROUP_NAME, + 'svm': { + 'name': VSERVER_NAME + }, + 'fixed': { + 'max_throughput_iops': int(MAX_IOPS_REST), + 'min_throughput_iops': int(MIN_IOPS_REST) + } +} + +QOS_POLICY_GROUP_API_ARGS_REST_BPS = { + 'name': QOS_POLICY_GROUP_NAME, + 'svm': { + 'name': VSERVER_NAME + }, + 'fixed': { + 
'max_throughput_mbps': int(MAX_BPS_REST), + } +} + +QOS_POLICY_GROUP_SPEC_MAX_REST = { + 'max_throughput': MAX_THROUGHPUT_REST, + 'policy_name': QOS_POLICY_GROUP_NAME, +} + +EXPECTED_IOPS_PER_GB_REST = '128' +PEAK_IOPS_PER_GB_REST = '512' +PEAK_IOPS_ALLOCATION_REST = 'used-space' +EXPECTED_IOPS_ALLOCATION_REST = 'used-space' +ABSOLUTE_MIN_IOPS_REST = '75' +BLOCK_SIZE_REST = 'ANY' +ADAPTIVE_QOS_SPEC_REST = { + 'policy_name': QOS_POLICY_GROUP_NAME, + 'expected_iops': EXPECTED_IOPS_PER_GB_REST, + 'expected_iops_allocation': EXPECTED_IOPS_ALLOCATION_REST, + 'peak_iops': PEAK_IOPS_PER_GB_REST, + 'peak_iops_allocation': PEAK_IOPS_ALLOCATION_REST, + 'absolute_min_iops': ABSOLUTE_MIN_IOPS_REST, + 'block_size': BLOCK_SIZE_REST, +} + +ADAPTIVE_QOS_API_ARGS_REST = { + 'name': QOS_POLICY_GROUP_NAME, + 'svm': { + 'name': VSERVER_NAME + }, + 'adaptive': { + 'absolute_min_iops': int(ABSOLUTE_MIN_IOPS_REST), + 'expected_iops': int(EXPECTED_IOPS_PER_GB_REST), + 'expected_iops_allocation': EXPECTED_IOPS_ALLOCATION_REST, + 'peak_iops': int(PEAK_IOPS_PER_GB_REST), + 'peak_iops_allocation': PEAK_IOPS_ALLOCATION_REST, + 'block_size': BLOCK_SIZE_REST, + } +} + +QOS_POLICY_GROUP_INFO_REST = { + 'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_REST} +QOS_POLICY_GROUP_INFO_MAX_REST = { + 'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_MAX_REST} +ADAPTIVE_QOS_POLICY_GROUP_INFO_REST = { + 'legacy': None, + 'spec': ADAPTIVE_QOS_SPEC_REST, +} + +REST_FIELDS = 'uuid,name,style' diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py index 8598cedc938..13a8b77d51e 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py @@ -19,7 +19,6 @@ from unittest import mock import ddt -import six from cinder import exception from cinder.objects import fields @@ -280,10 +279,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() - self.library.zapi_client.get_lun_by_args.return_value = [ - mock.Mock(spec=netapp_api.NaElement)] - lun = fake.FAKE_LUN - self.library._get_lun_by_args = mock.Mock(return_value=[lun]) + lun = fake.FAKE_LUN_GET_ITER_RESULT + self.library.zapi_client.get_lun_by_args.return_value = lun self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false') @@ -303,10 +300,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() - self.library.zapi_client.get_lun_by_args.return_value = [ - mock.Mock(spec=netapp_api.NaElement)] - lun = fake.FAKE_LUN - self.library._get_lun_by_args = mock.Mock(return_value=[lun]) + lun = fake.FAKE_LUN_GET_ITER_RESULT + self.library.zapi_client.get_lun_by_args.return_value = lun self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', @@ -327,10 +322,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): 'fakeLUN'}) self.library.zapi_client = mock.Mock() self.library.lun_space_reservation = 'false' - self.library.zapi_client.get_lun_by_args.return_value = [ - mock.Mock(spec=netapp_api.NaElement)] - lun = fake.FAKE_LUN - self.library._get_lun_by_args = mock.Mock(return_value=[lun]) + lun = fake.FAKE_LUN_GET_ITER_RESULT + 
self.library.zapi_client.get_lun_by_args.return_value = lun self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True) @@ -1542,27 +1535,22 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): fake.LUN_WITH_METADATA['metadata']) new_snap_name = 'new-%s' % fake.SNAPSHOT['name'] snapshot_path = lun_obj.metadata['Path'] - flexvol_name = lun_obj.metadata['Volume'] block_count = 40960 mock__get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=lun_obj) mock__get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) - mock_create_lun = self.mock_object(self.library.zapi_client, - 'create_lun') mock__clone_lun = self.mock_object(self.library, '_clone_lun') self.library._clone_snapshot(fake.SNAPSHOT['name']) mock__get_lun_from_table.assert_called_once_with(fake.SNAPSHOT['name']) mock__get_lun_block_count.assert_called_once_with(snapshot_path) - mock_create_lun.assert_called_once_with(flexvol_name, new_snap_name, - six.text_type(lun_obj.size), - lun_obj.metadata) mock__clone_lun.assert_called_once_with(fake.SNAPSHOT['name'], new_snap_name, - block_count=block_count) + space_reserved='false', + is_snapshot=True) def test__clone_snapshot_invalid_block_count(self): lun_obj = block_base.NetAppLun(fake.LUN_WITH_METADATA['handle'], @@ -1594,8 +1582,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.library, '_get_lun_from_table', return_value=lun_obj) mock__get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) - mock_create_lun = self.mock_object(self.library.zapi_client, - 'create_lun') side_effect = exception.VolumeBackendAPIException(data='data') mock__clone_lun = self.mock_object(self.library, '_clone_lun', side_effect=side_effect) @@ -1608,12 +1594,10 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): mock__get_lun_from_table.assert_called_once_with(fake.SNAPSHOT['name']) mock__get_lun_block_count.assert_called_once_with(snapshot_path) - mock_create_lun.assert_called_once_with(flexvol_name, new_snap_name, - six.text_type(lun_obj.size), - lun_obj.metadata) mock__clone_lun.assert_called_once_with(fake.SNAPSHOT['name'], new_snap_name, - block_count=block_count) + space_reserved='false', + is_snapshot=True) mock_destroy_lun.assert_called_once_with(new_lun_path) def test__swap_luns(self): diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py index d443234c544..21fbf652080 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py @@ -453,7 +453,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): vserver = self.driver._get_vserver_for_ip('FAKE_IP') - self.assertIsNone(vserver) + self.assertEqual(fake.VSERVER_NAME, vserver) def test_check_for_setup_error(self): mock_add_looping_tasks = self.mock_object( @@ -892,9 +892,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase): is_snapshot=is_snapshot) def test__clone_backing_file_for_volume(self): - body = fake.get_fake_net_interface_get_iter_response() self.driver.zapi_client.get_if_info_by_ip = mock.Mock( - return_value=[netapp_api.NaElement(body)]) + return_value=[{'ip': 'fake_ip'}]) self.driver.zapi_client.get_vol_by_junc_vserver = mock.Mock( return_value='nfsvol') self.mock_object(self.driver, '_get_export_ip_path', diff --git 
a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py index b6858c9b04b..3468d2e4f6d 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py @@ -20,7 +20,6 @@ from unittest import mock import ddt import six -from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) @@ -46,45 +45,6 @@ class CapabilitiesLibraryTestCase(test.TestCase): config.volume_backend_name = 'fake_backend' return config - def test_check_api_permissions(self): - - mock_log = self.mock_object(capabilities.LOG, 'warning') - - self.ssc_library.check_api_permissions() - - self.zapi_client.check_cluster_api.assert_has_calls( - [mock.call(*key) for key in capabilities.SSC_API_MAP.keys()]) - self.assertEqual(0, mock_log.call_count) - - def test_check_api_permissions_failed_ssc_apis(self): - - def check_cluster_api(object_name, operation_name, api): - if api != 'volume-get-iter': - return False - return True - - self.zapi_client.check_cluster_api.side_effect = check_cluster_api - mock_log = self.mock_object(capabilities.LOG, 'warning') - - self.ssc_library.check_api_permissions() - - self.assertEqual(1, mock_log.call_count) - - def test_check_api_permissions_failed_volume_api(self): - - def check_cluster_api(object_name, operation_name, api): - if api == 'volume-get-iter': - return False - return True - - self.zapi_client.check_cluster_api.side_effect = check_cluster_api - mock_log = self.mock_object(capabilities.LOG, 'warning') - - self.assertRaises(exception.VolumeBackendAPIException, - self.ssc_library.check_api_permissions) - - self.assertEqual(0, mock_log.call_count) - def test_get_ssc(self): result = self.ssc_library.get_ssc() diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py index ece615f8e73..f0a8649edab 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_base.py +++ b/cinder/volume/drivers/netapp/dataontap/block_base.py @@ -410,12 +410,11 @@ class NetAppBlockStorageLibrary(object): def _extract_lun_info(self, lun): """Extracts the LUNs from API and populates the LUN table.""" - meta_dict = self._create_lun_meta(lun) - path = lun.get_child_content('path') + path = lun['Path'] (_rest, _splitter, name) = path.rpartition('/') - handle = self._create_lun_handle(meta_dict) - size = lun.get_child_content('size') - return NetAppLun(handle, name, size, meta_dict) + handle = self._create_lun_handle(lun) + size = lun['Size'] + return NetAppLun(handle, name, size, lun) def _extract_and_populate_luns(self, api_luns): """Extracts the LUNs from API and populates the LUN table.""" @@ -547,9 +546,6 @@ class NetAppBlockStorageLibrary(object): LOG.error("Error getting LUN attribute. 
Exception: %s", e) return None - def _create_lun_meta(self, lun): - raise NotImplementedError() - def _get_fc_target_wwpns(self, include_partner=True): raise NotImplementedError() @@ -725,8 +721,8 @@ class NetAppBlockStorageLibrary(object): msg = _('Failure getting LUN info for %s.') raise exception.VolumeBackendAPIException(data=msg % seg[-1]) lun_info = lun_infos[-1] - bs = int(lun_info.get_child_content('block-size')) - ls = int(lun_info.get_child_content('size')) + bs = int(lun_info['BlockSize']) + ls = int(lun_info['Size']) block_count = ls / bs return block_count diff --git a/cinder/volume/drivers/netapp/dataontap/block_cmode.py b/cinder/volume/drivers/netapp/dataontap/block_cmode.py index 59d5b04fea2..0f6adca4e0f 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/block_cmode.py @@ -236,27 +236,14 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary, if len(lun) == 0: msg = _("No cloned LUN named %s found on the filer") raise exception.VolumeBackendAPIException(data=msg % new_name) - clone_meta = self._create_lun_meta(lun[0]) - self._add_lun_to_table( - block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'], - clone_meta['Path']), - new_name, - lun[0].get_child_content('size'), - clone_meta)) - def _create_lun_meta(self, lun): - """Creates LUN metadata dictionary.""" - self.zapi_client.check_is_naelement(lun) - meta_dict = {} - meta_dict['Vserver'] = lun.get_child_content('vserver') - meta_dict['Volume'] = lun.get_child_content('volume') - meta_dict['Qtree'] = lun.get_child_content('qtree') - meta_dict['Path'] = lun.get_child_content('path') - meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') - meta_dict['SpaceReserved'] = \ - lun.get_child_content('is-space-reservation-enabled') - meta_dict['UUID'] = lun.get_child_content('uuid') - return meta_dict + clone_lun = lun[0] + self._add_lun_to_table( + block_base.NetAppLun('%s:%s' % (clone_lun['Vserver'], + clone_lun['Path']), + new_name, + clone_lun['Size'], + clone_lun)) def _get_fc_target_wwpns(self, include_partner=True): return self.zapi_client.get_fc_target_wwpns() @@ -879,8 +866,6 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary, LOG.info("Cloning LUN %s from snapshot %s in volume %s.", lun_name, snapshot_name, flexvol_name) - metadata = snapshot_lun.metadata - block_count = self._get_lun_block_count(snapshot_path) if block_count == 0: msg = _("%s cannot be reverted using clone operation" @@ -889,12 +874,9 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary, new_snap_name = "new-%s" % snapshot_name - self.zapi_client.create_lun( - flexvol_name, new_snap_name, - six.text_type(snapshot_lun.size), metadata) try: self._clone_lun(snapshot_name, new_snap_name, - block_count=block_count) + space_reserved='false', is_snapshot=True) return new_snap_name except Exception: with excutils.save_and_reraise_exception(): diff --git a/cinder/volume/drivers/netapp/dataontap/client/api.py b/cinder/volume/drivers/netapp/dataontap/client/api.py index 52f8585324a..bfe60449caa 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/api.py +++ b/cinder/volume/drivers/netapp/dataontap/client/api.py @@ -644,6 +644,13 @@ class SSHUtil(object): # REST API error codes. 
REST_UNAUTHORIZED = '6' +REST_API_NOT_FOUND = '3' +REST_UPDATE_SNAPMIRROR_FAILED = '13303844' +REST_ERELATION_EXISTS = '6619637' +REST_SNAPMIRROR_IN_PROGRESS = '13303810' +REST_UPDATE_SNAPMIRROR_FAILED = '13303844' +REST_NO_SUCH_LUN_MAP = '5374922' +REST_NO_SUCH_FILE = '6684674' class RestNaServer(object): diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py index d29039fe46e..c154072c5c5 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py @@ -37,6 +37,28 @@ DEFAULT_MAX_PAGE_LENGTH = 50 ONTAP_SELECT_MODEL = 'FDvM300' ONTAP_C190 = 'C190' +# NOTE(cknight): The keys in this map are tuples that contain arguments needed +# for efficient use of the system-user-capability-get-iter cDOT API. The +# values are SSC extra specs associated with the APIs listed in the keys. +SSC_API_MAP = { + ('storage.aggregate', 'show', 'aggr-options-list-info'): [ + 'netapp_raid_type', + ], + ('storage.disk', 'show', 'storage-disk-get-iter'): [ + 'netapp_disk_type', + ], + ('snapmirror', 'show', 'snapmirror-get-iter'): [ + 'netapp_mirrored', + ], + ('volume.efficiency', 'show', 'sis-get-iter'): [ + 'netapp_dedup', + 'netapp_compression', + ], + ('volume', '*show', 'volume-get-iter'): [ + 'netapp_flexvol_encryption', + ], +} + @six.add_metaclass(volume_utils.TraceWrapperMetaclass) class Client(client_base.Client): @@ -182,6 +204,32 @@ class Client(client_base.Client): result.get_child_by_name('next-tag').set_content('') return result + def check_api_permissions(self): + """Check which APIs that support SSC functionality are available.""" + + inaccessible_apis = [] + invalid_extra_specs = [] + + for api_tuple, extra_specs in SSC_API_MAP.items(): + object_name, operation_name, api = api_tuple + if not self.check_cluster_api(object_name, + operation_name, + api): + inaccessible_apis.append(api) + invalid_extra_specs.extend(extra_specs) + + if inaccessible_apis: + if 'volume-get-iter' in inaccessible_apis: + msg = _('User not permitted to query Data ONTAP volumes.') + raise exception.VolumeBackendAPIException(data=msg) + else: + LOG.warning('The configured user account does not have ' + 'sufficient privileges to use all needed ' + 'APIs. 
The following extra specs will fail ' + 'or be ignored: %s.', invalid_extra_specs) + + return invalid_extra_specs + def _get_cluster_nodes_info(self): """Return a list of models of the nodes in the cluster""" api_args = { @@ -481,7 +529,25 @@ class Client(client_base.Client): tag = result.get_child_content('next-tag') if tag is None: break - return luns + + lun_list = [self._create_lun_meta(lun) for lun in luns] + return lun_list + + def _create_lun_meta(self, lun): + """Creates LUN metadata dictionary.""" + self.check_is_naelement(lun) + meta_dict = {} + meta_dict['Vserver'] = lun.get_child_content('vserver') + meta_dict['Volume'] = lun.get_child_content('volume') + meta_dict['Size'] = lun.get_child_content('size') + meta_dict['Qtree'] = lun.get_child_content('qtree') + meta_dict['Path'] = lun.get_child_content('path') + meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') + meta_dict['SpaceReserved'] = \ + lun.get_child_content('is-space-reservation-enabled') + meta_dict['UUID'] = lun.get_child_content('uuid') + meta_dict['BlockSize'] = lun.get_child_content('block-size') + return meta_dict def get_lun_map(self, path): """Gets the LUN map by LUN path.""" @@ -853,7 +919,10 @@ class Client(client_base.Client): attr_list = luns.get_child_by_name('attributes-list') if not attr_list: return [] - return attr_list.get_children() + + lun_list = [self._create_lun_meta(lun) + for lun in attr_list.get_children()] + return lun_list def file_assign_qos(self, flex_vol, qos_policy_group_name, qos_policy_group_is_adaptive, file_path): @@ -1061,7 +1130,8 @@ class Client(client_base.Client): num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: attr_list = result.get_child_by_name('attributes-list') - return attr_list.get_children() + return [{'vserver': attr.get_child_content('vserver')} + for attr in attr_list.get_children()] raise exception.NotFound( _('No interface found on cluster for ip %s') % ip) diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py index b336e01a7e2..8b597fb4a89 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py @@ -12,9 +12,15 @@ # License for the specific language governing permissions and limitations # under the License. +import copy +import math +from time import time + from oslo_log import log as logging +from oslo_utils import excutils import six +from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api @@ -27,6 +33,31 @@ DEFAULT_MAX_PAGE_LENGTH = 10000 ONTAP_SELECT_MODEL = 'FDvM300' ONTAP_C190 = 'C190' HTTP_ACCEPTED = 202 +DELETED_PREFIX = 'deleted_cinder_' +DEFAULT_TIMEOUT = 15 + +# Keys in this map are REST API's endpoints that the user shall have permission +# in order to enable extra specs reported to Cinder's scheduler. +# NOTE(sfernand): ONTAP does not retrieve volume efficiency information +# properly when using the pre-created "vsadmin" role (SVM scoped), causing +# dedup and compression extra specs to be reported as disabled despite its +# current configuration. 
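+# If the configured account cannot read one of the endpoints below,
+# check_api_permissions() flags the mapped extra specs as invalid and they
+# will fail or be ignored (see the warning logged by that method).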
+SSC_API_MAP = { + '/storage/aggregates': [ + 'netapp_raid_type', + ], + '/storage/disks': [ + 'netapp_disk_type', + ], + '/snapmirror/relationships': [ + 'netapp_mirrored', + ], + '/storage/volumes': [ + 'netapp_flexvol_encryption' + 'netapp_dedup', + 'netapp_compression', + ], +} @six.add_metaclass(volume_utils.TraceWrapperMetaclass) @@ -267,6 +298,51 @@ class RestClient(object): return version + def check_api_permissions(self): + """Check which APIs that support SSC functionality are available.""" + + inaccessible_apis = [] + invalid_extra_specs = [] + + for api, extra_specs in SSC_API_MAP.items(): + if not self.check_cluster_api(api): + inaccessible_apis.append(api) + invalid_extra_specs.extend(extra_specs) + + if inaccessible_apis: + if '/storage/volumes' in inaccessible_apis: + msg = _('User not permitted to query Data ONTAP volumes.') + raise exception.VolumeBackendAPIException(data=msg) + else: + LOG.warning('The configured user account does not have ' + 'sufficient privileges to use all needed ' + 'APIs. The following extra specs will fail ' + 'or be ignored: %s.', invalid_extra_specs) + + return invalid_extra_specs + + def check_cluster_api(self, api): + """Checks the availability of a cluster API. + + Returns True if the specified cluster API exists and may be called by + the current user. + """ + try: + # No need to return any records here since we just want to know if + # the user is allowed to make the request. A "Permission Denied" + # error code is expected in case user does not have the necessary + # permissions. + self.send_request('%s?return_records=false' % api, 'get', + enable_tunneling=False) + except netapp_api.NaApiError as ex: + # NOTE(nahimsouza): This function only returns false in case user + # is not authorized. If other error is returned, it must be + # handled in the function call that uses the same endpoint. + if ex.code == netapp_api.REST_UNAUTHORIZED: + return False + + return True + def _get_cluster_nodes_info(self): """Return a list of models of the nodes in the cluster.""" query_args = {'fields': 'model,' @@ -276,7 +352,7 @@ class RestClient(object): nodes = [] try: - result = self.send_request('cluster/nodes', 'get', + result = self.send_request('/cluster/nodes', 'get', query=query_args, enable_tunneling=False) @@ -298,3 +374,1416 @@ class RestClient(object): LOG.exception('Failed to get the cluster nodes.') return nodes + + def list_flexvols(self): + """Returns the names of the flexvols on the controller.""" + + query = { + 'type': 'rw', + 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' + 'is_svm_root': 'false', + 'error_state.is_inconsistent': 'false', + 'state': 'online', + 'fields': 'name' + } + + response = self.send_request( + '/storage/volumes/', 'get', query=query) + + records = response.get('records', []) + volumes = [volume['name'] for volume in records] + + return volumes + + def _get_unique_volume(self, records): + """Get the unique FlexVol or FlexGroup volume from a volume list.""" + if len(records) != 1: + msg = _('Could not find unique volume. 
Volumes found: %(vol)s.') + msg_args = {'vol': records} + raise exception.VolumeBackendAPIException(data=msg % msg_args) + + return records[0] + + def _get_volume_by_args(self, vol_name=None, vol_path=None, + vserver=None, fields=None): + """Get info from a single volume according to the args.""" + + query = { + 'type': 'rw', + 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' + 'is_svm_root': 'false', + 'error_state.is_inconsistent': 'false', + 'state': 'online', + 'fields': 'name,style' + } + + if vol_name: + query['name'] = vol_name + if vol_path: + query['nas.path'] = vol_path + if vserver: + query['svm.name'] = vserver + if fields: + query['fields'] = fields + + volumes_response = self.send_request( + '/storage/volumes/', 'get', query=query) + + records = volumes_response.get('records', []) + volume = self._get_unique_volume(records) + return volume + + def get_flexvol(self, flexvol_path=None, flexvol_name=None): + """Get flexvol attributes needed for the storage service catalog.""" + + fields = ('aggregates.name,name,svm.name,nas.path,' + 'type,guarantee.honored,guarantee.type,' + 'space.snapshot.reserve_percent,space.size,' + 'qos.policy.name,snapshot_policy,language,style') + unique_volume = self._get_volume_by_args( + vol_name=flexvol_name, vol_path=flexvol_path, fields=fields) + + aggregate = None + if unique_volume['style'] == 'flexvol': + # flexvol has only 1 aggregate + aggregate = unique_volume['aggregates'][0]['name'] + else: + aggregate = [aggr["name"] + for aggr in unique_volume.get('aggregates', [])] + + qos_policy_group = ( + unique_volume.get('qos', {}).get('policy', {}).get('name')) + + volume = { + 'name': unique_volume['name'], + 'vserver': unique_volume['svm']['name'], + 'junction-path': unique_volume.get('nas', {}).get('path'), + 'aggregate': aggregate, + 'type': unique_volume['type'], + 'space-guarantee-enabled': unique_volume['guarantee']['honored'], + 'space-guarantee': unique_volume['guarantee']['type'], + 'percentage-snapshot-reserve': + str(unique_volume['space']['snapshot']['reserve_percent']), + 'size': str(unique_volume['space']['size']), + 'qos-policy-group': qos_policy_group, + 'snapshot-policy': unique_volume['snapshot_policy']['name'], + 'language': unique_volume['language'], + 'style-extended': unique_volume['style'], + } + + return volume + + def is_flexvol_mirrored(self, flexvol_name, vserver_name): + """Check if flexvol is a SnapMirror source.""" + + query = { + 'source.path': vserver_name + ':' + flexvol_name, + 'state': 'snapmirrored', + 'return_records': 'false', + } + + try: + response = self.send_request('/snapmirror/relationships/', + 'get', query=query) + return response['num_records'] > 0 + except netapp_api.NaApiError: + LOG.exception('Failed to get SnapMirror info for volume %s.', + flexvol_name) + + return False + + def is_flexvol_encrypted(self, flexvol_name, vserver_name): + """Check if a flexvol is encrypted.""" + + if not self.features.FLEXVOL_ENCRYPTION: + return False + + query = { + 'encryption.enabled': 'true', + 'name': flexvol_name, + 'svm.name': vserver_name, + 'return_records': 'false', + } + + try: + response = self.send_request( + '/storage/volumes/', 'get', query=query) + return response['num_records'] > 0 + except netapp_api.NaApiError: + LOG.exception('Failed to get Encryption info for volume %s.', + flexvol_name) + + return False + + def get_aggregate_disk_types(self, aggregate_name): + """Get the disk type(s) of an aggregate.""" + disk_types = self._get_aggregate_disk_types(aggregate_name) + return 
list(disk_types) if disk_types else None + + def _get_aggregate_disk_types(self, aggregate_name): + """Get the disk type(s) of an aggregate""" + + disk_types = set() + + query = { + 'aggregates.name': aggregate_name, + 'fields': 'effective_type' + } + + try: + response = self.send_request( + '/storage/disks', 'get', query=query, enable_tunneling=False) + except netapp_api.NaApiError: + LOG.exception('Failed to get disk info for aggregate %s.', + aggregate_name) + return disk_types + + for storage_disk_info in response['records']: + disk_types.add(storage_disk_info['effective_type']) + + return disk_types + + def _get_aggregates(self, aggregate_names=None, fields=None): + + query = {} + if aggregate_names: + query['name'] = ','.join(aggregate_names) + + if fields: + query['fields'] = fields + + response = self.send_request( + '/storage/aggregates', 'get', query=query, enable_tunneling=False) + + return response['records'] + + def get_aggregate(self, aggregate_name): + """Get aggregate attributes needed for the storage service catalog.""" + + if not aggregate_name: + return {} + + fields = ('name,block_storage.primary.raid_type,' + 'block_storage.storage_type,home_node.name') + + try: + aggrs = self._get_aggregates(aggregate_names=[aggregate_name], + fields=fields) + except netapp_api.NaApiError: + LOG.exception('Failed to get info for aggregate %s.', + aggregate_name) + return {} + + if len(aggrs) < 1: + return {} + + aggr_attributes = aggrs[0] + + aggregate = { + 'name': aggr_attributes['name'], + 'raid-type': + aggr_attributes['block_storage']['primary']['raid_type'], + 'is-hybrid': + aggr_attributes['block_storage']['storage_type'] == 'hybrid', + 'node-name': aggr_attributes['home_node']['name'], + } + + return aggregate + + def is_qos_min_supported(self, is_nfs, node_name): + """Check if the node supports QoS minimum.""" + if node_name is None: + # whether no access to node name (SVM account or error), the QoS + # min support is dropped. + return False + + qos_min_name = na_utils.qos_min_feature_name(is_nfs, node_name) + return getattr(self.features, qos_min_name, False).__bool__() + + def get_flexvol_dedupe_info(self, flexvol_name): + """Get dedupe attributes needed for the storage service catalog.""" + + query = { + 'efficiency.volume_path': '/vol/%s' % flexvol_name, + 'fields': 'efficiency.state,efficiency.compression' + } + + # Set default values for the case there is no response. + no_dedupe_response = { + 'compression': False, + 'dedupe': False, + 'logical-data-size': 0, + 'logical-data-limit': 1, + } + + try: + response = self.send_request('/storage/volumes', + 'get', query=query) + except netapp_api.NaApiError: + LOG.exception('Failed to get dedupe info for volume %s.', + flexvol_name) + return no_dedupe_response + + if response["num_records"] != 1: + return no_dedupe_response + + state = response["records"][0]["efficiency"]["state"] + compression = response["records"][0]["efficiency"]["compression"] + + # TODO(nahimsouza): as soon as REST API supports the fields + # 'logical-data-size and 'logical-data-limit', we should include + # them in the query and set them correctly. + # NOTE(nahimsouza): these fields are only used by the client function + # `get_flexvol_dedupe_used_percent`, since the function is not + # implemented on REST yet, the below hard-coded fields are not + # affecting the driver in anyway. 
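+        # With the placeholder values below, any caller that divides
+        # 'logical-data-size' by 'logical-data-limit' simply gets zero.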
+ logical_data_size = 0 + logical_data_limit = 1 + + dedupe_info = { + 'compression': False if compression == "none" else True, + 'dedupe': False if state == "disabled" else True, + 'logical-data-size': logical_data_size, + 'logical-data-limit': logical_data_limit, + } + + return dedupe_info + + def get_lun_list(self): + """Gets the list of LUNs on filer. + + Gets the LUNs from cluster with vserver. + """ + + query = { + 'svm.name': self.vserver, + 'fields': 'svm.name,location.volume.name,space.size,' + 'location.qtree.name,name,os_type,' + 'space.guarantee.requested,uuid' + } + + response = self.send_request( + '/storage/luns/', 'get', query=query) + + if response['num_records'] == '0': + return [] + + lun_list = [] + for lun in response['records']: + lun_info = {} + lun_info['Vserver'] = lun['svm']['name'] + lun_info['Volume'] = lun['location']['volume']['name'] + lun_info['Size'] = lun['space']['size'] + lun_info['Qtree'] = \ + lun['location'].get('qtree', {}).get('name', '') + lun_info['Path'] = lun['name'] + lun_info['OsType'] = lun['os_type'] + lun_info['SpaceReserved'] = lun['space']['guarantee']['requested'] + lun_info['UUID'] = lun['uuid'] + + lun_list.append(lun_info) + + return lun_list + + def get_lun_by_args(self, **lun_info_args): + """Retrieves LUN with specified args.""" + + query = { + 'fields': 'svm.name,location.volume.name,space.size,' + 'location.qtree.name,name,os_type,' + 'space.guarantee.requested,uuid' + } + + if lun_info_args: + if 'vserver' in lun_info_args: + query['svm.name'] = lun_info_args['vserver'] + if 'path' in lun_info_args: + query['name'] = lun_info_args['path'] + if 'uuid' in lun_info_args: + query['uuid'] = lun_info_args['uuid'] + + response = self.send_request( + '/storage/luns/', 'get', query=query) + + if response['num_records'] == '0': + return [] + + lun_list = [] + for lun in response['records']: + lun_info = {} + lun_info['Vserver'] = lun['svm']['name'] + lun_info['Volume'] = lun['location']['volume']['name'] + lun_info['Size'] = lun['space']['size'] + lun_info['Qtree'] = \ + lun['location'].get('qtree', {}).get('name', '') + lun_info['Path'] = lun['name'] + lun_info['OsType'] = lun['os_type'] + lun_info['SpaceReserved'] = lun['space']['guarantee']['requested'] + lun_info['UUID'] = lun['uuid'] + + # NOTE(nahimsouza): Currently, ONTAP REST API does not have the + # 'block-size' in the response. By default, we are setting its + # value to 512, since traditional block size advertised by hard + # disks is 512 bytes. 
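+            # The block-count calculation in the block library divides 'Size'
+            # by 'BlockSize', so this default yields Size / 512 blocks.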
+ lun_info['BlockSize'] = 512 + + lun_list.append(lun_info) + + return lun_list + + def get_lun_sizes_by_volume(self, volume_name): + """"Gets the list of LUNs and their sizes from a given volume name""" + + query = { + 'location.volume.name': volume_name, + 'fields': 'space.size,name' + } + + response = self.send_request('/storage/luns/', 'get', query=query) + + if response['num_records'] == '0': + return [] + + luns = [] + for lun_info in response['records']: + luns.append({ + 'path': lun_info.get('name', ''), + 'size': float(lun_info.get('space', {}).get('size', 0)) + }) + return luns + + def get_file_sizes_by_dir(self, dir_path): + """Gets the list of files and their sizes from a given directory.""" + + # 'dir_path' will always be a FlexVol name + volume = self._get_volume_by_args(vol_name=dir_path) + + query = { + 'type': 'file', + 'fields': 'size,name' + } + + vol_uuid = volume['uuid'] + try: + response = self.send_request( + f'/storage/volumes/{vol_uuid}/files', + 'get', query=query) + except netapp_api.NaApiError as e: + if e.code == netapp_api.REST_NO_SUCH_FILE: + return [] + else: + raise e + + files = [] + for file_info in response['records']: + files.append({ + 'name': file_info.get('name', ''), + 'file-size': float(file_info.get('size', 0)) + }) + return files + + def get_volume_state(self, junction_path=None, name=None): + """Returns volume state for a given name or junction path.""" + + query_args = {} + + if name: + query_args['name'] = name + if junction_path: + query_args['nas.path'] = junction_path + + query_args['fields'] = 'state' + + response = self.send_request('/storage/volumes/', + 'get', query=query_args) + try: + records = response.get('records', []) + unique_volume = self._get_unique_volume(records) + except exception.VolumeBackendAPIException: + return None + + return unique_volume['state'] + + def delete_snapshot(self, volume_name, snapshot_name): + """Deletes a volume snapshot.""" + volume = self._get_volume_by_args(vol_name=volume_name) + self.send_request( + f'/storage/volumes/{volume["uuid"]}/snapshots' + f'?name={snapshot_name}', 'delete') + + def get_operational_lif_addresses(self): + """Gets the IP addresses of operational LIFs on the vserver.""" + + query = { + 'state': 'up', + 'fields': 'ip.address', + } + + response = self.send_request( + '/network/ip/interfaces/', 'get', query=query) + + return [lif_info['ip']['address'] + for lif_info in response['records']] + + def _list_vservers(self): + """Get the names of vservers present""" + query = { + 'fields': 'name', + } + response = self.send_request('/svm/svms', 'get', query=query, + enable_tunneling=False) + + return [svm['name'] for svm in response.get('records', [])] + + def _get_ems_log_destination_vserver(self): + """Returns the best vserver destination for EMS messages.""" + + # NOTE(nahimsouza): Differently from ZAPI, only 'data' SVMs can be + # managed by the SVM REST APIs - that's why the vserver type is not + # specified. 
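+        # Since only data SVMs are visible here, the first one returned is
+        # used as the destination for EMS messages.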
+ vservers = self._list_vservers() + + if vservers: + return vservers[0] + + raise exception.NotFound("No Vserver found to receive EMS messages.") + + def send_ems_log_message(self, message_dict): + """Sends a message to the Data ONTAP EMS log.""" + + body = { + 'computer_name': message_dict['computer-name'], + 'event_source': message_dict['event-source'], + 'app_version': message_dict['app-version'], + 'category': message_dict['category'], + 'severity': 'notice', + 'autosupport_required': message_dict['auto-support'] == 'true', + 'event_id': message_dict['event-id'], + 'event_description': message_dict['event-description'], + } + + bkp_connection = copy.copy(self.connection) + bkp_timeout = self.connection.get_timeout() + bkp_vserver = self.vserver + + self.connection.set_timeout(25) + try: + # TODO(nahimsouza): Vserver is being set to replicate the ZAPI + # behavior, but need to check if this could be removed in REST API + self.connection.set_vserver( + self._get_ems_log_destination_vserver()) + self.send_request('/support/ems/application-logs', + 'post', body=body) + LOG.debug('EMS executed successfully.') + except netapp_api.NaApiError as e: + LOG.warning('Failed to invoke EMS. %s', e) + finally: + # Restores the data + timeout = ( + bkp_timeout if bkp_timeout is not None else DEFAULT_TIMEOUT) + self.connection.set_timeout(timeout) + self.connection = copy.copy(bkp_connection) + self.connection.set_vserver(bkp_vserver) + + def get_performance_counter_info(self, object_name, counter_name): + """Gets info about one or more Data ONTAP performance counters.""" + + # NOTE(nahimsouza): This conversion is nedeed because different names + # are used in ZAPI and we want to avoid changes in the driver for now. + rest_counter_names = { + 'domain_busy': 'domain_busy_percent', + 'processor_elapsed_time': 'elapsed_time', + 'avg_processor_busy': 'average_processor_busy_percent', + } + + rest_counter_name = counter_name + if counter_name in rest_counter_names: + rest_counter_name = rest_counter_names[counter_name] + + # Get counter table info + query = { + 'counter_schemas.name': rest_counter_name, + 'fields': 'counter_schemas.*' + } + + try: + table = self.send_request( + f'/cluster/counter/tables/{object_name}', + 'get', query=query, enable_tunneling=False) + + name = counter_name # use the original name (ZAPI compatible) + base_counter = table['counter_schemas'][0]['denominator']['name'] + + query = { + 'counters.name': rest_counter_name, + 'fields': 'counters.*' + } + + response = self.send_request( + f'/cluster/counter/tables/{object_name}/rows', + 'get', query=query, enable_tunneling=False) + + table_rows = response.get('records', []) + labels = [] + if len(table_rows) != 0: + labels = table_rows[0]['counters'][0].get('labels', []) + + # NOTE(nahimsouza): Values have a different format on REST API + # and we want to keep compatibility with ZAPI for a while + if object_name == 'wafl' and counter_name == 'cp_phase_times': + # discard the prefix 'cp_' + labels = [label[3:] for label in labels] + + return { + 'name': name, + 'labels': labels, + 'base-counter': base_counter, + } + except netapp_api.NaApiError: + raise exception.NotFound(_('Counter %s not found') % counter_name) + + def get_performance_instance_uuids(self, object_name, node_name): + """Get UUIDs of performance instances for a cluster node.""" + + query = { + 'id': node_name + ':*', + } + + response = self.send_request( + f'/cluster/counter/tables/{object_name}/rows', + 'get', query=query, enable_tunneling=False) + + records = 
response.get('records', []) + + uuids = [] + for record in records: + uuids.append(record['id']) + + return uuids + + def get_performance_counters(self, object_name, instance_uuids, + counter_names): + """Gets more cDOT performance counters.""" + + # NOTE(nahimsouza): This conversion is nedeed because different names + # are used in ZAPI and we want to avoid changes in the driver for now. + rest_counter_names = { + 'domain_busy': 'domain_busy_percent', + 'processor_elapsed_time': 'elapsed_time', + 'avg_processor_busy': 'average_processor_busy_percent', + } + + zapi_counter_names = { + 'domain_busy_percent': 'domain_busy', + 'elapsed_time': 'processor_elapsed_time', + 'average_processor_busy_percent': 'avg_processor_busy', + } + + for i in range(len(counter_names)): + if counter_names[i] in rest_counter_names: + counter_names[i] = rest_counter_names[counter_names[i]] + + query = { + 'id': '|'.join(instance_uuids), + 'counters.name': '|'.join(counter_names), + 'fields': 'id,counter_table.name,counters.*', + } + + response = self.send_request( + f'/cluster/counter/tables/{object_name}/rows', + 'get', query=query, enable_tunneling=False) + + counter_data = [] + for record in response.get('records', []): + for counter in record['counters']: + + counter_name = counter['name'] + + # Reverts the name conversion + if counter_name in zapi_counter_names: + counter_name = zapi_counter_names[counter_name] + + counter_value = '' + if counter.get('value'): + counter_value = counter.get('value') + elif counter.get('values'): + # NOTE(nahimsouza): Conversion made to keep compatibility + # with old ZAPI format + values = counter.get('values') + counter_value = ','.join([str(v) for v in values]) + + counter_data.append({ + 'instance-name': record['counter_table']['name'], + 'instance-uuid': record['id'], + 'node-name': record['id'].split(':')[0], + 'timestamp': int(time()), + counter_name: counter_value, + }) + + return counter_data + + def get_aggregate_capacities(self, aggregate_names): + """Gets capacity info for multiple aggregates.""" + + if not isinstance(aggregate_names, list): + return {} + + aggregates = {} + for aggregate_name in aggregate_names: + aggregates[aggregate_name] = self._get_aggregate_capacity( + aggregate_name) + + return aggregates + + def _get_aggregate_capacity(self, aggregate_name): + """Gets capacity info for an aggregate.""" + + fields = ('space.block_storage.available,space.block_storage.size,' + 'space.block_storage.used') + + try: + aggrs = self._get_aggregates(aggregate_names=[aggregate_name], + fields=fields) + + result = {} + if len(aggrs) > 0: + aggr = aggrs[0] + + available = float(aggr['space']['block_storage']['available']) + total = float(aggr['space']['block_storage']['size']) + used = float(aggr['space']['block_storage']['used']) + percent_used = int((used * 100) // total) + + result = { + 'percent-used': percent_used, + 'size-available': available, + 'size-total': total, + } + + return result + except netapp_api.NaApiError as e: + if (e.code == netapp_api.REST_API_NOT_FOUND or + e.code == netapp_api.REST_UNAUTHORIZED): + LOG.debug('Aggregate capacity can only be collected with ' + 'cluster scoped credentials.') + else: + LOG.exception('Failed to get info for aggregate %s.', + aggregate_name) + return {} + + def get_node_for_aggregate(self, aggregate_name): + """Get home node for the specified aggregate. + + This API could return None, most notably if it was sent + to a Vserver LIF, so the caller must be able to handle that case. 
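+        With REST, that shows up as an NaApiError carrying
+        REST_API_NOT_FOUND, which is translated to a None return below.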
+ """ + + if not aggregate_name: + return None + + fields = 'home_node.name' + try: + aggrs = self._get_aggregates(aggregate_names=[aggregate_name], + fields=fields) + node = None + if len(aggrs) > 0: + aggr = aggrs[0] + node = aggr['home_node']['name'] + + return node + except netapp_api.NaApiError as e: + if e.code == netapp_api.REST_API_NOT_FOUND: + return None + else: + raise e + + def provision_qos_policy_group(self, qos_policy_group_info, + qos_min_support): + """Create QoS policy group on the backend if appropriate.""" + if qos_policy_group_info is None: + return + + # Legacy QoS uses externally provisioned QoS policy group, + # so we don't need to create one on the backend. + legacy = qos_policy_group_info.get('legacy') + if legacy: + return + + spec = qos_policy_group_info.get('spec') + + if not spec: + return + + is_adaptive = na_utils.is_qos_policy_group_spec_adaptive( + qos_policy_group_info) + self._validate_qos_policy_group(is_adaptive, spec=spec, + qos_min_support=qos_min_support) + + qos_policy_group = self._get_qos_first_policy_group_by_name( + spec['policy_name']) + + if not qos_policy_group: + self._create_qos_policy_group(spec, is_adaptive) + else: + self._modify_qos_policy_group(spec, is_adaptive, + qos_policy_group) + + def _get_qos_first_policy_group_by_name(self, qos_policy_group_name): + records = self._get_qos_policy_group_by_name(qos_policy_group_name) + if len(records) == 0: + return None + + return records[0] + + def _get_qos_policy_group_by_name(self, qos_policy_group_name): + query = {'name': qos_policy_group_name} + + response = self.send_request('/storage/qos/policies/', + 'get', query=query) + + records = response.get('records') + if not records: + return [] + + return records + + def _qos_spec_to_api_args(self, spec, is_adaptive, vserver=None): + """Convert a QoS spec to REST args.""" + rest_args = {} + if is_adaptive: + rest_args['adaptive'] = {} + if spec.get('absolute_min_iops'): + rest_args['adaptive']['absolute_min_iops'] = ( + self._sanitize_qos_spec_value( + spec.get('absolute_min_iops'))) + if spec.get('expected_iops'): + rest_args['adaptive']['expected_iops'] = ( + self._sanitize_qos_spec_value(spec.get('expected_iops'))) + if spec.get('expected_iops_allocation'): + rest_args['adaptive']['expected_iops_allocation'] = ( + spec.get('expected_iops_allocation')) + if spec.get('peak_iops'): + rest_args['adaptive']['peak_iops'] = ( + self._sanitize_qos_spec_value(spec.get('peak_iops'))) + if spec.get('peak_iops_allocation'): + rest_args['adaptive']['peak_iops_allocation'] = ( + spec.get('peak_iops_allocation')) + if spec.get('block_size'): + rest_args['adaptive']['block_size'] = ( + spec.get('block_size')) + else: + rest_args['fixed'] = {} + qos_max = spec.get('max_throughput') + if qos_max and 'iops' in qos_max: + rest_args['fixed']['max_throughput_iops'] = ( + self._sanitize_qos_spec_value(qos_max)) + elif qos_max: + # Convert from B/s to MB/s + value = math.ceil( + self._sanitize_qos_spec_value(qos_max) / (10**6)) + rest_args['fixed']['max_throughput_mbps'] = value + + qos_min = spec.get('min_throughput') + if qos_min and 'iops' in qos_min: + rest_args['fixed']['min_throughput_iops'] = ( + self._sanitize_qos_spec_value(qos_min)) + + if spec.get('policy_name'): + rest_args['name'] = spec.get('policy_name') + if spec.get('return_record'): + rest_args['return_records'] = spec.get('return_record') + + if vserver: + rest_args['svm'] = {} + rest_args['svm']['name'] = vserver + + return rest_args + + def _sanitize_qos_spec_value(self, value): + value = 
value.lower() + value = value.replace('iops', '').replace('b/s', '') + value = int(value) + return value + + def _create_qos_policy_group(self, spec, is_adaptive): + """Creates a QoS policy group.""" + body = self._qos_spec_to_api_args( + spec, is_adaptive, vserver=self.vserver) + + self.send_request('/storage/qos/policies/', 'post', body=body, + enable_tunneling=False) + + def _modify_qos_policy_group(self, spec, is_adaptive, qos_policy_group): + """Modifies a QoS policy group.""" + body = self._qos_spec_to_api_args(spec, is_adaptive) + if qos_policy_group['name'] == body['name']: + body.pop('name') + + self.send_request( + f'/storage/qos/policies/{qos_policy_group["uuid"]}', 'patch', + body=body, enable_tunneling=False) + + def get_vol_by_junc_vserver(self, vserver, junction): + """Gets the volume by junction path and vserver.""" + volume = self._get_volume_by_args(vol_path=junction, vserver=vserver) + return volume['name'] + + def file_assign_qos(self, flex_vol, qos_policy_group_name, + qos_policy_group_is_adaptive, file_path): + """Assigns the named QoS policy-group to a file.""" + volume = self._get_volume_by_args(flex_vol) + body = { + 'qos_policy.name': qos_policy_group_name + } + + self.send_request( + f'/storage/volumes/{volume["uuid"]}/files/{file_path}', + 'patch', body=body, enable_tunneling=False) + + def mark_qos_policy_group_for_deletion(self, qos_policy_group_info, + is_adaptive=False): + """Soft delete a QoS policy group backing a cinder volume.""" + if qos_policy_group_info is None: + return + + spec = qos_policy_group_info.get('spec') + + # For cDOT we want to delete the QoS policy group that we created for + # this cinder volume. Because the QoS policy may still be "in use" + # after the zapi call to delete the volume itself returns successfully, + # we instead rename the QoS policy group using a specific pattern and + # later attempt on a best effort basis to delete any QoS policy groups + # matching that pattern. + if spec: + current_name = spec['policy_name'] + new_name = DELETED_PREFIX + current_name + try: + self._rename_qos_policy_group(current_name, new_name) + except netapp_api.NaApiError as ex: + LOG.warning('Rename failure in cleanup of cDOT QoS policy ' + 'group %(current_name)s: %(ex)s', + {'current_name': current_name, 'ex': ex}) + + # Attempt to delete any QoS policies named "delete-openstack-*". + self.remove_unused_qos_policy_groups() + + def delete_file(self, path_to_file): + """Delete file at path.""" + LOG.debug('Deleting file: %s', path_to_file) + + volume_name = path_to_file.split('/')[2] + relative_path = '/'.join(path_to_file.split('/')[3:]) + volume = self._get_volume_by_args(volume_name) + + # Path requires "%2E" to represent "." and "%2F" to represent "/". 
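+        # For example, 'dir.1/file-0001' becomes 'dir%2E1%2Ffile-0001' before
+        # being appended to the /files/ endpoint below.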
+ relative_path = relative_path.replace('.', '%2E').replace('/', '%2F') + + self.send_request(f'/storage/volumes/{volume["uuid"]}' + + f'/files/{relative_path}', 'delete') + + def _rename_qos_policy_group(self, qos_policy_group_name, new_name): + """Renames a QoS policy group.""" + body = {'name': new_name} + query = {'name': qos_policy_group_name} + self.send_request('/storage/qos/policies/', 'patch', body=body, + query=query, enable_tunneling=False) + + def remove_unused_qos_policy_groups(self): + """Deletes all QoS policy groups that are marked for deletion.""" + query = {'name': f'{DELETED_PREFIX}*'} + self.send_request('/storage/qos/policies', 'delete', query=query) + + def create_lun(self, volume_name, lun_name, size, metadata, + qos_policy_group_name=None, + qos_policy_group_is_adaptive=False): + """Issues API request for creating LUN on volume.""" + self._validate_qos_policy_group(qos_policy_group_is_adaptive) + + path = f'/vol/{volume_name}/{lun_name}' + space_reservation = metadata['SpaceReserved'] + initial_size = size + + body = { + 'name': path, + 'space.size': str(initial_size), + 'os_type': metadata['OsType'], + 'space.guarantee.requested': space_reservation + } + + if qos_policy_group_name: + body['qos_policy.name'] = qos_policy_group_name + + try: + self.send_request('/storage/luns', 'post', body=body) + except netapp_api.NaApiError as ex: + with excutils.save_and_reraise_exception(): + LOG.error('Error provisioning volume %(lun_name)s on ' + '%(volume_name)s. Details: %(ex)s', + { + 'lun_name': lun_name, + 'volume_name': volume_name, + 'ex': ex, + }) + + def do_direct_resize(self, path, new_size_bytes, force=True): + """Resize the LUN.""" + seg = path.split("/") + LOG.info('Resizing LUN %s directly to new size.', seg[-1]) + + body = {'name': path, 'space.size': new_size_bytes} + + self._lun_update_by_path(path, body) + + def _get_lun_by_path(self, path): + query = {'name': path} + response = self.send_request('/storage/luns', 'get', query=query) + records = response.get('records', []) + + return records + + def _get_first_lun_by_path(self, path): + records = self._get_lun_by_path(path) + if len(records) == 0: + return None + + return records[0] + + def _lun_update_by_path(self, path, body): + """Update the LUN.""" + lun = self._get_first_lun_by_path(path) + + if not lun: + raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) + + self.send_request(f'/storage/luns/{lun["uuid"]}', 'patch', body=body) + + def _validate_qos_policy_group(self, is_adaptive, spec=None, + qos_min_support=False): + if is_adaptive and not self.features.ADAPTIVE_QOS: + msg = _("Adaptive QoS feature requires ONTAP 9.4 or later.") + raise na_utils.NetAppDriverException(msg) + + if not spec: + return + + if 'min_throughput' in spec and not qos_min_support: + msg = 'min_throughput is not supported by this back end.' 
+ raise na_utils.NetAppDriverException(msg) + + def get_if_info_by_ip(self, ip): + """Gets the network interface info by ip.""" + query_args = {} + query_args['ip.address'] = volume_utils.resolve_hostname(ip) + query_args['fields'] = 'svm' + + result = self.send_request('/network/ip/interfaces/', 'get', + query=query_args, enable_tunneling=False) + num_records = result['num_records'] + records = result.get('records', []) + + if num_records == 0: + raise exception.NotFound( + _('No interface found on cluster for ip %s') % ip) + + return [{'vserver': item['svm']['name']} for item in records] + + def get_igroup_by_initiators(self, initiator_list): + """Get igroups exactly matching a set of initiators.""" + + igroup_list = [] + if not initiator_list: + return igroup_list + + query = { + 'svm.name': self.vserver, + 'initiators.name': ' '.join(initiator_list), + 'fields': 'name,protocol,os_type' + } + + response = self.send_request('/protocols/san/igroups', + 'get', query=query) + records = response.get('records', []) + for igroup_item in records: + igroup = {'initiator-group-os-type': igroup_item['os_type'], + 'initiator-group-type': igroup_item['protocol'], + 'initiator-group-name': igroup_item['name']} + igroup_list.append(igroup) + + return igroup_list + + def add_igroup_initiator(self, igroup, initiator): + """Adds initiators to the specified igroup.""" + query_initiator_uuid = { + 'name': igroup, + 'fields': 'uuid' + } + + response_initiator_uuid = self.send_request( + '/protocols/san/igroups/', 'get', query=query_initiator_uuid) + + response = response_initiator_uuid.get('records', []) + if len(response) < 1: + msg = _('Could not find igroup initiator.') + raise exception.VolumeBackendAPIException(data=msg) + + igroup_uuid = response[0]['uuid'] + + body = { + 'name': initiator + } + + self.send_request('/protocols/san/igroups/' + + igroup_uuid + '/initiators', + 'post', body=body) + + def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'): + """Creates igroup with specified args.""" + body = { + 'name': igroup, + 'protocol': igroup_type, + 'os_type': os_type, + } + self.send_request('/protocols/san/igroups', 'post', body=body) + + def map_lun(self, path, igroup_name, lun_id=None): + """Maps LUN to the initiator and returns LUN id assigned.""" + + body_post = { + 'lun.name': path, + 'igroup.name': igroup_name, + } + + if lun_id is not None: + body_post['logical_unit_number'] = lun_id + + try: + result = self.send_request('/protocols/san/lun-maps', 'post', + body=body_post, + query={'return_records': 'true'}) + records = result.get('records') + lun_id_assigned = records[0].get('logical_unit_number') + return lun_id_assigned + except netapp_api.NaApiError as e: + code = e.code + message = e.message + LOG.warning('Error mapping LUN. 
Code :%(code)s, Message: ' + '%(message)s', {'code': code, 'message': message}) + raise + + def get_lun_map(self, path): + """Gets the LUN map by LUN path.""" + map_list = [] + + query = { + 'lun.name': path, + 'fields': 'igroup.name,logical_unit_number,svm.name', + } + + response = self.send_request('/protocols/san/lun-maps', + 'get', + query=query) + num_records = response.get('num_records') + records = response.get('records', None) + if records is None or num_records is None: + return map_list + + for element in records: + map_lun = {} + map_lun['initiator-group'] = element['igroup']['name'] + map_lun['lun-id'] = element['logical_unit_number'] + map_lun['vserver'] = element['svm']['name'] + map_list.append(map_lun) + + return map_list + + def get_fc_target_wwpns(self): + """Gets the FC target details.""" + wwpns = [] + query = { + 'fields': 'wwpn' + } + response = self.send_request('/network/fc/interfaces', + 'get', query=query) + + records = response.get('records') + for record in records: + wwpn = record.get('wwpn').lower() + wwpns.append(wwpn) + + return wwpns + + def unmap_lun(self, path, igroup_name): + """Unmaps a LUN from given initiator.""" + + # get lun amd igroup uuids + query_uuid = { + 'igroup.name': igroup_name, + 'lun.name': path, + 'fields': 'lun.uuid,igroup.uuid' + } + + response_uuid = self.send_request( + '/protocols/san/lun-maps', 'get', query=query_uuid) + + if response_uuid['num_records'] > 0: + lun_uuid = response_uuid['records'][0]['lun']['uuid'] + igroup_uuid = response_uuid['records'][0]['igroup']['uuid'] + + try: + self.send_request( + f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', + 'delete') + except netapp_api.NaApiError as e: + LOG.warning("Error unmapping LUN. Code: %(code)s, Message: " + "%(message)s", {'code': e.code, + 'message': e.message}) + # if the LUN is already unmapped + if e.code == netapp_api.REST_NO_SUCH_LUN_MAP: + pass + else: + raise e + else: + # Input is invalid or LUN may already be unmapped + LOG.warning("Error unmapping LUN. 
Invalid input.") + + def has_luns_mapped_to_initiators(self, initiator_list): + """Checks whether any LUNs are mapped to the given initiator(s).""" + query = { + 'initiators.name': ' '.join(initiator_list), + 'fields': 'lun_maps' + } + + response = self.send_request('/protocols/san/igroups', + 'get', query=query) + + records = response.get('records', []) + if len(records) > 0: + for record in records: + lun_maps = record.get('lun_maps', []) + if len(lun_maps) > 0: + return True + + return False + + def get_iscsi_service_details(self): + """Returns iscsi iqn.""" + query = { + 'fields': 'target.name' + } + response = self.send_request( + '/protocols/san/iscsi/services', 'get', query=query) + records = response.get('records') + if records: + return records[0]['target']['name'] + + LOG.debug('No iSCSI service found for vserver %s', self.vserver) + return None + + def check_iscsi_initiator_exists(self, iqn): + """Returns True if initiator exists.""" + endpoint_url = '/protocols/san/iscsi/credentials' + initiator_exists = True + try: + query = { + 'initiator': iqn, + } + response = self.send_request(endpoint_url, 'get', query=query) + records = response.get('records') + if not records: + initiator_exists = False + + except netapp_api.NaApiError: + initiator_exists = False + + return initiator_exists + + def set_iscsi_chap_authentication(self, iqn, username, password): + """Provides NetApp host's CHAP credentials to the backend.""" + initiator_exists = self.check_iscsi_initiator_exists(iqn) + + command_template = ('iscsi security %(mode)s -vserver %(vserver)s ' + '-initiator-name %(iqn)s -auth-type CHAP ' + '-user-name %(username)s') + + if initiator_exists: + LOG.debug('Updating CHAP authentication for %(iqn)s.', + {'iqn': iqn}) + command = command_template % { + 'mode': 'modify', + 'vserver': self.vserver, + 'iqn': iqn, + 'username': username, + } + else: + LOG.debug('Adding initiator %(iqn)s with CHAP authentication.', + {'iqn': iqn}) + command = command_template % { + 'mode': 'create', + 'vserver': self.vserver, + 'iqn': iqn, + 'username': username, + } + + try: + with self.ssh_client.ssh_connect_semaphore: + ssh_pool = self.ssh_client.ssh_pool + with ssh_pool.item() as ssh: + self.ssh_client.execute_command_with_prompt(ssh, + command, + 'Password:', + password) + except Exception as e: + msg = _('Failed to set CHAP authentication for target IQN %(iqn)s.' + ' Details: %(ex)s') % { + 'iqn': iqn, + 'ex': e, + } + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def get_iscsi_target_details(self): + """Gets the iSCSI target portal details.""" + query = { + 'services': 'data_iscsi', + 'fields': 'ip.address,enabled' + } + + response = self.send_request('/network/ip/interfaces', + 'get', query=query) + + target_list = [] + records = response.get('records', []) + for record in records: + details = dict() + details['address'] = record['ip']['address'] + details['tpgroup-tag'] = None + details['interface-enabled'] = record['enabled'] + # NOTE(nahimsouza): from ONTAP documentation: + # ONTAP does not support changing the port number for iSCSI. + # Port number 3260 is registered as part of the iSCSI specification + # and cannot be used by any other application or service. 
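+            # No target portal group tag is requested from the REST API, so
+            # 'tpgroup-tag' stays None and the standard iSCSI port is used.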
+ details['port'] = 3260 + target_list.append(details) + + return target_list + + def move_lun(self, path, new_path): + """Moves the LUN at path to new path.""" + seg = path.split("/") + new_seg = new_path.split("/") + LOG.debug("Moving LUN %(name)s to %(new_name)s.", + {'name': seg[-1], 'new_name': new_seg[-1]}) + query = { + 'svm.name': self.vserver, + 'name': path + } + body = { + 'name': new_path, + } + self.send_request('/storage/luns/', 'patch', query=query, body=body) + + def clone_file(self, flex_vol, src_path, dest_path, vserver, + dest_exists=False, source_snapshot=None, is_snapshot=False): + """Clones file on vserver.""" + LOG.debug('Cloning file - volume %(flex_vol)s, src %(src_path)s, ' + 'dest %(dest_path)s, vserver %(vserver)s,' + 'source_snapshot %(source_snapshot)s', + { + 'flex_vol': flex_vol, + 'src_path': src_path, + 'dest_path': dest_path, + 'vserver': vserver, + 'source_snapshot': source_snapshot, + }) + + volume = self._get_volume_by_args(flex_vol) + body = { + 'volume': { + 'uuid': volume['uuid'], + 'name': volume['name'] + }, + 'source_path': src_path, + 'destination_path': dest_path, + } + if is_snapshot and self.features.BACKUP_CLONE_PARAM: + body['is_backup'] = True + + if dest_exists: + body['overwrite_destination'] = True + + self.send_request('/storage/file/clone', 'post', body=body) + + def clone_lun(self, volume, name, new_name, space_reserved='true', + qos_policy_group_name=None, src_block=0, dest_block=0, + block_count=0, source_snapshot=None, is_snapshot=False, + qos_policy_group_is_adaptive=False): + """Clones lun on vserver.""" + LOG.debug('Cloning lun - volume: %(volume)s, name: %(name)s, ' + 'new_name: %(new_name)s, space_reserved: %(space_reserved)s,' + ' qos_policy_group_name: %(qos_policy_group_name)s', + { + 'volume': volume, + 'name': name, + 'new_name': new_name, + 'space_reserved': space_reserved, + 'qos_policy_group_name': qos_policy_group_name, + }) + + # NOTE(nahimsouza): some parameters are not available on REST API, + # but they are in the header just to keep compatilbility with ZAPI: + # src_block, dest_block, block_count, is_snapshot + + self._validate_qos_policy_group(qos_policy_group_is_adaptive) + + source_path = f'/vol/{volume}' + if source_snapshot: + source_path += f'/.snapshot/{source_snapshot}' + source_path += f'/{name}' + body = { + 'svm': { + 'name': self.vserver + }, + 'name': f'/vol/{volume}/{new_name}', + 'clone': { + 'source': { + 'name': source_path, + } + }, + 'space': { + 'guarantee': { + 'requested': space_reserved == 'true', + } + } + } + + if qos_policy_group_name: + body['qos_policy'] = {'name': qos_policy_group_name} + + self.send_request('/storage/luns', 'post', body=body) + + def destroy_lun(self, path, force=True): + """Destroys the LUN at the path.""" + query = {} + query['name'] = path + query['svm'] = self.vserver + + if force: + query['allow_delete_while_mapped'] = 'true' + + self.send_request('/storage/luns/', 'delete', query=query) + + def get_flexvol_capacity(self, flexvol_path=None, flexvol_name=None): + """Gets total capacity and free capacity, in bytes, of the flexvol.""" + fields = 'name,space.available,space.afs_total' + try: + volume = self._get_volume_by_args( + vol_name=flexvol_name, vol_path=flexvol_path, fields=fields) + capacity = { + 'size-total': float(volume['space']['afs_total']), + 'size-available': float(volume['space']['available']), + } + return capacity + except exception.VolumeBackendAPIException: + msg = _('Volume %s not found.') + msg_args = flexvol_path or flexvol_name + raise 
na_utils.NetAppDriverException(msg % msg_args) diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py index 0cccbef649f..1cfc7c049b6 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py @@ -314,7 +314,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, """Gets the vserver and export volume for share.""" (host_ip, export_path) = self._get_export_ip_path(volume_id, share) ifs = self.zapi_client.get_if_info_by_ip(host_ip) - vserver = ifs[0].get_child_content('vserver') + vserver = ifs[0].get('vserver') exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver, export_path) return vserver, exp_volume @@ -512,7 +512,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, """Get vserver for the mentioned ip.""" try: ifs = self.zapi_client.get_if_info_by_ip(ip) - vserver = ifs[0].get_child_content('vserver') + vserver = ifs[0].get('vserver') return vserver except Exception: return None diff --git a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py index 2437f802e68..aedd9e1ffa2 100644 --- a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py +++ b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py @@ -22,34 +22,9 @@ import re from oslo_log import log as logging import six -from cinder import exception -from cinder.i18n import _ - LOG = logging.getLogger(__name__) -# NOTE(cknight): The keys in this map are tuples that contain arguments needed -# for efficient use of the system-user-capability-get-iter cDOT API. The -# values are SSC extra specs associated with the APIs listed in the keys. -SSC_API_MAP = { - ('storage.aggregate', 'show', 'aggr-options-list-info'): [ - 'netapp_raid_type', - ], - ('storage.disk', 'show', 'storage-disk-get-iter'): [ - 'netapp_disk_type', - ], - ('snapmirror', 'show', 'snapmirror-get-iter'): [ - 'netapp_mirrored', - ], - ('volume.efficiency', 'show', 'sis-get-iter'): [ - 'netapp_dedup', - 'netapp_compression', - ], - ('volume', '*show', 'volume-get-iter'): [ - 'netapp_flexvol_encryption', - ], -} - class CapabilitiesLibrary(object): @@ -64,30 +39,7 @@ class CapabilitiesLibrary(object): self.invalid_extra_specs = [] def check_api_permissions(self): - """Check which APIs that support SSC functionality are available.""" - - inaccessible_apis = [] - invalid_extra_specs = [] - - for api_tuple, extra_specs in SSC_API_MAP.items(): - object_name, operation_name, api = api_tuple - if not self.zapi_client.check_cluster_api(object_name, - operation_name, - api): - inaccessible_apis.append(api) - invalid_extra_specs.extend(extra_specs) - - if inaccessible_apis: - if 'volume-get-iter' in inaccessible_apis: - msg = _('User not permitted to query Data ONTAP volumes.') - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.warning('The configured user account does not have ' - 'sufficient privileges to use all needed ' - 'APIs. The following extra specs will fail ' - 'or be ignored: %s.', invalid_extra_specs) - - self.invalid_extra_specs = invalid_extra_specs + self.invalid_extra_specs = self.zapi_client.check_api_permissions() def cluster_user_supported(self): return not self.invalid_extra_specs