diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
index 634464fe35c..f6431071105 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
@@ -622,7 +622,7 @@ AGGR_GET_NODE_RESPONSE = etree.XML("""
     'node': NODE_NAME,
 })
 
-AGGR_RAID_TYPE = 'raid_dp'
+AGGREGATE_RAID_TYPE = 'raid_dp'
 
 AGGR_GET_ITER_SSC_RESPONSE = etree.XML("""
@@ -639,17 +639,19 @@ AGGR_GET_ITER_SSC_RESPONSE = etree.XML("""
     <aggr-attributes>
       <aggr-raid-attributes>
         <raid-type>%(raid)s</raid-type>
+        <is-hybrid>true</is-hybrid>
       </aggr-raid-attributes>
       <aggregate-name>%(aggr)s</aggregate-name>
     </aggr-attributes>
   </attributes-list>
   <num-records>1</num-records>
 </results>
-""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGR_RAID_TYPE})
+""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGREGATE_RAID_TYPE})
 
 AGGR_INFO_SSC = {
     'name': VOLUME_AGGREGATE_NAME,
-    'raid-type': AGGR_RAID_TYPE,
+    'raid-type': AGGREGATE_RAID_TYPE,
+    'is-hybrid': True,
 }
 
 AGGR_SIZE_TOTAL = 107374182400
@@ -911,20 +913,41 @@ STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML("""
 """)
 
-AGGR_DISK_TYPE = 'FCAL'
+AGGREGATE_DISK_TYPES = ['SATA', 'SSD']
 
 STORAGE_DISK_GET_ITER_RESPONSE = etree.XML("""
 <results status="passed">
   <attributes-list>
     <storage-disk-info>
       <disk-name>cluster3-01:v5.19</disk-name>
       <disk-raid-info>
-        <effective-disk-type>%s</effective-disk-type>
+        <effective-disk-type>%(type0)s</effective-disk-type>
+      </disk-raid-info>
+    </storage-disk-info>
+    <storage-disk-info>
+      <disk-name>cluster3-01:v5.20</disk-name>
+      <disk-raid-info>
+        <effective-disk-type>%(type0)s</effective-disk-type>
+      </disk-raid-info>
+    </storage-disk-info>
+    <storage-disk-info>
+      <disk-name>cluster3-01:v5.20</disk-name>
+      <disk-raid-info>
+        <effective-disk-type>%(type1)s</effective-disk-type>
+      </disk-raid-info>
+    </storage-disk-info>
+    <storage-disk-info>
+      <disk-name>cluster3-01:v5.20</disk-name>
+      <disk-raid-info>
+        <effective-disk-type>%(type1)s</effective-disk-type>
       </disk-raid-info>
     </storage-disk-info>
   </attributes-list>
-  <num-records>1</num-records>
+  <num-records>4</num-records>
 </results>
-""" % AGGR_DISK_TYPE)
+""" % {
+    'type0': AGGREGATE_DISK_TYPES[0],
+    'type1': AGGREGATE_DISK_TYPES[1],
+})
 
 SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE = etree.XML("""
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
index 8ff81996c39..78df0d2ad98 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
@@ -173,7 +173,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
             max_page_length=10)
 
         num_records = result.get_child_content('num-records')
-        self.assertEqual('1', num_records)
+        self.assertEqual('4', num_records)
 
         args = copy.deepcopy(storage_disk_get_iter_args)
         args['max-records'] = 10
@@ -1681,6 +1681,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
                 'aggregate-name': None,
                 'aggr-raid-attributes': {
                     'raid-type': None,
+                    'is-hybrid': None,
                 },
             },
         }
@@ -1692,6 +1693,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
         expected = {
             'name': fake_client.VOLUME_AGGREGATE_NAME,
             'raid-type': 'raid_dp',
+            'is-hybrid': True,
         }
         self.assertEqual(expected, result)
 
@@ -1716,19 +1718,64 @@ class NetAppCmodeClientTestCase(test.TestCase):
 
         self.assertEqual({}, result)
 
-    def test_get_aggregate_disk_type(self):
+    @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
+              {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},)
+    @ddt.unpack
+    def test_get_aggregate_disk_types(self, types, expected):
+
+        mock_get_aggregate_disk_types = self.mock_object(
+            self.client, '_get_aggregate_disk_types',
+            mock.Mock(return_value=types))
+
+        result = self.client.get_aggregate_disk_types(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+        self.assertItemsEqual(expected, result)
+        mock_get_aggregate_disk_types.assert_called_once_with(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+    def test_get_aggregate_disk_types_not_found(self):
+
+        mock_get_aggregate_disk_types = self.mock_object(
+            self.client, '_get_aggregate_disk_types',
+            mock.Mock(return_value=set()))
+
+        result = self.client.get_aggregate_disk_types(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+        self.assertIsNone(result)
+        mock_get_aggregate_disk_types.assert_called_once_with(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+    def test_get_aggregate_disk_types_shared(self):
+
+        self.client.features.add_feature('ADVANCED_DISK_PARTITIONING')
+        mock_get_aggregate_disk_types = self.mock_object(
+            self.client, '_get_aggregate_disk_types',
+            mock.Mock(side_effect=[set(['SSD']), set(['SATA'])]))
+
+        result = self.client.get_aggregate_disk_types(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+        self.assertIsInstance(result, list)
+        self.assertItemsEqual(['SATA', 'SSD'], result)
+        mock_get_aggregate_disk_types.assert_has_calls([
+            mock.call(fake_client.VOLUME_AGGREGATE_NAME),
+            mock.call(fake_client.VOLUME_AGGREGATE_NAME, shared=True),
+        ])
+
+    def test__get_aggregate_disk_types(self):
 
         api_response = netapp_api.NaElement(
             fake_client.STORAGE_DISK_GET_ITER_RESPONSE)
         self.mock_object(self.client,
-                         'send_request',
+                         'send_iter_request',
                          mock.Mock(return_value=api_response))
 
-        result = self.client.get_aggregate_disk_type(
+        result = self.client._get_aggregate_disk_types(
             fake_client.VOLUME_AGGREGATE_NAME)
 
         storage_disk_get_iter_args = {
-            'max-records': 1,
             'query': {
                 'storage-disk-info': {
                     'disk-raid-info': {
@@ -1747,34 +1794,76 @@ class NetAppCmodeClientTestCase(test.TestCase):
                 },
             },
         }
-        self.client.send_request.assert_called_once_with(
+        self.client.send_iter_request.assert_called_once_with(
             'storage-disk-get-iter', storage_disk_get_iter_args,
             enable_tunneling=False)
 
-        self.assertEqual(fake_client.AGGR_DISK_TYPE, result)
-
-    @ddt.data(fake_client.NO_RECORDS_RESPONSE, fake_client.INVALID_RESPONSE)
-    def test_get_aggregate_disk_type_not_found(self, response):
+        expected = set(fake_client.AGGREGATE_DISK_TYPES)
+        self.assertEqual(expected, result)
 
-        api_response = netapp_api.NaElement(response)
+    def test__get_aggregate_disk_types_shared(self):
+
+        api_response = netapp_api.NaElement(
+            fake_client.STORAGE_DISK_GET_ITER_RESPONSE)
         self.mock_object(self.client,
-                         'send_request',
+                         'send_iter_request',
                          mock.Mock(return_value=api_response))
 
-        result = self.client.get_aggregate_disk_type(
+        result = self.client._get_aggregate_disk_types(
+            fake_client.VOLUME_AGGREGATE_NAME, shared=True)
+
+        storage_disk_get_iter_args = {
+            'query': {
+                'storage-disk-info': {
+                    'disk-raid-info': {
+                        'disk-shared-info': {
+                            'aggregate-list': {
+                                'shared-aggregate-info': {
+                                    'aggregate-name':
+                                        fake_client.VOLUME_AGGREGATE_NAME,
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+            'desired-attributes': {
+                'storage-disk-info': {
+                    'disk-raid-info': {
+                        'effective-disk-type': None,
+                    },
+                },
+            },
+        }
+        self.client.send_iter_request.assert_called_once_with(
+            'storage-disk-get-iter', storage_disk_get_iter_args,
+            enable_tunneling=False)
+
+        expected = set(fake_client.AGGREGATE_DISK_TYPES)
+        self.assertEqual(expected, result)
+
+    def test__get_aggregate_disk_types_not_found(self):
+
+        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
+        self.mock_object(self.client,
+                         'send_iter_request',
+                         mock.Mock(return_value=api_response))
+
+        result = self.client._get_aggregate_disk_types(
             fake_client.VOLUME_AGGREGATE_NAME)
 
-        self.assertEqual('unknown', result)
+        self.assertEqual(set(), result)
 
-    def test_get_aggregate_disk_type_api_error(self):
+    def test__get_aggregate_disk_types_api_error(self):
 
         self.mock_object(self.client,
-                         'send_request',
+                         'send_iter_request',
                          mock.Mock(side_effect=self._mock_api_error()))
 
-        result = self.client.get_aggregate_disk_type(
+        result = self.client._get_aggregate_disk_types(
             fake_client.VOLUME_AGGREGATE_NAME)
 
-        self.assertEqual('unknown', result)
+        self.assertEqual(set([]), result)
 
     def test_get_aggregate_capacities(self):
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py
index 7a80579a87a..da52f356a22 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py
@@ -45,13 +45,13 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
 
         self.fake_volumes = {
             'pool1': {
-                'aggregate': 'aggr1',
+                'netapp_aggregate': 'aggr1',
             },
             'pool2': {
-                'aggregate': 'aggr2',
+                'netapp_aggregate': 'aggr2',
             },
             'pool3': {
-                'aggregate': 'aggr2',
+                'netapp_aggregate': 'aggr2',
             },
         }
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
index d13106d4410..335ae3df9f7 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
@@ -302,7 +302,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
             'netapp_compression': 'false',
             'netapp_mirrored': 'false',
             'netapp_dedup': 'true',
-            'aggregate': 'aggr1',
+            'netapp_aggregate': 'aggr1',
             'netapp_raid_type': 'raid_dp',
             'netapp_disk_type': 'SSD',
         },
@@ -349,7 +349,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
             'total_capacity_gb': 10.0,
             'free_capacity_gb': 2.0,
             'provisioned_capacity_gb': 8.0,
-            'aggregate_used_percent': 45,
+            'netapp_aggregate_used_percent': 45,
             'utilization': 30.0,
             'filter_function': 'filter',
             'goodness_function': 'goodness',
@@ -359,7 +359,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
             'netapp_compression': 'false',
             'netapp_mirrored': 'false',
             'netapp_dedup': 'true',
-            'aggregate': 'aggr1',
+            'netapp_aggregate': 'aggr1',
             'netapp_raid_type': 'raid_dp',
             'netapp_disk_type': 'SSD',
         }]
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
index 293ddffd99f..1a0a0469a9e 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
@@ -93,7 +93,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
             'netapp_compression': 'false',
             'netapp_mirrored': 'false',
             'netapp_dedup': 'true',
-            'aggregate': 'aggr1',
+            'netapp_aggregate': 'aggr1',
             'netapp_raid_type': 'raid_dp',
             'netapp_disk_type': 'SSD',
         },
@@ -146,7 +146,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
             'total_capacity_gb': total_capacity_gb,
             'free_capacity_gb': free_capacity_gb,
             'provisioned_capacity_gb': provisioned_capacity_gb,
-            'aggregate_used_percent': 45,
+            'netapp_aggregate_used_percent': 45,
             'utilization': 30.0,
             'filter_function': 'filter',
             'goodness_function': 'goodness',
@@ -156,7 +156,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
             'netapp_compression': 'false',
             'netapp_mirrored': 'false',
             'netapp_dedup': 'true',
-            'aggregate': 'aggr1',
+            'netapp_aggregate': 'aggr1',
             'netapp_raid_type': 'raid_dp',
             'netapp_disk_type': 'SSD',
         }]
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py
index fad7bf7bf9a..5d2abceaafe 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py
@@ -30,24 +30,26 @@ SSC = {
         'thick_provisioning_support': True,
         'thin_provisioning_support': False,
         'netapp_thin_provisioned': 'false',
-        'aggregate': 'aggr1',
+        'netapp_aggregate': 'aggr1',
         'netapp_compression': 'false',
         'netapp_dedup': 'true',
         'netapp_mirrored': 'false',
         'netapp_raid_type': 'raid_dp',
         'netapp_disk_type': 'SSD',
+        'netapp_hybrid_aggregate': False,
         'pool_name': 'volume1',
     },
     'volume2': {
         'thick_provisioning_support': False,
         'thin_provisioning_support': True,
         'netapp_thin_provisioned': 'true',
-        'aggregate': 'aggr2',
+        'netapp_aggregate': 'aggr2',
         'netapp_compression': 'true',
         'netapp_dedup': 'true',
         'netapp_mirrored': 'true',
         'netapp_raid_type': 'raid_dp',
         'netapp_disk_type': 'FCAL',
+        'netapp_hybrid_aggregate': True,
         'pool_name': 'volume2',
     },
 }
@@ -57,13 +59,13 @@ SSC_FLEXVOL_INFO = {
         'thick_provisioning_support': True,
         'thin_provisioning_support': False,
         'netapp_thin_provisioned': 'false',
-        'aggregate': 'aggr1',
+        'netapp_aggregate': 'aggr1',
     },
     'volume2': {
         'thick_provisioning_support': False,
         'thin_provisioning_support': True,
         'netapp_thin_provisioned': 'true',
-        'aggregate': 'aggr2',
+        'netapp_aggregate': 'aggr2',
     },
 }
@@ -91,9 +93,11 @@ SSC_AGGREGATE_INFO = {
     'volume1': {
         'netapp_disk_type': 'SSD',
         'netapp_raid_type': 'raid_dp',
+        'netapp_hybrid_aggregate': False,
     },
     'volume2': {
         'netapp_disk_type': 'FCAL',
         'netapp_raid_type': 'raid_dp',
+        'netapp_hybrid_aggregate': True,
     },
 }
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py
index c59f4063d85..fbb82a2ac9b 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py
@@ -171,7 +171,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
             'netapp_thin_provisioned': 'true',
             'thick_provisioning_support': False,
             'thin_provisioning_support': True,
-            'aggregate': 'fake_aggr1',
+            'netapp_aggregate': 'fake_aggr1',
         }
         self.assertEqual(expected, result)
         self.zapi_client.get_flexvol.assert_called_once_with(
@@ -198,7 +198,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
             'netapp_thin_provisioned': 'false',
             'thick_provisioning_support': lun_space_guarantee,
             'thin_provisioning_support': not lun_space_guarantee,
-            'aggregate': 'fake_aggr1',
+            'netapp_aggregate': 'fake_aggr1',
         }
         self.assertEqual(expected, result)
         self.zapi_client.get_flexvol.assert_called_once_with(
@@ -223,7 +223,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
             'netapp_thin_provisioned': 'true',
             'thick_provisioning_support': False,
             'thin_provisioning_support': True,
-            'aggregate': 'fake_aggr1',
+            'netapp_aggregate': 'fake_aggr1',
         }
         self.assertEqual(expected, result)
         self.zapi_client.get_flexvol.assert_called_once_with(
@@ -251,7 +251,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
             'netapp_thin_provisioned': 'false',
             'thick_provisioning_support': not nfs_sparsed_volumes,
             'thin_provisioning_support': nfs_sparsed_volumes,
-            'aggregate': 'fake_aggr1',
+            'netapp_aggregate': 'fake_aggr1',
         }
         self.assertEqual(expected, result)
         self.zapi_client.get_flexvol.assert_called_once_with(
@@ -291,25 +291,45 @@ class CapabilitiesLibraryTestCase(test.TestCase):
 
     def test_get_ssc_aggregate_info(self):
 
-        self.mock_object(
-            self.ssc_library.zapi_client, 'get_aggregate_disk_type',
-            mock.Mock(return_value=fake_client.AGGR_DISK_TYPE))
         self.mock_object(
             self.ssc_library.zapi_client, 'get_aggregate',
             mock.Mock(return_value=fake_client.AGGR_INFO_SSC))
+        self.mock_object(
+            self.ssc_library.zapi_client, 'get_aggregate_disk_types',
+            mock.Mock(return_value=fake_client.AGGREGATE_DISK_TYPES))
 
         result = self.ssc_library._get_ssc_aggregate_info(
             fake_client.VOLUME_AGGREGATE_NAME)
 
         expected = {
-            'netapp_disk_type': fake_client.AGGR_DISK_TYPE,
-            'netapp_raid_type': fake_client.AGGR_RAID_TYPE,
+            'netapp_disk_type': fake_client.AGGREGATE_DISK_TYPES,
+            'netapp_raid_type': fake_client.AGGREGATE_RAID_TYPE,
+            'netapp_hybrid_aggregate': 'true',
         }
         self.assertEqual(expected, result)
-        self.zapi_client.get_aggregate_disk_type.assert_called_once_with(
-            fake_client.VOLUME_AGGREGATE_NAME)
         self.zapi_client.get_aggregate.assert_called_once_with(
             fake_client.VOLUME_AGGREGATE_NAME)
+        self.zapi_client.get_aggregate_disk_types.assert_called_once_with(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+    def test_get_ssc_aggregate_info_not_found(self):
+
+        self.mock_object(
+            self.ssc_library.zapi_client, 'get_aggregate',
+            mock.Mock(return_value={}))
+        self.mock_object(
+            self.ssc_library.zapi_client, 'get_aggregate_disk_types',
+            mock.Mock(return_value=None))
+
+        result = self.ssc_library._get_ssc_aggregate_info(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+        expected = {
+            'netapp_disk_type': None,
+            'netapp_raid_type': None,
+            'netapp_hybrid_aggregate': None,
+        }
+        self.assertEqual(expected, result)
 
     def test_get_matching_flexvols_for_extra_specs(self):
diff --git a/cinder/volume/drivers/netapp/dataontap/block_cmode.py b/cinder/volume/drivers/netapp/dataontap/block_cmode.py
index a770dc9912c..3be46c59f7b 100644
--- a/cinder/volume/drivers/netapp/dataontap/block_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/block_cmode.py
@@ -251,9 +251,9 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
             pool['provisioned_capacity_gb'] = round(
                 pool['total_capacity_gb'] - pool['free_capacity_gb'], 2)
 
-            aggregate_name = ssc_vol_info.get('aggregate')
+            aggregate_name = ssc_vol_info.get('netapp_aggregate')
             aggr_capacity = aggr_capacities.get(aggregate_name, {})
-            pool['aggregate_used_percent'] = aggr_capacity.get(
+            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                 'percent-used', 0)
 
             # Add utilization data
diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
index 10f157028bc..d6f34c3368e 100644
--- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
@@ -67,6 +67,8 @@ class Client(client_base.Client):
         self.features.add_feature('FAST_CLONE_DELETE', supported=ontapi_1_30)
         self.features.add_feature('SYSTEM_CONSTITUENT_METRICS',
                                   supported=ontapi_1_30)
+        self.features.add_feature('ADVANCED_DISK_PARTITIONING',
+                                  supported=ontapi_1_30)
         self.features.add_feature('BACKUP_CLONE_PARAM',
                                   supported=ontapi_1_100)
 
     def _invoke_vserver_api(self, na_element, vserver):
@@ -1093,6 +1095,7 @@ class Client(client_base.Client):
                     'aggregate-name': None,
                     'aggr-raid-attributes': {
                         'raid-type': None,
+                        'is-hybrid': None,
                     },
                 },
             }
@@ -1115,25 +1118,50 @@ class Client(client_base.Client):
         aggregate = {
             'name': aggr_attributes.get_child_content('aggregate-name'),
             'raid-type': aggr_raid_attrs.get_child_content('raid-type'),
+            'is-hybrid': strutils.bool_from_string(
+                aggr_raid_attrs.get_child_content('is-hybrid')),
         }
 
         return aggregate
 
-    def get_aggregate_disk_type(self, aggregate_name):
-        """Get the disk type of an aggregate."""
+    def get_aggregate_disk_types(self, aggregate_name):
+        """Get the disk type(s) of an aggregate."""
 
-        # Note(cknight): Only get 1 disk, since apart from hybrid
-        # aggregates all disks must be the same type.
-        api_args = {
-            'max-records': 1,
-            'query': {
-                'storage-disk-info': {
-                    'disk-raid-info': {
-                        'disk-aggregate-info': {
+        disk_types = set()
+        disk_types.update(self._get_aggregate_disk_types(aggregate_name))
+        if self.features.ADVANCED_DISK_PARTITIONING:
+            disk_types.update(self._get_aggregate_disk_types(aggregate_name,
+                                                             shared=True))
+
+        return list(disk_types) if disk_types else None
+
+    def _get_aggregate_disk_types(self, aggregate_name, shared=False):
+        """Get the disk type(s) of an aggregate (may be a list)."""
+
+        disk_types = set()
+
+        if shared:
+            disk_raid_info = {
+                'disk-shared-info': {
+                    'aggregate-list': {
+                        'shared-aggregate-info': {
                             'aggregate-name': aggregate_name,
                         },
                     },
                 },
+            }
+        else:
+            disk_raid_info = {
+                'disk-aggregate-info': {
+                    'aggregate-name': aggregate_name,
+                },
+            }
+
+        api_args = {
+            'query': {
+                'storage-disk-info': {
+                    'disk-raid-info': disk_raid_info,
+                },
             },
             'desired-attributes': {
                 'storage-disk-info': {
                     'disk-raid-info': {
@@ -1143,29 +1171,28 @@ class Client(client_base.Client):
                 },
             },
         }
+
         try:
-            result = self.send_request('storage-disk-get-iter', api_args,
-                                       enable_tunneling=False)
+            result = self.send_iter_request(
+                'storage-disk-get-iter', api_args, enable_tunneling=False)
         except netapp_api.NaApiError:
             msg = _LE('Failed to get disk info for aggregate %s.')
             LOG.exception(msg, aggregate_name)
-            return 'unknown'
-
-        if self._get_record_count(result) != 1:
-            return 'unknown'
+            return disk_types
 
         attributes_list = result.get_child_by_name(
             'attributes-list') or netapp_api.NaElement('none')
 
         for storage_disk_info in attributes_list.get_children():
-            disk_raid_info = storage_disk_info.get_child_by_name(
-                'disk-raid-info') or netapp_api.NaElement('none')
-            disk_type = disk_raid_info.get_child_content(
-                'effective-disk-type') or 'unknown'
-            return disk_type
+            disk_raid_info = storage_disk_info.get_child_by_name(
+                'disk-raid-info') or netapp_api.NaElement('none')
+            disk_type = disk_raid_info.get_child_content(
+                'effective-disk-type')
+            if disk_type:
+                disk_types.add(disk_type)
 
-        return 'unknown'
+        return disk_types
 
     def get_aggregate_capacities(self, aggregate_names):
         """Gets capacity info for multiple aggregates."""
diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
index b4ceeb4b98f..001eee42d36 100644
--- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
@@ -208,9 +208,9 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
             capacity = self._get_share_capacity_info(nfs_share)
             pool.update(capacity)
 
-            aggregate_name = ssc_vol_info.get('aggregate')
+            aggregate_name = ssc_vol_info.get('netapp_aggregate')
             aggr_capacity = aggr_capacities.get(aggregate_name, {})
-            pool['aggregate_used_percent'] = aggr_capacity.get(
+            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                 'percent-used', 0)
 
             # Add utilization data
diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py
index 16a276c07b6..e85c83d1169 100644
--- a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py
@@ -97,7 +97,7 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
         # Update pool utilization map atomically
         pool_utilization = {}
         for pool_name, pool_info in ssc_pools.items():
-            aggr_name = pool_info.get('aggregate', 'unknown')
+            aggr_name = pool_info.get('netapp_aggregate', 'unknown')
             node_name = aggr_node_map.get(aggr_name)
             if node_name:
                 pool_utilization[pool_name] = node_utilization.get(
@@ -118,7 +118,7 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
 
         aggr_names = set()
         for pool_name, pool_info in ssc_pools.items():
-            aggr_names.add(pool_info.get('aggregate'))
+            aggr_names.add(pool_info.get('netapp_aggregate'))
 
         return aggr_names
 
     def _get_nodes_for_aggregates(self, aggr_names):
diff --git a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py
index b4e7941f5e7..a5fcb796250 100644
--- a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py
+++ b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py
@@ -98,8 +98,8 @@ class CapabilitiesLibrary(object):
 
         aggregates = set()
         for __, flexvol_info in self.ssc.items():
-            if 'aggregate' in flexvol_info:
-                aggregates.add(flexvol_info['aggregate'])
+            if 'netapp_aggregate' in flexvol_info:
+                aggregates.add(flexvol_info['netapp_aggregate'])
 
         return list(aggregates)
 
     def update_ssc(self, flexvol_map):
@@ -126,7 +126,7 @@ class CapabilitiesLibrary(object):
             ssc_volume.update(self._get_ssc_mirror_info(flexvol_name))
 
             # Get aggregate info
-            aggregate_name = ssc_volume.get('aggregate')
+            aggregate_name = ssc_volume.get('netapp_aggregate')
             ssc_volume.update(self._get_ssc_aggregate_info(aggregate_name))
 
             ssc[flexvol_name] = ssc_volume
@@ -147,7 +147,7 @@ class CapabilitiesLibrary(object):
             'netapp_thin_provisioned': six.text_type(not netapp_thick).lower(),
             'thick_provisioning_support': thick,
             'thin_provisioning_support': not thick,
-            'aggregate': volume_info.get('aggregate'),
+            'netapp_aggregate': volume_info.get('aggregate'),
         }
 
     def _get_thick_provisioning_support(self, netapp_thick):
@@ -190,14 +190,15 @@ class CapabilitiesLibrary(object):
     def _get_ssc_aggregate_info(self, aggregate_name):
         """Gather aggregate info and recast into SSC-style volume stats."""
 
-        disk_type = self.zapi_client.get_aggregate_disk_type(aggregate_name)
-        aggr_info = self.zapi_client.get_aggregate(aggregate_name)
-
-        raid_type = aggr_info.get('raid-type')
+        aggregate = self.zapi_client.get_aggregate(aggregate_name)
+        hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
+                  if 'is-hybrid' in aggregate else None)
+        disk_types = self.zapi_client.get_aggregate_disk_types(aggregate_name)
 
         return {
-            'netapp_disk_type': disk_type,
-            'netapp_raid_type': raid_type,
+            'netapp_raid_type': aggregate.get('raid-type'),
+            'netapp_hybrid_aggregate': hybrid,
+            'netapp_disk_type': disk_types,
         }
 
     def get_matching_flexvols_for_extra_specs(self, extra_specs):
diff --git a/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml b/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml
new file mode 100644
index 00000000000..b3bfba0f238
--- /dev/null
+++ b/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - Add support for hybrid aggregates to the NetApp cDOT drivers.
+
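For reviewers skimming the patch, the sketch below pieces together the per-pool stats shape the cDOT drivers report after this change, based on the expected dicts in the tests and fixtures above. It is illustrative only: the variable name and the literal values are not part of the patch, and the keys simply mirror what the tests assert.

    # Sketch only (not part of the patch): approximate pool stats after this
    # change, with keys taken from the test fixtures above.
    example_pool_stats = {
        'pool_name': 'volume1',
        'netapp_aggregate': 'aggr1',            # renamed from 'aggregate'
        'netapp_aggregate_used_percent': 45,    # renamed from 'aggregate_used_percent'
        'netapp_raid_type': 'raid_dp',
        # Now a list: one entry per effective disk type found in the aggregate,
        # including shared (ADP) disks when ADVANCED_DISK_PARTITIONING is supported.
        'netapp_disk_type': ['SATA', 'SSD'],
        # Lowercase string produced by _get_ssc_aggregate_info(), or None if the
        # aggregate info could not be read.
        'netapp_hybrid_aggregate': 'true',
    }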