NetApp: Fix to support SVM scoped permissions.

Improves NetApp cDOT block and file drivers' support for SVM scoped user
accounts. Features not supported for SVM scoped users include QoS,
aggregate usage reporting, and dedupe usage reporting.

Change-Id: I2b42622dbbb0f9f9f3eb9081cf67fb27e6c1a218
Closes-Bug: #1694579
(cherry picked from commit 887797541d)
Authored by Chuck Fouts on 2017-07-03 16:44:41 -04:00; committed by Eric Harney
parent 970b8cb94c
commit 0cc92ee4b5
10 changed files with 221 additions and 174 deletions
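
Condensed from the hunks below, a minimal sketch of the new gating logic: the capabilities library records which SSC APIs the configured account was denied, the block and NFS drivers derive using_cluster_credentials from that once during do_setup(), and the pool stats drop the cluster-only fields for SVM scoped users. The CapabilitiesSketch/DriverSketch class names and the decorate_pool() helper are illustrative stand-ins, not part of the driver code.

class CapabilitiesSketch(object):
    """Stand-in for CapabilitiesLibrary: tracks denied SSC APIs."""

    def __init__(self):
        self.invalid_extra_specs = []

    def cluster_user_supported(self):
        # If check_api_permissions() recorded any denied APIs, treat the
        # account as SVM (vserver) scoped rather than cluster scoped.
        return not self.invalid_extra_specs


class DriverSketch(object):
    """Stand-in for the cDOT block/NFS drivers' pool-stats gating."""

    def __init__(self, ssc_library):
        # Decided once at do_setup() time, after check_api_permissions().
        self.using_cluster_credentials = ssc_library.cluster_user_supported()

    def decorate_pool(self, pool):
        # QoS, aggregate usage, and dedupe usage reporting all require
        # cluster-scoped credentials, so they are disabled for SVM users.
        pool['QoS_support'] = self.using_cluster_credentials
        if not self.using_cluster_credentials:
            pool['netapp_aggregate_used_percent'] = 0
            pool['netapp_dedupe_used_percent'] = 0
        return pool


if __name__ == '__main__':
    svm_scoped = CapabilitiesSketch()
    svm_scoped.invalid_extra_specs = ['netapp_raid_type']
    print(DriverSketch(svm_scoped).decorate_pool({'pool_name': 'vola'}))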


@ -2246,36 +2246,6 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.assertListEqual([], result)
def test_check_for_cluster_credentials(self):
self.mock_object(self.client,
'list_cluster_nodes',
mock.Mock(return_value=fake_client.NODE_NAMES))
result = self.client.check_for_cluster_credentials()
self.assertTrue(result)
def test_check_for_cluster_credentials_not_found(self):
api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND)
self.mock_object(self.client,
'list_cluster_nodes',
side_effect=api_error)
result = self.client.check_for_cluster_credentials()
self.assertFalse(result)
def test_check_for_cluster_credentials_api_error(self):
self.mock_object(self.client,
'list_cluster_nodes',
self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.check_for_cluster_credentials)
@ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
{'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},)
@ddt.unpack


@ -32,8 +32,8 @@ from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
@ -82,16 +82,17 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
config.netapp_vserver = 'openstack'
return config
@mock.patch.object(client_cmode.Client, 'check_for_cluster_credentials',
mock.MagicMock(return_value=False))
@mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(capabilities.CapabilitiesLibrary,
'cluster_user_supported')
@mock.patch.object(capabilities.CapabilitiesLibrary,
'check_api_permissions')
@mock.patch.object(na_utils, 'check_flags')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_check_flags):
self.zapi_client.check_for_cluster_credentials = mock.MagicMock(
return_value=True)
def test_do_setup(self, super_do_setup, mock_check_flags,
mock_check_api_permissions, mock_cluster_user_supported):
self.mock_object(client_base.Client, '_init_ssh_client')
self.mock_object(
dot_utils, 'get_backend_configuration',
@ -102,14 +103,12 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
super_do_setup.assert_called_once_with(context)
self.assertEqual(1, mock_check_flags.call_count)
mock_check_api_permissions.assert_called_once_with()
mock_cluster_user_supported.assert_called_once_with()
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
mock_check_api_permissions = self.mock_object(
self.library.ssc_library, 'check_api_permissions')
mock_add_looping_tasks = self.mock_object(
self.library, '_add_looping_tasks')
mock_get_pool_map = self.mock_object(
self.library, '_get_flexvol_to_pool_map',
return_value={'fake_map': None})
@ -119,7 +118,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.library.check_for_setup_error()
self.assertEqual(1, super_check_for_setup_error.call_count)
mock_check_api_permissions.assert_called_once_with()
self.assertEqual(1, mock_add_looping_tasks.call_count)
mock_get_pool_map.assert_called_once_with()
mock_add_looping_tasks.assert_called_once_with()
@ -127,8 +125,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
def test_check_for_setup_error_no_filtered_pools(self):
self.mock_object(block_base.NetAppBlockStorageLibrary,
'check_for_setup_error')
mock_check_api_permissions = self.mock_object(
self.library.ssc_library, 'check_api_permissions')
self.mock_object(self.library, '_add_looping_tasks')
self.mock_object(
self.library, '_get_flexvol_to_pool_map', return_value={})
@ -136,24 +132,32 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
mock_check_api_permissions.assert_called_once_with()
@ddt.data({'replication_enabled': True, 'failed_over': False},
{'replication_enabled': True, 'failed_over': True},
{'replication_enabled': False, 'failed_over': False})
@ddt.data({'replication_enabled': True, 'failed_over': False,
'cluster_credentials': True},
{'replication_enabled': True, 'failed_over': True,
'cluster_credentials': True},
{'replication_enabled': False, 'failed_over': False,
'cluster_credentials': False})
@ddt.unpack
def test_handle_housekeeping_tasks(self, replication_enabled, failed_over):
def test_handle_housekeeping_tasks(
self, replication_enabled, failed_over, cluster_credentials):
self.library.using_cluster_credentials = cluster_credentials
ensure_mirrors = self.mock_object(data_motion.DataMotionMixin,
'ensure_snapmirrors')
self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
return_value=fake_utils.SSC.keys())
mock_remove_unused_qos_policy_groups = self.mock_object(
self.zapi_client, 'remove_unused_qos_policy_groups')
self.library.replication_enabled = replication_enabled
self.library.failed_over = failed_over
self.library._handle_housekeeping_tasks()
(self.zapi_client.remove_unused_qos_policy_groups.
assert_called_once_with())
if self.library.using_cluster_credentials:
mock_remove_unused_qos_policy_groups.assert_called_once_with()
else:
mock_remove_unused_qos_policy_groups.assert_not_called()
if replication_enabled and not failed_over:
ensure_mirrors.assert_called_once_with(
self.library.configuration, self.library.backend_name,
@ -162,7 +166,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertFalse(ensure_mirrors.called)
def test_handle_ems_logging(self):
volume_list = ['vol0', 'vol1', 'vol2']
self.mock_object(
self.library.ssc_library, 'get_ssc_flexvol_names',
@ -346,9 +349,12 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertEqual(target_details_list[2], result)
@ddt.data([], ['target_1', 'target_2'])
def test_get_pool_stats(self, replication_backends):
@ddt.data({'replication_backends': [], 'cluster_credentials': False},
{'replication_backends': ['target_1', 'target_2'],
'cluster_credentials': True})
@ddt.unpack
def test_get_pool_stats(self, replication_backends, cluster_credentials):
self.library.using_cluster_credentials = cluster_credentials
ssc = {
'vola': {
'pool_name': 'vola',
@ -372,7 +378,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.mock_object(self.library, 'get_replication_backend_names',
return_value=replication_backends)
self.library.using_cluster_credentials = True
self.library.reserved_percentage = 5
self.library.max_over_subscription_ratio = 10
self.library.perf_library.get_node_utilization_for_pool = (
@ -428,6 +433,14 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
'netapp_disk_type': 'SSD',
'replication_enabled': False,
}]
expected[0].update({'QoS_support': cluster_credentials})
if not cluster_credentials:
expected[0].update({
'netapp_aggregate_used_percent': 0,
'netapp_dedupe_used_percent': 0
})
if replication_backends:
expected[0].update({
'replication_enabled': True,
@ -438,8 +451,9 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertEqual(expected, result)
mock_get_ssc.assert_called_once_with()
mock_get_aggrs.assert_called_once_with()
mock_get_aggr_capacities.assert_called_once_with(['aggr1'])
if cluster_credentials:
mock_get_aggrs.assert_called_once_with()
mock_get_aggr_capacities.assert_called_once_with(['aggr1'])
@ddt.data({}, None)
def test_get_pool_stats_no_ssc_vols(self, ssc):


@ -38,6 +38,7 @@ from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import nfs_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
@ -68,6 +69,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver.perf_library = mock.Mock()
self.driver.ssc_library = mock.Mock()
self.driver.zapi_client = mock.Mock()
self.driver.using_cluster_credentials = True
def get_config_cmode(self):
config = na_fakes.create_configuration_cmode()
@ -111,16 +113,24 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
@mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
@mock.patch.object(client_cmode, 'Client', mock.Mock())
@mock.patch.object(capabilities.CapabilitiesLibrary,
'cluster_user_supported')
@mock.patch.object(capabilities.CapabilitiesLibrary,
'check_api_permissions')
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
def test_do_setup(self, mock_check_flags, mock_super_do_setup,
mock_check_api_permissions, mock_cluster_user_supported):
self.mock_object(
dot_utils, 'get_backend_configuration',
return_value=self.get_config_cmode())
self.driver.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertTrue(mock_super_do_setup.called)
mock_check_api_permissions.assert_called_once_with()
mock_cluster_user_supported.assert_called_once_with()
def test__update_volume_stats(self):
mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
@ -146,9 +156,12 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertEqual(1, mock_debug_log.call_count)
self.assertEqual(expected_stats, self.driver._stats)
@ddt.data([], ['target_1', 'target_2'])
def test_get_pool_stats(self, replication_backends):
@ddt.data({'replication_backends': [], 'cluster_credentials': False},
{'replication_backends': ['target_1', 'target_2'],
'cluster_credentials': True})
@ddt.unpack
def test_get_pool_stats(self, replication_backends, cluster_credentials):
self.driver.using_cluster_credentials = cluster_credentials
self.driver.zapi_client = mock.Mock()
ssc = {
'vola': {
@ -211,7 +224,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
expected = [{
'pool_name': '10.10.10.10:/vola',
'QoS_support': True,
'reserved_percentage': fake.RESERVED_PERCENTAGE,
'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
'multiattach': False,
@ -235,6 +247,14 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'consistent_group_snapshot_enabled': True,
'replication_enabled': False,
}]
expected[0].update({'QoS_support': cluster_credentials})
if not cluster_credentials:
expected[0].update({
'netapp_aggregate_used_percent': 0,
'netapp_dedupe_used_percent': 0
})
if replication_backends:
expected[0].update({
'replication_enabled': True,
@ -245,8 +265,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertEqual(expected, result)
mock_get_ssc.assert_called_once_with()
mock_get_aggrs.assert_called_once_with()
mock_get_aggr_capacities.assert_called_once_with(['aggr1'])
if cluster_credentials:
mock_get_aggrs.assert_called_once_with()
mock_get_aggr_capacities.assert_called_once_with(['aggr1'])
@ddt.data({}, None)
def test_get_pool_stats_no_ssc_vols(self, ssc):
@ -398,34 +419,41 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
nfs_base.NetAppNfsDriver, 'check_for_setup_error')
mock_check_api_permissions = self.mock_object(
self.driver.ssc_library, 'check_api_permissions')
mock_add_looping_tasks = self.mock_object(
self.driver, '_add_looping_tasks')
self.driver.check_for_setup_error()
self.assertEqual(1, super_check_for_setup_error.call_count)
mock_check_api_permissions.assert_called_once_with()
self.assertEqual(1, mock_add_looping_tasks.call_count)
mock_add_looping_tasks.assert_called_once_with()
@ddt.data({'replication_enabled': True, 'failed_over': False},
{'replication_enabled': True, 'failed_over': True},
{'replication_enabled': False, 'failed_over': False})
@ddt.data({'replication_enabled': True, 'failed_over': False,
'cluster_credentials': True},
{'replication_enabled': True, 'failed_over': True,
'cluster_credentials': True},
{'replication_enabled': False, 'failed_over': False,
'cluster_credentials': False})
@ddt.unpack
def test_handle_housekeeping_tasks(self, replication_enabled, failed_over):
def test_handle_housekeeping_tasks(
self, replication_enabled, failed_over, cluster_credentials):
self.driver.using_cluster_credentials = cluster_credentials
ensure_mirrors = self.mock_object(data_motion.DataMotionMixin,
'ensure_snapmirrors')
self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names',
return_value=fake_ssc.SSC.keys())
mock_remove_unused_qos_policy_groups = self.mock_object(
self.driver.zapi_client, 'remove_unused_qos_policy_groups')
self.driver.replication_enabled = replication_enabled
self.driver.failed_over = failed_over
self.driver._handle_housekeeping_tasks()
(self.driver.zapi_client.remove_unused_qos_policy_groups.
assert_called_once_with())
if self.driver.using_cluster_credentials:
mock_remove_unused_qos_policy_groups.assert_called_once_with()
else:
mock_remove_unused_qos_policy_groups.assert_not_called()
if replication_enabled and not failed_over:
ensure_mirrors.assert_called_once_with(
self.driver.configuration, self.driver.backend_name,


@ -272,8 +272,9 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.zapi_client.get_flexvol.assert_called_once_with(
flexvol_name=fake_client.VOLUME_NAMES[0])
def test_get_ssc_dedupe_info(self):
@ddt.data([], ['netapp_dedup'], ['netapp_compression'])
def test_get_ssc_dedupe_info(self, invalid_extra_specs):
self.ssc_library.invalid_extra_specs = invalid_extra_specs
self.mock_object(
self.ssc_library.zapi_client, 'get_flexvol_dedupe_info',
return_value=fake_client.VOLUME_DEDUPE_INFO_SSC)
@ -281,13 +282,20 @@ class CapabilitiesLibraryTestCase(test.TestCase):
result = self.ssc_library._get_ssc_dedupe_info(
fake_client.VOLUME_NAMES[0])
expected = {
'netapp_dedup': 'true',
'netapp_compression': 'false',
}
if invalid_extra_specs:
expected = {
'netapp_dedup': 'false',
'netapp_compression': 'false',
}
self.zapi_client.get_flexvol_dedupe_info.assert_not_called()
else:
expected = {
'netapp_dedup': 'true',
'netapp_compression': 'false',
}
self.zapi_client.get_flexvol_dedupe_info.assert_called_once_with(
fake_client.VOLUME_NAMES[0])
self.assertEqual(expected, result)
self.zapi_client.get_flexvol_dedupe_info.assert_called_once_with(
fake_client.VOLUME_NAMES[0])
def test_get_ssc_encryption_info(self):
@ -320,8 +328,9 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.zapi_client.is_flexvol_mirrored.assert_called_once_with(
fake_client.VOLUME_NAMES[0], fake.SSC_VSERVER)
def test_get_ssc_aggregate_info(self):
@ddt.data([], ['netapp_raid_type'])
def test_get_ssc_aggregate_info(self, invalid_extra_specs):
self.ssc_library.invalid_extra_specs = invalid_extra_specs
self.mock_object(
self.ssc_library.zapi_client, 'get_aggregate',
return_value=fake_client.AGGR_INFO_SSC)
@ -332,19 +341,29 @@ class CapabilitiesLibraryTestCase(test.TestCase):
result = self.ssc_library._get_ssc_aggregate_info(
fake_client.VOLUME_AGGREGATE_NAME)
expected = {
'netapp_disk_type': fake_client.AGGREGATE_DISK_TYPES,
'netapp_raid_type': fake_client.AGGREGATE_RAID_TYPE,
'netapp_hybrid_aggregate': 'true',
}
if invalid_extra_specs:
expected = {
'netapp_disk_type': None,
'netapp_raid_type': None,
'netapp_hybrid_aggregate': None,
}
self.zapi_client.get_aggregate.assert_not_called()
self.zapi_client.get_aggregate_disk_types.assert_not_called()
else:
expected = {
'netapp_disk_type': fake_client.AGGREGATE_DISK_TYPES,
'netapp_raid_type': fake_client.AGGREGATE_RAID_TYPE,
'netapp_hybrid_aggregate': 'true',
}
self.zapi_client.get_aggregate.assert_called_once_with(
fake_client.VOLUME_AGGREGATE_NAME)
self.zapi_client.get_aggregate_disk_types.assert_called_once_with(
fake_client.VOLUME_AGGREGATE_NAME)
self.assertEqual(expected, result)
self.zapi_client.get_aggregate.assert_called_once_with(
fake_client.VOLUME_AGGREGATE_NAME)
self.zapi_client.get_aggregate_disk_types.assert_called_once_with(
fake_client.VOLUME_AGGREGATE_NAME)
def test_get_ssc_aggregate_info_not_found(self):
self.ssc_library.invalid_extra_specs = ['netapp_raid_type']
self.mock_object(
self.ssc_library.zapi_client, 'get_aggregate', return_value={})
self.mock_object(
@ -478,3 +497,12 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'netapp_compression': 'true',
}
self.assertEqual(expected, result)
@ddt.data([], ['netapp_dedup'], ['netapp_compression'])
def test_cluster_user_supported(self, invalid_extra_specs):
self.ssc_library.invalid_extra_specs = invalid_extra_specs
if invalid_extra_specs:
self.assertFalse(self.ssc_library.cluster_user_supported())
else:
self.assertTrue(self.ssc_library.cluster_user_supported())


@ -73,18 +73,21 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
self.zapi_client = dot_utils.get_client_for_backend(
self.failed_over_backend_name or self.backend_name)
self.vserver = self.zapi_client.vserver
self.using_cluster_credentials = \
self.zapi_client.check_for_cluster_credentials()
# Performance monitoring library
self.perf_library = perf_cmode.PerformanceCmodeLibrary(
self.zapi_client)
# Storage service catalog
self.ssc_library = capabilities.CapabilitiesLibrary(
self.driver_protocol, self.vserver, self.zapi_client,
self.configuration)
self.ssc_library.check_api_permissions()
self.using_cluster_credentials = (
self.ssc_library.cluster_user_supported())
# Performance monitoring library
self.perf_library = perf_cmode.PerformanceCmodeLibrary(
self.zapi_client)
def _update_zapi_client(self, backend_name):
"""Set cDOT API client for the specified config backend stanza name."""
@ -99,8 +102,6 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
self.ssc_library.check_api_permissions()
if not self._get_flexvol_to_pool_map():
msg = _('No pools are available for provisioning volumes. '
'Ensure that the configuration option '
@ -136,12 +137,12 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
# Harvest soft-deleted QoS policy groups
self.zapi_client.remove_unused_qos_policy_groups()
active_backend = self.failed_over_backend_name or self.backend_name
# Add the task that harvests soft-deleted QoS policy groups.
if self.using_cluster_credentials:
self.zapi_client.remove_unused_qos_policy_groups()
LOG.debug("Current service state: Replication enabled: %("
"replication)s. Failed-Over: %(failed)s. Active Backend "
"ID: %(active)s",
@ -299,7 +300,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
pool.update(ssc_vol_info)
# Add driver capabilities and config info
pool['QoS_support'] = True
pool['QoS_support'] = self.using_cluster_credentials
pool['multiattach'] = False
pool['consistencygroup_support'] = True
pool['consistent_group_snapshot_enabled'] = True


@ -21,7 +21,6 @@ import math
import re
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
@ -836,21 +835,6 @@ class Client(client_base.Client):
return [node_info.get_child_content('node') for node_info
in nodes_info_list.get_children()]
def check_for_cluster_credentials(self):
"""Checks whether cluster-scoped credentials are being used or not."""
try:
self.list_cluster_nodes()
# API succeeded, so definitely a cluster management LIF
return True
except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPINOTFOUND:
LOG.debug('Not connected to cluster management LIF.')
else:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to get the list of nodes.')
return False
def get_operational_lif_addresses(self):
"""Gets the IP addresses of operational LIFs on the vserver."""
@ -1098,14 +1082,9 @@ class Client(client_base.Client):
try:
result = self.send_iter_request('sis-get-iter', api_args)
except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPIPRIVILEGE:
LOG.debug('Dedup info for volume %(name)s will not be '
'collected. This API requires cluster-scoped '
'credentials.', {'name': flexvol_name})
else:
LOG.exception('Failed to get dedupe info for volume %s.',
flexvol_name)
except netapp_api.NaApiError:
LOG.exception('Failed to get dedupe info for volume %s.',
flexvol_name)
return no_dedupe_response
if self._get_record_count(result) != 1:
@ -1426,13 +1405,9 @@ class Client(client_base.Client):
try:
aggrs = self._get_aggregates(aggregate_names=[aggregate_name],
desired_attributes=desired_attributes)
except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPINOTFOUND:
LOG.debug('Aggregate info can only be collected with '
'cluster-scoped credentials.')
else:
LOG.exception('Failed to get info for aggregate %s.',
aggregate_name)
except netapp_api.NaApiError:
LOG.exception('Failed to get info for aggregate %s.',
aggregate_name)
return {}
if len(aggrs) < 1:
@ -1502,13 +1477,9 @@ class Client(client_base.Client):
try:
result = self.send_iter_request(
'storage-disk-get-iter', api_args, enable_tunneling=False)
except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPINOTFOUND:
LOG.debug('Disk types can only be collected with '
'cluster scoped credentials.')
else:
LOG.exception('Failed to get disk info for aggregate %s.',
aggregate_name)
except netapp_api.NaApiError:
LOG.exception('Failed to get disk info for aggregate %s.',
aggregate_name)
return disk_types
attributes_list = result.get_child_by_name(


@ -77,14 +77,18 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
self.failed_over_backend_name or self.backend_name)
self.vserver = self.zapi_client.vserver
# Performance monitoring library
self.perf_library = perf_cmode.PerformanceCmodeLibrary(
self.zapi_client)
# Storage service catalog
self.ssc_library = capabilities.CapabilitiesLibrary(
'nfs', self.vserver, self.zapi_client, self.configuration)
self.ssc_library.check_api_permissions()
self.using_cluster_credentials = (
self.ssc_library.cluster_user_supported())
# Performance monitoring library
self.perf_library = perf_cmode.PerformanceCmodeLibrary(
self.zapi_client)
def _update_zapi_client(self, backend_name):
"""Set cDOT API client for the specified config backend stanza name."""
@ -98,7 +102,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
@utils.trace_method
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
self.ssc_library.check_api_permissions()
self._add_looping_tasks()
super(NetAppCmodeNfsDriver, self).check_for_setup_error()
@ -143,12 +146,12 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
# Harvest soft-deleted QoS policy groups
self.zapi_client.remove_unused_qos_policy_groups()
active_backend = self.failed_over_backend_name or self.backend_name
# Add the task that harvests soft-deleted QoS policy groups.
if self.using_cluster_credentials:
self.zapi_client.remove_unused_qos_policy_groups()
LOG.debug("Current service state: Replication enabled: %("
"replication)s. Failed-Over: %(failed)s. Active Backend "
"ID: %(active)s",
@ -252,12 +255,18 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
if not ssc:
return pools
# Get up-to-date node utilization metrics just once
self.perf_library.update_performance_cache(ssc)
# Utilization and performance metrics require cluster-scoped
# credentials
if self.using_cluster_credentials:
# Get up-to-date node utilization metrics just once
self.perf_library.update_performance_cache(ssc)
# Get up-to-date aggregate capacities just once
aggregates = self.ssc_library.get_ssc_aggregates()
aggr_capacities = self.zapi_client.get_aggregate_capacities(aggregates)
# Get up-to-date aggregate capacities just once
aggregates = self.ssc_library.get_ssc_aggregates()
aggr_capacities = self.zapi_client.get_aggregate_capacities(
aggregates)
else:
aggr_capacities = {}
for ssc_vol_name, ssc_vol_info in ssc.items():
@ -267,7 +276,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
pool.update(ssc_vol_info)
# Add driver capabilities and config info
pool['QoS_support'] = True
pool['QoS_support'] = self.using_cluster_credentials
pool['consistencygroup_support'] = True
pool['consistent_group_snapshot_enabled'] = True
pool['multiattach'] = False
@ -277,8 +286,11 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
capacity = self._get_share_capacity_info(nfs_share)
pool.update(capacity)
dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
ssc_vol_name)
if self.using_cluster_credentials:
dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
ssc_vol_name)
else:
dedupe_used = 0.0
pool['netapp_dedupe_used_percent'] = na_utils.round_down(
dedupe_used)


@ -54,9 +54,9 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
else:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
LOG.exception('Could not get performance base counter '
'name. Performance-based scheduler '
'functions may not be available.')
LOG.warning('Could not get performance base counter '
'name. Performance-based scheduler '
'functions may not be available.')
def update_performance_cache(self, ssc_pools):
"""Called periodically to update per-pool node utilization metrics."""


@ -61,6 +61,7 @@ class CapabilitiesLibrary(object):
self.configuration = configuration
self.backend_name = self.configuration.safe_get('volume_backend_name')
self.ssc = {}
self.invalid_extra_specs = []
def check_api_permissions(self):
"""Check which APIs that support SSC functionality are available."""
@ -86,6 +87,11 @@ class CapabilitiesLibrary(object):
'APIs. The following extra specs will fail '
'or be ignored: %s.', invalid_extra_specs)
self.invalid_extra_specs = invalid_extra_specs
def cluster_user_supported(self):
return not self.invalid_extra_specs
def get_ssc(self):
"""Get a copy of the Storage Service Catalog."""
@ -183,10 +189,15 @@ class CapabilitiesLibrary(object):
def _get_ssc_dedupe_info(self, flexvol_name):
"""Gather dedupe info and recast into SSC-style volume stats."""
dedupe_info = self.zapi_client.get_flexvol_dedupe_info(flexvol_name)
dedupe = dedupe_info.get('dedupe')
compression = dedupe_info.get('compression')
if ('netapp_dedup' in self.invalid_extra_specs or
'netapp_compression' in self.invalid_extra_specs):
dedupe = False
compression = False
else:
dedupe_info = self.zapi_client.get_flexvol_dedupe_info(
flexvol_name)
dedupe = dedupe_info.get('dedupe')
compression = dedupe_info.get('compression')
return {
'netapp_dedup': six.text_type(dedupe).lower(),
@ -211,13 +222,20 @@ class CapabilitiesLibrary(object):
def _get_ssc_aggregate_info(self, aggregate_name):
"""Gather aggregate info and recast into SSC-style volume stats."""
aggregate = self.zapi_client.get_aggregate(aggregate_name)
hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
if 'is-hybrid' in aggregate else None)
disk_types = self.zapi_client.get_aggregate_disk_types(aggregate_name)
if 'netapp_raid_type' in self.invalid_extra_specs:
raid_type = None
hybrid = None
disk_types = None
else:
aggregate = self.zapi_client.get_aggregate(aggregate_name)
raid_type = aggregate.get('raid-type')
hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
if 'is-hybrid' in aggregate else None)
disk_types = self.zapi_client.get_aggregate_disk_types(
aggregate_name)
return {
'netapp_raid_type': aggregate.get('raid-type'),
'netapp_raid_type': raid_type,
'netapp_hybrid_aggregate': hybrid,
'netapp_disk_type': disk_types,
}


@ -0,0 +1,5 @@
---
fixes:
- NetApp cDOT block and file drivers have improved support for SVM scoped
user accounts. Features not supported for SVM scoped users include QoS,
aggregate usage reporting, and dedupe usage reporting.
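
For reference, the periodic housekeeping path is gated the same way in both drivers; the sketch below trims that hunk to the relevant lines and uses unittest.mock only to make it self-contained (the real method also logs replication state and ensures SnapMirrors).

from unittest import mock


class HousekeepingSketch(object):
    """Trimmed version of _handle_housekeeping_tasks from the diff above."""

    def __init__(self, using_cluster_credentials):
        self.using_cluster_credentials = using_cluster_credentials
        self.zapi_client = mock.Mock()

    def _handle_housekeeping_tasks(self):
        # Harvesting soft-deleted QoS policy groups needs cluster-scoped
        # credentials, so it is skipped for SVM scoped accounts.
        if self.using_cluster_credentials:
            self.zapi_client.remove_unused_qos_policy_groups()


svm = HousekeepingSketch(using_cluster_credentials=False)
svm._handle_housekeeping_tasks()
assert not svm.zapi_client.remove_unused_qos_policy_groups.called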