VMAX Driver - VMAX OS Upgrade Bug

Workload support was dropped in ucode 5978. If a VMAX All Flash
array is upgraded to 5978 or greater and existing volume types
leveraged a workload (e.g. DSS, DSS_REP, OLTP, or OLTP_REP),
attaching and detaching will no longer work and the volume type
will be unusable.

Change-Id: I0ad0c530c93a9494e1a3048e557360b38c4a125b
Closes-Bug: #1790141
michael-mcaleer 2018-08-31 14:45:43 +01:00 committed by Helen Walsh
parent b56ebd44b2
commit 546faf0cd5
8 changed files with 292 additions and 55 deletions
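
For illustration, here is a minimal sketch of the normalization this commit applies. The helper and its name are hypothetical; the '+'-delimited pool format and the 'NONE' substitution follow the masking.py and common.py changes below.

def normalize_pool_for_next_gen(pool_name, is_next_gen):
    """Collapse the workload element of a legacy pool_name to 'NONE'.

    A pool_name has the form 'SLO+Workload+SRP+SerialNumber' (four
    elements) when a workload was specified, or 'SLO+SRP+SerialNumber'
    (three elements) when it was not.
    """
    parts = pool_name.split('+')
    if is_next_gen and len(parts) == 4:
        # Workloads are deprecated on uCode 5978+; any supplied
        # workload is treated as 'NONE' (assumed behaviour, per the
        # diff in this commit).
        parts[1] = 'NONE'
    return '+'.join(parts)

For example, normalize_pool_for_next_gen('Diamond+DSS+SRP_1+000197800128', True) returns 'Diamond+NONE+SRP_1+000197800128', in line with the 'OS-SRP_1-Diamond-NONE-SG' default storage group name the tests below assert.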


@@ -1909,14 +1909,17 @@ class VMAXRestTest(test.TestCase):
self.data.array)
self.assertEqual(ref_settings, wl_settings)
def test_get_workload_settings_next_gen(self):
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
wl_settings = self.rest.get_workload_settings(
self.data.array_herc)
self.assertEqual(['None'], wl_settings)
def test_get_workload_settings_failed(self):
wl_settings = self.rest.get_workload_settings(
self.data.failed_resource)
self.assertEqual([], wl_settings)
# New array
wl_settings = self.rest.get_workload_settings(
self.data.array_herc)
self.assertEqual([], wl_settings)
def test_is_compression_capable_true(self):
compr_capable = self.rest.is_compression_capable('000197800128')
@@ -1952,6 +1955,28 @@ class VMAXRestTest(test.TestCase):
self.data.slo, self.data.workload, self.data.extra_specs)
self.assertEqual(self.data.storagegroup_name_f, sg_name)
def test_create_storage_group_next_gen(self):
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
with mock.patch.object(self.rest, '_create_storagegroup',
return_value=(200, self.data.job_list[0])):
self.rest.create_storage_group(
self.data.array, self.data.storagegroup_name_f,
self.data.srp, self.data.slo, self.data.workload,
self.data.extra_specs)
payload = {"srpId": self.data.srp,
"storageGroupId": self.data.storagegroup_name_f,
"emulation": "FBA",
"sloBasedStorageGroupParam": [
{"num_of_vols": 0,
"sloId": self.data.slo,
"workloadSelection": 'NONE',
"volumeAttribute": {
"volume_size": "0",
"capacityUnit": "GB"}}]}
self.rest._create_storagegroup.assert_called_once_with(
self.data.array, payload)
def test_create_storage_group_failed(self):
self.assertRaises(
exception.VolumeBackendAPIException,
@@ -2074,6 +2099,14 @@ class VMAXRestTest(test.TestCase):
self.assertEqual(ref_sg_name, storagegroup_name)
self.assertEqual(ref_storage_group, storagegroup)
def test_get_vmax_default_storage_group_next_gen(self):
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
__, storagegroup_name = self.rest.get_vmax_default_storage_group(
self.data.array, self.data.srp,
self.data.slo, self.data.workload)
self.assertEqual('OS-SRP_1-Diamond-NONE-SG', storagegroup_name)
def test_delete_storage_group(self):
operation = 'delete storagegroup resource'
status_code = 204
@@ -3551,6 +3584,14 @@ class VMAXProvisionTest(test.TestCase):
self.provision.get_slo_workload_settings_from_storage_group(
self.data.array, 'no_workload_sg'))
self.assertEqual(ref_settings2, sg_slo_settings2)
# NextGen Array
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
ref_settings3 = "Diamond+NONE"
sg_slo_settings3 = (
self.provision.get_slo_workload_settings_from_storage_group(
self.data.array, self.data.defaultstoragegroup_name))
self.assertEqual(ref_settings3, sg_slo_settings3)
@mock.patch.object(rest.VMAXRest, 'wait_for_rdf_consistent_state')
@mock.patch.object(rest.VMAXRest, 'delete_rdf_pair')
@@ -3778,6 +3819,18 @@ class VMAXCommonTest(test.TestCase):
configuration = FakeConfiguration(None, 'config_group', None, None)
fc.VMAXFCDriver(configuration=configuration)
@mock.patch.object(rest.VMAXRest, 'is_next_gen_array',
return_value=True)
@mock.patch.object(rest.VMAXRest, 'set_rest_credentials')
@mock.patch.object(common.VMAXCommon, '_get_slo_workload_combinations',
return_value=[])
@mock.patch.object(common.VMAXCommon, 'get_attributes_from_cinder_config',
return_value=VMAXCommonData.array_info_wl)
def test_gather_info_next_gen(self, mock_parse, mock_combo, mock_rest,
mock_nextgen):
self.common._gather_info()
self.assertTrue(self.common.nextGen)
def test_get_slo_workload_combinations_powermax(self):
array_info = self.common.get_attributes_from_cinder_config()
finalarrayinfolist = self.common._get_slo_workload_combinations(
@@ -3794,6 +3847,36 @@ class VMAXCommonTest(test.TestCase):
array_info)
self.assertTrue(len(finalarrayinfolist) > 1)
@mock.patch.object(rest.VMAXRest, 'get_vmax_model',
return_value=VMAXCommonData.powermax_model_details[
'model'])
@mock.patch.object(rest.VMAXRest, 'get_workload_settings',
return_value=[])
@mock.patch.object(rest.VMAXRest, 'get_slo_list',
return_value=VMAXCommonData.powermax_slo_details[
'sloId'])
def test_get_slo_workload_combinations_next_gen(self, mck_slo, mck_wl,
mck_model):
self.common.nextGen = True
finalarrayinfolist = self.common._get_slo_workload_combinations(
self.data.array_info_no_wl)
self.assertTrue(len(finalarrayinfolist) == 14)
@mock.patch.object(rest.VMAXRest, 'get_vmax_model',
return_value=VMAXCommonData.vmax_model_details[
'model'])
@mock.patch.object(rest.VMAXRest, 'get_workload_settings',
return_value=[])
@mock.patch.object(rest.VMAXRest, 'get_slo_list',
return_value=VMAXCommonData.powermax_slo_details[
'sloId'])
def test_get_slo_workload_combinations_next_gen_vmax(
self, mck_slo, mck_wl, mck_model):
self.common.nextGen = True
finalarrayinfolist = self.common._get_slo_workload_combinations(
self.data.array_info_no_wl)
self.assertTrue(len(finalarrayinfolist) == 18)
def test_get_slo_workload_combinations_failed(self):
array_info = {}
self.assertRaises(exception.VolumeBackendAPIException,
@@ -3985,6 +4068,21 @@ class VMAXCommonTest(test.TestCase):
device_info_dict = self.common.initialize_connection(volume, connector)
self.assertEqual(ref_dict, device_info_dict)
def test_initialize_connection_already_mapped_next_gen(self):
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
volume = self.data.test_volume
connector = self.data.connector
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0]
['host_lun_address'])
ref_dict = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
device_info_dict = self.common.initialize_connection(volume,
connector)
self.assertEqual(ref_dict, device_info_dict)
@mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
return_value=({}, False))
@mock.patch.object(common.VMAXCommon, '_attach_volume',
@@ -4003,6 +4101,20 @@ class VMAXCommonTest(test.TestCase):
mock_attach.assert_called_once_with(
volume, connector, extra_specs, masking_view_dict)
@mock.patch.object(rest.VMAXRest, 'is_next_gen_array',
return_value=True)
@mock.patch.object(common.VMAXCommon, 'find_host_lun_id',
return_value=({}, False))
@mock.patch.object(common.VMAXCommon, '_attach_volume',
return_value=({}, VMAXCommonData.port_group_name_f))
def test_initialize_connection_not_mapped_next_gen(self, mock_attach,
mock_id, mck_gen):
volume = self.data.test_volume
connector = self.data.connector
device_info_dict = self.common.initialize_connection(
volume, connector)
self.assertEqual({}, device_info_dict)
@mock.patch.object(
masking.VMAXMasking, 'pre_multiattach',
return_value=VMAXCommonData.masking_view_dict_multiattach)
@@ -4259,7 +4371,9 @@ class VMAXCommonTest(test.TestCase):
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs[utils.WORKLOAD] = self.data.workload
ref_mv_dict = self.data.masking_view_dict
self.common.nextGen = False
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
@@ -4297,10 +4411,21 @@ class VMAXCommonTest(test.TestCase):
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs[utils.DISABLECOMPRESSION] = "true"
ref_mv_dict = self.data.masking_view_dict_compression_disabled
extra_specs[utils.WORKLOAD] = self.data.workload
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
def test_populate_masking_dict_next_gen(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common.nextGen = True
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual('NONE', masking_view_dict[utils.WORKLOAD])
def test_create_cloned_volume(self):
volume = self.data.test_clone_volume
source_volume = self.data.test_volume
@@ -4423,6 +4548,25 @@ class VMAXCommonTest(test.TestCase):
volume_name, volume_size, extra_specs)
self.assertEqual(ref_dict, volume_dict)
def test_create_volume_success_next_gen(self):
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
self.common.nextGen = True
with mock.patch.object(self.utils, 'is_compression_disabled',
return_value=True):
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
with mock.patch.object(self.masking,
'get_or_create_default_storage_group'):
self.common._create_volume(
volume_name, volume_size, extra_specs)
(self.masking.get_or_create_default_storage_group
.assert_called_once_with(extra_specs['array'],
extra_specs[utils.SRP],
extra_specs[utils.SLO],
'NONE', extra_specs, True))
def test_create_volume_failed(self):
volume_name = self.data.test_volume.name
volume_size = self.data.test_volume.size
@@ -4513,6 +4657,15 @@ class VMAXCommonTest(test.TestCase):
self.common._set_vmax_extra_specs,
{}, srp_record)
def test_set_vmax_extra_specs_next_gen(self):
srp_record = self.common.get_attributes_from_cinder_config()
self.common.nextGen = True
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.assertEqual('NONE', extra_specs[utils.WORKLOAD])
def test_delete_volume_from_srp_success(self):
array = self.data.array
device_id = self.data.device_id
@@ -5185,6 +5338,18 @@ class VMAXCommonTest(test.TestCase):
self.data.srp, volume_name, False, False)
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_next_gen(self):
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (True, 'Silver', 'NONE')
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False)
self.assertEqual(ref_return, return_val)
def test_find_volume_group(self):
group = self.data.test_group_1
array = self.data.array
@@ -7118,6 +7283,20 @@ class VMAXMaskingTest(test.TestCase):
self.extra_specs, volume=vol_grp_member)
mock_return.assert_called_once()
def test_add_volume_to_default_storage_group_next_gen(self):
with mock.patch.object(rest.VMAXRest, 'is_next_gen_array',
return_value=True):
with mock.patch.object(
self.mask, 'get_or_create_default_storage_group'):
self.mask.add_volume_to_default_storage_group(
self.data.array, self.device_id, self.volume_name,
self.extra_specs)
(self.mask.get_or_create_default_storage_group
.assert_called_once_with(self.data.array, self.data.srp,
self.extra_specs[utils.SLO],
'NONE', self.extra_specs, False,
False, None))
@mock.patch.object(provision.VMAXProvision, 'create_storage_group')
def test_get_or_create_default_storage_group(self, mock_create_sg):
with mock.patch.object(
@@ -7323,6 +7502,15 @@ class VMAXMaskingTest(test.TestCase):
self.data.extra_specs)
mock_return.assert_called_once()
def test_pre_multiattach_next_gen(self):
with mock.patch.object(utils.VMAXUtils, 'truncate_string',
return_value='DiamondDSS'):
self.mask.pre_multiattach(
self.data.array, self.data.device_id,
self.data.masking_view_dict_multiattach, self.data.extra_specs)
utils.VMAXUtils.truncate_string.assert_called_once_with(
'DiamondDSS', 10)
@mock.patch.object(rest.VMAXRest, 'get_storage_group_list',
side_effect=[{'storageGroupId': [
VMAXCommonData.no_slo_sg_name]}, {}])


@@ -136,6 +136,7 @@ class VMAXCommon(object):
self._get_replication_info()
self._gather_info()
self.version_dict = {}
self.nextGen = False
def _gather_info(self):
"""Gather the relevant information for update_volume_stats."""
@@ -147,6 +148,9 @@
"configuration and note that the xml file is no "
"longer supported.")
self.rest.set_rest_credentials(array_info)
if array_info:
self.nextGen = self.rest.is_next_gen_array(
array_info['SerialNumber'])
finalarrayinfolist = self._get_slo_workload_combinations(
array_info)
self.pool_info['arrays_info'] = finalarrayinfolist
@@ -219,29 +223,39 @@
array = self.active_backend_id
slo_settings = self.rest.get_slo_list(array)
# Remove 'None' and 'Optimized' from the SLO list; they cannot be
# mixed with workloads, so they will be added again later
slo_list = [x for x in slo_settings
if x.lower() not in ['none', 'optimized']]
workload_settings = self.rest.get_workload_settings(array)
workload_settings.append("None")
slo_workload_set = set()
workload_settings.append('None')
slo_workload_set = set(
['%(slo)s:%(workload)s' % {'slo': slo,
'workload': workload}
for slo in slo_list for workload in workload_settings])
slo_workload_set.add('None:None')
if self.rest.is_next_gen_array(array):
if self.nextGen:
LOG.warning("Workloads have been deprecated for arrays "
"running PowerMax OS uCode level 5978 or higher. "
"Any supplied workloads will be treated as None "
"values. It is highly recommended to create a new "
"volume type without a workload specified.")
for slo in slo_list:
slo_workload_set.add(slo)
slo_workload_set.add('None')
slo_workload_set.add('Optimized')
else:
slo_workload_set = set(
['%(slo)s:%(workload)s' % {'slo': slo,
'workload': workload}
for slo in slo_list for workload in workload_settings])
slo_workload_set.add('None:None')
slo_workload_set.add('Optimized:None')
# If the array is 5978 or greater and a VMAX AFA, add the legacy
# SL/WL combinations
if any(self.rest.get_vmax_model(array) in x for x in
utils.VMAX_AFA_MODELS):
slo_workload_set.add('Diamond:OLTP')
slo_workload_set.add('Diamond:OLTP_REP')
slo_workload_set.add('Diamond:DSS')
slo_workload_set.add('Diamond:DSS_REP')
slo_workload_set.add('Diamond:None')
if not any(self.rest.get_vmax_model(array) in x for x in
utils.VMAX_AFA_MODELS) and not \
self.rest.is_next_gen_array(array):
utils.VMAX_AFA_MODELS):
slo_workload_set.add('Optimized:None')
finalarrayinfolist = []
@@ -641,6 +655,14 @@ class VMAXCommon(object):
volume, connector, extra_specs)
masking_view_dict[utils.IS_MULTIATTACH] = is_multiattach
if self.rest.is_next_gen_array(extra_specs['array']):
masking_view_dict['workload'] = 'NONE'
temp_pool = masking_view_dict['storagegroup_name']
splitPool = temp_pool.split('+')
if len(splitPool) == 4:
splitPool[1] = 'NONE'
masking_view_dict['storagegroup_name'] = '+'.join(splitPool)
if ('hostlunid' in device_info_dict and
device_info_dict['hostlunid'] is not None):
hostlunid = device_info_dict['hostlunid']
@@ -1280,7 +1302,8 @@ class VMAXCommon(object):
protocol = self.utils.get_short_protocol_type(self.protocol)
short_host_name = self.utils.get_host_short_name(connector['host'])
masking_view_dict[utils.SLO] = extra_specs[utils.SLO]
masking_view_dict[utils.WORKLOAD] = extra_specs[utils.WORKLOAD]
masking_view_dict[utils.WORKLOAD] = 'NONE' if self.nextGen else (
extra_specs[utils.WORKLOAD])
masking_view_dict[utils.ARRAY] = extra_specs[utils.ARRAY]
masking_view_dict[utils.SRP] = extra_specs[utils.SRP]
masking_view_dict[utils.PORTGROUPNAME] = (
@@ -1482,10 +1505,12 @@ class VMAXCommon(object):
:raises: VolumeBackendAPIException:
"""
array = extra_specs[utils.ARRAY]
self.nextGen = self.rest.is_next_gen_array(array)
if self.nextGen:
extra_specs[utils.WORKLOAD] = 'NONE'
is_valid_slo, is_valid_workload = self.provision.verify_slo_workload(
array, extra_specs[utils.SLO],
extra_specs[utils.WORKLOAD], extra_specs[utils.SRP])
if not is_valid_slo or not is_valid_workload:
exception_message = (_(
"Either SLO: %(slo)s or workload %(workload)s is invalid. "
@@ -1575,14 +1600,15 @@ class VMAXCommon(object):
slo_from_extra_spec = pool_details[0]
workload_from_extra_spec = pool_details[1]
# Check if legacy pool chosen
if workload_from_extra_spec == pool_record['srpName']:
if (workload_from_extra_spec == pool_record['srpName'] or
self.nextGen):
workload_from_extra_spec = 'NONE'
elif pool_record.get('ServiceLevel'):
slo_from_extra_spec = pool_record['ServiceLevel']
workload_from_extra_spec = pool_record.get('Workload', 'None')
# If workload is None in cinder.conf, convert to string
if not workload_from_extra_spec:
if not workload_from_extra_spec or self.nextGen:
workload_from_extra_spec = 'NONE'
LOG.info("Pool_name is not present in the extra_specs "
"- using slo/ workload from cinder.conf: %(slo)s/%(wl)s.",
@@ -2796,6 +2822,8 @@ class VMAXCommon(object):
raise IndexError
if target_slo.lower() == 'none':
target_slo = None
if self.rest.is_next_gen_array(target_array_serial):
target_workload = 'NONE'
except IndexError:
LOG.error("Error parsing array, pool, SLO and workload.")
return false_ret


@@ -100,6 +100,7 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
- Fix for get-pools command (bug #1784856)
3.3.0 - Fix for initiator retrieval and short hostname unmapping
(bugs #1783855 #1783867)
- Fix for HyperMax OS Upgrade Bug (bug #1790141)
"""
VERSION = "3.3.0"


@@ -105,6 +105,7 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
- Fix for get-pools command (bug #1784856)
3.3.0 - Fix for initiator retrieval and short hostname unmapping
(bugs #1783855 #1783867)
- Fix for HyperMax OS Upgrade Bug (bug #1790141)
"""
VERSION = "3.3.0"


@@ -1395,6 +1395,8 @@ class VMAXMasking(object):
extra_specs)
rep_enabled = self.utils.is_replication_enabled(extra_specs)
rep_mode = extra_specs.get(utils.REP_MODE, None)
if self.rest.is_next_gen_array(serial_number):
extra_specs[utils.WORKLOAD] = 'NONE'
storagegroup_name = self.get_or_create_default_storage_group(
serial_number, extra_specs[utils.SRP], extra_specs[utils.SLO],
extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression,
@@ -1610,8 +1612,10 @@ class VMAXMasking(object):
sg_list = self.rest.get_storage_group_list(
serial_number, params={
'child': 'true', 'volumeId': device_id})
slo_wl_combo = self.utils.truncate_string(
extra_specs[utils.SLO] + extra_specs[utils.WORKLOAD], 10)
split_pool = extra_specs['pool_name'].split('+')
src_slo = split_pool[0]
src_wl = split_pool[1] if len(split_pool) == 4 else 'NONE'
slo_wl_combo = self.utils.truncate_string(src_slo + src_wl, 10)
for sg in sg_list.get('storageGroupId', []):
if slo_wl_combo in sg:
fast_source_sg_name = sg


@@ -502,7 +502,8 @@ class VMAXProvision(object):
if storage_group:
try:
slo = storage_group['slo']
workload = storage_group['workload']
workload = 'NONE' if self.rest.is_next_gen_array(array) else (
storage_group['workload'])
except KeyError:
pass
else:


@@ -243,10 +243,10 @@ class VMAXRest(object):
if status_code not in [STATUS_200, STATUS_201,
STATUS_202, STATUS_204]:
exception_message = (
_('Error %(operation)s. The status code received '
'is %(sc)s and the message is %(message)s.')
% {'operation': operation,
'sc': status_code, 'message': message})
_("Error %(operation)s. The status code received is %(sc)s "
"and the message is %(message)s.") % {
'operation': operation, 'sc': status_code,
'message': message})
raise exception.VolumeBackendAPIException(
data=exception_message)
@@ -265,12 +265,11 @@
rc, result, status, task = self.wait_for_job_complete(
job, extra_specs)
if rc != 0:
exception_message = (_(
"Error %(operation)s. Status code: %(sc)lu. "
"Error: %(error)s. Status: %(status)s.")
% {'operation': operation, 'sc': rc,
'error': six.text_type(result),
'status': status})
exception_message = (
_("Error %(operation)s. Status code: %(sc)lu. Error: "
"%(error)s. Status: %(status)s.") % {
'operation': operation, 'sc': rc,
'error': six.text_type(result), 'status': status})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
@@ -469,11 +468,11 @@ class VMAXRest(object):
:returns: slo_list -- list of service level names
"""
slo_list = []
slo_dict = self.get_resource(array, SLOPROVISIONING, 'slo',
version='90')
slo_dict = self.get_resource(array, SLOPROVISIONING, 'slo')
if slo_dict and slo_dict.get('sloId'):
if any(self.get_vmax_model(array) in x for x in
utils.VMAX_AFA_MODELS):
if not self.is_next_gen_array(array) and (
any(self.get_vmax_model(array) in x for x in
utils.VMAX_AFA_MODELS)):
if 'Optimized' in slo_dict.get('sloId'):
slo_dict['sloId'].remove('Optimized')
for slo in slo_dict['sloId']:
@@ -489,7 +488,9 @@
:returns: workload_setting -- list of workload names
"""
workload_setting = []
if not self.is_next_gen_array(array):
if self.is_next_gen_array(array):
workload_setting.append('None')
else:
wl_details = self.get_resource(
array, SLOPROVISIONING, 'workloadtype')
if wl_details:
@@ -643,6 +644,8 @@ class VMAXRest(object):
"emulation": "FBA"})
if slo:
if self.is_next_gen_array(array):
workload = 'NONE'
slo_param = {"num_of_vols": 0,
"sloId": slo,
"workloadSelection": workload,
@@ -875,12 +878,11 @@ class VMAXRest(object):
if sg_value is None or input_value != int(sg_value):
property_dict[sg_key] = input_value
else:
exception_message = (_(
"Invalid %(ds)s with value %(dt)s entered. "
"Valid values range from %(du)s %(dv)s to 100,000 %(dv)s") %
{'ds': input_key, 'dt': input_value, 'du': min_value,
'dv': qos_unit
})
exception_message = (
_("Invalid %(ds)s with value %(dt)s entered. Valid values "
"range from %(du)s %(dv)s to 100,000 %(dv)s") % {
'ds': input_key, 'dt': input_value, 'du': min_value,
'dv': qos_unit})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
@@ -895,12 +897,11 @@ class VMAXRest(object):
if distribution_type != sg_value:
property_dict["dynamicDistribution"] = distribution_type
else:
exception_message = (_(
"Wrong Distribution type value %(dt)s entered. "
"Please enter one of: %(dl)s") %
{'dt': qos_extra_spec.get('DistributionType'),
'dl': dynamic_list
})
exception_message = (
_("Wrong Distribution type value %(dt)s entered. Please enter "
"one of: %(dl)s") % {
'dt': qos_extra_spec.get('DistributionType'),
'dl': dynamic_list})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
@@ -936,6 +937,8 @@ class VMAXRest(object):
:param rep_mode: flag to indicate replication mode
:returns: the storage group dict (or None), the storage group name
"""
if self.is_next_gen_array(array):
workload = 'NONE'
storagegroup_name = self.utils.get_default_storage_group_name(
srp, slo, workload, do_disable_compression, is_re, rep_mode)
storagegroup = self.get_storage_group(array, storagegroup_name)
@@ -1138,8 +1141,9 @@ class VMAXRest(object):
'for %(mv)s.', {'mv': maskingview})
else:
try:
host_lun_id = (connection_info['maskingViewConnection']
[0]['host_lun_address'])
host_lun_id = (
connection_info[
'maskingViewConnection'][0]['host_lun_address'])
host_lun_id = int(host_lun_id, 16)
except Exception as e:
LOG.error("Unable to retrieve connection information "
@@ -2017,6 +2021,7 @@ class VMAXRest(object):
:param target_device: the target device id
:param extra_specs: the extra specifications
"""
def _wait_for_consistent_state():
# Called at an interval until the state of the
# rdf pair is 'consistent'.
@@ -2340,8 +2345,8 @@ class VMAXRest(object):
payload['suspend'] = {"force": "true"}
elif action.lower() == 'establish':
metro_bias = (
True if extra_specs.get(utils.METROBIAS)
and extra_specs[utils.METROBIAS] is True else False)
True if extra_specs.get(utils.METROBIAS) and extra_specs[
utils.METROBIAS] is True else False)
payload['establish'] = {"metroBias": metro_bias,
"full": 'false'}
resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s'


@@ -0,0 +1,9 @@
---
fixes:
  - PowerMax driver - Workload support was dropped in ucode 5978. If a
    VMAX All Flash array is upgraded to 5978 or greater and existing
    volume types leveraged a workload (e.g. DSS, DSS_REP, OLTP, or
    OLTP_REP), certain operations will no longer work and the volume
    type will be unusable. This fix addresses these issues and fixes
    problems with using old volume types whose workload is included in
    the volume type's pool_name.
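
To illustrate the reporting side of this fix, a hedged sketch follows (illustrative function and inputs; the real logic is VMAXCommon._get_slo_workload_combinations in the diff above) of the pool set a backend exposes for an array running uCode 5978 or higher.

def next_gen_slo_workload_pools(slo_list, is_vmax_afa):
    """Build the SLO/workload pool set for a 5978+ array.

    Workloads are deprecated, so pools are bare SLOs plus 'None' and
    'Optimized'; an upgraded VMAX All Flash array additionally keeps
    the legacy Diamond SLO/workload pairs so that pre-upgrade volume
    types remain usable.
    """
    pools = set(slo_list)
    pools.update(['None', 'Optimized'])
    if is_vmax_afa:
        # Legacy pairs retained on upgraded All Flash arrays, per the
        # common.py change in this commit.
        pools.update(['Diamond:OLTP', 'Diamond:OLTP_REP',
                      'Diamond:DSS', 'Diamond:DSS_REP', 'Diamond:None'])
    return pools

For example, next_gen_slo_workload_pools(['Diamond', 'Gold'], True) yields the bare SLOs, 'None', 'Optimized', and the five legacy Diamond pairs, which is why the tests above expect a larger pool list for an upgraded VMAX All Flash than for a PowerMax.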