Merge "VMAX driver - Manage/Unmanage performance fix" into stable/ocata

This commit is contained in:
Jenkins 2017-05-24 17:08:34 +00:00 committed by Gerrit Code Review
commit 045a77b28d
3 changed files with 649 additions and 195 deletions

View File

@ -385,6 +385,22 @@ class VMAXCommonData(object):
provider_location_multi_pool = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings,
'version': '2.2.0'}
keybindings_manage = {'CreationClassName': 'Symm_StorageVolume',
'SystemName': 'SYMMETRIX+000195900551',
'DeviceID': '10',
'SystemCreationClassName': 'Symm_StorageSystem'}
provider_location_manage = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings_manage}
manage_vol = EMC_StorageVolume()
manage_vol['CreationClassName'] = 'Symm_StorageVolume'
manage_vol['ElementName'] = 'OS-Test_Manage_vol'
manage_vol['DeviceID'] = '10'
manage_vol['SystemName'] = 'SYMMETRIX+000195900551'
manage_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
manage_vol.path = manage_vol
replication_driver_data = re_keybindings
block_size = 512
majorVersion = 1
@ -401,6 +417,9 @@ class VMAXCommonData(object):
properties = {'ConsumableBlocks': '12345',
'BlockSize': '512'}
array = '000197800123'
array_v3 = '1234567891011'
test_volume = {'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
@ -3943,17 +3962,6 @@ class VMAXISCSIDriverNoFastTestCase(test.TestCase):
common._create_composite_volume.assert_called_with(
volume, "TargetBaseVol", 1234567, extraSpecs, 1)
def test_find_volume_by_device_id_on_array(self):
conn = self.fake_ecom_connection()
utils = self.driver.common.utils
volumeInstanceName = utils.find_volume_by_device_id_on_array(
conn, self.data.storage_system, self.data.test_volume['device_id'])
expectVolume = {}
expectVolume['CreationClassName'] = 'Symm_StorageVolume'
expectVolume['DeviceID'] = self.data.test_volume['device_id']
expect = conn.GetInstance(expectVolume)
self.assertEqual(expect, volumeInstanceName)
def test_get_volume_element_name(self):
volumeId = 'ea95aa39-080b-4f11-9856-a03acf9112ad'
util = self.driver.common.utils
@ -3975,10 +3983,11 @@ class VMAXISCSIDriverNoFastTestCase(test.TestCase):
def test_get_array_and_device_id_success(self):
deviceId = '0123'
arrayId = u'array1234'
arrayId = '1234567891011'
external_ref = {u'source-name': deviceId}
volume = {'volume_metadata': [{'key': 'array', 'value': arrayId}]
}
volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011'
utils = self.driver.common.utils
(arrId, devId) = utils.get_array_and_device_id(volume, external_ref)
self.assertEqual(arrayId, arrId)
@ -3986,10 +3995,11 @@ class VMAXISCSIDriverNoFastTestCase(test.TestCase):
def test_get_array_and_device_id_failed(self):
deviceId = '0123'
arrayId = u'array1234'
arrayId = '1234567891011'
external_ref = {u'no-source-name': deviceId}
volume = {'volume_metadata': [{'key': 'array', 'value': arrayId}]
}
volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011'
utils = self.driver.common.utils
self.assertRaises(exception.VolumeBackendAPIException,
utils.get_array_and_device_id,
@ -5243,73 +5253,6 @@ class VMAXFCDriverNoFastTestCase(test.TestCase):
self.driver.delete_cgsnapshot(
self.data.test_ctxt, self.data.test_CG_snapshot, [])
@ddt.data((2, 2), (1.75, 2))
@ddt.unpack
def test_manage_existing_get_size(self, gb_size, exp_size):
volume = {}
metadata = {'key': 'array',
'value': '12345'}
volume['volume_metadata'] = [metadata]
external_ref = {'source-name': '0123'}
utils = self.driver.common.utils
utils.get_volume_size = mock.Mock(
return_value=int(gb_size * units.Gi))
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
utils.find_volume_by_device_id_on_array = mock.Mock(
return_value=volumeInstanceName)
size = self.driver.manage_existing_get_size(volume, external_ref)
self.assertEqual(exp_size, size)
def test_manage_existing_no_fast_success(self):
volume = {}
metadata = {'key': 'array',
'value': '12345'}
poolInstanceName = {}
storageSystem = {}
poolInstanceName['InstanceID'] = "SATA_GOLD1"
storageSystem['InstanceID'] = "SYMMETRIX+00019870000"
volume['volume_metadata'] = [metadata]
volume['name'] = "test-volume"
volume['id'] = "test-volume"
external_ref = {'source-name': '0123'}
utils = self.driver.common.utils
gbSize = 2
utils.get_volume_size = mock.Mock(
return_value=gbSize * units.Gi)
utils.get_associated_replication_from_source_volume = mock.Mock(
return_value=None)
utils.get_assoc_pool_from_volume = mock.Mock(
return_value=(poolInstanceName))
vol = EMC_StorageVolume()
vol['CreationClassName'] = 'Symm_StorageVolume'
vol['ElementName'] = 'OS-' + volume['id']
vol['DeviceID'] = external_ref['source-name']
vol['SystemName'] = storageSystem['InstanceID']
vol['SystemCreationClassName'] = 'Symm_StorageSystem'
vol.path = vol
utils.rename_volume = mock.Mock(
return_value=vol)
common = self.driver.common
common._initial_setup = mock.Mock(
return_value={'volume_backend_name': 'FCNoFAST',
'storagetype:fastpolicy': None})
common._get_pool_and_storage_system = mock.Mock(
return_value=(poolInstanceName, storageSystem))
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
utils.find_volume_by_device_id_on_array = mock.Mock(
return_value=volumeInstanceName)
masking = self.driver.common.masking
masking.get_masking_view_from_storage_group = mock.Mock(
return_value={})
self.driver.manage_existing(volume, external_ref)
utils.rename_volume.assert_called_once_with(
common.conn, volumeInstanceName, volume['name'])
def test_unmanage_no_fast_success(self):
keybindings = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900000',
@ -6031,6 +5974,7 @@ class VMAXFCDriverFastTestCase(test.TestCase):
storageSystem['InstanceID'] = "SYMMETRIX+00019870000"
volume['volume_metadata'] = [metadata]
volume['name'] = "test-volume"
volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011'
external_ref = {'source-name': '0123'}
common = self.driver.common
common._initial_setup = mock.Mock(
@ -8833,6 +8777,215 @@ class VMAXUtilsTest(test.TestCase):
conn, storageSystem, volumeInstance, extraSpecs)
self.assertEqual(syncInstance, foundSyncInstance)
def test_get_assoc_v2_pool_from_vol(self):
conn = FakeEcomConnection()
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
pool = conn.AssociatorNames(
volumeInstanceName, ResultClass='EMC_VirtualProvisioningPool')
poolName = self.driver.utils.get_assoc_v2_pool_from_volume(
conn, volumeInstanceName)
self.assertEqual(pool[0]['ElementName'], poolName['ElementName'])
def test_get_assoc_v2_pool_from_vol_fail(self):
conn = FakeEcomConnection()
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
conn.AssociatorNames = mock.Mock(return_value={})
poolName = self.driver.utils.get_assoc_v2_pool_from_volume(
conn, volumeInstanceName)
self.assertIsNone(poolName)
def test_get_assoc_v3_pool_from_vol(self):
conn = FakeEcomConnection()
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
pool = conn.AssociatorNames(
volumeInstanceName, ResultClass='Symm_SRPStoragePool')
poolName = self.driver.utils.get_assoc_v3_pool_from_volume(
conn, volumeInstanceName)
self.assertEqual(pool[0]['ElementName'], poolName['ElementName'])
def test_get_assoc_v3_pool_from_vol_fail(self):
conn = FakeEcomConnection()
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
conn.AssociatorNames = mock.Mock(return_value={})
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.utils.get_assoc_v3_pool_from_volume,
conn, volumeInstanceName)
def test_check_volume_no_fast_fail(self):
utils = self.driver.common.utils
initial_setup = {'volume_backend_name': 'FCFAST',
'storagetype:fastpolicy': 'GOLD'}
self.assertRaises(exception.VolumeBackendAPIException,
utils.check_volume_no_fast,
initial_setup)
def test_check_volume_no_fast_pass(self):
utils = self.driver.common.utils
initial_setup = {'volume_backend_name': 'FCnoFAST',
'storagetype:fastpolicy': None}
self.assertTrue(utils.check_volume_no_fast(
initial_setup))
def test_check_volume_not_in_masking_view_pass(self):
conn = FakeEcomConnection()
utils = self.driver.common.utils
bindings = {'CreationClassName': 'Symm_StorageVolume',
'SystemName': self.data.storage_system,
'DeviceID': self.data.test_volume['device_id'],
'SystemCreationClassName': 'Symm_StorageSystem'}
inst = FakeCIMInstanceName()
fake_inst = inst.fake_getinstancename('Symm_StorageVolume', bindings)
sgInstanceNames = conn.AssociatorNames(fake_inst,
ResultClass=
'CIM_DeviceMaskingGroup')
conn.AssociatorNames = mock.Mock(return_value={})
mock.patch.object(self.driver.utils, 'get_storage_groups_from_volume',
return_value=sgInstanceNames)
self.assertTrue(
utils.check_volume_not_in_masking_view(
conn, fake_inst, self.data.test_volume['device_id']))
def test_check_volume_not_in_masking_view_fail(self):
conn = FakeEcomConnection()
utils = self.driver.common.utils
bindings = {'CreationClassName': 'Symm_StorageVolume',
'SystemName': self.data.storage_system,
'DeviceID': self.data.test_volume['device_id'],
'SystemCreationClassName': 'Symm_StorageSystem'}
inst = FakeCIMInstanceName()
fake_inst = inst.fake_getinstancename('Symm_StorageVolume', bindings)
self.assertRaises(exception.VolumeBackendAPIException,
utils.check_volume_not_in_masking_view,
conn, fake_inst, self.data.test_volume['device_id'])
def test_check_volume_not_replication_source_pass(self):
conn = FakeEcomConnection()
utils = self.driver.common.utils
self.assertTrue(
utils.check_volume_not_replication_source(
conn, self.data.storage_system_v3,
self.data.test_volume['device_id']))
def test_check_volume_not_replication_source_fail(self):
conn = FakeEcomConnection()
utils = self.driver.common.utils
replication_source = 'testReplicationSync'
utils.get_associated_replication_from_source_volume = (
mock.Mock(return_value=replication_source))
self.assertRaises(
exception.VolumeBackendAPIException,
utils.check_volume_not_replication_source,
conn, self.data.storage_system_v3,
self.data.test_volume['device_id'])
def test_check_is_volume_in_cinder_managed_pool_fail(self):
conn = FakeEcomConnection()
utils = self.driver.common.utils
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
poolInstanceName = {}
poolInstanceName['InstanceID'] = "SATA_GOLD1"
deviceId = '0123'
self.assertRaises(
exception.VolumeBackendAPIException,
utils.check_is_volume_in_cinder_managed_pool,
conn, volumeInstanceName, poolInstanceName, deviceId)
def test_check_is_volume_in_cinder_managed_pool_pass(self):
conn = FakeEcomConnection()
utils = self.driver.common.utils
volumeInstanceName = {}
poolInstanceName = {}
poolInstanceName['InstanceID'] = "SATA_GOLD2"
deviceId = self.data.test_volume['device_id']
utils.get_assoc_v2_pool_from_volume = (
mock.Mock(return_value=poolInstanceName))
self.assertTrue(
utils.check_is_volume_in_cinder_managed_pool(
conn, volumeInstanceName, poolInstanceName, deviceId))
def test_find_volume_by_device_id_on_array(self):
conn = FakeEcomConnection()
utils = self.driver.common.utils
bindings = {'CreationClassName': 'Symm_StorageVolume',
'SystemName': self.data.storage_system,
'DeviceID': self.data.test_volume['device_id'],
'SystemCreationClassName': 'Symm_StorageSystem'}
inst = FakeCIMInstanceName()
fake_inst = inst.fake_getinstancename('Symm_StorageVolume', bindings)
utils.find_volume_by_device_id_on_array = mock.Mock(
return_value=fake_inst)
volumeInstanceName = utils.find_volume_by_device_id_on_array(
self.data.storage_system, self.data.test_volume['device_id'])
expectVolume = {}
expectVolume['CreationClassName'] = 'Symm_StorageVolume'
expectVolume['DeviceID'] = self.data.test_volume['device_id']
expect = conn.GetInstance(expectVolume)
provider_location = ast.literal_eval(expect['provider_location'])
bindings = provider_location['keybindings']
self.assertEqual(bindings, volumeInstanceName)
def test_get_array_and_device_id(self):
utils = self.driver.common.utils
volume = self.data.test_volume.copy()
volume['volume_metadata'] = {'array': self.data.array_v3}
external_ref = {u'source-name': u'00002'}
array, device_id = utils.get_array_and_device_id(
volume, external_ref)
self.assertEqual(self.data.array_v3, array)
self.assertEqual('00002', device_id)
def test_get_array_and_device_id_exception(self):
utils = self.driver.common.utils
volume = self.data.test_volume.copy()
volume['volume_metadata'] = {'array': self.data.array}
external_ref = {u'source-name': None}
self.assertRaises(exception.VolumeBackendAPIException,
utils.get_array_and_device_id, volume, external_ref)
class VMAXCommonTest(test.TestCase):
def setUp(self):
@ -9619,6 +9772,152 @@ class VMAXCommonTest(test.TestCase):
exception.VolumeBackendAPIException,
common._get_port_group_from_source, deviceInfoDict)
def test_manage_existing_get_size(self):
common = self.driver.common
common.conn = FakeEcomConnection()
gb_size = 2
exp_size = 2
volume = {}
metadata = {'key': 'array',
'value': '12345'}
volume['volume_metadata'] = [metadata]
volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011'
external_ref = {'source-name': '0123'}
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume",
'DeviceID': "0123",
'SystemName': "12345"}
utils = self.driver.common.utils
utils.get_volume_size = mock.Mock(
return_value=int(gb_size * units.Gi))
utils.find_volume_by_device_id_on_array = mock.Mock(
return_value=volumeInstanceName)
size = self.driver.manage_existing_get_size(volume, external_ref)
self.assertEqual(exp_size, size)
def test_manage_existing_get_size_fail(self):
common = self.driver.common
common.conn = FakeEcomConnection()
gb_size = 2
volume = {}
metadata = {'key': 'array',
'value': '12345'}
volume['volume_metadata'] = [metadata]
volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011'
external_ref = {'source-name': '0123'}
utils = self.driver.common.utils
utils.get_volume_size = mock.Mock(
return_value=int(gb_size * units.Gi))
utils.find_volume_by_device_id_on_array = mock.Mock(
return_value=None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.common.manage_existing_get_size,
volume, external_ref)
def test_set_volume_replication_if_enabled(self):
common = self.driver.common
common.conn = FakeEcomConnection()
volume = {}
provider_location = {}
replication_status = 'replicated'
replication_driver_data = 'replication_data'
model_update = {}
model_update.update(
{'replication_status': replication_status})
model_update.update(
{'replication_driver_data': six.text_type(
replication_driver_data)})
extra_specs = self.data.extra_specs_is_re
common.setup_volume_replication = mock.Mock(
return_value=(replication_status, replication_driver_data))
new_model_update = common.set_volume_replication_if_enabled(
common.conn, extra_specs, volume, provider_location)
self.assertEqual(new_model_update, model_update)
@mock.patch.object(
common.VMAXCommon,
'set_volume_replication_if_enabled',
return_value={'replication_status': 'replicated',
'replication_driver_data': 'driver_data',
'display_name': 'vol1',
'provider_location':
VMAXCommonData.provider_location3})
@mock.patch.object(
utils.VMAXUtils,
'rename_volume',
return_value=VMAXCommonData.manage_vol)
@mock.patch.object(
utils.VMAXUtils,
'check_is_volume_in_cinder_managed_pool',
return_value=True)
@mock.patch.object(
utils.VMAXUtils,
'check_volume_not_replication_source',
return_value=True)
@mock.patch.object(
common.VMAXCommon,
'_get_pool_and_storage_system',
return_value=('cinder_pool', 'vmax_storage_system'))
@mock.patch.object(
utils.VMAXUtils,
'check_volume_not_in_masking_view',
return_value=True)
@mock.patch.object(
utils.VMAXUtils,
'find_volume_by_device_id_on_array',
return_value=VMAXCommonData.test_volume)
@mock.patch.object(
utils.VMAXUtils,
'check_volume_no_fast',
return_value=True)
@mock.patch.object(
utils.VMAXUtils,
'get_array_and_device_id',
return_value=('12345', '1'))
@mock.patch.object(
common.VMAXCommon,
'_get_ecom_connection',
return_value=FakeEcomConnection())
@mock.patch.object(
common.VMAXCommon,
'_initial_setup',
return_value=VMAXCommonData.extra_specs_is_re)
def test_manage_existing(self, mock_setup, mock_ecom, mock_ids,
mock_vol_fast, mock_vol_by_deviceId,
mock_vol_in_mv, mock_pool_sg, mock_vol_rep_src,
mock_vol_in_mng_pool, mock_rename_vol,
mock_set_vol_rep):
common = self.driver.common
volume = EMC_StorageVolume()
volume.name = 'vol1'
volume.display_name = 'vol1'
external_ref = {}
model_update = {
'replication_status': 'replicated',
'replication_driver_data': 'driver_data',
'display_name': 'vol1',
'provider_location': six.text_type(
self.data.provider_location_manage)}
new_model_update = common.manage_existing(volume,
external_ref)
self.assertEqual(model_update, new_model_update)
class VMAXProvisionTest(test.TestCase):
def setUp(self):

View File

@ -14,7 +14,6 @@
# under the License.
import ast
import math
import os.path
from oslo_config import cfg
@ -4636,84 +4635,34 @@ class VMAXCommon(object):
"""
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
arrayName, deviceId = self.utils.get_array_and_device_id(volume,
external_ref)
arrayName, deviceId = self.utils.get_array_and_device_id(
volume, external_ref)
self.utils.check_volume_no_fast(extraSpecs)
# Manage existing volume is not supported if fast enabled.
if extraSpecs[FASTPOLICY]:
LOG.warning(_LW(
"FAST is enabled. Policy: %(fastPolicyName)s."),
{'fastPolicyName': extraSpecs[FASTPOLICY]})
exceptionMessage = (_(
"Manage volume is not supported if FAST is enable. "
"FAST policy: %(fastPolicyName)s.")
% {'fastPolicyName': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Check if the volume is attached by checking if in any masking view.
volumeInstanceName = (
self.utils.find_volume_by_device_id_on_array(self.conn,
arrayName, deviceId))
sgInstanceNames = (
self.utils.get_storage_groups_from_volume(
self.conn, volumeInstanceName))
self.utils.find_volume_by_device_id_on_array(
arrayName, deviceId))
for sgInstanceName in sgInstanceNames:
mvInstanceNames = (
self.masking.get_masking_view_from_storage_group(
self.conn, sgInstanceName))
for mvInstanceName in mvInstanceNames:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"Volume is in masking view %(mv)s.")
% {'deviceId': deviceId,
'mv': mvInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self.utils.check_volume_not_in_masking_view(
self.conn, volumeInstanceName, deviceId)
# Check if there is any associated snapshots with the volume.
cinderPoolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
repSessionInstanceName = (
self.utils.get_associated_replication_from_source_volume(
self.conn, storageSystemName, deviceId))
if repSessionInstanceName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"It is the source volume of replication session %(sync)s.")
% {'deviceId': deviceId,
'sync': repSessionInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Make sure the existing external volume is in the same storage pool.
volumePoolInstanceName = (
self.utils.get_assoc_pool_from_volume(self.conn,
volumeInstanceName))
volumePoolName = volumePoolInstanceName['InstanceID']
cinderPoolName = cinderPoolInstanceName['InstanceID']
LOG.debug("Storage pool of existing volume: %(volPool)s, "
"Storage pool currently managed by cinder: %(cinderPool)s.",
{'volPool': volumePoolName,
'cinderPool': cinderPoolName})
if volumePoolName != cinderPoolName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. The external "
"volume is not in the pool managed by current cinder host.")
% {'deviceId': deviceId})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self.utils.check_volume_not_replication_source(
self.conn, storageSystemName, deviceId)
# Rename the volume
volumeId = volume['id']
self.utils.check_is_volume_in_cinder_managed_pool(
self.conn, volumeInstanceName, cinderPoolInstanceName,
deviceId)
volumeId = volume.name
volumeElementName = self.utils.get_volume_element_name(volumeId)
LOG.debug("Rename volume %(vol)s to %(elementName)s.",
LOG.debug("Rename volume %(vol)s to %(volumeId)s.",
{'vol': volumeInstanceName,
'elementName': volumeElementName})
'volumeId': volumeElementName})
volumeInstance = self.utils.rename_volume(self.conn,
volumeInstanceName,
volumeElementName)
@ -4724,25 +4673,44 @@ class VMAXCommon(object):
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
model_update = {}
provider_location = {}
provider_location['classname'] = volpath['CreationClassName']
provider_location['keybindings'] = keys
# set-up volume replication, if enabled
model_update = self.set_volume_replication_if_enabled(
self.conn, extraSpecs, volume, provider_location)
volumeDisplayName = volume.display_name
model_update.update(
{'display_name': volumeDisplayName})
model_update.update(
{'provider_location': six.text_type(provider_location)})
return model_update
def set_volume_replication_if_enabled(self, conn, extraSpecs,
volume, provider_location):
"""Set volume replication if enabled
If volume replication is enabled, set relevant
values in associated model_update dict.
:param conn: connection to the ecom server
:param extraSpecs: additional info
:param volume: the volume object
:param provider_location: volume classname & keybindings
:return: updated model_update
"""
model_update = {}
if self.utils.is_replication_enabled(extraSpecs):
replication_status, replication_driver_data = (
self.setup_volume_replication(
self.conn, volume, provider_location, extraSpecs))
conn, volume, provider_location, extraSpecs))
model_update.update(
{'replication_status': replication_status})
model_update.update(
{'replication_driver_data': six.text_type(
replication_driver_data)})
model_update.update({'display_name': volumeElementName})
model_update.update(
{'provider_location': six.text_type(provider_location)})
return model_update
def manage_existing_get_size(self, volume, external_ref):
@ -4758,15 +4726,25 @@ class VMAXCommon(object):
arrayName, deviceId = self.utils.get_array_and_device_id(volume,
external_ref)
volumeInstanceName = (
self.utils.find_volume_by_device_id_on_array(self.conn,
arrayName, deviceId))
volumeInstance = self.conn.GetInstance(volumeInstanceName)
byteSize = self.utils.get_volume_size(self.conn, volumeInstance)
gbSize = int(math.ceil(float(byteSize) / units.Gi))
self.utils.find_volume_by_device_id_on_array(arrayName, deviceId))
try:
volumeInstance = self.conn.GetInstance(volumeInstanceName)
byteSize = self.utils.get_volume_size(self.conn, volumeInstance)
fByteSize = float(byteSize)
gbSize = int(fByteSize / units.Gi)
except Exception:
exceptionMessage = (_("Volume %(deviceID)s not found.")
% {'deviceID': deviceId})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
LOG.debug(
"Size of volume %(deviceID)s is %(volumeSize)s GB.",
"Size of volume %(deviceID)s is %(volumeSize)s GB",
{'deviceID': deviceId,
'volumeSize': gbSize})
return gbSize
def unmanage(self, volume):
@ -4793,9 +4771,7 @@ class VMAXCommon(object):
raise exception.VolumeBackendAPIException(data=exceptionMessage)
# Rename the volume to volumeId, thus remove the 'OS-' prefix.
volumeInstance = self.utils.rename_volume(self.conn,
volumeInstance,
volumeId)
self.utils.rename_volume(self.conn, volumeInstance, volumeId)
def update_consistencygroup(self, group, add_volumes,
remove_volumes):

View File

@ -799,6 +799,61 @@ class VMAXUtils(object):
foundPoolInstanceName = foundPoolInstanceNames[0]
return foundPoolInstanceName
def get_assoc_v2_pool_from_volume(self, conn, volumeInstanceName):
"""Give the volume instance get the associated pool instance.
:param conn: connection to the ecom server
:param volumeInstanceName: the volume instance name
:returns: foundPoolInstanceName
"""
foundPoolInstanceName = None
foundPoolInstanceNames = (
conn.AssociatorNames(volumeInstanceName,
ResultClass='EMC_VirtualProvisioningPool'))
if not foundPoolInstanceNames:
deviceID = volumeInstanceName['DeviceID']
LOG.debug("Volume %(deviceId)s not in V2 pool",
{'deviceId': deviceID})
else:
LOG.debug("Retrieved pool: %(foundPoolInstanceNames)s",
{'foundPoolInstanceNames': foundPoolInstanceNames})
if foundPoolInstanceNames and len(foundPoolInstanceNames) > 0:
foundPoolInstanceName = foundPoolInstanceNames[0]
return foundPoolInstanceName
def get_assoc_v3_pool_from_volume(self, conn, volumeInstanceName):
"""Give the volume instance get the associated pool instance.
:param conn: connection to the ecom server
:param volumeInstanceName: the volume instance name
:returns: foundPoolInstanceName
"""
foundPoolInstanceName = None
foundPoolInstanceNames = (
conn.AssociatorNames(volumeInstanceName,
ResultClass='Symm_SRPStoragePool'))
if not foundPoolInstanceNames:
deviceID = volumeInstanceName['DeviceID']
LOG.debug("Volume %(deviceId)s not in V3 SRP",
{'deviceId': deviceID})
exceptionMessage = ("Unable to locate volume %(deviceId)s",
{'deviceId': deviceID})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
LOG.debug("Retrieved pool: %(foundPoolInstanceNames)s",
{'foundPoolInstanceNames': foundPoolInstanceNames})
if foundPoolInstanceNames and len(foundPoolInstanceNames) > 0:
foundPoolInstanceName = foundPoolInstanceNames[0]
return foundPoolInstanceName
def check_if_volume_is_extendable(self, conn, volumeInstance):
"""Checks if a volume is extendable or not.
@ -2156,32 +2211,153 @@ class VMAXUtils(object):
myList.append(kwargs)
return myList
def find_volume_by_device_id_on_array(self, conn, storageSystem, deviceID):
def find_volume_by_device_id_on_array(self, storageSystem, deviceID):
"""Find the volume by device ID on a specific array.
:param conn: connection to the ecom server
:param storageSystem: the storage system name
:param deviceID: string value of the volume device ID
:returns: foundVolumeInstanceName
"""
foundVolumeInstanceName = None
volumeInstanceNames = conn.EnumerateInstanceNames(
'CIM_StorageVolume')
for volumeInstanceName in volumeInstanceNames:
if storageSystem not in volumeInstanceName['SystemName']:
continue
if deviceID == volumeInstanceName['DeviceID']:
foundVolumeInstanceName = volumeInstanceName
LOG.debug("Found volume: %(vol)s",
{'vol': foundVolumeInstanceName})
break
if foundVolumeInstanceName is None:
exceptionMessage = (_("Volume %(deviceID)s not found.")
% {'deviceID': deviceID})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
systemName = 'SYMMETRIX-+-%s' % storageSystem
bindings = {'CreationClassName': 'Symm_StorageVolume',
'SystemName': systemName,
'DeviceID': deviceID,
'SystemCreationClassName': 'Symm_StorageSystem'}
return foundVolumeInstanceName
instanceName = pywbem.CIMInstanceName(
classname='Symm_StorageVolume',
namespace=EMC_ROOT,
keybindings=bindings)
LOG.debug("Retrieved volume from VMAX: %(instanceName)s",
{'instanceName': instanceName})
return instanceName
def check_volume_no_fast(self, extraSpecs):
"""Check if the volume's extraSpecs indicate FAST is enabled.
:param extraSpecs: dict -- extra spec dict
:return: True if not fast
:raises: VolumeBackendAPIException
"""
try:
if extraSpecs['storagetype:fastpolicy'] is not None:
LOG.warning(_LW(
"FAST is enabled. Policy: %(fastPolicyName)s."),
{'fastPolicyName': extraSpecs['storagetype:fastpolicy']})
exceptionMessage = (_(
"Manage volume is not supported if FAST is enabled. "
"FAST policy: %(fastPolicyName)s."
) % {'fastPolicyName': extraSpecs[
'storagetype:fastpolicy']})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
return True
except KeyError:
return True
def check_volume_not_in_masking_view(self, conn, volumeInstanceName,
deviceId):
"""Check if volume is in Masking View.
:param conn: connection to the ecom server
:param volumeInstanceName: the volume instance name
:param deviceId: string value of the volume device ID
:raises: VolumeBackendAPIException
:return: True if not in Masking View
"""
sgInstanceNames = (
self.get_storage_groups_from_volume(
conn, volumeInstanceName))
mvInstanceName = None
for sgInstanceName in sgInstanceNames:
maskingViews = conn.AssociatorNames(
sgInstanceName,
ResultClass='Symm_LunMaskingView')
if len(maskingViews) > 0:
mvInstanceName = maskingViews[0]
if mvInstanceName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"Volume is in masking view %(mv)s.")
% {'deviceId': deviceId, 'mv': mvInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
if not mvInstanceName:
return True
def check_volume_not_replication_source(self, conn, storageSystemName,
deviceId):
"""Check volume not replication source.
Check if the volume is the source of a replicated
volume.
:param conn: connection to the ecom server
:param storageSystemName: the storage system name
:param deviceId: string value of the volume device ID
:raises: VolumeBackendAPIException
:returns: True if not replication source
"""
repSessionInstanceName = (
self.get_associated_replication_from_source_volume(
conn, storageSystemName, deviceId))
if repSessionInstanceName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"It is the source volume of replication session %(sync)s.")
% {'deviceId': deviceId, 'sync': repSessionInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
return True
def check_is_volume_in_cinder_managed_pool(
self, conn, volumeInstanceName, cinderPoolInstanceName,
deviceId):
"""Check if volume is in a Cinder managed pool.
:param conn: connection to the ecom server
:param volumeInstanceName: the volume instance name
:param cinderPoolInstanceName: the name of the storage pool
:param deviceId: string value of the volume device ID
:raises: VolumeBackendAPIException
:returns: True if volume in cinder managed pool
"""
volumePoolInstanceName = (
self.get_assoc_v2_pool_from_volume(conn,
volumeInstanceName))
if not volumePoolInstanceName:
volumePoolInstanceName = (
self.get_assoc_v3_pool_from_volume(conn,
volumeInstanceName))
volumePoolName = volumePoolInstanceName['InstanceID']
cinderPoolName = cinderPoolInstanceName['InstanceID']
LOG.debug("Storage pool of existing volume: %(volPool)s, "
"Storage pool currently managed by cinder: %(cinderPool)s.",
{'volPool': volumePoolName,
'cinderPool': cinderPoolName})
if volumePoolName != cinderPoolName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. The external "
"volume is not in the pool managed by current cinder host.")
% {'deviceId': deviceId})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
return True
def get_volume_element_name(self, volumeId):
"""Get volume element name follows naming convention, i.e. 'OS-UUID'.
@ -2214,6 +2390,7 @@ class VMAXUtils(object):
"""
if type(volume) is pywbem.cim_obj.CIMInstance:
volumeInstance = volume
volumeInstance['ElementName'] = newName
else:
volumeInstance = conn.GetInstance(volume)
volumeInstance['ElementName'] = newName
@ -2225,30 +2402,32 @@ class VMAXUtils(object):
return volumeInstance
def get_array_and_device_id(self, volume, external_ref):
@staticmethod
def get_array_and_device_id(volume, external_ref):
"""Helper function for manage volume to get array name and device ID.
:param volume: volume object from API
:param external_ref: the existing volume object to be manged
:returns: string value of the array name and device ID
"""
deviceId = external_ref.get(u'source-name', None)
arrayName = ''
for metadata in volume['volume_metadata']:
if metadata['key'].lower() == 'array':
arrayName = metadata['value']
break
device_id = external_ref.get(u'source-name', None)
LOG.debug("External_ref: %(er)s", {'er': external_ref})
if not device_id:
device_id = external_ref.get(u'source-id', None)
host = volume['host']
host_list = host.split('+')
array = host_list[(len(host_list) - 1)]
if deviceId:
if device_id:
LOG.debug("Get device ID of existing volume - device ID: "
"%(deviceId)s, Array: %(arrayName)s.",
{'deviceId': deviceId,
'arrayName': arrayName})
"%(device_id)s, Array: %(array)s.",
{'device_id': device_id,
'array': array})
else:
exception_message = (_("Source volume device ID is required."))
raise exception.VolumeBackendAPIException(
data=exception_message)
return (arrayName, deviceId)
return array, device_id
def get_associated_replication_from_source_volume(
self, conn, storageSystem, sourceDeviceId):