Fix pending review comments for CoprHD drivers

A few review comments were still pending on the day the patch set
https://review.openstack.org/#/c/317643 was merged; this patch set
addresses all of those comments from Michal and Walter.

Change-Id: I6f6807fa12dbb495cc263cfade69bbe0b9498392
hallur, parashuram 2016-07-14 18:46:04 +05:30
parent b766225d42
commit 8d3dada903
9 changed files with 202 additions and 283 deletions
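
Most of the comments were about two recurring style points, which account for the bulk of the diff below: wrapping multi-line assignments in parentheses instead of ending them with a backslash, and using snake_case instead of camelCase for local names. A minimal, self-contained illustration of the preferred form (the stand-in function below is not part of the driver, it only makes the snippet runnable):

    def delete_consistencygroup(context, group, volumes):
        # Stand-in for the driver call used in the tests below.
        return {'status': 'deleted'}, []

    # Preferred: parenthesized continuation and snake_case names.
    model_update, volumes_model_update = (
        delete_consistencygroup(None, {'id': 'cg-1'}, []))

    # Rather than a backslash continuation:
    #   model_update, volumes_model_update = \
    #       delete_consistencygroup(None, {'id': 'cg-1'}, [])

    print(model_update, volumes_model_update)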

View File

@ -351,7 +351,6 @@ class MockedEMCCoprHDDriverCommon(coprhd_common.EMCCoprHDDriverCommon):
self.host_obj = Mock()
self.host_obj.list_by_tenant.return_value = []
self.host_obj.search_by_name.return_value = []
self.host_obj.list_all.return_value = [{'id': "host1_id",
'name': "host1"}]
self.host_obj.list_initiators.return_value = [
@ -503,8 +502,8 @@ class EMCCoprHDISCSIDriverTest(test.TestCase):
cg_data = get_test_CG_data(self.volume_type_id)
ctx = context.get_admin_context()
self.driver.create_consistencygroup(ctx, cg_data)
model_update, volumes_model_update = \
self.driver.delete_consistencygroup(ctx, cg_data, [])
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(ctx, cg_data, []))
self.assertEqual([], volumes_model_update, 'Unexpected return data')
def test_create_update_delete_CG(self):
@ -515,14 +514,14 @@ class EMCCoprHDISCSIDriverTest(test.TestCase):
volume = get_test_volume_data(self.volume_type_id)
self.driver.create_volume(volume)
model_update, ret1, ret2 = \
self.driver.update_consistencygroup(ctx, cg_data, [volume], [])
model_update, ret1, ret2 = (
self.driver.update_consistencygroup(ctx, cg_data, [volume], []))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
model_update, volumes_model_update = \
self.driver.delete_consistencygroup(ctx, cg_data, [volume])
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(ctx, cg_data, [volume]))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
self.assertEqual([{'status': 'deleted', 'id': '1'}],
@ -532,14 +531,14 @@ class EMCCoprHDISCSIDriverTest(test.TestCase):
cg_snap_data = get_test_CG_snap_data(self.volume_type_id)
ctx = context.get_admin_context()
model_update, snapshots_model_update = \
self.driver.create_cgsnapshot(ctx, cg_snap_data, [])
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(ctx, cg_snap_data, []))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
self.assertEqual([], snapshots_model_update, 'Unexpected return data')
model_update, snapshots_model_update = \
self.driver.delete_cgsnapshot(ctx, cg_snap_data, [])
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(ctx, cg_snap_data, []))
self.assertEqual({}, model_update, 'Unexpected return data')
self.assertEqual([], snapshots_model_update, 'Unexpected return data')
@ -691,8 +690,8 @@ class EMCCoprHDFCDriverTest(test.TestCase):
cg_data = get_test_CG_data(self.volume_type_id)
ctx = context.get_admin_context()
self.driver.create_consistencygroup(ctx, cg_data)
model_update, volumes_model_update = \
self.driver.delete_consistencygroup(ctx, cg_data, [])
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(ctx, cg_data, []))
self.assertEqual([], volumes_model_update, 'Unexpected return data')
def test_create_update_delete_CG(self):
@ -703,14 +702,14 @@ class EMCCoprHDFCDriverTest(test.TestCase):
volume = get_test_volume_data(self.volume_type_id)
self.driver.create_volume(volume)
model_update, ret1, ret2 = \
self.driver.update_consistencygroup(ctx, cg_data, [volume], [])
model_update, ret1, ret2 = (
self.driver.update_consistencygroup(ctx, cg_data, [volume], []))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
model_update, volumes_model_update = \
self.driver.delete_consistencygroup(ctx, cg_data, [volume])
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(ctx, cg_data, [volume]))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
self.assertEqual([{'status': 'deleted', 'id': '1'}],
@ -720,14 +719,14 @@ class EMCCoprHDFCDriverTest(test.TestCase):
cg_snap_data = get_test_CG_snap_data(self.volume_type_id)
ctx = context.get_admin_context()
model_update, snapshots_model_update = \
self.driver.create_cgsnapshot(ctx, cg_snap_data, [])
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(ctx, cg_snap_data, []))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
self.assertEqual([], snapshots_model_update, 'Unexpected return data')
model_update, snapshots_model_update = \
self.driver.delete_cgsnapshot(ctx, cg_snap_data, [])
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(ctx, cg_snap_data, []))
self.assertEqual({}, model_update, 'Unexpected return data')
self.assertEqual([], snapshots_model_update, 'Unexpected return data')
@ -749,7 +748,7 @@ class EMCCoprHDScaleIODriverTest(test.TestCase):
self.configuration.coprhd_tenant = "tenant"
self.configuration.coprhd_project = "project"
self.configuration.coprhd_varray = "varray"
self.configuration.coprhd_scaleio_rest_gateway_ip = "10.10.10.11"
self.configuration.coprhd_scaleio_rest_gateway_host = "10.10.10.11"
self.configuration.coprhd_scaleio_rest_gateway_port = 443
self.configuration.coprhd_scaleio_rest_server_username = (
"scaleio_username")
@ -878,8 +877,8 @@ class EMCCoprHDScaleIODriverTest(test.TestCase):
cg_data = get_test_CG_data(self.volume_type_id)
ctx = context.get_admin_context()
self.driver.create_consistencygroup(ctx, cg_data)
model_update, volumes_model_update = \
self.driver.delete_consistencygroup(ctx, cg_data, [])
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(ctx, cg_data, []))
self.assertEqual([], volumes_model_update, 'Unexpected return data')
def test_create_update_delete_CG(self):
@ -890,14 +889,14 @@ class EMCCoprHDScaleIODriverTest(test.TestCase):
volume = get_test_volume_data(self.volume_type_id)
self.driver.create_volume(volume)
model_update, ret1, ret2 = \
self.driver.update_consistencygroup(ctx, cg_data, [volume], [])
model_update, ret1, ret2 = (
self.driver.update_consistencygroup(ctx, cg_data, [volume], []))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
model_update, volumes_model_update = \
self.driver.delete_consistencygroup(ctx, cg_data, [volume])
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(ctx, cg_data, [volume]))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
self.assertEqual([{'status': 'deleted', 'id': '1'}],
@ -907,13 +906,13 @@ class EMCCoprHDScaleIODriverTest(test.TestCase):
cg_snap_data = get_test_CG_snap_data(self.volume_type_id)
ctx = context.get_admin_context()
model_update, snapshots_model_update = \
self.driver.create_cgsnapshot(ctx, cg_snap_data, [])
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(ctx, cg_snap_data, []))
self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE},
model_update)
self.assertEqual([], snapshots_model_update, 'Unexpected return data')
model_update, snapshots_model_update = \
self.driver.delete_cgsnapshot(ctx, cg_snap_data, [])
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(ctx, cg_snap_data, []))
self.assertEqual({}, model_update, 'Unexpected return data')
self.assertEqual([], snapshots_model_update, 'Unexpected return data')

View File

@ -150,7 +150,7 @@ class EMCCoprHDDriverCommon(object):
coprhd_utils.AUTH_TOKEN = None
# instantiate a few coprhd api objects for later use
# instantiate coprhd api objects for later use
self.volume_obj = coprhd_vol.Volume(
self.configuration.coprhd_hostname,
self.configuration.coprhd_port)
@ -233,7 +233,6 @@ class EMCCoprHDDriverCommon(object):
self.vpool = vpool['CoprHD:VPOOL']
try:
cgid = None
coprhd_cgid = None
try:
cgid = vol['consistencygroup_id']
@ -270,14 +269,14 @@ class EMCCoprHDDriverCommon(object):
self.configuration.coprhd_project,
self.configuration.coprhd_tenant)
cgUri = self.consistencygroup_obj.consistencygroup_query(
cg_uri = self.consistencygroup_obj.consistencygroup_query(
name,
self.configuration.coprhd_project,
self.configuration.coprhd_tenant)
self.set_tags_for_resource(
coprhd_cg.ConsistencyGroup.URI_CONSISTENCY_GROUP_TAGS,
cgUri, group)
cg_uri, group)
except coprhd_utils.CoprHdError as e:
coprhd_err_msg = (_("Consistency Group %(name)s:"
@ -423,17 +422,17 @@ class EMCCoprHDDriverCommon(object):
if not rslt:
continue
volUri = rslt[0]
vol_uri = rslt[0]
snapshots_of_volume = self.snapshot_obj.snapshot_list_uri(
'block',
'volumes',
volUri)
vol_uri)
for snapUri in snapshots_of_volume:
snapshot_obj = self.snapshot_obj.snapshot_show_uri(
'block',
volUri,
vol_uri,
snapUri['id'])
if not coprhd_utils.get_node_value(snapshot_obj,
@ -554,9 +553,9 @@ class EMCCoprHDDriverCommon(object):
log_err_msg)
@retry_wrapper
def set_volume_tags(self, vol, exemptTags=None, truncate_name=False):
if exemptTags is None:
exemptTags = []
def set_volume_tags(self, vol, exempt_tags=None, truncate_name=False):
if exempt_tags is None:
exempt_tags = []
self.authenticate_user()
name = self._get_resource_name(vol, truncate_name)
@ -568,19 +567,19 @@ class EMCCoprHDDriverCommon(object):
name)
self.set_tags_for_resource(
coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exemptTags)
coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exempt_tags)
@retry_wrapper
def set_tags_for_resource(self, uri, resourceId, resource,
exemptTags=None):
if exemptTags is None:
exemptTags = []
def set_tags_for_resource(self, uri, resource_id, resource,
exempt_tags=None):
if exempt_tags is None:
exempt_tags = []
self.authenticate_user()
# first, get the current tags that start with the OPENSTACK_TAG
# eyecatcher
formattedUri = uri.format(resourceId)
formattedUri = uri.format(resource_id)
remove_tags = []
currentTags = self.tag_obj.list_tags(formattedUri)
for cTag in currentTags:
@ -590,7 +589,7 @@ class EMCCoprHDDriverCommon(object):
try:
if remove_tags:
self.tag_obj.tag_resource(uri,
resourceId,
resource_id,
None,
remove_tags)
except coprhd_utils.CoprHdError as e:
@ -604,7 +603,7 @@ class EMCCoprHDDriverCommon(object):
try:
for prop, value in vars(resource).items():
try:
if prop in exemptTags:
if prop in exempt_tags:
continue
if prop.startswith("_"):
@ -627,7 +626,7 @@ class EMCCoprHDDriverCommon(object):
try:
self.tag_obj.tag_resource(
uri,
resourceId,
resource_id,
add_tags,
None)
except coprhd_utils.CoprHdError as e:
@ -653,12 +652,12 @@ class EMCCoprHDDriverCommon(object):
except KeyError as e:
pass
try:
(storageresType,
storageresTypename) = self.volume_obj.get_storageAttributes(
(storageres_type,
storageres_typename) = self.volume_obj.get_storageAttributes(
srcname, None, None)
resource_id = self.volume_obj.storage_resource_query(
storageresType,
storageres_type,
srcname,
None,
None,
@ -683,7 +682,7 @@ class EMCCoprHDDriverCommon(object):
self.volume_obj.volume_clone_detach(
"", full_project_name, name, True)
except IndexError as e:
except IndexError:
LOG.exception(_LE("Volume clone detach returned empty task list"))
except coprhd_utils.CoprHdError as e:
@ -761,12 +760,12 @@ class EMCCoprHDDriverCommon(object):
src_snapshot_name = self._get_coprhd_snapshot_name(
snapshot, coprhd_vol_info['volume_uri'])
(storageresType,
storageresTypename) = self.volume_obj.get_storageAttributes(
(storageres_type,
storageres_typename) = self.volume_obj.get_storageAttributes(
coprhd_vol_info['volume_name'], None, src_snapshot_name)
resource_id = self.volume_obj.storage_resource_query(
storageresType,
storageres_type,
coprhd_vol_info['volume_name'],
None,
src_snapshot_name,
@ -861,10 +860,10 @@ class EMCCoprHDDriverCommon(object):
volumename = self._get_coprhd_volume_name(vol)
projectname = self.configuration.coprhd_project
tenantname = self.configuration.coprhd_tenant
storageresType = 'block'
storageresTypename = 'volumes'
resourceUri = self.snapshot_obj.storage_resource_query(
storageresType,
storageres_type = 'block'
storageres_typename = 'volumes'
resource_uri = self.snapshot_obj.storage_resource_query(
storageres_type,
volume_name=volumename,
cg_name=None,
project=projectname,
@ -872,22 +871,22 @@ class EMCCoprHDDriverCommon(object):
inactive = False
sync = True
self.snapshot_obj.snapshot_create(
storageresType,
storageresTypename,
resourceUri,
storageres_type,
storageres_typename,
resource_uri,
snapshotname,
inactive,
sync)
snapshotUri = self.snapshot_obj.snapshot_query(
storageresType,
storageresTypename,
resourceUri,
snapshot_uri = self.snapshot_obj.snapshot_query(
storageres_type,
storageres_typename,
resource_uri,
snapshotname)
self.set_tags_for_resource(
coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG,
snapshotUri, snapshot, ['_volume'])
snapshot_uri, snapshot, ['_volume'])
except coprhd_utils.CoprHdError as e:
coprhd_err_msg = (_("Snapshot: %(snapshotname)s, create failed"
@ -922,27 +921,27 @@ class EMCCoprHDDriverCommon(object):
volumename = self._get_coprhd_volume_name(vol)
projectname = self.configuration.coprhd_project
tenantname = self.configuration.coprhd_tenant
storageresType = 'block'
storageresTypename = 'volumes'
resourceUri = self.snapshot_obj.storage_resource_query(
storageresType,
storageres_type = 'block'
storageres_typename = 'volumes'
resource_uri = self.snapshot_obj.storage_resource_query(
storageres_type,
volume_name=volumename,
cg_name=None,
project=projectname,
tenant=tenantname)
if resourceUri is None:
if resource_uri is None:
LOG.info(_LI(
"Snapshot %s"
" is not found; snapshot deletion"
" is considered successful."), snapshotname)
else:
snapshotname = self._get_coprhd_snapshot_name(
snapshot, resourceUri)
snapshot, resource_uri)
self.snapshot_obj.snapshot_delete(
storageresType,
storageresTypename,
resourceUri,
storageres_type,
storageres_typename,
resource_uri,
snapshotname,
sync=True)
except coprhd_utils.CoprHdError as e:
@ -954,21 +953,21 @@ class EMCCoprHDDriverCommon(object):
log_err_msg)
@retry_wrapper
def initialize_connection(self, volume, protocol, initiatorPorts,
def initialize_connection(self, volume, protocol, initiator_ports,
hostname):
try:
self.authenticate_user()
volumename = self._get_coprhd_volume_name(volume)
foundgroupname = self._find_exportgroup(initiatorPorts)
foundgroupname = self._find_exportgroup(initiator_ports)
foundhostname = None
if foundgroupname is None:
for i in range(len(initiatorPorts)):
for i in range(len(initiator_ports)):
# check if this initiator is contained in any CoprHD Host
# object
LOG.debug(
"checking for initiator port: %s", initiatorPorts[i])
foundhostname = self._find_host(initiatorPorts[i])
"checking for initiator port: %s", initiator_ports[i])
foundhostname = self._find_host(initiator_ports[i])
if foundhostname:
LOG.info(_LI("Found host %s"), foundhostname)
@ -1006,7 +1005,7 @@ class EMCCoprHDDriverCommon(object):
None,
None)
return self._find_device_info(volume, initiatorPorts)
return self._find_device_info(volume, initiator_ports)
except coprhd_utils.CoprHdError as e:
raise coprhd_utils.CoprHdError(
@ -1017,12 +1016,12 @@ class EMCCoprHDDriverCommon(object):
{'name': self._get_coprhd_volume_name(
volume),
'hostname': hostname,
'initiatorport': initiatorPorts[0],
'initiatorport': initiator_ports[0],
'err': six.text_type(e.msg)})
)
@retry_wrapper
def terminate_connection(self, volume, protocol, initiatorPorts,
def terminate_connection(self, volume, protocol, initiator_ports,
hostname):
try:
self.authenticate_user()
@ -1038,7 +1037,7 @@ class EMCCoprHDDriverCommon(object):
itls = exports['itl']
for itl in itls:
itl_port = itl['initiator']['port']
if itl_port in initiatorPorts:
if itl_port in initiator_ports:
exportgroups.add(itl['export']['id'])
for exportgroup in exportgroups:
@ -1159,11 +1158,11 @@ class EMCCoprHDDriverCommon(object):
self.configuration.coprhd_port)
if len(rslt) > 0:
rsltCg = self.consistencygroup_obj.show(
rslt_cg = self.consistencygroup_obj.show(
rslt[0],
self.configuration.coprhd_project,
self.configuration.coprhd_tenant)
return rsltCg['id']
return rslt_cg['id']
else:
raise coprhd_utils.CoprHdError(
coprhd_utils.CoprHdError.NOT_FOUND_ERR,
@ -1193,11 +1192,11 @@ class EMCCoprHDDriverCommon(object):
if rslt is None or len(rslt) == 0:
return snapshot['name']
else:
rsltSnap = self.snapshot_obj.snapshot_show_uri(
rslt_snap = self.snapshot_obj.snapshot_show_uri(
'block',
resUri,
rslt[0])
return rsltSnap['name']
return rslt_snap['name']
def _get_coprhd_volume_name(self, vol, verbose=False):
tagname = self.OPENSTACK_TAG + ":id:" + vol['id']
@ -1217,12 +1216,12 @@ class EMCCoprHDDriverCommon(object):
self.configuration.coprhd_port)
if len(rslt) > 0:
rsltVol = self.volume_obj.show_by_uri(rslt[0])
rslt_vol = self.volume_obj.show_by_uri(rslt[0])
if verbose is True:
return {'volume_name': rsltVol['name'], 'volume_uri': rslt[0]}
return {'volume_name': rslt_vol['name'], 'volume_uri': rslt[0]}
else:
return rsltVol['name']
return rslt_vol['name']
else:
raise coprhd_utils.CoprHdError(
coprhd_utils.CoprHdError.NOT_FOUND_ERR,
@ -1337,22 +1336,6 @@ class EMCCoprHDDriverCommon(object):
return foundhostname
@retry_wrapper
def _host_exists(self, host_name):
"""Check if Host object with given hostname already exists in CoprHD.
"""
hosts = self.host_obj.search_by_name(host_name)
if len(hosts) > 0:
for host in hosts:
hostname = host['match']
if host_name == hostname:
return hostname
return hostname
LOG.debug("no host found for: %s", host_name)
return None
@retry_wrapper
def get_exports_count_by_initiators(self, initiator_ports):
"""Fetches ITL map for a given list of initiator ports."""

View File

@ -23,7 +23,7 @@ from oslo_log import log as logging
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.coprhd import common as coprhd_common
from cinder.zonemanager import utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@ -31,6 +31,7 @@ LOG = logging.getLogger(__name__)
@interface.volumedriver
class EMCCoprHDFCDriver(driver.FibreChannelDriver):
"""CoprHD FC Driver."""
VERSION = "3.0.0.0"
def __init__(self, *args, **kwargs):
super(EMCCoprHDFCDriver, self).__init__(*args, **kwargs)
@ -92,8 +93,8 @@ class EMCCoprHDFCDriver(driver.FibreChannelDriver):
"""Creates a consistencygroup."""
return self.common.create_consistencygroup(context, group)
def update_consistencygroup(self, context, group, add_volumes,
remove_volumes):
def update_consistencygroup(self, context, group, add_volumes=None,
remove_volumes=None):
"""Updates volumes in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)
@ -114,7 +115,7 @@ class EMCCoprHDFCDriver(driver.FibreChannelDriver):
"""Make sure volume is exported."""
pass
@utils.AddFCZone
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info."""
@ -150,10 +151,10 @@ class EMCCoprHDFCDriver(driver.FibreChannelDriver):
LOG.debug('FC properties: %s', properties)
return {
'driver_volume_type': 'fibre_channel',
'data': properties
'data': properties,
}
@utils.RemoveFCZone
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to detach a volume from an instance."""

View File

@ -21,9 +21,9 @@ except ImportError:
import json
import re
import socket
import threading
import oslo_serialization
from oslo_utils import timeutils
from oslo_utils import units
import requests
from requests import exceptions
@ -31,20 +31,20 @@ import six
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers.urihelper import (
singletonURIHelperInstance)
from cinder.volume.drivers.coprhd.helpers import urihelper
PROD_NAME = 'storageos'
TIMEOUT_SEC = 20 # 20 SECONDS
IS_TASK_TIMEOUT = False
global AUTH_TOKEN
AUTH_TOKEN = None
TASK_TIMEOUT = 300
URI_TASKS_BY_OPID = '/vdc/tasks/{0}'
def _decode_list(data):
rv = []
@ -77,7 +77,6 @@ def _decode_dict(data):
def json_decode(rsp):
"""Used to decode the JSON encoded response."""
o = ""
try:
o = json.loads(rsp, object_hook=_decode_dict)
except ValueError:
@ -144,34 +143,34 @@ def service_json_request(ip_addr, port, http_method, uri, body,
error_msg = None
if response.status_code == 500:
responseText = json_decode(response.text)
errorDetails = ""
if 'details' in responseText:
errorDetails = responseText['details']
response_text = json_decode(response.text)
error_details = ""
if 'details' in response_text:
error_details = response_text['details']
error_msg = (_("CoprHD internal server error. Error details: %s"),
errorDetails)
error_details)
elif response.status_code == 401:
error_msg = _("Access forbidden: Authentication required")
elif response.status_code == 403:
error_msg = ""
errorDetails = ""
errorDescription = ""
error_details = ""
error_description = ""
responseText = json_decode(response.text)
response_text = json_decode(response.text)
if 'details' in responseText:
errorDetails = responseText['details']
if 'details' in response_text:
error_details = response_text['details']
error_msg = (_("%(error_msg)s Error details:"
" %(errorDetails)s"),
" %(error_details)s"),
{'error_msg': error_msg,
'errorDetails': errorDetails
'error_details': error_details
})
elif 'description' in responseText:
errorDescription = responseText['description']
elif 'description' in response_text:
error_description = response_text['description']
error_msg = (_("%(error_msg)s Error description:"
" %(errorDescription)s"),
" %(error_description)s"),
{'error_msg': error_msg,
'errorDescription': errorDescription
'error_description': error_description
})
else:
error_msg = _("Access forbidden: You don't have"
@ -184,21 +183,21 @@ def service_json_request(ip_addr, port, http_method, uri, body,
error_msg = six.text_type(response.text)
elif response.status_code == 503:
error_msg = ""
errorDetails = ""
errorDescription = ""
error_details = ""
error_description = ""
responseText = json_decode(response.text)
response_text = json_decode(response.text)
if 'code' in responseText:
errorCode = responseText['code']
error_msg = error_msg + "Error " + six.text_type(errorCode)
if 'code' in response_text:
errorCode = response_text['code']
error_msg = "Error " + six.text_type(errorCode)
if 'details' in responseText:
errorDetails = responseText['details']
error_msg = error_msg + ": " + errorDetails
elif 'description' in responseText:
errorDescription = responseText['description']
error_msg = error_msg + ": " + errorDescription
if 'details' in response_text:
error_details = response_text['details']
error_msg = error_msg + ": " + error_details
elif 'description' in response_text:
error_description = response_text['description']
error_msg = error_msg + ": " + error_description
else:
error_msg = _("Service temporarily unavailable:"
" The server is temporarily unable to"
@ -381,8 +380,8 @@ def search_by_tag(resource_search_uri, ipaddr, port):
:param port: Port number
"""
# check if the URI passed has both project and name parameters
strUri = six.text_type(resource_search_uri)
if strUri.__contains__("search") and strUri.__contains__("?tag="):
str_uri = six.text_type(resource_search_uri)
if 'search' in str_uri and '?tag=' in str_uri:
# Get the project URI
(s, h) = service_json_request(
@ -404,47 +403,41 @@ def search_by_tag(resource_search_uri, ipaddr, port):
" is not in the expected"
" format, it should end"
" with ?tag={0}")
% strUri))
# Timeout handler for synchronous operations
def timeout_handler():
global IS_TASK_TIMEOUT
IS_TASK_TIMEOUT = True
% str_uri))
# Blocks the operation until the task is complete/error out/timeout
def block_until_complete(component_type,
resource_uri,
task_id,
ipAddr,
ipaddr,
port,
synctimeout=0):
global IS_TASK_TIMEOUT
IS_TASK_TIMEOUT = False
if synctimeout:
t = threading.Timer(synctimeout, timeout_handler)
else:
if not synctimeout:
synctimeout = TASK_TIMEOUT
t = threading.Timer(synctimeout, timeout_handler)
t = timeutils.StopWatch(duration=synctimeout)
t.start()
while True:
out = get_task_by_resourceuri_and_taskId(
component_type, resource_uri, task_id, ipAddr, port)
while not t.expired():
if component_type == 'block':
out = show_task_opid(task_id, ipaddr, port)
else:
out = get_task_by_resourceuri_and_taskId(
component_type, resource_uri, task_id, ipaddr, port)
if out:
if out["state"] == "ready":
# cancel the timer and return
t.cancel()
# stop the timer and return
t.stop()
break
# if the status of the task is 'error' then cancel the timer
# if the status of the task is 'error' then stop the timer
# and raise exception
if out["state"] == "error":
# cancel the timer
t.cancel()
# stop the timer
t.stop()
error_message = "Please see logs for more details"
if ("service_error" in out and
"details" in out["service_error"]):
error_message = out["service_error"]["details"]
@ -456,24 +449,35 @@ def block_until_complete(component_type,
'error_message': error_message
}))
if IS_TASK_TIMEOUT:
IS_TASK_TIMEOUT = False
raise CoprHdError(CoprHdError.TIME_OUT,
(_("Task did not complete in %d secs."
" Operation timed out. Task in CoprHD"
" will continue") % synctimeout))
else:
raise CoprHdError(CoprHdError.TIME_OUT,
(_("Task did not complete in %d secs."
" Operation timed out. Task in CoprHD"
" will continue") % synctimeout))
return
def show_task_opid(taskid, ipaddr, port):
(s, h) = service_json_request(
ipaddr, port,
"GET",
URI_TASKS_BY_OPID.format(taskid),
None)
if (not s):
return None
o = json_decode(s)
return o
def get_task_by_resourceuri_and_taskId(component_type, resource_uri,
task_id, ipAddr, port):
task_id, ipaddr, port):
"""Returns the single task details."""
task_uri_constant = singletonURIHelperInstance.getUri(
task_uri_constant = urihelper.singletonURIHelperInstance.getUri(
component_type, "task")
(s, h) = service_json_request(
ipAddr, port, "GET",
ipaddr, port, "GET",
task_uri_constant.format(resource_uri, task_id), None)
if not s:
return None
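
In the helper module above, the threading.Timer plus global IS_TASK_TIMEOUT flag is replaced by polling against oslo_utils.timeutils.StopWatch. A condensed, self-contained sketch of that pattern (the poll callback and the error type here are stand-ins, not the CoprHD task query):

    from oslo_utils import timeutils

    TASK_TIMEOUT = 300

    def wait_for_task(poll, synctimeout=0):
        # The stop watch expires after synctimeout seconds, so no timer
        # thread or module-level timeout flag is needed.
        if not synctimeout:
            synctimeout = TASK_TIMEOUT
        watch = timeutils.StopWatch(duration=synctimeout)
        watch.start()
        while not watch.expired():
            state = poll()
            if state in ('ready', 'error'):
                watch.stop()
                return state
        raise RuntimeError("Task did not complete in %d secs."
                           " Operation timed out." % synctimeout)

    # Stand-in poller that reports 'ready' on its third call.
    calls = {'n': 0}

    def fake_poll():
        calls['n'] += 1
        return 'ready' if calls['n'] >= 3 else 'pending'

    print(wait_for_task(fake_poll, synctimeout=5))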

View File

@ -24,7 +24,6 @@ class Host(common.CoprHDResource):
URI_HOST_DETAILS = "/compute/hosts/{0}"
URI_HOST_LIST_INITIATORS = "/compute/hosts/{0}/initiators"
URI_COMPUTE_HOST = "/compute/hosts"
URI_HOSTS_SEARCH_BY_NAME = "/compute/hosts/search?name={0}"
def query_by_name(self, host_name, tenant_name=None):
"""Search host matching host_name and tenant if tenant_name provided.
@ -92,13 +91,3 @@ class Host(common.CoprHDResource):
if inactive:
return None
return o
def search_by_name(self, host_name):
"""Search host by its name."""
(s, h) = common.service_json_request(
self.ipaddr, self.port, "GET",
self.URI_HOSTS_SEARCH_BY_NAME.format(host_name), None)
o = common.json_decode(s)
if not o:
return []
return common.get_node_value(o, "resource")

View File

@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import threading
import oslo_serialization
from cinder.i18n import _
@ -43,7 +41,6 @@ class Snapshot(common.CoprHDResource):
CG = 'consistency-groups'
BLOCK = 'block'
is_timeout = False
timeout = 300
def snapshot_list_uri(self, otype, otypename, ouri):
@ -108,60 +105,6 @@ class Snapshot(common.CoprHDResource):
(_("snapshot with the name: "
"%s Not Found") % snapshot_name))
def snapshot_show_task_opid(self, otype, snap, taskid):
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_SNAPSHOT_TASKS_BY_OPID.format(taskid),
None)
if (not s):
return None
o = common.json_decode(s)
return o
# Blocks the operation until the task is complete/error out/timeout
def block_until_complete(self, storageres_type, resuri,
task_id, synctimeout=0):
if synctimeout:
t = threading.Timer(synctimeout, common.timeout_handler)
else:
synctimeout = self.timeout
t = threading.Timer(synctimeout, common.timeout_handler)
t.start()
while True:
out = self.snapshot_show_task_opid(
storageres_type, resuri, task_id)
if out:
if out["state"] == "ready":
# cancel the timer and return
t.cancel()
break
# if the status of the task is 'error' then cancel the timer
# and raise exception
if out["state"] == "error":
# cancel the timer
t.cancel()
error_message = "Please see logs for more details"
if("service_error" in out and
"details" in out["service_error"]):
error_message = out["service_error"]["details"]
raise common.CoprHdError(
common.CoprHdError.VALUE_ERR,
(_("Task: %(task_id)s is failed with error: "
"%(error_message)s") %
{'task_id': task_id,
'error_message': error_message}))
if self.is_timeout:
self.is_timeout = False
raise common.CoprHdError(common.CoprHdError.TIME_OUT,
(_("Task did not complete in %d secs."
" Operation timed out. Task in"
" CoprHD will continue") %
synctimeout))
return
def storage_resource_query(self,
storageres_type,
volume_name,
@ -248,10 +191,10 @@ class Snapshot(common.CoprHDResource):
if sync:
return (
self.block_until_complete(
common.block_until_complete(
otype,
task['resource']['id'],
task["id"], synctimeout)
task["id"], self.ipaddr, self.port, synctimeout)
)
else:
return o
@ -291,10 +234,10 @@ class Snapshot(common.CoprHDResource):
if sync:
return (
self.block_until_complete(
common.block_until_complete(
otype,
task['resource']['id'],
task["id"], synctimeout)
task["id"], self.ipaddr, self.port, synctimeout)
)
else:
return o

View File

@ -275,8 +275,6 @@ class Volume(common.CoprHDResource):
secs, an exception is thrown
:returns: Created task details in JSON response payload
"""
from cinder.volume.drivers.coprhd.helpers import snapshot
snap_obj = snapshot.Snapshot(self.ipaddr, self.port)
is_snapshot_clone = False
clone_full_uri = None
@ -311,10 +309,10 @@ class Volume(common.CoprHDResource):
if is_snapshot_clone:
return (
snap_obj.block_until_complete(
common.block_until_complete(
"block",
task["resource"]["id"],
task["id"])
task["id"], self.ipaddr, self.port)
)
else:
return self.check_for_sync(task, sync, synctimeout)

View File

@ -29,6 +29,7 @@ LOG = logging.getLogger(__name__)
@interface.volumedriver
class EMCCoprHDISCSIDriver(driver.ISCSIDriver):
"""CoprHD iSCSI Driver."""
VERSION = "3.0.0.0"
def __init__(self, *args, **kwargs):
super(EMCCoprHDISCSIDriver, self).__init__(*args, **kwargs)
@ -95,7 +96,7 @@ class EMCCoprHDISCSIDriver(driver.ISCSIDriver):
return self.common.delete_consistencygroup(context, group, volumes)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
add_volumes=None, remove_volumes=None):
"""Updates volumes in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)

View File

@ -33,9 +33,9 @@ from cinder.volume.drivers.coprhd import common as coprhd_common
LOG = logging.getLogger(__name__)
scaleio_opts = [
cfg.StrOpt('coprhd_scaleio_rest_gateway_ip',
cfg.StrOpt('coprhd_scaleio_rest_gateway_host',
default='None',
help='Rest Gateway for Scaleio'),
help='Rest Gateway IP or FQDN for Scaleio'),
cfg.PortOpt('coprhd_scaleio_rest_gateway_port',
default=4984,
help='Rest Gateway Port for Scaleio'),
@ -61,6 +61,7 @@ CONF.register_opts(scaleio_opts)
@interface.volumedriver
class EMCCoprHDScaleIODriver(driver.VolumeDriver):
"""CoprHD ScaleIO Driver."""
VERSION = "3.0.0.0"
server_token = None
def __init__(self, *args, **kwargs):
@ -141,7 +142,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
return self.common.create_consistencygroup(context, group, True)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
add_volumes=None, remove_volumes=None):
"""Updates volumes in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)
@ -172,7 +173,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
properties['scaleIO_volname'] = volname
properties['hostIP'] = connector['ip']
properties[
'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_ip
'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host
properties[
'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port
properties[
@ -185,23 +186,23 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
properties['bandwidthLimit'] = None
properties['serverToken'] = self.server_token
initiatorPorts = []
initiatorPort = self._get_client_id(properties['serverIP'],
properties['serverPort'],
properties['serverUsername'],
properties['serverPassword'],
properties['hostIP'])
initiatorPorts.append(initiatorPort)
initiator_ports = []
initiator_port = self._get_client_id(properties['serverIP'],
properties['serverPort'],
properties['serverUsername'],
properties['serverPassword'],
properties['hostIP'])
initiator_ports.append(initiator_port)
properties['serverToken'] = self.server_token
self.common.initialize_connection(volume,
'scaleio',
initiatorPorts,
initiator_ports,
connector['host'])
dictobj = {
'driver_volume_type': 'scaleio',
'data': properties
'data': properties,
}
return dictobj
@ -214,7 +215,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
properties['scaleIO_volname'] = volname
properties['hostIP'] = connector['ip']
properties[
'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_ip
'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host
properties[
'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port
properties[
@ -225,16 +226,16 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
self.configuration.coprhd_scaleio_rest_server_password)
properties['serverToken'] = self.server_token
initiatorPort = self._get_client_id(properties['serverIP'],
properties['serverPort'],
properties['serverUsername'],
properties['serverPassword'],
properties['hostIP'])
initPorts = []
initPorts.append(initiatorPort)
initiator_port = self._get_client_id(properties['serverIP'],
properties['serverPort'],
properties['serverUsername'],
properties['serverPassword'],
properties['hostIP'])
init_ports = []
init_ports.append(initiator_port)
self.common.terminate_connection(volume,
'scaleio',
initPorts,
init_ports,
connector['host'])
def get_volume_stats(self, refresh=False):
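
The ScaleIO option is renamed from coprhd_scaleio_rest_gateway_ip to coprhd_scaleio_rest_gateway_host so that a hostname or FQDN is accepted as well as an IP address. A minimal sketch of how the renamed oslo.config option is declared and read; the cinder.conf value shown in the comment is illustrative:

    from oslo_config import cfg

    scaleio_opts = [
        cfg.StrOpt('coprhd_scaleio_rest_gateway_host',
                   default='None',
                   help='Rest Gateway IP or FQDN for Scaleio'),
        cfg.PortOpt('coprhd_scaleio_rest_gateway_port',
                    default=4984,
                    help='Rest Gateway Port for Scaleio'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(scaleio_opts)

    # Deployments now set the new name in cinder.conf, for example:
    #   [DEFAULT]
    #   coprhd_scaleio_rest_gateway_host = scaleio-gw.example.com
    print(CONF.coprhd_scaleio_rest_gateway_host,
          CONF.coprhd_scaleio_rest_gateway_port)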