author     Zuul <zuul@review.openstack.org>           2018-08-12 21:58:31 +0000
committer  Gerrit Code Review <review@openstack.org>  2018-08-12 21:58:31 +0000
commit     1579981969fc16036ee0b50e1dd1e6f588f48d51 (patch)
tree       80a9e6cef4114bd725f813aa64629b15e8cc0ec8
parent     182c578bfc4791b04ef9c0fbbcadfce21cadd075 (diff)
parent     285fbc6447a2557257c2c5a6b3a54d7c3e2235c2 (diff)
Merge "Remove the CoprHD driver"
-rw-r--r--  api-ref/source/v3/index.rst                                            2
-rw-r--r--  cinder/opts.py                                                         6
-rw-r--r--  cinder/tests/unit/volume/drivers/test_coprhd.py                      981
-rw-r--r--  cinder/volume/drivers/coprhd/__init__.py                               0
-rw-r--r--  cinder/volume/drivers/coprhd/common.py                              1512
-rw-r--r--  cinder/volume/drivers/coprhd/fc.py                                   272
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/__init__.py                       0
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/authentication.py               220
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py              523
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/consistencygroup.py             220
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/exportgroup.py                  303
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/host.py                          93
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/project.py                       88
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/snapshot.py                     257
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/tag.py                           55
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/tenant.py                       117
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/urihelper.py                     82
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/virtualarray.py                  79
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/virtualpool.py                   77
-rw-r--r--  cinder/volume/drivers/coprhd/helpers/volume.py                       517
-rw-r--r--  cinder/volume/drivers/coprhd/iscsi.py                                226
-rw-r--r--  cinder/volume/drivers/coprhd/scaleio.py                              375
-rw-r--r--  doc/source/configuration/block-storage/drivers/coprhd-driver.rst     322
-rw-r--r--  doc/source/configuration/block-storage/volume-drivers.rst              1
-rw-r--r--  doc/source/reference/support-matrix.ini                               12
-rw-r--r--  doc/source/reference/support-matrix.rst                               10
-rw-r--r--  releasenotes/notes/coprhd-remove-the-driver-00ef2c41f4c7dccd.yaml      12
27 files changed, 23 insertions(+), 6339 deletions(-)
diff --git a/api-ref/source/v3/index.rst b/api-ref/source/v3/index.rst
index 9e37d42..641f241 100644
--- a/api-ref/source/v3/index.rst
+++ b/api-ref/source/v3/index.rst
@@ -54,4 +54,4 @@ Block Storage API V3 (CURRENT)
 .. include:: worker-cleanup.inc
 
 .. valid values for boolean parameters.
-.. include:: valid-boolean-values.inc
\ No newline at end of file
+.. include:: valid-boolean-values.inc
diff --git a/cinder/opts.py b/cinder/opts.py
index 3671e51..2708a41 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -71,10 +71,6 @@ from cinder import ssh_utils as cinder_sshutils
 from cinder.transfer import api as cinder_transfer_api
 from cinder.volume import api as cinder_volume_api
 from cinder.volume import driver as cinder_volume_driver
-from cinder.volume.drivers.coprhd import common as \
-    cinder_volume_drivers_coprhd_common
-from cinder.volume.drivers.coprhd import scaleio as \
-    cinder_volume_drivers_coprhd_scaleio
 from cinder.volume.drivers.datacore import driver as \
     cinder_volume_drivers_datacore_driver
 from cinder.volume.drivers.datacore import iscsi as \
@@ -285,8 +281,6 @@ def list_opts():
             cinder_volume_driver.volume_opts,
             cinder_volume_driver.iser_opts,
             cinder_volume_driver.nvmet_opts,
-            cinder_volume_drivers_coprhd_common.volume_opts,
-            cinder_volume_drivers_coprhd_scaleio.scaleio_opts,
             cinder_volume_drivers_datera_dateraiscsi.d_opts,
             cinder_volume_drivers_dell_emc_ps.eqlx_opts,
             cinder_volume_drivers_dell_emc_sc_storagecentercommon.
diff --git a/cinder/tests/unit/volume/drivers/test_coprhd.py b/cinder/tests/unit/volume/drivers/test_coprhd.py
deleted file mode 100644
index 83df077..0000000
--- a/cinder/tests/unit/volume/drivers/test_coprhd.py
+++ /dev/null
@@ -1,981 +0,0 @@
1# Copyright (c) 2012 - 2016 EMC Corporation, Inc.
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
15
16import mock
17
18from cinder import context
19from cinder.objects import fields
20from cinder import test
21from cinder.tests.unit import fake_constants as fake
22from cinder.volume.drivers.coprhd import common as coprhd_common
23from cinder.volume.drivers.coprhd import fc as coprhd_fc
24from cinder.volume.drivers.coprhd import iscsi as coprhd_iscsi
25from cinder.volume.drivers.coprhd import scaleio as coprhd_scaleio
26from cinder.volume import volume_types
27
28"""
29Test Data required for mocking
30"""
31export_group_details_data = {
32 "inactive": False,
33 "initiators": [{"creation_time": 1392194176020,
34 "host": {"id": "urn:storageos:Host:3e21edff-8662-4e60-ab5",
35 "link": {"href": "/compute/hosts/urn:storageos:H",
36 "rel": "self"}},
37 "hostname": "lglw7134",
38 "id": "urn:storageos:Initiator:13945431-06b7-44a0-838c-50",
39 "inactive": False,
40 "initiator_node": "20:00:00:90:FA:13:81:8D",
41 "initiator_port": "iqn.1993-08.org.deb:01:222",
42 "link": {"href": "/compute/initiators/urn:storageos:Initi",
43 "rel": "self"},
44 "protocol": "iSCSI",
45 "registration_status": "REGISTERED",
46 "tags": []}],
47 "name": "ccgroup",
48 "project": 'project',
49 "tags": [],
50 "tenant": 'tenant',
51 "type": "Host",
52 "varray": {"id": "urn:storageos:VirtualArray:5af376e9-ce2f-493d-9079-a872",
53 "link": {"href": "/vdc/varrays/urn:storageos:VirtualArray:5af3",
54 "rel": "self"}
55 },
56 "volumes": [{"id": "urn:storageos:Volume:6dc64865-bb25-431c-b321-ac268f16"
57 "a7ae:vdc1",
58 "lun": 1
59 }]
60}
61
62varray_detail_data = {"name": "varray"}
63
64export_group_list = ["urn:storageos:ExportGroup:2dbce233-7da0-47cb-8ff3-68f48"]
65
66iscsi_itl_list = {"itl": [{"hlu": 3,
67 "initiator": {"id": "urn:storageos:Initiator:13945",
68 "link": {"rel": "self",
69 "href": "/comput"},
70 "port": "iqn.1993-08.org.deb:01:222"},
71 "export": {"id": "urn:storageos:ExportGroup:2dbce2",
72 "name": "ccgroup",
73 "link": {"rel": "self",
74 "href": "/block/expo"}},
75 "device": {"id": "urn:storageos:Volume:aa1fc84a-af",
76 "link": {"rel": "self",
77 "href": "/block/volumes/urn:s"},
78 "wwn": "600009700001957015735330303535"},
79 "target": {"id": "urn:storageos:StoragePort:d7e42",
80 "link": {"rel": "self",
81 "href": "/vdc/stor:"},
82 "port": "50:00:09:73:00:18:95:19",
83 'ip_address': "10.10.10.10",
84 'tcp_port': '22'}},
85 {"hlu": 3,
86 "initiator": {"id": "urn:storageos:Initiator:13945",
87 "link": {"rel": "self",
88 "href": "/comput"},
89 "port": "iqn.1993-08.org.deb:01:222"},
90 "export": {"id": "urn:storageos:ExportGroup:2dbce2",
91 "name": "ccgroup",
92 "link": {"rel": "self",
93 "href": "/block/expo"}},
94 "device": {"id": "urn:storageos:Volume:aa1fc84a-af",
95 "link": {"rel": "self",
96 "href": "/block/volumes/urn:s"},
97 "wwn": "600009700001957015735330303535"},
98 "target": {"id": "urn:storageos:StoragePort:d7e42",
99 "link": {"rel": "self",
100 "href": "/vdc/stor:"},
101 "port": "50:00:09:73:00:18:95:19",
102 'ip_address': "10.10.10.10",
103 'tcp_port': '22'}}]}
104
105fcitl_itl_list = {"itl": [{"hlu": 3,
106 "initiator": {"id": "urn:storageos:Initiator:13945",
107 "link": {"rel": "self",
108 "href": "/comput"},
109 "port": "12:34:56:78:90:12:34:56"},
110 "export": {"id": "urn:storageos:ExportGroup:2dbce2",
111 "name": "ccgroup",
112 "link": {"rel": "self",
113 "href": "/block/expo"}},
114 "device": {"id": "urn:storageos:Volume:aa1fc84a-af",
115 "link": {"rel": "self",
116 "href": "/block/volumes/urn:s"},
117 "wwn": "600009700001957015735330303535"},
118 "target": {"id": "urn:storageos:StoragePort:d7e42",
119 "link": {"rel": "self",
120 "href": "/vdc/stor:"},
121 "port": "12:34:56:78:90:12:34:56",
122 'ip_address': "10.10.10.10",
123 'tcp_port': '22'}},
124 {"hlu": 3,
125 "initiator": {"id": "urn:storageos:Initiator:13945",
126 "link": {"rel": "self",
127 "href": "/comput"},
128 "port": "12:34:56:78:90:12:34:56"},
129 "export": {"id": "urn:storageos:ExportGroup:2dbce2",
130 "name": "ccgroup",
131 "link": {"rel": "self",
132 "href": "/block/expo"}},
133 "device": {"id": "urn:storageos:Volume:aa1fc84a-af",
134 "link": {"rel": "self",
135 "href": "/block/volumes/urn:s"},
136 "wwn": "600009700001957015735330303535"},
137 "target": {"id": "urn:storageos:StoragePort:d7e42",
138 "link": {"rel": "self",
139 "href": "/vdc/stor:"},
140 "port": "12:34:56:78:90:12:34:56",
141 'ip_address': "10.10.10.10",
142 'tcp_port': '22'}}]}
143
144scaleio_itl_list = {"itl": [{"hlu": -1,
145 "initiator": {"id":
146 "urn:storageos:Initiator:920aee",
147 "link": {"rel": "self",
148 "href":
149 "/compute/initiators"},
150 "port": "bfdf432500000004"},
151 "export": {"id":
152 "urn:storageos:ExportGroup:5449235",
153 "name": "10.108.225.109",
154 "link": {"rel": "self",
155 "href":
156 "/block/exports/urn:stor"}},
157 "device": {"id":
158 "urn:storageos:Volume:b3624a83-3eb",
159 "link": {"rel": "self",
160 "href": "/block/volume"},
161 "wwn":
162 "4F48CC4C27A43248092128B400000004"},
163 "target": {}},
164 {"hlu": -1,
165 "initiator": {"id":
166 "urn:storageos:Initiator:920aee",
167 "link": {"rel": "self",
168 "href":
169 "/compute/initiators/"},
170 "port": "bfdf432500000004"},
171 "export": {"id":
172 "urn:storageos:ExportGroup:5449235",
173 "name": "10.108.225.109",
174 "link": {"rel": "self",
175 "href":
176 "/block/exports/urn:stor"}},
177 "device": {"id":
178 "urn:storageos:Volume:c014e96a-557",
179 "link": {"rel": "self",
180 "href":
181 "/block/volumes/urn:stor"},
182 "wwn":
183 "4F48CC4C27A43248092129320000000E"},
184 "target": {}}]}
185
186
187class test_volume_data(object):
188 name = 'test-vol1'
189 size = 1
190 volume_name = 'test-vol1'
191 id = fake.VOLUME_ID
192 group_id = None
193 provider_auth = None
194 project_id = fake.PROJECT_ID
195 display_name = 'test-vol1'
196 display_description = 'test volume',
197 volume_type_id = None
198 provider_id = fake.PROVIDER_ID
199
200 def __init__(self, volume_type_id):
201 self.volume_type_id = volume_type_id
202
203
204class source_test_volume_data(object):
205 name = 'source_test-vol1'
206 size = 1
207 volume_name = 'source_test-vol1'
208 id = fake.VOLUME2_ID
209 group_id = None
210 provider_auth = None
211 project_id = fake.PROJECT_ID
212 display_name = 'source_test-vol1'
213 display_description = 'test volume'
214 volume_type_id = None
215
216 def __init__(self, volume_type_id):
217 self.volume_type_id = volume_type_id
218
219
220class test_clone_volume_data(object):
221 name = 'clone-test-vol1'
222 size = 1
223 volume_name = 'clone-test-vol1'
224 id = fake.VOLUME3_ID
225 provider_auth = None
226 project_id = fake.PROJECT_ID
227 display_name = 'clone-test-vol1'
228 display_description = 'clone test volume'
229 volume_type_id = None
230
231 def __init__(self, volume_type_id):
232 self.volume_type_id = volume_type_id
233
234
235class test_snapshot_data(object):
236 name = 'snapshot1'
237 display_name = 'snapshot1'
238 size = 1
239 id = fake.SNAPSHOT_ID
240 volume_name = 'test-vol1'
241 volume_id = fake.VOLUME_ID
242 volume = None
243 volume_size = 1
244 project_id = fake.PROJECT_ID
245 status = fields.SnapshotStatus.AVAILABLE
246
247 def __init__(self, src_volume):
248 self.volume = src_volume
249
250
251def get_connector_data():
252 connector = {'ip': '10.0.0.2',
253 'initiator': 'iqn.1993-08.org.deb:01:222',
254 'wwpns': ["1234567890123456", "1234567890543211"],
255 'wwnns': ["223456789012345", "223456789054321"],
256 'host': 'fakehost'}
257 return connector
258
259
260class test_group_data(object):
261 name = 'group_name'
262 display_name = 'group_name'
263 id = fake.GROUP_ID
264 volume_type_ids = None
265 volume_types = None
266 group_type_id = None
267 status = fields.GroupStatus.AVAILABLE
268
269 def __init__(self, volume_types, group_type_id):
270 self.group_type_id = group_type_id
271 self.volume_types = volume_types
272
273
274class test_group_type_data(object):
275 name = 'group_name'
276 display_name = 'group_name'
277 groupsnapshot_id = None
278 id = fake.GROUP_TYPE_ID
279 description = 'group'
280
281
282class test_group_snap_data(object):
283 name = 'cg_snap_name'
284 display_name = 'cg_snap_name'
285 id = fake.GROUP_SNAPSHOT_ID
286 group_id = fake.GROUP_ID
287 status = fields.GroupStatus.AVAILABLE
288 snapshots = []
289 group = None
290 group_type_id = None
291
292 def __init__(self, volume_types, group_type_id):
293 self.group_type_id = group_type_id
294 self.group = test_group_data(volume_types, group_type_id)
295
296
297class MockedEMCCoprHDDriverCommon(coprhd_common.EMCCoprHDDriverCommon):
298
299 def __init__(self, protocol, default_backend_name,
300 configuration=None):
301
302 super(MockedEMCCoprHDDriverCommon, self).__init__(
303 protocol, default_backend_name, configuration)
304
305 def authenticate_user(self):
306 pass
307
308 def get_exports_count_by_initiators(self, initiator_ports):
309 return 0
310
311 def _get_coprhd_volume_name(self, vol, verbose=False):
312 if verbose is True:
313 return {'volume_name': "coprhd_vol_name",
314 'volume_uri': "coprhd_vol_uri"}
315 else:
316 return "coprhd_vol_name"
317
318 def _get_coprhd_snapshot_name(self, snapshot, resUri):
319 return "coprhd_snapshot_name"
320
321 def _get_coprhd_cgid(self, cgid):
322 return "cg_uri"
323
324 def init_volume_api(self):
325 self.volume_api = mock.Mock()
326 self.volume_api.get.return_value = {
327 'name': 'source_test-vol1',
328 'size': 1,
329 'volume_name': 'source_test-vol1',
330 'id': fake.VOLUME_ID,
331 'group_id': fake.GROUP_ID,
332 'provider_auth': None,
333 'project_id': fake.PROJECT_ID,
334 'display_name': 'source_test-vol1',
335 'display_description': 'test volume',
336 'volume_type_id': fake.VOLUME_TYPE_ID}
337
338 def init_coprhd_api_components(self):
339 self.volume_obj = mock.Mock()
340 self.volume_obj.create.return_value = "volume_created"
341 self.volume_obj.volume_query.return_value = "volume_uri"
342 self.volume_obj.get_storageAttributes.return_value = (
343 'block', 'volume_name')
344 self.volume_obj.storage_resource_query.return_value = "volume_uri"
345 self.volume_obj.is_volume_detachable.return_value = False
346 self.volume_obj.volume_clone_detach.return_value = 'detached'
347 self.volume_obj.getTags.return_value = (
348 ["Openstack-vol", "Openstack-vol1"])
349 self.volume_obj.tag.return_value = "tagged"
350 self.volume_obj.clone.return_value = "volume-cloned"
351
352 if(self.protocol == "iSCSI"):
353 self.volume_obj.get_exports_by_uri.return_value = (
354 iscsi_itl_list)
355 elif(self.protocol == "FC"):
356 self.volume_obj.get_exports_by_uri.return_value = (
357 fcitl_itl_list)
358 else:
359 self.volume_obj.get_exports_by_uri.return_value = (
360 scaleio_itl_list)
361
362 self.volume_obj.list_volumes.return_value = []
363 self.volume_obj.show.return_value = {"id": "vol_id"}
364 self.volume_obj.expand.return_value = "expanded"
365
366 self.tag_obj = mock.Mock()
367 self.tag_obj.list_tags.return_value = [
368 "Openstack-vol", "Openstack-vol1"]
369 self.tag_obj.tag_resource.return_value = "Tagged"
370
371 self.exportgroup_obj = mock.Mock()
372 self.exportgroup_obj.exportgroup_list.return_value = (
373 export_group_list)
374 self.exportgroup_obj.exportgroup_show.return_value = (
375 export_group_details_data)
376
377 self.exportgroup_obj.exportgroup_add_volumes.return_value = (
378 "volume-added")
379
380 self.host_obj = mock.Mock()
381 self.host_obj.list_by_tenant.return_value = []
382 self.host_obj.list_all.return_value = [{'id': "host1_id",
383 'name': "host1"}]
384 self.host_obj.list_initiators.return_value = [
385 {'name': "12:34:56:78:90:12:34:56"},
386 {'name': "12:34:56:78:90:54:32:11"},
387 {'name': "bfdf432500000004"}]
388
389 self.hostinitiator_obj = mock.Mock()
390 self.varray_obj = mock.Mock()
391 self.varray_obj.varray_show.return_value = varray_detail_data
392
393 self.snapshot_obj = mock.Mock()
394 mocked_snap_obj = self.snapshot_obj.return_value
395 mocked_snap_obj.storageResource_query.return_value = (
396 "resourceUri")
397 mocked_snap_obj.snapshot_create.return_value = (
398 "snapshot_created")
399 mocked_snap_obj.snapshot_query.return_value = "snapshot_uri"
400
401 self.consistencygroup_obj = mock.Mock()
402 mocked_group_object = self.consistencygroup_obj.return_value
403 mocked_group_object.create.return_value = "CG-Created"
404 mocked_group_object.consistencygroup_query.return_value = "CG-uri"
405
406
407class EMCCoprHDISCSIDriverTest(test.TestCase):
408
409 def setUp(self):
410 super(EMCCoprHDISCSIDriverTest, self).setUp()
411 self.create_coprhd_setup()
412
413 def create_coprhd_setup(self):
414
415 self.configuration = mock.Mock()
416 self.configuration.coprhd_hostname = "10.10.10.10"
417 self.configuration.coprhd_port = "4443"
418 self.configuration.volume_backend_name = "EMCCoprHDISCSIDriver"
419 self.configuration.coprhd_username = "user-name"
420 self.configuration.coprhd_password = "password"
421 self.configuration.coprhd_tenant = "tenant"
422 self.configuration.coprhd_project = "project"
423 self.configuration.coprhd_varray = "varray"
424 self.configuration.coprhd_emulate_snapshot = False
425
426 self.volume_type = self.create_coprhd_volume_type()
427 self.volume_type_id = self.volume_type.id
428 self.group_type = test_group_type_data()
429 self.group_type_id = self.group_type.id
430
431 self.mock_object(coprhd_iscsi.EMCCoprHDISCSIDriver,
432 '_get_common_driver',
433 self._get_mocked_common_driver)
434 self.driver = coprhd_iscsi.EMCCoprHDISCSIDriver(
435 configuration=self.configuration)
436
437 def tearDown(self):
438 self._cleanUp()
439 super(EMCCoprHDISCSIDriverTest, self).tearDown()
440
441 def _cleanUp(self):
442 self.delete_vipr_volume_type()
443
444 def create_coprhd_volume_type(self):
445 ctx = context.get_admin_context()
446 vipr_volume_type = volume_types.create(ctx,
447 "coprhd-volume-type",
448 {'CoprHD:VPOOL':
449 'vpool_coprhd'})
450 return vipr_volume_type
451
452 def _get_mocked_common_driver(self):
453 return MockedEMCCoprHDDriverCommon(
454 protocol="iSCSI",
455 default_backend_name="EMCViPRISCSIDriver",
456 configuration=self.configuration)
457
458 def delete_vipr_volume_type(self):
459 ctx = context.get_admin_context()
460 volume_types.destroy(ctx, self.volume_type_id)
461
462 def test_create_destroy(self):
463 volume = test_volume_data(self.volume_type_id)
464
465 self.driver.create_volume(volume)
466 self.driver.delete_volume(volume)
467
468 def test_get_volume_stats(self):
469 vol_stats = self.driver.get_volume_stats(True)
470 self.assertEqual('unknown', vol_stats['free_capacity_gb'])
471
472 def test_create_volume_clone(self):
473 src_volume_data = test_volume_data(self.volume_type_id)
474 clone_volume_data = test_clone_volume_data(self.volume_type_id)
475 self.driver.create_volume(src_volume_data)
476 self.driver.create_cloned_volume(clone_volume_data, src_volume_data)
477 self.driver.delete_volume(src_volume_data)
478 self.driver.delete_volume(clone_volume_data)
479
480 def test_create_destroy_snapshot(self):
481 volume_data = test_volume_data(self.volume_type_id)
482 snapshot_data = test_snapshot_data(
483 source_test_volume_data(self.volume_type_id))
484
485 self.driver.create_volume(volume_data)
486 self.driver.create_snapshot(snapshot_data)
487 self.driver.delete_snapshot(snapshot_data)
488 self.driver.delete_volume(volume_data)
489
490 def test_create_volume_from_snapshot(self):
491
492 src_vol_data = source_test_volume_data(self.volume_type_id)
493 self.driver.create_volume(src_vol_data)
494
495 volume_data = test_volume_data(self.volume_type_id)
496 snapshot_data = test_snapshot_data(src_vol_data)
497
498 self.driver.create_snapshot(snapshot_data)
499 self.driver.create_volume_from_snapshot(volume_data, snapshot_data)
500
501 self.driver.delete_snapshot(snapshot_data)
502 self.driver.delete_volume(src_vol_data)
503 self.driver.delete_volume(volume_data)
504
505 def test_extend_volume(self):
506 volume_data = test_volume_data(self.volume_type_id)
507 self.driver.create_volume(volume_data)
508 self.driver.extend_volume(volume_data, 2)
509 self.driver.delete_volume(volume_data)
510
511 def test_initialize_and_terminate_connection(self):
512 connector_data = get_connector_data()
513 volume_data = test_volume_data(self.volume_type_id)
514
515 self.driver.create_volume(volume_data)
516 res_initialize = self.driver.initialize_connection(
517 volume_data, connector_data)
518 expected_initialize = {'driver_volume_type': 'iscsi',
519 'data': {'target_lun': 3,
520 'target_portal': '10.10.10.10:22',
521 'target_iqn':
522 '50:00:09:73:00:18:95:19',
523 'target_discovered': False,
524 'volume_id': fake.VOLUME_ID}}
525 self.assertEqual(
526 expected_initialize, res_initialize, 'Unexpected return data')
527
528 self.driver.terminate_connection(volume_data, connector_data)
529 self.driver.delete_volume(volume_data)
530
531 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
532 def test_create_delete_empty_group(self, cg_ss_enabled):
533 cg_ss_enabled.side_effect = [True, True]
534 group_data = test_group_data([self.volume_type],
535 self.group_type_id)
536 ctx = context.get_admin_context()
537 self.driver.create_group(ctx, group_data)
538 model_update, volumes_model_update = (
539 self.driver.delete_group(ctx, group_data, []))
540 self.assertEqual([], volumes_model_update, 'Unexpected return data')
541
542 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
543 def test_create_update_delete_group(self, cg_ss_enabled):
544 cg_ss_enabled.side_effect = [True, True, True, True]
545 group_data = test_group_data([self.volume_type],
546 self.group_type_id)
547 ctx = context.get_admin_context()
548 self.driver.create_group(ctx, group_data)
549
550 volume = test_volume_data(self.volume_type_id)
551 self.driver.create_volume(volume)
552
553 model_update, ret1, ret2 = (
554 self.driver.update_group(ctx, group_data, [volume], []))
555
556 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
557 model_update)
558
559 model_update, volumes_model_update = (
560 self.driver.delete_group(ctx, group_data, [volume]))
561 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
562 model_update)
563 self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}],
564 volumes_model_update)
565
566 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
567 def test_create_delete_group_snap(self, cg_ss_enabled):
568 cg_ss_enabled.side_effect = [True, True]
569 group_snap_data = test_group_snap_data([self.volume_type],
570 self.group_type_id)
571 ctx = context.get_admin_context()
572
573 model_update, snapshots_model_update = (
574 self.driver.create_group_snapshot(ctx, group_snap_data, []))
575 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
576 model_update)
577 self.assertEqual([], snapshots_model_update, 'Unexpected return data')
578
579 model_update, snapshots_model_update = (
580 self.driver.delete_group_snapshot(ctx, group_snap_data, []))
581 self.assertEqual({}, model_update, 'Unexpected return data')
582 self.assertEqual([], snapshots_model_update, 'Unexpected return data')
583
584
585class EMCCoprHDFCDriverTest(test.TestCase):
586
587 def setUp(self):
588 super(EMCCoprHDFCDriverTest, self).setUp()
589 self.create_coprhd_setup()
590
591 def create_coprhd_setup(self):
592
593 self.configuration = mock.Mock()
594 self.configuration.coprhd_hostname = "10.10.10.10"
595 self.configuration.coprhd_port = "4443"
596 self.configuration.volume_backend_name = "EMCCoprHDFCDriver"
597 self.configuration.coprhd_username = "user-name"
598 self.configuration.coprhd_password = "password"
599 self.configuration.coprhd_tenant = "tenant"
600 self.configuration.coprhd_project = "project"
601 self.configuration.coprhd_varray = "varray"
602 self.configuration.coprhd_emulate_snapshot = False
603
604 self.volume_type = self.create_coprhd_volume_type()
605 self.volume_type_id = self.volume_type.id
606 self.group_type = test_group_type_data()
607 self.group_type_id = self.group_type.id
608
609 self.mock_object(coprhd_fc.EMCCoprHDFCDriver,
610 '_get_common_driver',
611 self._get_mocked_common_driver)
612 self.driver = coprhd_fc.EMCCoprHDFCDriver(
613 configuration=self.configuration)
614
615 def tearDown(self):
616 self._cleanUp()
617 super(EMCCoprHDFCDriverTest, self).tearDown()
618
619 def _cleanUp(self):
620 self.delete_vipr_volume_type()
621
622 def create_coprhd_volume_type(self):
623 ctx = context.get_admin_context()
624 vipr_volume_type = volume_types.create(ctx,
625 "coprhd-volume-type",
626 {'CoprHD:VPOOL': 'vpool_vipr'})
627 return vipr_volume_type
628
629 def _get_mocked_common_driver(self):
630 return MockedEMCCoprHDDriverCommon(
631 protocol="FC",
632 default_backend_name="EMCViPRFCDriver",
633 configuration=self.configuration)
634
635 def delete_vipr_volume_type(self):
636 ctx = context.get_admin_context()
637 volume_types.destroy(ctx, self.volume_type_id)
638
639 def test_create_destroy(self):
640 volume = test_volume_data(self.volume_type_id)
641
642 self.driver.create_volume(volume)
643 self.driver.delete_volume(volume)
644
645 def test_get_volume_stats(self):
646 vol_stats = self.driver.get_volume_stats(True)
647 self.assertEqual('unknown', vol_stats['free_capacity_gb'])
648
649 def test_create_volume_clone(self):
650
651 src_volume_data = test_volume_data(self.volume_type_id)
652 clone_volume_data = test_clone_volume_data(self.volume_type_id)
653 self.driver.create_volume(src_volume_data)
654 self.driver.create_cloned_volume(clone_volume_data, src_volume_data)
655 self.driver.delete_volume(src_volume_data)
656 self.driver.delete_volume(clone_volume_data)
657
658 def test_create_destroy_snapshot(self):
659
660 volume_data = test_volume_data(self.volume_type_id)
661 snapshot_data = test_snapshot_data(
662 source_test_volume_data(self.volume_type_id))
663
664 self.driver.create_volume(volume_data)
665 self.driver.create_snapshot(snapshot_data)
666 self.driver.delete_snapshot(snapshot_data)
667 self.driver.delete_volume(volume_data)
668
669 def test_create_volume_from_snapshot(self):
670 src_vol_data = source_test_volume_data(self.volume_type_id)
671 self.driver.create_volume(src_vol_data)
672
673 volume_data = test_volume_data(self.volume_type_id)
674 snapshot_data = test_snapshot_data(src_vol_data)
675
676 self.driver.create_snapshot(snapshot_data)
677 self.driver.create_volume_from_snapshot(volume_data, snapshot_data)
678
679 self.driver.delete_snapshot(snapshot_data)
680 self.driver.delete_volume(src_vol_data)
681 self.driver.delete_volume(volume_data)
682
683 def test_extend_volume(self):
684 volume_data = test_volume_data(self.volume_type_id)
685 self.driver.create_volume(volume_data)
686 self.driver.extend_volume(volume_data, 2)
687 self.driver.delete_volume(volume_data)
688
689 def test_initialize_and_terminate_connection(self):
690
691 connector_data = get_connector_data()
692 volume_data = test_volume_data(self.volume_type_id)
693
694 self.driver.create_volume(volume_data)
695 res_initiatlize = self.driver.initialize_connection(
696 volume_data, connector_data)
697 expected_initialize = {'driver_volume_type': 'fibre_channel',
698 'data': {'target_lun': 3,
699 'initiator_target_map':
700 {'1234567890543211':
701 ['1234567890123456',
702 '1234567890123456'],
703 '1234567890123456':
704 ['1234567890123456',
705 '1234567890123456']},
706 'target_wwn': ['1234567890123456',
707 '1234567890123456'],
708 'target_discovered': False,
709 'volume_id': fake.VOLUME_ID}}
710 self.assertEqual(
711 expected_initialize, res_initiatlize, 'Unexpected return data')
712
713 res_terminate = self.driver.terminate_connection(
714 volume_data, connector_data)
715 expected_terminate = {'driver_volume_type': 'fibre_channel',
716 'data': {'initiator_target_map':
717 {'1234567890543211':
718 ['1234567890123456',
719 '1234567890123456'],
720 '1234567890123456':
721 ['1234567890123456',
722 '1234567890123456']},
723 'target_wwn': ['1234567890123456',
724 '1234567890123456']}}
725 self.assertEqual(
726 expected_terminate, res_terminate, 'Unexpected return data')
727
728 self.driver.delete_volume(volume_data)
729
730 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
731 def test_create_delete_empty_group(self, cg_ss_enabled):
732 cg_ss_enabled.side_effect = [True, True]
733 group_data = test_group_data([self.volume_type],
734 self.group_type_id)
735 ctx = context.get_admin_context()
736 self.driver.create_group(ctx, group_data)
737 model_update, volumes_model_update = (
738 self.driver.delete_group(ctx, group_data, []))
739 self.assertEqual([], volumes_model_update, 'Unexpected return data')
740
741 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
742 def test_create_update_delete_group(self, cg_ss_enabled):
743 cg_ss_enabled.side_effect = [True, True, True]
744 group_data = test_group_data([self.volume_type],
745 self.group_type_id)
746 ctx = context.get_admin_context()
747 self.driver.create_group(ctx, group_data)
748
749 volume = test_volume_data(self.volume_type_id)
750 self.driver.create_volume(volume)
751
752 model_update, ret1, ret2 = (
753 self.driver.update_group(ctx, group_data, [volume], []))
754
755 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
756 model_update)
757
758 model_update, volumes_model_update = (
759 self.driver.delete_group(ctx, group_data, [volume]))
760 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
761 model_update)
762 self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}],
763 volumes_model_update)
764
765 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
766 def test_create_delete_group_snap(self, cg_ss_enabled):
767 cg_ss_enabled.side_effect = [True, True]
768 group_snap_data = test_group_snap_data([self.volume_type],
769 self.group_type_id)
770 ctx = context.get_admin_context()
771
772 model_update, snapshots_model_update = (
773 self.driver.create_group_snapshot(ctx, group_snap_data, []))
774 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
775 model_update)
776 self.assertEqual([], snapshots_model_update, 'Unexpected return data')
777
778 model_update, snapshots_model_update = (
779 self.driver.delete_group_snapshot(ctx, group_snap_data, []))
780 self.assertEqual({}, model_update, 'Unexpected return data')
781 self.assertEqual([], snapshots_model_update, 'Unexpected return data')
782
783
784class EMCCoprHDScaleIODriverTest(test.TestCase):
785
786 def setUp(self):
787 super(EMCCoprHDScaleIODriverTest, self).setUp()
788 self.create_coprhd_setup()
789
790 def create_coprhd_setup(self):
791
792 self.configuration = mock.Mock()
793 self.configuration.coprhd_hostname = "10.10.10.10"
794 self.configuration.coprhd_port = "4443"
795 self.configuration.volume_backend_name = "EMCCoprHDFCDriver"
796 self.configuration.coprhd_username = "user-name"
797 self.configuration.coprhd_password = "password"
798 self.configuration.coprhd_tenant = "tenant"
799 self.configuration.coprhd_project = "project"
800 self.configuration.coprhd_varray = "varray"
801 self.configuration.coprhd_scaleio_rest_gateway_host = "10.10.10.11"
802 self.configuration.coprhd_scaleio_rest_gateway_port = 443
803 self.configuration.coprhd_scaleio_rest_server_username = (
804 "scaleio_username")
805 self.configuration.coprhd_scaleio_rest_server_password = (
806 "scaleio_password")
807 self.configuration.scaleio_verify_server_certificate = False
808 self.configuration.scaleio_server_certificate_path = (
809 "/etc/scaleio/certs")
810
811 self.volume_type = self.create_coprhd_volume_type()
812 self.volume_type_id = self.volume_type.id
813 self.group_type = test_group_type_data()
814 self.group_type_id = self.group_type.id
815
816 self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver,
817 '_get_common_driver',
818 self._get_mocked_common_driver)
819 self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver,
820 '_get_client_id',
821 self._get_client_id)
822 self.driver = coprhd_scaleio.EMCCoprHDScaleIODriver(
823 configuration=self.configuration)
824
825 def tearDown(self):
826 self._cleanUp()
827 super(EMCCoprHDScaleIODriverTest, self).tearDown()
828
829 def _cleanUp(self):
830 self.delete_vipr_volume_type()
831
832 def create_coprhd_volume_type(self):
833 ctx = context.get_admin_context()
834 vipr_volume_type = volume_types.create(ctx,
835 "coprhd-volume-type",
836 {'CoprHD:VPOOL': 'vpool_vipr'})
837 return vipr_volume_type
838
839 def _get_mocked_common_driver(self):
840 return MockedEMCCoprHDDriverCommon(
841 protocol="scaleio",
842 default_backend_name="EMCCoprHDScaleIODriver",
843 configuration=self.configuration)
844
845 def _get_client_id(self, server_ip, server_port, server_username,
846 server_password, sdc_ip):
847 return "bfdf432500000004"
848
849 def delete_vipr_volume_type(self):
850 ctx = context.get_admin_context()
851 volume_types.destroy(ctx, self.volume_type_id)
852
853 def test_create_destroy(self):
854 volume = test_volume_data(self.volume_type_id)
855
856 self.driver.create_volume(volume)
857 self.driver.delete_volume(volume)
858
859 def test_get_volume_stats(self):
860 vol_stats = self.driver.get_volume_stats(True)
861 self.assertEqual('unknown', vol_stats['free_capacity_gb'])
862
863 def test_create_volume_clone(self):
864
865 src_volume_data = test_volume_data(self.volume_type_id)
866 clone_volume_data = test_clone_volume_data(self.volume_type_id)
867 self.driver.create_volume(src_volume_data)
868 self.driver.create_cloned_volume(clone_volume_data, src_volume_data)
869 self.driver.delete_volume(src_volume_data)
870 self.driver.delete_volume(clone_volume_data)
871
872 def test_create_destroy_snapshot(self):
873
874 volume_data = test_volume_data(self.volume_type_id)
875 snapshot_data = test_snapshot_data(
876 source_test_volume_data(self.volume_type_id))
877
878 self.driver.create_volume(volume_data)
879 self.driver.create_snapshot(snapshot_data)
880 self.driver.delete_snapshot(snapshot_data)
881 self.driver.delete_volume(volume_data)
882
883 def test_create_volume_from_snapshot(self):
884 src_vol_data = source_test_volume_data(self.volume_type_id)
885 self.driver.create_volume(src_vol_data)
886
887 volume_data = test_volume_data(self.volume_type_id)
888 snapshot_data = test_snapshot_data(src_vol_data)
889
890 self.driver.create_snapshot(snapshot_data)
891 self.driver.create_volume_from_snapshot(volume_data, snapshot_data)
892
893 self.driver.delete_snapshot(snapshot_data)
894 self.driver.delete_volume(src_vol_data)
895 self.driver.delete_volume(volume_data)
896
897 def test_extend_volume(self):
898 volume_data = test_volume_data(self.volume_type_id)
899 self.driver.create_volume(volume_data)
900 self.driver.extend_volume(volume_data, 2)
901 self.driver.delete_volume(volume_data)
902
903 def test_initialize_and_terminate_connection(self):
904
905 connector_data = get_connector_data()
906 volume_data = test_volume_data(self.volume_type_id)
907
908 self.driver.create_volume(volume_data)
909 res_initiatlize = self.driver.initialize_connection(
910 volume_data, connector_data)
911 exp_name = res_initiatlize['data']['scaleIO_volname']
912 expected_initialize = {'data': {'bandwidthLimit': None,
913 'hostIP': '10.0.0.2',
914 'iopsLimit': None,
915 'scaleIO_volname': exp_name,
916 'scaleIO_volume_id': fake.PROVIDER_ID,
917 'serverIP': '10.10.10.11',
918 'serverPassword': 'scaleio_password',
919 'serverPort': 443,
920 'serverToken': None,
921 'serverUsername': 'scaleio_username'},
922 'driver_volume_type': 'scaleio'}
923 self.assertEqual(
924 expected_initialize, res_initiatlize, 'Unexpected return data')
925
926 self.driver.terminate_connection(
927 volume_data, connector_data)
928 self.driver.delete_volume(volume_data)
929
930 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
931 def test_create_delete_empty_group(self, cg_ss_enabled):
932 cg_ss_enabled.side_effect = [True, True]
933 group_data = test_group_data([self.volume_type],
934 self.group_type_id)
935 ctx = context.get_admin_context()
936 self.driver.create_group(ctx, group_data)
937 model_update, volumes_model_update = (
938 self.driver.delete_group(ctx, group_data, []))
939 self.assertEqual([], volumes_model_update, 'Unexpected return data')
940
941 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
942 def test_create_update_delete_group(self, cg_ss_enabled):
943 cg_ss_enabled.side_effect = [True, True, True, True]
944 group_data = test_group_data([self.volume_type],
945 self.group_type_id)
946 ctx = context.get_admin_context()
947 self.driver.create_group(ctx, group_data)
948
949 volume = test_volume_data(self.volume_type_id)
950 self.driver.create_volume(volume)
951
952 model_update, ret1, ret2 = (
953 self.driver.update_group(ctx, group_data, [volume], []))
954
955 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
956 model_update)
957
958 model_update, volumes_model_update = (
959 self.driver.delete_group(ctx, group_data, [volume]))
960 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
961 model_update)
962 self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}],
963 volumes_model_update)
964
965 @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
966 def test_create_delete_group_snap(self, cg_ss_enabled):
967 cg_ss_enabled.side_effect = [True, True]
968 group_snap_data = test_group_snap_data([self.volume_type],
969 self.group_type_id)
970 ctx = context.get_admin_context()
971
972 model_update, snapshots_model_update = (
973 self.driver.create_group_snapshot(ctx, group_snap_data, []))
974 self.assertEqual({'status': fields.GroupStatus.AVAILABLE},
975 model_update)
976 self.assertEqual([], snapshots_model_update, 'Unexpected return data')
977
978 model_update, snapshots_model_update = (
979 self.driver.delete_group_snapshot(ctx, group_snap_data, []))
980 self.assertEqual({}, model_update, 'Unexpected return data')
981 self.assertEqual([], snapshots_model_update, 'Unexpected return data')
diff --git a/cinder/volume/drivers/coprhd/__init__.py b/cinder/volume/drivers/coprhd/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/cinder/volume/drivers/coprhd/__init__.py
+++ /dev/null
diff --git a/cinder/volume/drivers/coprhd/common.py b/cinder/volume/drivers/coprhd/common.py
deleted file mode 100644
index 1c80727..0000000
--- a/cinder/volume/drivers/coprhd/common.py
+++ /dev/null
@@ -1,1512 +0,0 @@
1# Copyright (c) 2016 EMC Corporation
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
15
16import base64
17import binascii
18import random
19import string
20
21import eventlet
22from oslo_config import cfg
23from oslo_log import log as logging
24from oslo_utils import encodeutils
25from oslo_utils import excutils
26from oslo_utils import units
27import six
28
29from cinder import context
30from cinder import exception
31from cinder.i18n import _
32from cinder.objects import fields
33from cinder.volume import configuration
34from cinder.volume.drivers.coprhd.helpers import (
35 authentication as coprhd_auth)
36from cinder.volume.drivers.coprhd.helpers import (
37 commoncoprhdapi as coprhd_utils)
38from cinder.volume.drivers.coprhd.helpers import (
39 consistencygroup as coprhd_cg)
40from cinder.volume.drivers.coprhd.helpers import exportgroup as coprhd_eg
41from cinder.volume.drivers.coprhd.helpers import host as coprhd_host
42from cinder.volume.drivers.coprhd.helpers import snapshot as coprhd_snap
43from cinder.volume.drivers.coprhd.helpers import tag as coprhd_tag
44
45from cinder.volume.drivers.coprhd.helpers import (
46 virtualarray as coprhd_varray)
47from cinder.volume.drivers.coprhd.helpers import volume as coprhd_vol
48from cinder.volume import utils as volume_utils
49from cinder.volume import volume_types
50
51LOG = logging.getLogger(__name__)
52
53MAX_RETRIES = 10
54INTERVAL_10_SEC = 10
55
56volume_opts = [
57 cfg.StrOpt('coprhd_hostname',
58 default=None,
59 help='Hostname for the CoprHD Instance'),
60 cfg.PortOpt('coprhd_port',
61 default=4443,
62 help='Port for the CoprHD Instance'),
63 cfg.StrOpt('coprhd_username',
64 default=None,
65 help='Username for accessing the CoprHD Instance'),
66 cfg.StrOpt('coprhd_password',
67 default=None,
68 help='Password for accessing the CoprHD Instance',
69 secret=True),
70 cfg.StrOpt('coprhd_tenant',
71 default=None,
72 help='Tenant to utilize within the CoprHD Instance'),
73 cfg.StrOpt('coprhd_project',
74 default=None,
75 help='Project to utilize within the CoprHD Instance'),
76 cfg.StrOpt('coprhd_varray',
77 default=None,
78 help='Virtual Array to utilize within the CoprHD Instance'),
79 cfg.BoolOpt('coprhd_emulate_snapshot',
80 default=False,
81 help='True | False to indicate if the storage array '
82 'in CoprHD is VMAX or VPLEX')
83]
84
85CONF = cfg.CONF
86CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
87
88URI_VPOOL_VARRAY_CAPACITY = '/block/vpools/{0}/varrays/{1}/capacity'
89URI_BLOCK_EXPORTS_FOR_INITIATORS = '/block/exports?initiators={0}'
90EXPORT_RETRY_COUNT = 5
91MAX_DEFAULT_NAME_LENGTH = 128
92MAX_SNAPSHOT_NAME_LENGTH = 63
93MAX_CONSISTENCY_GROUP_NAME_LENGTH = 64
94MAX_SIO_LEN = 31
95
96
97def retry_wrapper(func):
98 def try_and_retry(*args, **kwargs):
99 retry = False
100 try:
101 return func(*args, **kwargs)
102 except coprhd_utils.CoprHdError as e:
103 # if we got an http error and
104 # the string contains 401 or if the string contains the word cookie
105 if (e.err_code == coprhd_utils.CoprHdError.HTTP_ERR and
106 (e.msg.find('401') != -1 or
107 e.msg.lower().find('cookie') != -1)):
108 retry = True
109 args[0].AUTHENTICATED = False
110 else:
111 exception_message = (_("\nCoprHD Exception: %(msg)s\n") %
112 {'msg': e.msg})
113 LOG.exception(exception_message)
114 raise exception.VolumeBackendAPIException(
115 data=exception_message)
116 except Exception as exc:
117 exception_message = (_("\nGeneral Exception: %(exec_info)s\n") %
118 {'exec_info':
119 encodeutils.exception_to_unicode(exc)})
120 LOG.exception(exception_message)
121 raise exception.VolumeBackendAPIException(
122 data=exception_message)
123
124 if retry:
125 return func(*args, **kwargs)
126
127 return try_and_retry
128
129
130class EMCCoprHDDriverCommon(object):
131
132 OPENSTACK_TAG = 'OpenStack'
133
134 def __init__(self, protocol, default_backend_name, configuration=None):
135 self.AUTHENTICATED = False
136 self.protocol = protocol
137 self.configuration = configuration
138 self.configuration.append_config_values(volume_opts)
139
140 self.init_coprhd_api_components()
141
142 self.stats = {'driver_version': '3.0.0.0',
143 'free_capacity_gb': 'unknown',
144 'reserved_percentage': '0',
145 'storage_protocol': protocol,
146 'total_capacity_gb': 'unknown',
147 'vendor_name': 'CoprHD',
148 'volume_backend_name':
149 self.configuration.volume_backend_name or
150 default_backend_name}
151
152 def init_coprhd_api_components(self):
153
154 coprhd_utils.AUTH_TOKEN = None
155
156 # instantiate coprhd api objects for later use
157 self.volume_obj = coprhd_vol.Volume(
158 self.configuration.coprhd_hostname,
159 self.configuration.coprhd_port)
160
161 self.exportgroup_obj = coprhd_eg.ExportGroup(
162 self.configuration.coprhd_hostname,
163 self.configuration.coprhd_port)
164
165 self.host_obj = coprhd_host.Host(
166 self.configuration.coprhd_hostname,
167 self.configuration.coprhd_port)
168
169 self.varray_obj = coprhd_varray.VirtualArray(
170 self.configuration.coprhd_hostname,
171 self.configuration.coprhd_port)
172
173 self.snapshot_obj = coprhd_snap.Snapshot(
174 self.configuration.coprhd_hostname,
175 self.configuration.coprhd_port)
176
177 self.consistencygroup_obj = coprhd_cg.ConsistencyGroup(
178 self.configuration.coprhd_hostname,
179 self.configuration.coprhd_port)
180
181 self.tag_obj = coprhd_tag.Tag(
182 self.configuration.coprhd_hostname,
183 self.configuration.coprhd_port)
184
185 def check_for_setup_error(self):
186 # validate all of the coprhd_* configuration values
187 if self.configuration.coprhd_hostname is None:
188 message = _("coprhd_hostname is not set in cinder configuration")
189 raise exception.VolumeBackendAPIException(data=message)
190
191 if self.configuration.coprhd_port is None:
192 message = _("coprhd_port is not set in cinder configuration")
193 raise exception.VolumeBackendAPIException(data=message)
194
195 if self.configuration.coprhd_username is None:
196 message = _("coprhd_username is not set in cinder configuration")
197 raise exception.VolumeBackendAPIException(data=message)
198
199 if self.configuration.coprhd_password is None:
200 message = _("coprhd_password is not set in cinder configuration")
201 raise exception.VolumeBackendAPIException(data=message)
202
203 if self.configuration.coprhd_tenant is None:
204 message = _("coprhd_tenant is not set in cinder configuration")
205 raise exception.VolumeBackendAPIException(data=message)
206
207 if self.configuration.coprhd_project is None:
208 message = _("coprhd_project is not set in cinder configuration")
209 raise exception.VolumeBackendAPIException(data=message)
210
211 if self.configuration.coprhd_varray is None:
212 message = _("coprhd_varray is not set in cinder configuration")
213 raise exception.VolumeBackendAPIException(data=message)
214
215 def authenticate_user(self):
216 # we should check to see if we are already authenticated before blindly
217 # doing it again
218 if self.AUTHENTICATED is False:
219 obj = coprhd_auth.Authentication(
220 self.configuration.coprhd_hostname,
221 self.configuration.coprhd_port)
222
223 username = self.configuration.coprhd_username
224 password = self.configuration.coprhd_password
225
226 coprhd_utils.AUTH_TOKEN = obj.authenticate_user(username,
227 password)
228 self.AUTHENTICATED = True
229
230 def create_volume(self, vol, driver, truncate_name=False):
231 self.authenticate_user()
232 name = self._get_resource_name(vol, MAX_DEFAULT_NAME_LENGTH,
233 truncate_name)
234 size = int(vol.size) * units.Gi
235
236 vpool = self._get_vpool(vol)
237 self.vpool = vpool['CoprHD:VPOOL']
238
239 try:
240 coprhd_cgid = None
241 try:
242 if vol.group_id:
243 if volume_utils.is_group_a_cg_snapshot_type(vol.group):
244 coprhd_cgid = self._get_coprhd_cgid(vol.group_id)
245 except KeyError:
246 coprhd_cgid = None
247 except AttributeError:
248 coprhd_cgid = None
249
250 full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant,
251 self.configuration.coprhd_project)
252 )
253 self.volume_obj.create(full_project_name, name, size,
254 self.configuration.coprhd_varray,
255 self.vpool,
256 # no longer specified in volume creation
257 sync=True,
258 # no longer specified in volume creation
259 consistencygroup=coprhd_cgid)
260
261 except coprhd_utils.CoprHdError as e:
262 coprhd_err_msg = (_("Volume %(name)s: create failed\n%(err)s") %
263 {'name': name, 'err': six.text_type(e.msg)})
264
265 log_err_msg = ("Volume : %s creation failed" % name)
266 self._raise_or_log_exception(
267 e.err_code, coprhd_err_msg, log_err_msg)
268
269 @retry_wrapper
270 def create_consistencygroup(self, context, group, truncate_name=False):
271 self.authenticate_user()
272 name = self._get_resource_name(group,
273 MAX_CONSISTENCY_GROUP_NAME_LENGTH,
274 truncate_name)
275
276 try:
277 self.consistencygroup_obj.create(
278 name,
279 self.configuration.coprhd_project,
280 self.configuration.coprhd_tenant)
281
282 cg_uri = self.consistencygroup_obj.consistencygroup_query(
283 name,
284 self.configuration.coprhd_project,
285 self.configuration.coprhd_tenant)
286
287 self.set_tags_for_resource(
288 coprhd_cg.ConsistencyGroup.URI_CONSISTENCY_GROUP_TAGS,
289 cg_uri, group)
290
291 except coprhd_utils.CoprHdError as e:
292 coprhd_err_msg = (_("Consistency Group %(name)s:"
293 " create failed\n%(err)s") %
294 {'name': name, 'err': six.text_type(e.msg)})
295
296 log_err_msg = ("Consistency Group : %s creation failed" %
297 name)
298 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
299 log_err_msg)
300
301 @retry_wrapper
302 def update_consistencygroup(self, group, add_volumes,
303 remove_volumes):
304 self.authenticate_user()
305 model_update = {'status': fields.GroupStatus.AVAILABLE}
306 cg_uri = self._get_coprhd_cgid(group.id)
307 add_volnames = []
308 remove_volnames = []
309
310 try:
311 if add_volumes:
312 for vol in add_volumes:
313 vol_name = self._get_coprhd_volume_name(vol)
314 add_volnames.append(vol_name)
315
316 if remove_volumes:
317 for vol in remove_volumes:
318 vol_name = self._get_coprhd_volume_name(vol)
319 remove_volnames.append(vol_name)
320
321 self.consistencygroup_obj.update(
322 cg_uri,
323 self.configuration.coprhd_project,
324 self.configuration.coprhd_tenant,
325 add_volnames, remove_volnames, True)
326
327 return model_update, None, None
328
329 except coprhd_utils.CoprHdError as e:
330 coprhd_err_msg = (_("Consistency Group %(cg_uri)s:"
331 " update failed\n%(err)s") %
332 {'cg_uri': cg_uri, 'err': six.text_type(e.msg)})
333
334 log_err_msg = ("Consistency Group : %s update failed" %
335 cg_uri)
336 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
337 log_err_msg)
338
339 @retry_wrapper
340 def delete_consistencygroup(self, context, group, volumes,
341 truncate_name=False):
342 self.authenticate_user()
343 name = self._get_resource_name(group,
344 MAX_CONSISTENCY_GROUP_NAME_LENGTH,
345 truncate_name)
346 volumes_model_update = []
347
348 try:
349 for vol in volumes:
350 try:
351 vol_name = self._get_coprhd_volume_name(vol)
352 full_project_name = "%s/%s" % (
353 self.configuration.coprhd_tenant,
354 self.configuration.coprhd_project)
355
356 self.volume_obj.delete(full_project_name, vol_name,
357 sync=True,
358 force_delete=True)
359
360 update_item = {'id': vol.id,
361 'status':
362 fields.GroupStatus.DELETED}
363 volumes_model_update.append(update_item)
364
365 except exception.VolumeBackendAPIException:
366 update_item = {'id': vol.id,
367 'status': fields.ConsistencyGroupStatus.
368 ERROR_DELETING}
369
370 volumes_model_update.append(update_item)
371
372 LOG.exception("Failed to delete the volume %s of CG.",
373 vol.name)
374
375 self.consistencygroup_obj.delete(
376 name,
377 self.configuration.coprhd_project,
378 self.configuration.coprhd_tenant)
379
380 model_update = {}
381 model_update['status'] = group.status
382
383 return model_update, volumes_model_update
384
385 except coprhd_utils.CoprHdError as e:
386 coprhd_err_msg = (_("Consistency Group %(name)s:"
387 " delete failed\n%(err)s") %
388 {'name': name, 'err': six.text_type(e.msg)})
389
390 log_err_msg = ("Consistency Group : %s deletion failed" %
391 name)
392 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
393 log_err_msg)
394
395 @retry_wrapper
396 def create_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False):
397 self.authenticate_user()
398
399 snapshots_model_update = []
400 cgsnapshot_name = self._get_resource_name(cgsnapshot,
401 MAX_SNAPSHOT_NAME_LENGTH,
402 truncate_name)
403
404 cg_id = None
405 cg_group = None
406
407 try:
408 cg_id = cgsnapshot.group_id
409 cg_group = cgsnapshot.group
410 except AttributeError:
411 pass
412
413 cg_name = None
414 coprhd_cgid = None
415
416 if cg_id:
417 coprhd_cgid = self._get_coprhd_cgid(cg_id)
418 cg_name = self._get_consistencygroup_name(cg_group)
419
420 LOG.info('Start to create cgsnapshot for consistency group'
421 ': %(group_name)s',
422 {'group_name': cg_name})
423
424 try:
425 self.snapshot_obj.snapshot_create(
426 'block',
427 'consistency-groups',
428 coprhd_cgid,
429 cgsnapshot_name,
430 False,
431 True)
432
433 for snapshot in snapshots:
434 vol_id_of_snap = snapshot.volume_id
435
436 # Finding the volume in CoprHD for this volume id
437 tagname = "OpenStack:id:" + vol_id_of_snap
438 rslt = coprhd_utils.search_by_tag(
439 coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(
440 tagname),
441 self.configuration.coprhd_hostname,
442 self.configuration.coprhd_port)
443
444 if not rslt:
445 continue
446
447 vol_uri = rslt[0]
448
449 snapshots_of_volume = self.snapshot_obj.snapshot_list_uri(
450 'block',
451 'volumes',
452 vol_uri)
453
454 for snapUri in snapshots_of_volume:
455 snapshot_obj = self.snapshot_obj.snapshot_show_uri(
456 'block',
457 vol_uri,
458 snapUri['id'])
459
460 if not coprhd_utils.get_node_value(snapshot_obj,
461 'inactive'):
462
463 # Creating snapshot for a consistency group.
464 # When we create a consistency group snapshot on
465 # coprhd then each snapshot of volume in the
466 # consistencygroup will be given a subscript. Ex if
467 # the snapshot name is cgsnap1 and lets say there are
468 # three vols(a,b,c) in CG. Then the names of snapshots
469 # of the volumes in cg on coprhd end will be like
470 # cgsnap1-1 cgsnap1-2 cgsnap1-3. So, we list the
471 # snapshots of the volume under consideration and then
472 # split the name using - from the ending as prefix
473 # and postfix. We compare the prefix to the cgsnapshot
474 # name and filter our the snapshots that correspond to
475 # the cgsnapshot
476
477 if '-' in snapshot_obj['name']:
478 (prefix, postfix) = snapshot_obj[
479 'name'].rsplit('-', 1)
480
481 if cgsnapshot_name == prefix:
482 self.set_tags_for_resource(
483 coprhd_snap.Snapshot.
484 URI_BLOCK_SNAPSHOTS_TAG,
485 snapUri['id'],
486 snapshot)
487
488 elif cgsnapshot_name == snapshot_obj['name']:
489 self.set_tags_for_resource(
490 coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG,
491 snapUri['id'],
492 snapshot)
493
494 snapshot['status'] = fields.SnapshotStatus.AVAILABLE
495 snapshots_model_update.append(
496 {'id': snapshot.id, 'status':
497 fields.SnapshotStatus.AVAILABLE})
498
499 model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
500
501 return model_update, snapshots_model_update
502
503 except coprhd_utils.CoprHdError as e:
504 coprhd_err_msg = (_("Snapshot for Consistency Group %(cg_name)s:"
505 " create failed\n%(err)s") %
506 {'cg_name': cg_name,
507 'err': six.text_type(e.msg)})
508
509 log_err_msg = ("Snapshot %(name)s for Consistency"
510 " Group: %(cg_name)s creation failed" %
511 {'cg_name': cg_name,
512 'name': cgsnapshot_name})
513 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
514 log_err_msg)
515
516 @retry_wrapper
517 def delete_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False):
518 self.authenticate_user()
519 cgsnapshot_id = cgsnapshot.id
520 cgsnapshot_name = self._get_resource_name(cgsnapshot,
521 MAX_SNAPSHOT_NAME_LENGTH,
522 truncate_name)
523
524 snapshots_model_update = []
525
526 cg_id = None
527 cg_group = None
528
529 try:
530 cg_id = cgsnapshot.group_id
531 cg_group = cgsnapshot.group
532 except AttributeError:
533 pass
534
535 coprhd_cgid = self._get_coprhd_cgid(cg_id)
536 cg_name = self._get_consistencygroup_name(cg_group)
537
538 model_update = {}
539 LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: '
540 '%(group_name)s', {'snap_name': cgsnapshot.name,
541 'group_name': cg_name})
542
543 try:
544 uri = None
545 try:
546 uri = self.snapshot_obj.snapshot_query('block',
547 'consistency-groups',
548 coprhd_cgid,
549 cgsnapshot_name + '-1')
550 except coprhd_utils.CoprHdError as e:
551 if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR:
552 uri = self.snapshot_obj.snapshot_query(
553 'block',
554 'consistency-groups',
555 coprhd_cgid,
556 cgsnapshot_name)
557 self.snapshot_obj.snapshot_delete_uri(
558 'block',
559 coprhd_cgid,
560 uri,
561 True,
562 0)
563
564 for snapshot in snapshots:
565 snapshots_model_update.append(
566 {'id': snapshot.id,
567 'status': fields.SnapshotStatus.DELETED})
568
569 return model_update, snapshots_model_update
570
571 except coprhd_utils.CoprHdError as e:
572 coprhd_err_msg = (_("Snapshot %(cgsnapshot_id)s: for"
573 " Consistency Group %(cg_name)s: delete"
574 " failed\n%(err)s") %
575 {'cgsnapshot_id': cgsnapshot_id,
576 'cg_name': cg_name,
577 'err': six.text_type(e.msg)})
578
579 log_err_msg = ("Snapshot %(name)s for Consistency"
580 " Group: %(cg_name)s deletion failed" %
581 {'cg_name': cg_name,
582 'name': cgsnapshot_name})
583 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
584 log_err_msg)
585
586 @retry_wrapper
587 def set_volume_tags(self, vol, exempt_tags=None, truncate_name=False):
588 if exempt_tags is None:
589 exempt_tags = []
590
591 self.authenticate_user()
592 name = self._get_resource_name(vol,
593 MAX_DEFAULT_NAME_LENGTH,
594 truncate_name)
595 full_project_name = ("%s/%s" % (
596 self.configuration.coprhd_tenant,
597 self.configuration.coprhd_project))
598
599 vol_uri = self.volume_obj.volume_query(full_project_name,
600 name)
601
602 self.set_tags_for_resource(
603 coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exempt_tags)
604
605 @retry_wrapper
606 def set_tags_for_resource(self, uri, resource_id, resource,
607 exempt_tags=None):
608 if exempt_tags is None:
609 exempt_tags = []
610
611 self.authenticate_user()
612
613 # first, get the current tags that start with the OPENSTACK_TAG
614 # eyecatcher
615 formattedUri = uri.format(resource_id)
616 remove_tags = []
617 currentTags = self.tag_obj.list_tags(formattedUri)
618 for cTag in currentTags:
619 if cTag.startswith(self.OPENSTACK_TAG):
620 remove_tags.append(cTag)
621
622 try:
623 if remove_tags:
624 self.tag_obj.tag_resource(uri,
625 resource_id,
626 None,
627 remove_tags)
628 except coprhd_utils.CoprHdError as e:
629 if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR:
630 LOG.debug("CoprHdError adding the tag:\n %s", e.msg)
631
632 # now add the tags for the resource
633 add_tags = []
634 # put all the openstack resource properties into the CoprHD resource
635
636 try:
637 for prop, value in vars(resource).items():
638 try:
639 if prop in exempt_tags:
640 continue
641
642 if prop.startswith("_"):
643 prop = prop.replace("_", '', 1)
644
645 # don't put the status in, it's always the status before
646 # the current transaction
647 if ((not prop.startswith("status") and not
648 prop.startswith("obj_status") and
649 prop != "obj_volume") and value):
650 tag = ("%s:%s:%s" %
651 (self.OPENSTACK_TAG, prop,
652 six.text_type(value)))
653
654 if len(tag) > 128:
655 tag = tag[0:128]
656 add_tags.append(tag)
657 except TypeError:
658 LOG.error(
659 "Error tagging the resource property %s", prop)
660 except TypeError:
661 LOG.error("Error tagging the resource properties")
662
663 try:
664 self.tag_obj.tag_resource(
665 uri,
666 resource_id,
667 add_tags,
668 None)
669 except coprhd_utils.CoprHdError as e:
670 if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR:
671 LOG.debug(
672 "Adding the tag failed. CoprHdError: %s", e.msg)
673
674 return self.tag_obj.list_tags(formattedUri)
675
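# Illustrative sketch (standalone): roughly what set_tags_for_resource above
# does with a resource's attributes — it builds tags of the form
# "<OPENSTACK_TAG>:<property>:<value>", strips one leading underscore from the
# property name, skips exempt and status-like properties, and caps each tag at
# 128 characters. The OPENSTACK_TAG value and the sample attributes below are
# assumptions made for the example.
import six

OPENSTACK_TAG = 'OpenStack'


def build_tags(resource_vars, exempt_tags=()):
    tags = []
    for prop, value in resource_vars.items():
        if prop in exempt_tags:
            continue
        if prop.startswith('_'):
            prop = prop.replace('_', '', 1)
        if (prop.startswith('status') or prop.startswith('obj_status')
                or prop == 'obj_volume' or not value):
            continue
        tag = '%s:%s:%s' % (OPENSTACK_TAG, prop, six.text_type(value))
        tags.append(tag[:128])
    return tags


print(build_tags({'_id': 'vol-1', 'display_name': 'demo', 'status': 'creating'}))
# e.g. ['OpenStack:id:vol-1', 'OpenStack:display_name:demo']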
676 @retry_wrapper
677 def create_cloned_volume(self, vol, src_vref, truncate_name=False):
678 """Creates a clone of the specified volume."""
679 self.authenticate_user()
680 name = self._get_resource_name(vol,
681 MAX_DEFAULT_NAME_LENGTH,
682 truncate_name)
683 srcname = self._get_coprhd_volume_name(src_vref)
684
685 try:
686 if src_vref.group_id:
687 raise coprhd_utils.CoprHdError(
688 coprhd_utils.CoprHdError.SOS_FAILURE_ERR,
689 _("Clone can't be taken individually on a volume"
690 " that is part of a Consistency Group"))
691         except KeyError:
692             pass
693 except AttributeError:
694 pass
695 try:
696 (storageres_type,
697 storageres_typename) = self.volume_obj.get_storageAttributes(
698 srcname, None, None)
699
700 resource_id = self.volume_obj.storage_resource_query(
701 storageres_type,
702 srcname,
703 None,
704 None,
705 self.configuration.coprhd_project,
706 self.configuration.coprhd_tenant)
707
708 self.volume_obj.clone(
709 name,
710 resource_id,
711 sync=True)
712
713 full_project_name = "%s/%s" % (
714 self.configuration.coprhd_tenant,
715 self.configuration.coprhd_project)
716
717 detachable = self.volume_obj.is_volume_detachable(
718 full_project_name, name)
719 LOG.debug("Is volume detachable : %s", detachable)
720
721 # detach it from the source volume immediately after creation
722 if detachable:
723 self.volume_obj.volume_clone_detach(
724 "", full_project_name, name, True)
725
726 except IndexError:
727 LOG.exception("Volume clone detach returned empty task list")
728
729 except coprhd_utils.CoprHdError as e:
730 coprhd_err_msg = (_("Volume %(name)s: clone failed\n%(err)s") %
731 {'name': name, 'err': six.text_type(e.msg)})
732
733 log_err_msg = ("Volume : {%s} clone failed" % name)
734 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
735 log_err_msg)
736
737 src_vol_size = 0
738 dest_vol_size = 0
739
740 try:
741 src_vol_size = src_vref.size
742 except AttributeError:
743 src_vol_size = src_vref.volume_size
744
745 try:
746 dest_vol_size = vol.size
747 except AttributeError:
748 dest_vol_size = vol.volume_size
749
750 if dest_vol_size > src_vol_size:
751 size_in_bytes = coprhd_utils.to_bytes("%sG" % dest_vol_size)
752 try:
753 self.volume_obj.expand(
754 ("%s/%s" % (self.configuration.coprhd_tenant,
755 self.configuration.coprhd_project)), name,
756 size_in_bytes,
757 True)
758 except coprhd_utils.CoprHdError as e:
759 coprhd_err_msg = (_("Volume %(volume_name)s: expand failed"
760 "\n%(err)s") %
761 {'volume_name': name,
762 'err': six.text_type(e.msg)})
763
764 log_err_msg = ("Volume : %s expand failed" % name)
765 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
766 log_err_msg)
767
768 @retry_wrapper
769 def expand_volume(self, vol, new_size):
770         """Expands the volume to the specified new_size."""
771 self.authenticate_user()
772 volume_name = self._get_coprhd_volume_name(vol)
773 size_in_bytes = coprhd_utils.to_bytes("%sG" % new_size)
774
775 try:
776 self.volume_obj.expand(
777 ("%s/%s" % (self.configuration.coprhd_tenant,
778 self.configuration.coprhd_project)), volume_name,
779 size_in_bytes,
780 True)
781 except coprhd_utils.CoprHdError as e:
782 coprhd_err_msg = (_("Volume %(volume_name)s:"
783 " expand failed\n%(err)s") %
784 {'volume_name': volume_name,
785 'err': six.text_type(e.msg)})
786
787 log_err_msg = ("Volume : %s expand failed" %
788 volume_name)
789 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
790 log_err_msg)
791
792 @retry_wrapper
793 def create_volume_from_snapshot(self, snapshot, volume,
794 truncate_name=False):
795         """Creates a volume from the given snapshot (snapshot clone to volume)."""
796 self.authenticate_user()
797
798 if self.configuration.coprhd_emulate_snapshot:
799 self.create_cloned_volume(volume, snapshot, truncate_name)
800 return
801
802 try:
803 if snapshot.group_snapshot_id:
804 raise coprhd_utils.CoprHdError(
805 coprhd_utils.CoprHdError.SOS_FAILURE_ERR,
806 _("Volume cannot be created individually from a snapshot "
807 "that is part of a Consistency Group"))
808 except AttributeError:
809 pass
810
811 src_snapshot_name = None
812 src_vol_ref = snapshot.volume
813 new_volume_name = self._get_resource_name(volume,
814 MAX_DEFAULT_NAME_LENGTH,
815 truncate_name)
816
817 try:
818 coprhd_vol_info = self._get_coprhd_volume_name(
819 src_vol_ref, True)
820 src_snapshot_name = self._get_coprhd_snapshot_name(
821 snapshot, coprhd_vol_info['volume_uri'])
822
823 (storageres_type,
824 storageres_typename) = self.volume_obj.get_storageAttributes(
825 coprhd_vol_info['volume_name'], None, src_snapshot_name)
826
827 resource_id = self.volume_obj.storage_resource_query(
828 storageres_type,
829 coprhd_vol_info['volume_name'],
830 None,
831 src_snapshot_name,
832 self.configuration.coprhd_project,
833 self.configuration.coprhd_tenant)
834
835 self.volume_obj.clone(
836 new_volume_name,
837 resource_id,
838 sync=True)
839
840 except coprhd_utils.CoprHdError as e:
841 coprhd_err_msg = (_("Snapshot %(src_snapshot_name)s:"
842 " clone failed\n%(err)s") %
843 {'src_snapshot_name': src_snapshot_name,
844 'err': six.text_type(e.msg)})
845
846 log_err_msg = ("Snapshot : %s clone failed" %
847 src_snapshot_name)
848 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
849 log_err_msg)
850
851 if volume.size > snapshot.volume_size:
852 size_in_bytes = coprhd_utils.to_bytes("%sG" % volume.size)
853
854 try:
855 self.volume_obj.expand(
856 ("%s/%s" % (self.configuration.coprhd_tenant,
857 self.configuration.coprhd_project)),
858 new_volume_name, size_in_bytes, True)
859
860 except coprhd_utils.CoprHdError as e:
861 coprhd_err_msg = (_("Volume %(volume_name)s: expand failed"
862 "\n%(err)s") %
863 {'volume_name': new_volume_name,
864 'err': six.text_type(e.msg)})
865
866 log_err_msg = ("Volume : %s expand failed" %
867 new_volume_name)
868 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
869 log_err_msg)
870
871 @retry_wrapper
872 def delete_volume(self, vol):
873 self.authenticate_user()
874 name = self._get_coprhd_volume_name(vol)
875 try:
876 full_project_name = ("%s/%s" % (
877 self.configuration.coprhd_tenant,
878 self.configuration.coprhd_project))
879 self.volume_obj.delete(full_project_name, name, sync=True)
880 except coprhd_utils.CoprHdError as e:
881 if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR:
882 LOG.info(
883 "Volume %s"
884 " no longer exists; volume deletion is"
885 " considered successful.", name)
886 else:
887 coprhd_err_msg = (_("Volume %(name)s: delete failed"
888 "\n%(err)s") %
889 {'name': name, 'err': six.text_type(e.msg)})
890
891 log_err_msg = ("Volume : %s delete failed" % name)
892 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
893 log_err_msg)
894
895 @retry_wrapper
896 def create_snapshot(self, snapshot, truncate_name=False):
897 self.authenticate_user()
898
899 volume = snapshot.volume
900
901 try:
902 if volume.group_id:
903 raise coprhd_utils.CoprHdError(
904 coprhd_utils.CoprHdError.SOS_FAILURE_ERR,
905 _("Snapshot can't be taken individually on a volume"
906 " that is part of a Consistency Group"))
907 except KeyError:
908 LOG.info("No Consistency Group associated with the volume")
909
910 if self.configuration.coprhd_emulate_snapshot:
911 self.create_cloned_volume(snapshot, volume, truncate_name)
912 self.set_volume_tags(
913 snapshot, ['_volume', '_obj_volume_type'], truncate_name)
914 return
915
916 try:
917 snapshotname = self._get_resource_name(snapshot,
918 MAX_SNAPSHOT_NAME_LENGTH,
919 truncate_name)
920 vol = snapshot.volume
921
922 volumename = self._get_coprhd_volume_name(vol)
923 projectname = self.configuration.coprhd_project
924 tenantname = self.configuration.coprhd_tenant
925 storageres_type = 'block'
926 storageres_typename = 'volumes'
927 resource_uri = self.snapshot_obj.storage_resource_query(
928 storageres_type,
929 volume_name=volumename,
930 cg_name=None,
931 project=projectname,
932 tenant=tenantname)
933 inactive = False
934 sync = True
935 self.snapshot_obj.snapshot_create(
936 storageres_type,
937 storageres_typename,
938 resource_uri,
939 snapshotname,
940 inactive,
941 sync)
942
943 snapshot_uri = self.snapshot_obj.snapshot_query(
944 storageres_type,
945 storageres_typename,
946 resource_uri,
947 snapshotname)
948
949 self.set_tags_for_resource(
950 coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG,
951 snapshot_uri, snapshot, ['_volume'])
952
953 except coprhd_utils.CoprHdError as e:
954 coprhd_err_msg = (_("Snapshot: %(snapshotname)s, create failed"
955 "\n%(err)s") % {'snapshotname': snapshotname,
956 'err': six.text_type(e.msg)})
957
958 log_err_msg = ("Snapshot : %s create failed" % snapshotname)
959 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
960 log_err_msg)
961
962 @retry_wrapper
963 def delete_snapshot(self, snapshot):
964 self.authenticate_user()
965
966 vol = snapshot.volume
967
968 try:
969 if vol.group_id:
970 raise coprhd_utils.CoprHdError(
971 coprhd_utils.CoprHdError.SOS_FAILURE_ERR,
972 _("Snapshot delete can't be done individually on a volume"
973 " that is part of a Consistency Group"))
974 except KeyError:
975 LOG.info("No Consistency Group associated with the volume")
976
977 if self.configuration.coprhd_emulate_snapshot:
978 self.delete_volume(snapshot)
979 return
980
981 snapshotname = None
982 try:
983 volumename = self._get_coprhd_volume_name(vol)
984 projectname = self.configuration.coprhd_project
985 tenantname = self.configuration.coprhd_tenant
986 storageres_type = 'block'
987 storageres_typename = 'volumes'
988 resource_uri = self.snapshot_obj.storage_resource_query(
989 storageres_type,
990 volume_name=volumename,
991 cg_name=None,
992 project=projectname,
993 tenant=tenantname)
994 if resource_uri is None:
995 LOG.info(
996 "Snapshot %s"
997 " is not found; snapshot deletion"
998 " is considered successful.", snapshotname)
999 else:
1000 snapshotname = self._get_coprhd_snapshot_name(
1001 snapshot, resource_uri)
1002
1003 self.snapshot_obj.snapshot_delete(
1004 storageres_type,
1005 storageres_typename,
1006 resource_uri,
1007 snapshotname,
1008 sync=True)
1009 except coprhd_utils.CoprHdError as e:
1010 coprhd_err_msg = (_("Snapshot %s : Delete Failed\n") %
1011 snapshotname)
1012
1013 log_err_msg = ("Snapshot : %s delete failed" % snapshotname)
1014 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
1015 log_err_msg)
1016
1017 @retry_wrapper
1018 def initialize_connection(self, volume, protocol, initiator_ports,
1019 hostname):
1020
1021 try:
1022 self.authenticate_user()
1023 volumename = self._get_coprhd_volume_name(volume)
1024 foundgroupname = self._find_exportgroup(initiator_ports)
1025 foundhostname = None
1026 if foundgroupname is None:
1027                for initiator_port in initiator_ports:
1028                    # check if this initiator is contained in any CoprHD Host
1029                    # object
1030                    LOG.debug(
1031                        "checking for initiator port: %s", initiator_port)
1032                    foundhostname = self._find_host(initiator_port)
1033
1034 if foundhostname:
1035 LOG.info("Found host %s", foundhostname)
1036 break
1037
1038 if not foundhostname:
1039 LOG.error("Auto host creation not supported")
1040 # create an export group for this host
1041 foundgroupname = foundhostname + 'SG'
1042 # create a unique name
1043 foundgroupname = foundgroupname + '-' + ''.join(
1044 random.choice(string.ascii_uppercase +
1045 string.digits)
1046 for x in range(6))
1047 self.exportgroup_obj.exportgroup_create(
1048 foundgroupname,
1049 self.configuration.coprhd_project,
1050 self.configuration.coprhd_tenant,
1051 self.configuration.coprhd_varray,
1052 'Host',
1053 foundhostname)
1054
1055 LOG.debug(
1056 "adding the volume to the exportgroup : %s", volumename)
1057
1058 self.exportgroup_obj.exportgroup_add_volumes(
1059 True,
1060 foundgroupname,
1061 self.configuration.coprhd_tenant,
1062 None,
1063 None,
1064 None,
1065 self.configuration.coprhd_project,
1066 [volumename],
1067 None,
1068 None)
1069
1070 return self._find_device_info(volume, initiator_ports)
1071
1072 except coprhd_utils.CoprHdError as e:
1073 raise coprhd_utils.CoprHdError(
1074 coprhd_utils.CoprHdError.SOS_FAILURE_ERR,
1075 (_("Attach volume (%(name)s) to host"
1076 " (%(hostname)s) initiator (%(initiatorport)s)"
1077 " failed:\n%(err)s") %
1078 {'name': self._get_coprhd_volume_name(
1079 volume),
1080 'hostname': hostname,
1081 'initiatorport': initiator_ports[0],
1082 'err': six.text_type(e.msg)})
1083 )
1084
1085 @retry_wrapper
1086 def terminate_connection(self, volume, protocol, initiator_ports,
1087 hostname):
1088 try:
1089 self.authenticate_user()
1090 volumename = self._get_coprhd_volume_name(volume)
1091 full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant,
1092 self.configuration.coprhd_project))
1093 voldetails = self.volume_obj.show(full_project_name, volumename)
1094 volid = voldetails['id']
1095
1096 # find the exportgroups
1097 exports = self.volume_obj.get_exports_by_uri(volid)
1098 exportgroups = set()
1099 itls = exports['itl']
1100 for itl in itls:
1101 itl_port = itl['initiator']['port']
1102 if itl_port in initiator_ports:
1103 exportgroups.add(itl['export']['id'])
1104
1105 for exportgroup in exportgroups:
1106 self.exportgroup_obj.exportgroup_remove_volumes_by_uri(
1107 exportgroup,
1108 volid,
1109 True,
1110 None,
1111 None,
1112 None,
1113 None)
1114            if not exportgroups:
1115 LOG.info(
1116 "No export group found for the host: %s"
1117 "; this is considered already detached.", hostname)
1118
1119 return itls
1120
1121 except coprhd_utils.CoprHdError as e:
1122 raise coprhd_utils.CoprHdError(
1123 coprhd_utils.CoprHdError.SOS_FAILURE_ERR,
1124 (_("Detaching volume %(volumename)s from host"
1125 " %(hostname)s failed: %(err)s") %
1126 {'volumename': volumename,
1127 'hostname': hostname,
1128 'err': six.text_type(e.msg)})
1129 )
1130
1131 @retry_wrapper
1132 def _find_device_info(self, volume, initiator_ports):
1133        """Returns device_info as a list of itls having the matched initiator.
1134
1135 (there could be multiple targets, hence a list):
1136 [
1137 {
1138 "hlu":9,
1139 "initiator":{...,"port":"20:00:00:25:B5:49:00:22"},
1140 "export":{...},
1141 "device":{...,"wwn":"600601602B802D00B62236585D0BE311"},
1142 "target":{...,"port":"50:06:01:6A:46:E0:72:EF"},
1143 "san_zone_name":"..."
1144 },
1145 {
1146 "hlu":9,
1147 "initiator":{...,"port":"20:00:00:25:B5:49:00:22"},
1148 "export":{...},
1149 "device":{...,"wwn":"600601602B802D00B62236585D0BE311"},
1150 "target":{...,"port":"50:06:01:62:46:E0:72:EF"},
1151 "san_zone_name":"..."
1152 }
1153 ]
1154 """
1155 volumename = self._get_coprhd_volume_name(volume)
1156 full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant,
1157 self.configuration.coprhd_project))
1158 vol_uri = self.volume_obj.volume_query(full_project_name, volumename)
1159
1160        # The itl info should be available on the first try, since export is
1161        # now a synchronous call. We retry a few more times to accommodate
1162        # any delay in filling in the itl info after the export task
1163        # completes.
1164
1165 itls = []
1166 for x in range(MAX_RETRIES):
1167 exports = self.volume_obj.get_exports_by_uri(vol_uri)
1168 LOG.debug("Volume exports: ")
1169 LOG.info(vol_uri)
1170 LOG.debug(exports)
1171 for itl in exports['itl']:
1172 itl_port = itl['initiator']['port']
1173 if itl_port in initiator_ports:
1174 found_device_number = itl['hlu']
1175 if (found_device_number is not None and
1176 found_device_number != '-1'):
1177 # 0 is a valid number for found_device_number.
1178 # Only loop if it is None or -1
1179 LOG.debug("Found Device Number: %s",
1180 found_device_number)
1181 itls.append(itl)
1182
1183 if itls:
1184 break
1185 else:
1186 LOG.debug("Device Number not found yet."
1187 " Retrying after 10 seconds...")
1188 eventlet.sleep(INTERVAL_10_SEC)
1189
1190        if not itls:
1191 # No device number found after 10 tries; return an empty itl
1192 LOG.info(
1193 "No device number has been found after 10 tries; "
1194 "this likely indicates an unsuccessful attach of "
1195 "volume volumename=%(volumename)s to"
1196 " initiator initiator_ports=%(initiator_ports)s",
1197 {'volumename': volumename,
1198 'initiator_ports': initiator_ports})
1199
1200 return itls
1201
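# Illustrative sketch (standalone): the filtering step inside _find_device_info
# above — keep only itl entries whose initiator port is in the requested list
# and whose hlu is already assigned (0 is valid; only None and '-1' mean "not
# ready yet"). The sample export payload is invented for the example.

def filter_ready_itls(exports, initiator_ports):
    itls = []
    for itl in exports['itl']:
        if itl['initiator']['port'] not in initiator_ports:
            continue
        hlu = itl['hlu']
        if hlu is not None and hlu != '-1':
            itls.append(itl)
    return itls


sample_exports = {'itl': [
    {'initiator': {'port': '20:00:00:25:B5:49:00:22'}, 'hlu': 9},
    {'initiator': {'port': '20:00:00:25:B5:49:00:99'}, 'hlu': None},
]}
print(filter_ready_itls(sample_exports, ['20:00:00:25:B5:49:00:22']))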
1202 def _get_coprhd_cgid(self, cgid):
1203 tagname = self.OPENSTACK_TAG + ":id:" + cgid
1204 rslt = coprhd_utils.search_by_tag(
1205 coprhd_cg.ConsistencyGroup.URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG.
1206 format(tagname),
1207 self.configuration.coprhd_hostname,
1208 self.configuration.coprhd_port)
1209
1210        # If the result is empty, then search with the tagname as
1211        # "OpenStack:obj_id"; the OpenStack attribute for the id can be
1212        # obj_id instead of id, depending on the version.
1213 if rslt is None or len(rslt) == 0:
1214 tagname = self.OPENSTACK_TAG + ":obj_id:" + cgid
1215 rslt = coprhd_utils.search_by_tag(
1216 coprhd_cg.ConsistencyGroup
1217 .URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG.
1218 format(tagname),
1219 self.configuration.coprhd_hostname,
1220 self.configuration.coprhd_port)
1221
1222 if len(rslt) > 0:
1223 rslt_cg = self.consistencygroup_obj.show(
1224 rslt[0],
1225 self.configuration.coprhd_project,
1226 self.configuration.coprhd_tenant)
1227 return rslt_cg['id']
1228 else:
1229 raise coprhd_utils.CoprHdError(
1230 coprhd_utils.CoprHdError.NOT_FOUND_ERR,
1231 (_("Consistency Group %s not found") % cgid))
1232
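# Illustrative sketch (standalone): the tag-based lookups above search CoprHD
# with "<OPENSTACK_TAG>:id:<uuid>" first and fall back to
# "<OPENSTACK_TAG>:obj_id:<uuid>", since the id attribute may be exposed as
# obj_id depending on the OpenStack object version. OPENSTACK_TAG is assumed
# to be 'OpenStack' and the uuid is an example value.
OPENSTACK_TAG = 'OpenStack'


def candidate_tags(openstack_id):
    return ['%s:id:%s' % (OPENSTACK_TAG, openstack_id),
            '%s:obj_id:%s' % (OPENSTACK_TAG, openstack_id)]


print(candidate_tags('3f2504e0-4f89-11d3-9a0c-0305e82c3301'))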
1233 def _get_consistencygroup_name(self, consisgrp):
1234 return consisgrp.name
1235
1236 def _get_coprhd_snapshot_name(self, snapshot, resUri):
1237 tagname = self.OPENSTACK_TAG + ":id:" + snapshot['id']
1238 rslt = coprhd_utils.search_by_tag(
1239 coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format(tagname),
1240 self.configuration.coprhd_hostname,
1241 self.configuration.coprhd_port)
1242
1243        # If the result is empty, then search with the tagname
1244        # "OpenStack:obj_id", as snapshots may carry obj_id
1245        # instead of just id.
1246 if not rslt:
1247 tagname = self.OPENSTACK_TAG + ":obj_id:" + snapshot['id']
1248 rslt = coprhd_utils.search_by_tag(
1249 coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format(
1250 tagname),
1251 self.configuration.coprhd_hostname,
1252 self.configuration.coprhd_port)
1253
1254 if rslt is None or len(rslt) == 0:
1255 return snapshot['name']
1256 else:
1257 rslt_snap = self.snapshot_obj.snapshot_show_uri(
1258 'block',
1259 resUri,
1260 rslt[0])
1261 return rslt_snap['name']
1262
1263 def _get_coprhd_volume_name(self, vol, verbose=False):
1264 tagname = self.OPENSTACK_TAG + ":id:" + vol.id
1265 rslt = coprhd_utils.search_by_tag(
1266 coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname),
1267 self.configuration.coprhd_hostname,
1268 self.configuration.coprhd_port)
1269
1270        # If the result is empty, then search with the tagname
1271        # "OpenStack:obj_id", as volumes may carry obj_id
1272        # instead of just id.
1273 if len(rslt) == 0:
1274 tagname = self.OPENSTACK_TAG + ":obj_id:" + vol.id
1275 rslt = coprhd_utils.search_by_tag(
1276 coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname),
1277 self.configuration.coprhd_hostname,
1278 self.configuration.coprhd_port)
1279
1280 if len(rslt) > 0:
1281 rslt_vol = self.volume_obj.show_by_uri(rslt[0])
1282
1283 if verbose is True:
1284 return {'volume_name': rslt_vol['name'], 'volume_uri': rslt[0]}
1285 else:
1286 return rslt_vol['name']
1287 else:
1288 raise coprhd_utils.CoprHdError(
1289 coprhd_utils.CoprHdError.NOT_FOUND_ERR,
1290 (_("Volume %s not found") % vol['display_name']))
1291
1292 def _get_resource_name(self, resource,
1293 max_name_cap=MAX_DEFAULT_NAME_LENGTH,
1294 truncate_name=False):
1295        # 36 is the length of a UUID, plus 1 for the '-'
1296 permitted_name_length = max_name_cap - (36 + 1)
1297 name = resource.display_name
1298 if not name:
1299 name = resource.name
1300
1301        '''
1302        For ScaleIO, truncate_name will be True. We make sure the
1303        total name is no longer than 31 characters;
1304        _id_to_base64 returns a 24-character name.'''
1305 if truncate_name:
1306 name = self._id_to_base64(resource.id)
1307 return name
1308
1309 elif len(name) > permitted_name_length:
1310            '''
1311            The maximum length of a resource name in CoprHD is 128. Hence we
1312            use only the first permitted_name_length characters of the name.'''
1313 return name[0:permitted_name_length] + "-" + resource.id
1314
1315 else:
1316 return name + "-" + resource.id
1317
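# Illustrative sketch (standalone): the naming rule in _get_resource_name
# above — the OpenStack display name is suffixed with "-<uuid>", and the
# display-name part is truncated so the whole string stays within the backend
# name cap (a UUID is 36 characters, plus 1 for the '-'). Example values are
# invented.

def build_backend_name(display_name, resource_id, max_name_cap=128):
    permitted = max_name_cap - (36 + 1)
    if len(display_name) > permitted:
        display_name = display_name[:permitted]
    return display_name + '-' + resource_id


print(build_backend_name('demo-volume',
                         '3f2504e0-4f89-11d3-9a0c-0305e82c3301'))
print(len(build_backend_name('x' * 200,
                             '3f2504e0-4f89-11d3-9a0c-0305e82c3301')))  # 128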
1318 def _get_vpool(self, volume):
1319 vpool = {}
1320 ctxt = context.get_admin_context()
1321 type_id = volume.volume_type_id
1322 if type_id is not None:
1323 volume_type = volume_types.get_volume_type(ctxt, type_id)
1324 specs = volume_type.get('extra_specs')
1325 for key, value in specs.items():
1326 vpool[key] = value
1327
1328 return vpool
1329
1330 def _id_to_base64(self, id):
1331        # Base64 encode the id to get a volume name shorter than 32 characters,
1332        # due to a ScaleIO limitation.
1333 name = six.text_type(id).replace("-", "")
1334 try:
1335 name = base64.b16decode(name.upper())
1336 except (TypeError, binascii.Error):
1337 pass
1338 encoded_name = name
1339 if isinstance(encoded_name, six.text_type):
1340 encoded_name = encoded_name.encode('utf-8')
1341 encoded_name = base64.b64encode(encoded_name)
1342 if six.PY3:
1343 encoded_name = encoded_name.decode('ascii')
1344 LOG.debug("Converted id %(id)s to scaleio name %(name)s.",
1345 {'id': id, 'name': encoded_name})
1346 return encoded_name
1347
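# Illustrative sketch (standalone): what _id_to_base64 above computes for a
# well-formed UUID — strip the dashes, base16-decode the 32 hex digits into
# 16 bytes, then base64-encode those bytes, yielding a 24-character name that
# fits ScaleIO's 31-character limit. The UUID below is just an example value.
import base64

vol_id = '3f2504e0-4f89-11d3-9a0c-0305e82c3301'
raw = base64.b16decode(vol_id.replace('-', '').upper())
scaleio_name = base64.b64encode(raw).decode('ascii')
print(scaleio_name, len(scaleio_name))  # 24 characters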
1348 def _raise_or_log_exception(self, err_code, coprhd_err_msg, log_err_msg):
1349
1350 if err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR:
1351 raise coprhd_utils.CoprHdError(
1352 coprhd_utils.CoprHdError.SOS_FAILURE_ERR,
1353 coprhd_err_msg)
1354 else:
1355 with excutils.save_and_reraise_exception():
1356 LOG.exception(log_err_msg)
1357
1358 @retry_wrapper
1359 def _find_exportgroup(self, initiator_ports):
1360 """Find export group with initiator ports same as given initiators."""
1361 foundgroupname = None
1362 grouplist = self.exportgroup_obj.exportgroup_list(
1363 self.configuration.coprhd_project,
1364 self.configuration.coprhd_tenant)
1365 for groupid in grouplist:
1366 groupdetails = self.exportgroup_obj.exportgroup_show(
1367 groupid,
1368 self.configuration.coprhd_project,
1369 self.configuration.coprhd_tenant)
1370 if groupdetails is not None:
1371 if groupdetails['inactive']:
1372 continue
1373 initiators = groupdetails['initiators']
1374 if initiators is not None:
1375 inits_eg = set()
1376 for initiator in initiators:
1377 inits_eg.add(initiator['initiator_port'])
1378
1379 if inits_eg <= set(initiator_ports):
1380 foundgroupname = groupdetails['name']
1381 if foundgroupname is not None:
1382 # Check the associated varray
1383 if groupdetails['varray']:
1384 varray_uri = groupdetails['varray']['id']
1385 varray_details = self.varray_obj.varray_show(
1386 varray_uri)
1387 if varray_details['name'] == (
1388 self.configuration.coprhd_varray):
1389 LOG.debug(
1390 "Found exportgroup %s",
1391 foundgroupname)
1392 break
1393
1394 # Not the right varray
1395 foundgroupname = None
1396
1397 return foundgroupname
1398
1399 @retry_wrapper
1400 def _find_host(self, initiator_port):
1401        """Find the host, if it exists, to which the given initiator belongs."""
1402 foundhostname = None
1403 hosts = self.host_obj.list_all(self.configuration.coprhd_tenant)
1404 for host in hosts:
1405 initiators = self.host_obj.list_initiators(host['id'])
1406 for initiator in initiators:
1407 if initiator_port == initiator['name']:
1408 foundhostname = host['name']
1409 break
1410
1411 if foundhostname is not None:
1412 break
1413
1414 return foundhostname
1415
1416 @retry_wrapper
1417 def get_exports_count_by_initiators(self, initiator_ports):
1418 """Fetches ITL map for a given list of initiator ports."""
1419 comma_delimited_initiator_list = ",".join(initiator_ports)
1420 (s, h) = coprhd_utils.service_json_request(
1421 self.configuration.coprhd_hostname,
1422 self.configuration.coprhd_port, "GET",
1423 URI_BLOCK_EXPORTS_FOR_INITIATORS.format(
1424 comma_delimited_initiator_list),
1425 None)
1426
1427 export_itl_maps = coprhd_utils.json_decode(s)
1428
1429 if export_itl_maps is None:
1430 return 0
1431
1432 itls = export_itl_maps['itl']
1433        return len(itls)
1434
1435 @retry_wrapper
1436 def update_volume_stats(self):
1437 """Retrieve stats info."""
1438 LOG.debug("Updating volume stats")
1439 self.authenticate_user()
1440
1441 try:
1442 self.stats['consistencygroup_support'] = True
1443 self.stats['consistent_group_snapshot_enabled'] = True
1444 vols = self.volume_obj.list_volumes(
1445 self.configuration.coprhd_tenant +
1446 "/" +
1447 self.configuration.coprhd_project)
1448
1449 vpairs = set()
1450 if len(vols) > 0:
1451 for vol in vols:
1452 if vol:
1453 vpair = (vol["vpool"]["id"], vol["varray"]["id"])
1454 if vpair not in vpairs:
1455 vpairs.add(vpair)
1456
1457 if len(vpairs) > 0:
1458 free_gb = 0.0
1459 used_gb = 0.0
1460 for vpair in vpairs:
1461 if vpair:
1462 (s, h) = coprhd_utils.service_json_request(
1463 self.configuration.coprhd_hostname,
1464 self.configuration.coprhd_port,
1465 "GET",
1466 URI_VPOOL_VARRAY_CAPACITY.format(vpair[0],
1467 vpair[1]),
1468 body=None)
1469 capacity = coprhd_utils.json_decode(s)
1470
1471 free_gb += float(capacity["free_gb"])
1472 used_gb += float(capacity["used_gb"])
1473
1474 self.stats['free_capacity_gb'] = free_gb
1475 self.stats['total_capacity_gb'] = free_gb + used_gb
1476 self.stats['reserved_percentage'] = (
1477 self.configuration.reserved_percentage)
1478
1479 return self.stats
1480
1481 except coprhd_utils.CoprHdError:
1482 with excutils.save_and_reraise_exception():
1483 LOG.exception("Update volume stats failed")
1484
1485 @retry_wrapper
1486 def retype(self, ctxt, volume, new_type, diff, host):
1487 """changes the vpool type."""
1488 self.authenticate_user()
1489 volume_name = self._get_coprhd_volume_name(volume)
1490 vpool_name = new_type['extra_specs']['CoprHD:VPOOL']
1491
1492 try:
1493 full_project_name = "%s/%s" % (
1494 self.configuration.coprhd_tenant,
1495 self.configuration.coprhd_project)
1496
1497 task = self.volume_obj.update(
1498 full_project_name,
1499 volume_name,
1500 vpool_name)
1501
1502 self.volume_obj.check_for_sync(task['task'][0], True)
1503 return True
1504 except coprhd_utils.CoprHdError as e:
1505 coprhd_err_msg = (_("Volume %(volume_name)s: update failed"
1506 "\n%(err)s") % {'volume_name': volume_name,
1507 'err': six.text_type(e.msg)})
1508
1509 log_err_msg = ("Volume : %s type update failed" %
1510 volume_name)
1511 self._raise_or_log_exception(e.err_code, coprhd_err_msg,
1512 log_err_msg)
diff --git a/cinder/volume/drivers/coprhd/fc.py b/cinder/volume/drivers/coprhd/fc.py
deleted file mode 100644
index 3347a96..0000000
--- a/cinder/volume/drivers/coprhd/fc.py
+++ /dev/null
@@ -1,272 +0,0 @@
1# Copyright (c) 2016 EMC Corporation
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
15
16
17"""Driver for EMC CoprHD FC volumes."""
18
19import re
20
21from oslo_log import log as logging
22
23from cinder import exception
24from cinder.i18n import _
25from cinder import interface
26from cinder.volume import driver
27from cinder.volume.drivers.coprhd import common as coprhd_common
28from cinder.volume import utils as volume_utils
29
30from cinder.zonemanager import utils as fczm_utils
31
32LOG = logging.getLogger(__name__)
33
34
35@interface.volumedriver
36class EMCCoprHDFCDriver(driver.FibreChannelDriver):
37 """CoprHD FC Driver."""
38 VERSION = "3.0.0.0"
39
40 # ThirdPartySystems wiki page
41 CI_WIKI_NAME = "EMC_CoprHD_CI"
42
43 # TODO(jsbryant) Remove driver in Stein if CI is not fixed
44 SUPPORTED = False
45
46 def __init__(self, *args, **kwargs):
47 super(EMCCoprHDFCDriver, self).__init__(*args, **kwargs)
48 self.common = self._get_common_driver()
49
50 def _get_common_driver(self):
51 return coprhd_common.EMCCoprHDDriverCommon(
52 protocol='FC',
53 default_backend_name=self.__class__.__name__,
54 configuration=self.configuration)
55
56 def check_for_setup_error(self):
57 self.common.check_for_setup_error()
58
59 def create_volume(self, volume):
60 """Creates a Volume."""
61 self.common.create_volume(volume, self)
62 self.common.set_volume_tags(volume, ['_obj_volume_type'])
63
64 def create_cloned_volume(self, volume, src_vref):
65 """Creates a cloned Volume."""
66 self.common.create_cloned_volume(volume, src_vref)
67 self.common.set_volume_tags(volume, ['_obj_volume_type'])
68
69 def create_volume_from_snapshot(self, volume, snapshot):
70 """Creates a volume from a snapshot."""
71 self.common.create_volume_from_snapshot(snapshot, volume)
72 self.common.set_volume_tags(volume, ['_obj_volume_type'])
73
74 def extend_volume(self, volume, new_size):
75 """expands the size of the volume."""
76 self.common.expand_volume(volume, new_size)
77
78 def delete_volume(self, volume):
79 """Deletes a volume."""
80 self.common.delete_volume(volume)
81
82 def create_snapshot(self, snapshot):
83 """Creates a snapshot."""
84 self.common.create_snapshot(snapshot)
85
86 def delete_snapshot(self, snapshot):
87 """Deletes a snapshot."""
88 self.common.delete_snapshot(snapshot)
89
90 def ensure_export(self, context, volume):
91 """Driver entry point to get the export info for an existing volume."""
92 pass
93
94 def create_export(self, context, volume, connector=None):
95 """Driver entry point to get the export info for a new volume."""
96 pass
97
98 def remove_export(self, context, volume):
99 """Driver entry point to remove an export for a volume."""
100 pass
101
102 def create_group(self, context, group):
103 """Creates a group."""
104 if volume_utils.is_group_a_cg_snapshot_type(group):
105 return self.common.create_consistencygroup(context, group)
106
107 # If the group is not consistency group snapshot enabled, then
108 # we shall rely on generic volume group implementation
109 raise NotImplementedError()
110
111 def update_group(self, context, group, add_volumes=None,
112 remove_volumes=None):
113 """Updates volumes in group."""
114 if volume_utils.is_group_a_cg_snapshot_type(group):
115 return self.common.update_consistencygroup(group, add_volumes,
116 remove_volumes)
117
118 # If the group is not consistency group snapshot enabled, then
119 # we shall rely on generic volume group implementation
120 raise NotImplementedError()
121
122 def create_group_from_src(self, ctxt, group, volumes,
123 group_snapshot=None, snapshots=None,
124 source_group=None, source_vols=None):
125 """Creates a group from source."""
126 if volume_utils.is_group_a_cg_snapshot_type(group):
127 message = _("create group from source is not supported "
128 "for CoprHD if the group type supports "
129 "consistent group snapshot.")
130 raise exception.VolumeBackendAPIException(data=message)
131 else:
132 raise NotImplementedError()
133
134 def delete_group(self, context, group, volumes):
135 """Deletes a group."""
136 if volume_utils.is_group_a_cg_snapshot_type(group):
137 return self.common.delete_consistencygroup(context, group, volumes)
138
139 # If the group is not consistency group snapshot enabled, then
140 # we shall rely on generic volume group implementation
141 raise NotImplementedError()
142
143 def create_group_snapshot(self, context, group_snapshot, snapshots):
144 """Creates a group snapshot."""
145 if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
146 return self.common.create_cgsnapshot(group_snapshot, snapshots)
147
148 # If the group is not consistency group snapshot enabled, then
149 # we shall rely on generic volume group implementation
150 raise NotImplementedError()
151
152 def delete_group_snapshot(self, context, group_snapshot, snapshots):
153 """Deletes a group snapshot."""
154 if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
155 return self.common.delete_cgsnapshot(group_snapshot, snapshots)
156
157 # If the group is not consistency group snapshot enabled, then
158 # we shall rely on generic volume group implementation
159 raise NotImplementedError()
160
161 def check_for_export(self, context, volume_id):
162 """Make sure volume is exported."""
163 pass
164
165 def initialize_connection(self, volume, connector):
166 """Initializes the connection and returns connection info."""
167
168 properties = {}
169 properties['volume_id'] = volume.id
170 properties['target_discovered'] = False
171 properties['target_wwn'] = []
172
173 init_ports = self._build_initport_list(connector)
174 itls = self.common.initialize_connection(volume, 'FC', init_ports,
175 connector['host'])
176
177 target_wwns = None
178 initiator_target_map = None
179
180 if itls:
181 properties['target_lun'] = itls[0]['hlu']
182 target_wwns, initiator_target_map = (
183 self._build_initiator_target_map(itls, connector))
184
185 properties['target_wwn'] = target_wwns
186 properties['initiator_target_map'] = initiator_target_map
187
188 auth = None
189 try:
190 auth = volume.provider_auth
191 except AttributeError:
192 pass
193
194 if auth:
195 (auth_method, auth_username, auth_secret) = auth.split()
196 properties['auth_method'] = auth_method
197 properties['auth_username'] = auth_username
198 properties['auth_password'] = auth_secret
199
200 LOG.debug('FC properties: %s', properties)
201 conn_info = {
202 'driver_volume_type': 'fibre_channel',
203 'data': properties,
204 }
205 fczm_utils.add_fc_zone(conn_info)
206 return conn_info
207
208 def terminate_connection(self, volume, connector, **kwargs):
209 """Driver entry point to detach a volume from an instance."""
210
211 init_ports = self._build_initport_list(connector)
212 itls = self.common.terminate_connection(volume, 'FC', init_ports,
213 connector['host'])
214
215 volumes_count = self.common.get_exports_count_by_initiators(init_ports)
216 if volumes_count > 0:
217 # return empty data
218 data = {'driver_volume_type': 'fibre_channel', 'data': {}}
219 else:
220 target_wwns, initiator_target_map = (
221 self._build_initiator_target_map(itls, connector))
222 data = {
223 'driver_volume_type': 'fibre_channel',
224 'data': {
225 'target_wwn': target_wwns,
226 'initiator_target_map': initiator_target_map}}
227 fczm_utils.remove_fc_zone(data)
228
229 LOG.debug('Return FC data: %s', data)
230 return data
231
232 def _build_initiator_target_map(self, itls, connector):
233
234 target_wwns = []
235 for itl in itls:
236 target_wwns.append(itl['target']['port'].replace(':', '').lower())
237
238 initiator_wwns = connector['wwpns']
239 initiator_target_map = {}
240 for initiator in initiator_wwns:
241 initiator_target_map[initiator] = target_wwns
242
243 return target_wwns, initiator_target_map
244
245 def _build_initport_list(self, connector):
246 init_ports = []
247        for wwpn in connector['wwpns']:
248            initiator_port = ':'.join(re.findall(
249                '..',
250                wwpn)).upper()  # Add ":" every two digits
251            init_ports.append(initiator_port)
252
253 return init_ports
254
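# Illustrative sketch (standalone): the two helpers above in one place — a
# connector's bare WWPNs are reformatted as colon-separated, upper-case ports
# for CoprHD, and each initiator is mapped to the full list of target WWNs
# taken from the itls. The WWPN and target values are invented for the example.
import re


def to_initiator_port(wwpn):
    # Insert ':' every two hex digits and upper-case,
    # e.g. '20000025b5490022' -> '20:00:00:25:B5:49:00:22'
    return ':'.join(re.findall('..', wwpn)).upper()


def build_initiator_target_map(itls, wwpns):
    target_wwns = [itl['target']['port'].replace(':', '').lower()
                   for itl in itls]
    return target_wwns, {wwpn: target_wwns for wwpn in wwpns}


sample_itls = [{'target': {'port': '50:06:01:6A:46:E0:72:EF'}}]
print(to_initiator_port('20000025b5490022'))
print(build_initiator_target_map(sample_itls, ['20000025b5490022']))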
255 def get_volume_stats(self, refresh=False):
256 """Get volume status.
257
258 If 'refresh' is True, run update the stats first.
259 """
260 if refresh:
261 self.update_volume_stats()
262
263 return self._stats
264
265 def update_volume_stats(self):
266 """Retrieve stats info from virtual pool/virtual array."""
267 LOG.debug("Updating volume stats")
268 self._stats = self.common.update_volume_stats()
269
270 def retype(self, ctxt, volume, new_type, diff, host):
271 """Change the volume type."""
272 return self.common.retype(ctxt, volume, new_type, diff, host)
diff --git a/cinder/volume/drivers/coprhd/helpers/__init__.py b/cinder/volume/drivers/coprhd/helpers/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/cinder/volume/drivers/coprhd/helpers/__init__.py
+++ /dev/null
diff --git a/cinder/volume/drivers/coprhd/helpers/authentication.py b/cinder/volume/drivers/coprhd/helpers/authentication.py
deleted file mode 100644
index c0d9f7c..0000000
--- a/cinder/volume/drivers/coprhd/helpers/authentication.py
+++ /dev/null
@@ -1,220 +0,0 @@
1# Copyright (c) 2016 EMC Corporation
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
15
16
17try:
18 import cookielib as cookie_lib
19except ImportError:
20 import http.cookiejar as cookie_lib
21import socket
22
23import requests
24from requests import exceptions
25import six
26from six.moves import http_client
27
28from cinder.i18n import _
29from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
30
31
32class Authentication(common.CoprHDResource):
33
34 # Commonly used URIs for the 'Authentication' module
35 URI_SERVICES_BASE = ''
36 URI_AUTHENTICATION = '/login'
37
38 HEADERS = {'Content-Type': 'application/json',
39 'ACCEPT': 'application/json', 'X-EMC-REST-CLIENT': 'TRUE'}
40
41 def authenticate_user(self, username, password):
42 """Makes REST API call to generate the authentication token.
43
44 Authentication token is generated for the specified user after
45 validation
46
47 :param username: Name of the user
48 :param password: Password for the user
49 :returns: The authtoken
50 """
51
52 SEC_REDIRECT = 302
53 SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN'
54 LB_API_PORT = 4443
55 # Port on which load-balancer/reverse-proxy listens to all incoming
56 # requests for CoprHD REST APIs
57 APISVC_PORT = 8443 # Port on which apisvc listens to incoming requests
58
59 cookiejar = cookie_lib.LWPCookieJar()
60
61 url = ('https://%(ip)s:%(port)d%(uri)s' %
62 {'ip': self.ipaddr, 'port': self.port,
63 'uri': self.URI_AUTHENTICATION})
64
65 try:
66 if self.port == APISVC_PORT:
67 login_response = requests.get(
68 url, headers=self.HEADERS, verify=False,
69 auth=(username, password), cookies=cookiejar,
70 allow_redirects=False, timeout=common.TIMEOUT_SEC)
71 if login_response.status_code == SEC_REDIRECT:
72 location = login_response.headers['Location']
73 if not location:
74 raise common.CoprHdError(
75 common.CoprHdError.HTTP_ERR, (_("The redirect"
76 " location of the"
77 " authentication"
78 " service is not"
79 " provided")))
80 # Make the second request
81 login_response = requests.get(
82 location, headers=self.HEADERS, verify=False,
83 cookies=cookiejar, allow_redirects=False,
84 timeout=common.TIMEOUT_SEC)
85 if (login_response.status_code !=
86 http_client.UNAUTHORIZED):
87 raise common.CoprHdError(
88 common.CoprHdError.HTTP_ERR, (_("The"
89 " authentication"
90 " service failed"
91 " to reply with"
92 " 401")))
93
94 # Now provide the credentials
95 login_response = requests.get(
96 location, headers=self.HEADERS,
97 auth=(username, password), verify=False,
98 cookies=cookiejar, allow_redirects=False,
99 timeout=common.TIMEOUT_SEC)
100 if login_response.status_code != SEC_REDIRECT:
101 raise common.CoprHdError(
102 common.CoprHdError.HTTP_ERR,
103 (_("Access forbidden: Authentication required")))
104 location = login_response.headers['Location']
105 if not location:
106 raise common.CoprHdError(
107 common.CoprHdError.HTTP_ERR,
108 (_("The"
109 " authentication service failed to provide the"
110 " location of the service URI when redirecting"
111 " back")))
112 authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER]
113 if not authtoken:
114 details_str = self.extract_error_detail(login_response)
115 raise common.CoprHdError(common.CoprHdError.HTTP_ERR,
116 (_("The token is not"
117 " generated by"
118 " authentication service."
119 "%s") %
120 details_str))
121 # Make the final call to get the page with the token
122 new_headers = self.HEADERS
123 new_headers[SEC_AUTHTOKEN_HEADER] = authtoken
124 login_response = requests.get(
125 location, headers=new_headers, verify=False,
126 cookies=cookiejar, allow_redirects=False,
127 timeout=common.TIMEOUT_SEC)
128 if login_response.status_code != http_client.OK:
129 raise common.CoprHdError(
130 common.CoprHdError.HTTP_ERR, (_(
131 "Login failure code: "
132 "%(statuscode)s Error: %(responsetext)s") %
133 {'statuscode': six.text_type(
134 login_response.status_code),
135 'responsetext': login_response.text}))
136 elif self.port == LB_API_PORT:
137 login_response = requests.get(
138 url, headers=self.HEADERS, verify=False,
139 cookies=cookiejar, allow_redirects=False)
140
141 if(login_response.status_code ==
142 http_client.UNAUTHORIZED):
143 # Now provide the credentials
144 login_response = requests.get(
145 url, headers=self.HEADERS, auth=(username, password),
146 verify=False, cookies=cookiejar, allow_redirects=False)
147 authtoken = None
148 if SEC_AUTHTOKEN_HEADER in login_response.headers:
149 authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER]
150 else:
151 raise common.CoprHdError(
152 common.CoprHdError.HTTP_ERR,
153 (_("Incorrect port number. Load balanced port is: "
154 "%(lb_api_port)s, api service port is: "
155 "%(apisvc_port)s") %
156 {'lb_api_port': LB_API_PORT,
157 'apisvc_port': APISVC_PORT}))
158
159 if not authtoken:
160 details_str = self.extract_error_detail(login_response)
161 raise common.CoprHdError(
162 common.CoprHdError.HTTP_ERR,
163 (_("The token is not generated by authentication service."
164 " %s") % details_str))
165
166 if login_response.status_code != http_client.OK:
167 error_msg = None
168 if login_response.status_code == http_client.UNAUTHORIZED:
169 error_msg = _("Access forbidden: Authentication required")
170 elif login_response.status_code == http_client.FORBIDDEN:
171 error_msg = _("Access forbidden: You don't have"
172 " sufficient privileges to perform"
173 " this operation")
174 elif (login_response.status_code ==
175 http_client.INTERNAL_SERVER_ERROR):
176 error_msg = _("Bourne internal server error")
177 elif login_response.status_code == http_client.NOT_FOUND:
178 error_msg = _(
179 "Requested resource is currently unavailable")
180 elif (login_response.status_code ==
181 http_client.METHOD_NOT_ALLOWED):
182                error_msg = (_("GET method is not supported by resource:"
183                               " %s") %
184                             url)
185 elif (login_response.status_code ==
186 http_client.SERVICE_UNAVAILABLE):
187 error_msg = _("Service temporarily unavailable:"
188 " The server is temporarily unable"
189 " to service your request")
190 else:
191 error_msg = login_response.text
192 raise common.CoprHdError(common.CoprHdError.HTTP_ERR,
193 (_("HTTP code: %(status_code)s"
194 ", response: %(reason)s"
195 " [%(error_msg)s]") % {
196 'status_code': six.text_type(
197 login_response.status_code),
198 'reason': six.text_type(
199 login_response.reason),
200 'error_msg': six.text_type(
201 error_msg)
202 }))
203 except (exceptions.SSLError, socket.error, exceptions.ConnectionError,
204 exceptions.Timeout) as e:
205 raise common.CoprHdError(
206 common.CoprHdError.HTTP_ERR, six.text_type(e))
207
208 return authtoken
209
210 def extract_error_detail(self, login_response):
211 details_str = ""
212 try:
213 if login_response.content:
214 json_object = common.json_decode(login_response.content)
215 if 'details' in json_object:
216 details_str = json_object['details']
217
218 return details_str
219 except common.CoprHdError:
220 return details_str
diff --git a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py b/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py
deleted file mode 100644
index 71577fc..0000000
--- a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py
+++ /dev/null
@@ -1,523 +0,0 @@
1# Copyright (c) 2016 EMC Corporation
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
15
16"""Contains some commonly used utility methods."""
17try:
18 import cookielib as cookie_lib
19except ImportError:
20 import http.cookiejar as cookie_lib
21import json
22import re
23import socket
24
25import oslo_serialization
26from oslo_utils import timeutils
27from oslo_utils import units
28import requests
29from requests import exceptions
30import six
31from six.moves import http_client
32
33from cinder import exception
34from cinder.i18n import _
35from cinder.volume.drivers.coprhd.helpers import urihelper
36
37
38PROD_NAME = 'storageos'
39
40TIMEOUT_SEC = 20 # 20 SECONDS
41
42global AUTH_TOKEN
43AUTH_TOKEN = None
44
45TASK_TIMEOUT = 300
46
47URI_TASKS_BY_OPID = '/vdc/tasks/{0}'
48
49
50def _decode_list(data):
51 rv = []
52 for item in data:
53 if isinstance(item, six.text_type):
54 item = item.encode('utf-8')
55 elif isinstance(item, list):
56 item = _decode_list(item)
57 elif isinstance(item, dict):
58 item = _decode_dict(item)
59 rv.append(item)
60 return rv
61
62
63def _decode_dict(data):
64 rv = {}
65 for key, value in data.items():
66 if isinstance(key, six.text_type):
67 key = key.encode('utf-8')
68 if isinstance(value, six.text_type):
69 value = value.encode('utf-8')
70 elif isinstance(value, list):
71 value = _decode_list(value)
72 elif isinstance(value, dict):
73 value = _decode_dict(value)
74 rv[key] = value
75 return rv
76
77
78def json_decode(rsp):
79 """Used to decode the JSON encoded response."""
80
81 try:
82 o = json.loads(rsp, object_hook=_decode_dict)
83 except ValueError:
84 raise CoprHdError(CoprHdError.VALUE_ERR,
85 (_("Failed to recognize JSON payload:\n[%s]") % rsp))
86 return o
87
88
89def service_json_request(ip_addr, port, http_method, uri, body,
90 contenttype='application/json', customheaders=None):
91 """Used to make an HTTP request and get the response.
92
93 The message body is encoded in JSON format
94
95 :param ip_addr: IP address or host name of the server
96 :param port: port number of the server on which it
97 is listening to HTTP requests
98 :param http_method: one of GET, POST, PUT, DELETE
99 :param uri: the request URI
100 :param body: the request payload
101 :returns: a tuple of two elements: (response body, response headers)
102 :raises CoprHdError: in case of HTTP errors with err_code 3
103 """
104
105 SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN'
106
107 headers = {'Content-Type': contenttype,
108 'ACCEPT': 'application/json, application/octet-stream',
109 'X-EMC-REST-CLIENT': 'TRUE'}
110
111 if customheaders:
112 headers.update(customheaders)
113
114 try:
115 protocol = "https://"
116 if port == 8080:
117 protocol = "http://"
118 url = protocol + ip_addr + ":" + six.text_type(port) + uri
119
120 cookiejar = cookie_lib.LWPCookieJar()
121 headers[SEC_AUTHTOKEN_HEADER] = AUTH_TOKEN
122
123 if http_method == 'GET':
124 response = requests.get(url, headers=headers, verify=False,
125 cookies=cookiejar)
126 elif http_method == 'POST':
127 response = requests.post(url, data=body, headers=headers,
128 verify=False, cookies=cookiejar)
129 elif http_method == 'PUT':
130 response = requests.put(url, data=body, headers=headers,
131 verify=False, cookies=cookiejar)
132 elif http_method == 'DELETE':
133
134 response = requests.delete(url, headers=headers, verify=False,
135 cookies=cookiejar)
136 else:
137 raise CoprHdError(CoprHdError.HTTP_ERR,
138 (_("Unknown/Unsupported HTTP method: %s") %
139 http_method))
140
141 if (response.status_code == http_client.OK or
142 response.status_code == http_client.ACCEPTED):
143 return (response.text, response.headers)
144
145 error_msg = None
146 if response.status_code == http_client.INTERNAL_SERVER_ERROR:
147 response_text = json_decode(response.text)
148 error_details = ""
149 if 'details' in response_text:
150 error_details = response_text['details']
151            error_msg = (_("CoprHD internal server error. Error details: %s")
152                         % error_details)
153 elif response.status_code == http_client.UNAUTHORIZED:
154 error_msg = _("Access forbidden: Authentication required")
155 elif response.status_code == http_client.FORBIDDEN:
156 error_msg = ""
157 error_details = ""
158 error_description = ""
159
160 response_text = json_decode(response.text)
161
162 if 'details' in response_text:
163 error_details = response_text['details']
164                error_msg = (_("%(error_msg)s Error details:"
165                               " %(error_details)s") %
166                             {'error_msg': error_msg,
167                              'error_details': error_details
168                              })
169 elif 'description' in response_text:
170 error_description = response_text['description']
171                error_msg = (_("%(error_msg)s Error description:"
172                               " %(error_description)s") %
173                             {'error_msg': error_msg,
174                              'error_description': error_description
175                              })
176 else:
177 error_msg = _("Access forbidden: You don't have"
178 " sufficient privileges to perform this"
179 " operation")
180
181 elif response.status_code == http_client.NOT_FOUND:
182 error_msg = "Requested resource not found"
183 elif response.status_code == http_client.METHOD_NOT_ALLOWED:
184 error_msg = six.text_type(response.text)
185 elif response.status_code == http_client.SERVICE_UNAVAILABLE:
186 error_msg = ""
187 error_details = ""
188 error_description = ""
189
190 response_text = json_decode(response.text)
191
192 if 'code' in response_text:
193 errorCode = response_text['code']
194 error_msg = "Error " + six.text_type(errorCode)
195
196 if 'details' in response_text:
197 error_details = response_text['details']
198 error_msg = error_msg + ": " + error_details
199 elif 'description' in response_text:
200 error_description = response_text['description']
201 error_msg = error_msg + ": " + error_description
202 else:
203 error_msg = _("Service temporarily unavailable:"
204 " The server is temporarily unable to"
205 " service your request")
206 else:
207 error_msg = response.text
208 if isinstance(error_msg, six.text_type):
209 error_msg = error_msg.encode('utf-8')
210 raise CoprHdError(CoprHdError.HTTP_ERR,
211 (_("HTTP code: %(status_code)s"
212 ", %(reason)s"
213 " [%(error_msg)s]") % {
214 'status_code': six.text_type(
215 response.status_code),
216 'reason': six.text_type(
217 response.reason),
218 'error_msg': six.text_type(
219 error_msg)
220 }))
221 except (CoprHdError, socket.error, exceptions.SSLError,
222 exceptions.ConnectionError, exceptions.TooManyRedirects,
223 exceptions.Timeout) as e:
224 raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e))
225 # TODO(Ravi) : Either following exception should have proper message or
226 # IOError should just be combined with the above statement
227 except IOError as e:
228 raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e))
229
230
231def is_uri(name):
232 """Checks whether the name is a URI or not.
233
234 :param name: Name of the resource
235 :returns: True if name is URI, False otherwise
236 """
237 try:
238 (urn, prod, trailer) = name.split(':', 2)
239 return (urn == 'urn' and prod == PROD_NAME)
240 except Exception:
241 return False
242
243
244def format_json_object(obj):
245 """Formats JSON object to make it readable by proper indentation.
246
247 :param obj: JSON object
248 :returns: a string of formatted JSON object
249 """
250 return oslo_serialization.jsonutils.dumps(obj, sort_keys=True, indent=3)
251
252
253def get_parent_child_from_xpath(name):
254 """Returns the parent and child elements from XPath."""
255 if '/' in name:
256 (pname, label) = name.rsplit('/', 1)
257 else:
258 pname = None
259 label = name
260 return (pname, label)
261
262
263def to_bytes(in_str):
264 """Converts a size to bytes.
265
266 :param in_str: a number suffixed with a unit: {number}{unit}
267 units supported:
268 K, KB, k or kb - kilobytes
269 M, MB, m or mb - megabytes
270 G, GB, g or gb - gigabytes
271 T, TB, t or tb - terabytes
272 :returns: number of bytes
273              or None if the input is malformed
274 """
275 match = re.search('^([0-9]+)([a-zA-Z]{0,2})$', in_str)
276
277 if not match:
278 return None
279
280 unit = match.group(2).upper()
281 value = match.group(1)
282
283 size_count = int(value)
284 if unit in ['K', 'KB']:
285 multiplier = int(units.Ki)
286 elif unit in ['M', 'MB']:
287 multiplier = int(units.Mi)
288 elif unit in ['G', 'GB']:
289 multiplier = int(units.Gi)
290 elif unit in ['T', 'TB']:
291 multiplier = int(units.Ti)
292 elif unit == "":
293 return size_count
294 else:
295 return None
296
297 size_in_bytes = int(size_count * multiplier)
298 return size_in_bytes
299
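# Illustrative sketch (standalone): the size strings to_bytes above accepts —
# an integer with an optional unit suffix — and their byte values, using the
# same oslo.utils constants as the helper. The example values are invented.
from oslo_utils import units

examples = {
    '5': 5,              # no unit: returned as-is
    '5K': 5 * units.Ki,  # kilobytes
    '2GB': 2 * units.Gi,  # gigabytes
    '1T': 1 * units.Ti,  # terabytes
}
for text, expected in sorted(examples.items()):
    print(text, '->', expected)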
300
301def get_list(json_object, parent_node_name, child_node_name=None):
302 """Returns a list of values from child_node_name.
303
304    If child_node_name is not given, the list is retrieved from the parent node
305 """
306 if not json_object:
307 return []
308
309 return_list = []
310 if isinstance(json_object[parent_node_name], list):
311 for detail in json_object[parent_node_name]:
312 if child_node_name:
313 return_list.append(detail[child_node_name])
314 else:
315 return_list.append(detail)
316 else:
317 if child_node_name:
318 return_list.append(json_object[parent_node_name][child_node_name])
319 else:
320 return_list.append(json_object[parent_node_name])
321
322 return return_list
323
324
325def get_node_value(json_object, parent_node_name, child_node_name=None):
326    """Returns the value of the given child node.
327
328    If child_node_name is not given, the parent node's value is returned.
329
330    :returns: None if json_object or parent_node_name is not given,
331              or if child_node_name is not found under the parent node
332 """
333 if not json_object:
334 return None
335
336 if not parent_node_name:
337 return None
338
339 detail = json_object[parent_node_name]
340 if not child_node_name:
341 return detail
342
343 return_value = None
344
345 if child_node_name in detail:
346 return_value = detail[child_node_name]
347 else:
348 return_value = None
349
350 return return_value
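# Illustrative example, not part of the original module: looking up a nested
# value in a decoded task document (a missing parent node still raises
# KeyError; only a missing child returns None).
def _example_get_node_value():
    task = {'task': {'state': 'ready', 'op_id': '42'}}
    assert get_node_value(task, 'task', 'state') == 'ready'
    assert get_node_value(task, 'task', 'missing') is None
    assert get_node_value(None, 'task') is None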
351
352
353def format_err_msg_and_raise(operation_type, component,
354 error_message, error_code):
355    """Formats the error message and raises a CoprHdError.
356
357    :param operation_type: create, update, add, etc.
358    :param component: storagesystem, vpool, etc.
359    :param error_message: Detailed error message
360    :param error_code: Error code from the API call
361 """
362
363    formatted_err_msg = (_("Error: Failed to %(operation_type)s"
364 " %(component)s") %
365 {'operation_type': operation_type,
366 'component': component
367 })
368 if error_message.startswith("\"\'") and error_message.endswith("\'\""):
369 # stripping the first 2 and last 2 characters, which are quotes.
370 error_message = error_message[2:len(error_message) - 2]
371
372    formatted_err_msg = formatted_err_msg + "\nReason: " + error_message
373    raise CoprHdError(error_code, formatted_err_msg)
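# Illustrative example, not part of the original module: how a failed API
# call is typically wrapped. The call below raises a CoprHdError whose
# message combines the operation, the component and the reason.
def _example_format_err_msg_and_raise():
    format_err_msg_and_raise('create', 'volume',
                             'quota exceeded', CoprHdError.VALUE_ERR)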
374
375
376def search_by_tag(resource_search_uri, ipaddr, port):
377 """Fetches the list of resources with a given tag.
378
379 :param resource_search_uri: The tag based search uri
380 Example: '/block/volumes/search?tag=tagexample1'
381 :param ipaddr: IP address of CoprHD host
382 :param port: Port number
383 """
384    # check that the passed URI is a tag-based search URI
385 str_uri = six.text_type(resource_search_uri)
386 if 'search' in str_uri and '?tag=' in str_uri:
387        # Get the resources that carry the given tag
388
389 (s, h) = service_json_request(
390 ipaddr, port, "GET",
391 resource_search_uri, None)
392
393 o = json_decode(s)
394 if not o:
395 return None
396
397 resources = get_node_value(o, "resource")
398
399 resource_uris = []
400 for resource in resources:
401 resource_uris.append(resource["id"])
402 return resource_uris
403 else:
404 raise CoprHdError(CoprHdError.VALUE_ERR, (_("Search URI %s"
405 " is not in the expected"
406 " format, it should end"
407                                                     " with ?tag=<tag_name>")
408 % str_uri))
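# Illustrative example, not part of the original module: list the ids of all
# volumes carrying a given tag (host and port are placeholders; 4443 is the
# usual CoprHD API port).
def _example_search_by_tag():
    uris = search_by_tag('/block/volumes/search?tag=openstack-vol-1',
                         '192.168.0.10', 4443) or []
    for uri in uris:
        print(uri)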
409
410
411# Blocks the operation until the task completes, errors out, or times out
412def block_until_complete(component_type,
413 resource_uri,
414 task_id,
415 ipaddr,
416 port,
417 synctimeout=0):
418
419 if not synctimeout:
420 synctimeout = TASK_TIMEOUT
421 t = timeutils.StopWatch(duration=synctimeout)
422 t.start()
423 while not t.expired():
424 if component_type == 'block':
425 out = show_task_opid(task_id, ipaddr, port)
426 else:
427 out = get_task_by_resourceuri_and_taskId(
428 component_type, resource_uri, task_id, ipaddr, port)
429
430 if out:
431 if out["state"] == "ready":
432
433 # stop the timer and return
434 t.stop()
435 break
436
437 # if the status of the task is 'error' then stop the timer
438 # and raise exception
439 if out["state"] == "error":
440 # stop the timer
441 t.stop()
442 error_message = "Please see logs for more details"
443 if ("service_error" in out and
444 "details" in out["service_error"]):
445 error_message = out["service_error"]["details"]
446 raise CoprHdError(CoprHdError.VALUE_ERR,
447 (_("Task: %(task_id)s"
448 " is failed with"
449 " error: %(error_message)s") %
450 {'task_id': task_id,
451 'error_message': error_message
452 }))
453
454 else:
455 raise CoprHdError(CoprHdError.TIME_OUT,
456 (_("Task did not complete in %d secs."
457 " Operation timed out. Task in CoprHD"
458 " will continue") % synctimeout))
459
460 return
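# Illustrative example, not part of the original module: a caller starts an
# asynchronous operation through the REST API and then polls the returned
# task with block_until_complete() (all values below are placeholders).
def _example_block_until_complete():
    block_until_complete('block', 'urn:storageos:Volume:1234:vdc1',
                         'task-op-id-1', '192.168.0.10', 4443,
                         synctimeout=60)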
461
462
463def show_task_opid(taskid, ipaddr, port):
464 (s, h) = service_json_request(
465 ipaddr, port,
466 "GET",
467 URI_TASKS_BY_OPID.format(taskid),
468 None)
469 if (not s):
470