From 285fbc6447a2557257c2c5a6b3a54d7c3e2235c2 Mon Sep 17 00:00:00 2001 From: "Jay S. Bryant" Date: Thu, 9 Aug 2018 16:02:36 -0500 Subject: [PATCH] Remove the CoprHD driver The CoprHD driver was marked unsupported in Rocky and the vendor has indicated that the driver is deprecated. Given the feedback I am removing the driver. Change-Id: I978315402edeb0c5dd6aee28315973fa502b0e20 --- api-ref/source/v3/index.rst | 2 +- cinder/opts.py | 6 - .../tests/unit/volume/drivers/test_coprhd.py | 981 ----------- cinder/volume/drivers/coprhd/__init__.py | 0 cinder/volume/drivers/coprhd/common.py | 1512 ----------------- cinder/volume/drivers/coprhd/fc.py | 272 --- .../volume/drivers/coprhd/helpers/__init__.py | 0 .../drivers/coprhd/helpers/authentication.py | 220 --- .../drivers/coprhd/helpers/commoncoprhdapi.py | 523 ------ .../coprhd/helpers/consistencygroup.py | 220 --- .../drivers/coprhd/helpers/exportgroup.py | 303 ---- cinder/volume/drivers/coprhd/helpers/host.py | 93 - .../volume/drivers/coprhd/helpers/project.py | 88 - .../volume/drivers/coprhd/helpers/snapshot.py | 257 --- cinder/volume/drivers/coprhd/helpers/tag.py | 55 - .../volume/drivers/coprhd/helpers/tenant.py | 117 -- .../drivers/coprhd/helpers/urihelper.py | 82 - .../drivers/coprhd/helpers/virtualarray.py | 79 - .../drivers/coprhd/helpers/virtualpool.py | 77 - .../volume/drivers/coprhd/helpers/volume.py | 517 ------ cinder/volume/drivers/coprhd/iscsi.py | 226 --- cinder/volume/drivers/coprhd/scaleio.py | 375 ---- .../block-storage/drivers/coprhd-driver.rst | 322 ---- .../block-storage/volume-drivers.rst | 1 - doc/source/reference/support-matrix.ini | 12 - doc/source/reference/support-matrix.rst | 10 + ...hd-remove-the-driver-00ef2c41f4c7dccd.yaml | 12 + 27 files changed, 23 insertions(+), 6339 deletions(-) delete mode 100644 cinder/tests/unit/volume/drivers/test_coprhd.py delete mode 100644 cinder/volume/drivers/coprhd/__init__.py delete mode 100644 cinder/volume/drivers/coprhd/common.py delete mode 100644 
cinder/volume/drivers/coprhd/fc.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/__init__.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/authentication.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/consistencygroup.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/exportgroup.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/host.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/project.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/snapshot.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/tag.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/tenant.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/urihelper.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/virtualarray.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/virtualpool.py delete mode 100644 cinder/volume/drivers/coprhd/helpers/volume.py delete mode 100644 cinder/volume/drivers/coprhd/iscsi.py delete mode 100644 cinder/volume/drivers/coprhd/scaleio.py delete mode 100644 doc/source/configuration/block-storage/drivers/coprhd-driver.rst create mode 100644 releasenotes/notes/coprhd-remove-the-driver-00ef2c41f4c7dccd.yaml diff --git a/api-ref/source/v3/index.rst b/api-ref/source/v3/index.rst index 9e37d421665..641f2411a72 100644 --- a/api-ref/source/v3/index.rst +++ b/api-ref/source/v3/index.rst @@ -54,4 +54,4 @@ Block Storage API V3 (CURRENT) .. include:: worker-cleanup.inc .. valid values for boolean parameters. -.. include:: valid-boolean-values.inc \ No newline at end of file +.. 
include:: valid-boolean-values.inc diff --git a/cinder/opts.py b/cinder/opts.py index 3671e517b81..2708a41fbee 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -71,10 +71,6 @@ from cinder import ssh_utils as cinder_sshutils from cinder.transfer import api as cinder_transfer_api from cinder.volume import api as cinder_volume_api from cinder.volume import driver as cinder_volume_driver -from cinder.volume.drivers.coprhd import common as \ - cinder_volume_drivers_coprhd_common -from cinder.volume.drivers.coprhd import scaleio as \ - cinder_volume_drivers_coprhd_scaleio from cinder.volume.drivers.datacore import driver as \ cinder_volume_drivers_datacore_driver from cinder.volume.drivers.datacore import iscsi as \ @@ -285,8 +281,6 @@ def list_opts(): cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, cinder_volume_driver.nvmet_opts, - cinder_volume_drivers_coprhd_common.volume_opts, - cinder_volume_drivers_coprhd_scaleio.scaleio_opts, cinder_volume_drivers_datera_dateraiscsi.d_opts, cinder_volume_drivers_dell_emc_ps.eqlx_opts, cinder_volume_drivers_dell_emc_sc_storagecentercommon. diff --git a/cinder/tests/unit/volume/drivers/test_coprhd.py b/cinder/tests/unit/volume/drivers/test_coprhd.py deleted file mode 100644 index 83df077a95f..00000000000 --- a/cinder/tests/unit/volume/drivers/test_coprhd.py +++ /dev/null @@ -1,981 +0,0 @@ -# Copyright (c) 2012 - 2016 EMC Corporation, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from cinder import context -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume.drivers.coprhd import fc as coprhd_fc -from cinder.volume.drivers.coprhd import iscsi as coprhd_iscsi -from cinder.volume.drivers.coprhd import scaleio as coprhd_scaleio -from cinder.volume import volume_types - -""" -Test Data required for mocking -""" -export_group_details_data = { - "inactive": False, - "initiators": [{"creation_time": 1392194176020, - "host": {"id": "urn:storageos:Host:3e21edff-8662-4e60-ab5", - "link": {"href": "/compute/hosts/urn:storageos:H", - "rel": "self"}}, - "hostname": "lglw7134", - "id": "urn:storageos:Initiator:13945431-06b7-44a0-838c-50", - "inactive": False, - "initiator_node": "20:00:00:90:FA:13:81:8D", - "initiator_port": "iqn.1993-08.org.deb:01:222", - "link": {"href": "/compute/initiators/urn:storageos:Initi", - "rel": "self"}, - "protocol": "iSCSI", - "registration_status": "REGISTERED", - "tags": []}], - "name": "ccgroup", - "project": 'project', - "tags": [], - "tenant": 'tenant', - "type": "Host", - "varray": {"id": "urn:storageos:VirtualArray:5af376e9-ce2f-493d-9079-a872", - "link": {"href": "/vdc/varrays/urn:storageos:VirtualArray:5af3", - "rel": "self"} - }, - "volumes": [{"id": "urn:storageos:Volume:6dc64865-bb25-431c-b321-ac268f16" - "a7ae:vdc1", - "lun": 1 - }] -} - -varray_detail_data = {"name": "varray"} - -export_group_list = ["urn:storageos:ExportGroup:2dbce233-7da0-47cb-8ff3-68f48"] - -iscsi_itl_list = {"itl": [{"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "iqn.1993-08.org.deb:01:222"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - 
"href": "/block/volumes/urn:s"}, - "wwn": "600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "50:00:09:73:00:18:95:19", - 'ip_address': "10.10.10.10", - 'tcp_port': '22'}}, - {"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "iqn.1993-08.org.deb:01:222"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - "href": "/block/volumes/urn:s"}, - "wwn": "600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "50:00:09:73:00:18:95:19", - 'ip_address': "10.10.10.10", - 'tcp_port': '22'}}]} - -fcitl_itl_list = {"itl": [{"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "12:34:56:78:90:12:34:56"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - "href": "/block/volumes/urn:s"}, - "wwn": "600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "12:34:56:78:90:12:34:56", - 'ip_address': "10.10.10.10", - 'tcp_port': '22'}}, - {"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "12:34:56:78:90:12:34:56"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - "href": "/block/volumes/urn:s"}, - "wwn": 
"600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "12:34:56:78:90:12:34:56", - 'ip_address': "10.10.10.10", - 'tcp_port': '22'}}]} - -scaleio_itl_list = {"itl": [{"hlu": -1, - "initiator": {"id": - "urn:storageos:Initiator:920aee", - "link": {"rel": "self", - "href": - "/compute/initiators"}, - "port": "bfdf432500000004"}, - "export": {"id": - "urn:storageos:ExportGroup:5449235", - "name": "10.108.225.109", - "link": {"rel": "self", - "href": - "/block/exports/urn:stor"}}, - "device": {"id": - "urn:storageos:Volume:b3624a83-3eb", - "link": {"rel": "self", - "href": "/block/volume"}, - "wwn": - "4F48CC4C27A43248092128B400000004"}, - "target": {}}, - {"hlu": -1, - "initiator": {"id": - "urn:storageos:Initiator:920aee", - "link": {"rel": "self", - "href": - "/compute/initiators/"}, - "port": "bfdf432500000004"}, - "export": {"id": - "urn:storageos:ExportGroup:5449235", - "name": "10.108.225.109", - "link": {"rel": "self", - "href": - "/block/exports/urn:stor"}}, - "device": {"id": - "urn:storageos:Volume:c014e96a-557", - "link": {"rel": "self", - "href": - "/block/volumes/urn:stor"}, - "wwn": - "4F48CC4C27A43248092129320000000E"}, - "target": {}}]} - - -class test_volume_data(object): - name = 'test-vol1' - size = 1 - volume_name = 'test-vol1' - id = fake.VOLUME_ID - group_id = None - provider_auth = None - project_id = fake.PROJECT_ID - display_name = 'test-vol1' - display_description = 'test volume', - volume_type_id = None - provider_id = fake.PROVIDER_ID - - def __init__(self, volume_type_id): - self.volume_type_id = volume_type_id - - -class source_test_volume_data(object): - name = 'source_test-vol1' - size = 1 - volume_name = 'source_test-vol1' - id = fake.VOLUME2_ID - group_id = None - provider_auth = None - project_id = fake.PROJECT_ID - display_name = 'source_test-vol1' - display_description = 'test volume' - volume_type_id = None - - def __init__(self, 
volume_type_id): - self.volume_type_id = volume_type_id - - -class test_clone_volume_data(object): - name = 'clone-test-vol1' - size = 1 - volume_name = 'clone-test-vol1' - id = fake.VOLUME3_ID - provider_auth = None - project_id = fake.PROJECT_ID - display_name = 'clone-test-vol1' - display_description = 'clone test volume' - volume_type_id = None - - def __init__(self, volume_type_id): - self.volume_type_id = volume_type_id - - -class test_snapshot_data(object): - name = 'snapshot1' - display_name = 'snapshot1' - size = 1 - id = fake.SNAPSHOT_ID - volume_name = 'test-vol1' - volume_id = fake.VOLUME_ID - volume = None - volume_size = 1 - project_id = fake.PROJECT_ID - status = fields.SnapshotStatus.AVAILABLE - - def __init__(self, src_volume): - self.volume = src_volume - - -def get_connector_data(): - connector = {'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.deb:01:222', - 'wwpns': ["1234567890123456", "1234567890543211"], - 'wwnns': ["223456789012345", "223456789054321"], - 'host': 'fakehost'} - return connector - - -class test_group_data(object): - name = 'group_name' - display_name = 'group_name' - id = fake.GROUP_ID - volume_type_ids = None - volume_types = None - group_type_id = None - status = fields.GroupStatus.AVAILABLE - - def __init__(self, volume_types, group_type_id): - self.group_type_id = group_type_id - self.volume_types = volume_types - - -class test_group_type_data(object): - name = 'group_name' - display_name = 'group_name' - groupsnapshot_id = None - id = fake.GROUP_TYPE_ID - description = 'group' - - -class test_group_snap_data(object): - name = 'cg_snap_name' - display_name = 'cg_snap_name' - id = fake.GROUP_SNAPSHOT_ID - group_id = fake.GROUP_ID - status = fields.GroupStatus.AVAILABLE - snapshots = [] - group = None - group_type_id = None - - def __init__(self, volume_types, group_type_id): - self.group_type_id = group_type_id - self.group = test_group_data(volume_types, group_type_id) - - -class 
MockedEMCCoprHDDriverCommon(coprhd_common.EMCCoprHDDriverCommon): - - def __init__(self, protocol, default_backend_name, - configuration=None): - - super(MockedEMCCoprHDDriverCommon, self).__init__( - protocol, default_backend_name, configuration) - - def authenticate_user(self): - pass - - def get_exports_count_by_initiators(self, initiator_ports): - return 0 - - def _get_coprhd_volume_name(self, vol, verbose=False): - if verbose is True: - return {'volume_name': "coprhd_vol_name", - 'volume_uri': "coprhd_vol_uri"} - else: - return "coprhd_vol_name" - - def _get_coprhd_snapshot_name(self, snapshot, resUri): - return "coprhd_snapshot_name" - - def _get_coprhd_cgid(self, cgid): - return "cg_uri" - - def init_volume_api(self): - self.volume_api = mock.Mock() - self.volume_api.get.return_value = { - 'name': 'source_test-vol1', - 'size': 1, - 'volume_name': 'source_test-vol1', - 'id': fake.VOLUME_ID, - 'group_id': fake.GROUP_ID, - 'provider_auth': None, - 'project_id': fake.PROJECT_ID, - 'display_name': 'source_test-vol1', - 'display_description': 'test volume', - 'volume_type_id': fake.VOLUME_TYPE_ID} - - def init_coprhd_api_components(self): - self.volume_obj = mock.Mock() - self.volume_obj.create.return_value = "volume_created" - self.volume_obj.volume_query.return_value = "volume_uri" - self.volume_obj.get_storageAttributes.return_value = ( - 'block', 'volume_name') - self.volume_obj.storage_resource_query.return_value = "volume_uri" - self.volume_obj.is_volume_detachable.return_value = False - self.volume_obj.volume_clone_detach.return_value = 'detached' - self.volume_obj.getTags.return_value = ( - ["Openstack-vol", "Openstack-vol1"]) - self.volume_obj.tag.return_value = "tagged" - self.volume_obj.clone.return_value = "volume-cloned" - - if(self.protocol == "iSCSI"): - self.volume_obj.get_exports_by_uri.return_value = ( - iscsi_itl_list) - elif(self.protocol == "FC"): - self.volume_obj.get_exports_by_uri.return_value = ( - fcitl_itl_list) - else: - 
self.volume_obj.get_exports_by_uri.return_value = ( - scaleio_itl_list) - - self.volume_obj.list_volumes.return_value = [] - self.volume_obj.show.return_value = {"id": "vol_id"} - self.volume_obj.expand.return_value = "expanded" - - self.tag_obj = mock.Mock() - self.tag_obj.list_tags.return_value = [ - "Openstack-vol", "Openstack-vol1"] - self.tag_obj.tag_resource.return_value = "Tagged" - - self.exportgroup_obj = mock.Mock() - self.exportgroup_obj.exportgroup_list.return_value = ( - export_group_list) - self.exportgroup_obj.exportgroup_show.return_value = ( - export_group_details_data) - - self.exportgroup_obj.exportgroup_add_volumes.return_value = ( - "volume-added") - - self.host_obj = mock.Mock() - self.host_obj.list_by_tenant.return_value = [] - self.host_obj.list_all.return_value = [{'id': "host1_id", - 'name': "host1"}] - self.host_obj.list_initiators.return_value = [ - {'name': "12:34:56:78:90:12:34:56"}, - {'name': "12:34:56:78:90:54:32:11"}, - {'name': "bfdf432500000004"}] - - self.hostinitiator_obj = mock.Mock() - self.varray_obj = mock.Mock() - self.varray_obj.varray_show.return_value = varray_detail_data - - self.snapshot_obj = mock.Mock() - mocked_snap_obj = self.snapshot_obj.return_value - mocked_snap_obj.storageResource_query.return_value = ( - "resourceUri") - mocked_snap_obj.snapshot_create.return_value = ( - "snapshot_created") - mocked_snap_obj.snapshot_query.return_value = "snapshot_uri" - - self.consistencygroup_obj = mock.Mock() - mocked_group_object = self.consistencygroup_obj.return_value - mocked_group_object.create.return_value = "CG-Created" - mocked_group_object.consistencygroup_query.return_value = "CG-uri" - - -class EMCCoprHDISCSIDriverTest(test.TestCase): - - def setUp(self): - super(EMCCoprHDISCSIDriverTest, self).setUp() - self.create_coprhd_setup() - - def create_coprhd_setup(self): - - self.configuration = mock.Mock() - self.configuration.coprhd_hostname = "10.10.10.10" - self.configuration.coprhd_port = "4443" - 
self.configuration.volume_backend_name = "EMCCoprHDISCSIDriver" - self.configuration.coprhd_username = "user-name" - self.configuration.coprhd_password = "password" - self.configuration.coprhd_tenant = "tenant" - self.configuration.coprhd_project = "project" - self.configuration.coprhd_varray = "varray" - self.configuration.coprhd_emulate_snapshot = False - - self.volume_type = self.create_coprhd_volume_type() - self.volume_type_id = self.volume_type.id - self.group_type = test_group_type_data() - self.group_type_id = self.group_type.id - - self.mock_object(coprhd_iscsi.EMCCoprHDISCSIDriver, - '_get_common_driver', - self._get_mocked_common_driver) - self.driver = coprhd_iscsi.EMCCoprHDISCSIDriver( - configuration=self.configuration) - - def tearDown(self): - self._cleanUp() - super(EMCCoprHDISCSIDriverTest, self).tearDown() - - def _cleanUp(self): - self.delete_vipr_volume_type() - - def create_coprhd_volume_type(self): - ctx = context.get_admin_context() - vipr_volume_type = volume_types.create(ctx, - "coprhd-volume-type", - {'CoprHD:VPOOL': - 'vpool_coprhd'}) - return vipr_volume_type - - def _get_mocked_common_driver(self): - return MockedEMCCoprHDDriverCommon( - protocol="iSCSI", - default_backend_name="EMCViPRISCSIDriver", - configuration=self.configuration) - - def delete_vipr_volume_type(self): - ctx = context.get_admin_context() - volume_types.destroy(ctx, self.volume_type_id) - - def test_create_destroy(self): - volume = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume) - self.driver.delete_volume(volume) - - def test_get_volume_stats(self): - vol_stats = self.driver.get_volume_stats(True) - self.assertEqual('unknown', vol_stats['free_capacity_gb']) - - def test_create_volume_clone(self): - src_volume_data = test_volume_data(self.volume_type_id) - clone_volume_data = test_clone_volume_data(self.volume_type_id) - self.driver.create_volume(src_volume_data) - self.driver.create_cloned_volume(clone_volume_data, src_volume_data) - 
self.driver.delete_volume(src_volume_data) - self.driver.delete_volume(clone_volume_data) - - def test_create_destroy_snapshot(self): - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data( - source_test_volume_data(self.volume_type_id)) - - self.driver.create_volume(volume_data) - self.driver.create_snapshot(snapshot_data) - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(volume_data) - - def test_create_volume_from_snapshot(self): - - src_vol_data = source_test_volume_data(self.volume_type_id) - self.driver.create_volume(src_vol_data) - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data(src_vol_data) - - self.driver.create_snapshot(snapshot_data) - self.driver.create_volume_from_snapshot(volume_data, snapshot_data) - - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(src_vol_data) - self.driver.delete_volume(volume_data) - - def test_extend_volume(self): - volume_data = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume_data) - self.driver.extend_volume(volume_data, 2) - self.driver.delete_volume(volume_data) - - def test_initialize_and_terminate_connection(self): - connector_data = get_connector_data() - volume_data = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume_data) - res_initialize = self.driver.initialize_connection( - volume_data, connector_data) - expected_initialize = {'driver_volume_type': 'iscsi', - 'data': {'target_lun': 3, - 'target_portal': '10.10.10.10:22', - 'target_iqn': - '50:00:09:73:00:18:95:19', - 'target_discovered': False, - 'volume_id': fake.VOLUME_ID}} - self.assertEqual( - expected_initialize, res_initialize, 'Unexpected return data') - - self.driver.terminate_connection(volume_data, connector_data) - self.driver.delete_volume(volume_data) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_empty_group(self, cg_ss_enabled): - 
cg_ss_enabled.side_effect = [True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [])) - self.assertEqual([], volumes_model_update, 'Unexpected return data') - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_update_delete_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True, True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - - volume = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume) - - model_update, ret1, ret2 = ( - self.driver.update_group(ctx, group_data, [volume], [])) - - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [volume])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}], - volumes_model_update) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_group_snap(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_snap_data = test_group_snap_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - - model_update, snapshots_model_update = ( - self.driver.create_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({}, model_update, 'Unexpected return data') - self.assertEqual([], snapshots_model_update, 'Unexpected return 
data') - - -class EMCCoprHDFCDriverTest(test.TestCase): - - def setUp(self): - super(EMCCoprHDFCDriverTest, self).setUp() - self.create_coprhd_setup() - - def create_coprhd_setup(self): - - self.configuration = mock.Mock() - self.configuration.coprhd_hostname = "10.10.10.10" - self.configuration.coprhd_port = "4443" - self.configuration.volume_backend_name = "EMCCoprHDFCDriver" - self.configuration.coprhd_username = "user-name" - self.configuration.coprhd_password = "password" - self.configuration.coprhd_tenant = "tenant" - self.configuration.coprhd_project = "project" - self.configuration.coprhd_varray = "varray" - self.configuration.coprhd_emulate_snapshot = False - - self.volume_type = self.create_coprhd_volume_type() - self.volume_type_id = self.volume_type.id - self.group_type = test_group_type_data() - self.group_type_id = self.group_type.id - - self.mock_object(coprhd_fc.EMCCoprHDFCDriver, - '_get_common_driver', - self._get_mocked_common_driver) - self.driver = coprhd_fc.EMCCoprHDFCDriver( - configuration=self.configuration) - - def tearDown(self): - self._cleanUp() - super(EMCCoprHDFCDriverTest, self).tearDown() - - def _cleanUp(self): - self.delete_vipr_volume_type() - - def create_coprhd_volume_type(self): - ctx = context.get_admin_context() - vipr_volume_type = volume_types.create(ctx, - "coprhd-volume-type", - {'CoprHD:VPOOL': 'vpool_vipr'}) - return vipr_volume_type - - def _get_mocked_common_driver(self): - return MockedEMCCoprHDDriverCommon( - protocol="FC", - default_backend_name="EMCViPRFCDriver", - configuration=self.configuration) - - def delete_vipr_volume_type(self): - ctx = context.get_admin_context() - volume_types.destroy(ctx, self.volume_type_id) - - def test_create_destroy(self): - volume = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume) - self.driver.delete_volume(volume) - - def test_get_volume_stats(self): - vol_stats = self.driver.get_volume_stats(True) - self.assertEqual('unknown', 
vol_stats['free_capacity_gb']) - - def test_create_volume_clone(self): - - src_volume_data = test_volume_data(self.volume_type_id) - clone_volume_data = test_clone_volume_data(self.volume_type_id) - self.driver.create_volume(src_volume_data) - self.driver.create_cloned_volume(clone_volume_data, src_volume_data) - self.driver.delete_volume(src_volume_data) - self.driver.delete_volume(clone_volume_data) - - def test_create_destroy_snapshot(self): - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data( - source_test_volume_data(self.volume_type_id)) - - self.driver.create_volume(volume_data) - self.driver.create_snapshot(snapshot_data) - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(volume_data) - - def test_create_volume_from_snapshot(self): - src_vol_data = source_test_volume_data(self.volume_type_id) - self.driver.create_volume(src_vol_data) - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data(src_vol_data) - - self.driver.create_snapshot(snapshot_data) - self.driver.create_volume_from_snapshot(volume_data, snapshot_data) - - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(src_vol_data) - self.driver.delete_volume(volume_data) - - def test_extend_volume(self): - volume_data = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume_data) - self.driver.extend_volume(volume_data, 2) - self.driver.delete_volume(volume_data) - - def test_initialize_and_terminate_connection(self): - - connector_data = get_connector_data() - volume_data = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume_data) - res_initiatlize = self.driver.initialize_connection( - volume_data, connector_data) - expected_initialize = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': 3, - 'initiator_target_map': - {'1234567890543211': - ['1234567890123456', - '1234567890123456'], - '1234567890123456': - ['1234567890123456', - 
'1234567890123456']}, - 'target_wwn': ['1234567890123456', - '1234567890123456'], - 'target_discovered': False, - 'volume_id': fake.VOLUME_ID}} - self.assertEqual( - expected_initialize, res_initiatlize, 'Unexpected return data') - - res_terminate = self.driver.terminate_connection( - volume_data, connector_data) - expected_terminate = {'driver_volume_type': 'fibre_channel', - 'data': {'initiator_target_map': - {'1234567890543211': - ['1234567890123456', - '1234567890123456'], - '1234567890123456': - ['1234567890123456', - '1234567890123456']}, - 'target_wwn': ['1234567890123456', - '1234567890123456']}} - self.assertEqual( - expected_terminate, res_terminate, 'Unexpected return data') - - self.driver.delete_volume(volume_data) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_empty_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [])) - self.assertEqual([], volumes_model_update, 'Unexpected return data') - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_update_delete_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - - volume = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume) - - model_update, ret1, ret2 = ( - self.driver.update_group(ctx, group_data, [volume], [])) - - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [volume])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - 
self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}], - volumes_model_update) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_group_snap(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_snap_data = test_group_snap_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - - model_update, snapshots_model_update = ( - self.driver.create_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({}, model_update, 'Unexpected return data') - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - -class EMCCoprHDScaleIODriverTest(test.TestCase): - - def setUp(self): - super(EMCCoprHDScaleIODriverTest, self).setUp() - self.create_coprhd_setup() - - def create_coprhd_setup(self): - - self.configuration = mock.Mock() - self.configuration.coprhd_hostname = "10.10.10.10" - self.configuration.coprhd_port = "4443" - self.configuration.volume_backend_name = "EMCCoprHDFCDriver" - self.configuration.coprhd_username = "user-name" - self.configuration.coprhd_password = "password" - self.configuration.coprhd_tenant = "tenant" - self.configuration.coprhd_project = "project" - self.configuration.coprhd_varray = "varray" - self.configuration.coprhd_scaleio_rest_gateway_host = "10.10.10.11" - self.configuration.coprhd_scaleio_rest_gateway_port = 443 - self.configuration.coprhd_scaleio_rest_server_username = ( - "scaleio_username") - self.configuration.coprhd_scaleio_rest_server_password = ( - "scaleio_password") - self.configuration.scaleio_verify_server_certificate = False - self.configuration.scaleio_server_certificate_path = ( - "/etc/scaleio/certs") - - self.volume_type = self.create_coprhd_volume_type() - 
self.volume_type_id = self.volume_type.id - self.group_type = test_group_type_data() - self.group_type_id = self.group_type.id - - self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver, - '_get_common_driver', - self._get_mocked_common_driver) - self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver, - '_get_client_id', - self._get_client_id) - self.driver = coprhd_scaleio.EMCCoprHDScaleIODriver( - configuration=self.configuration) - - def tearDown(self): - self._cleanUp() - super(EMCCoprHDScaleIODriverTest, self).tearDown() - - def _cleanUp(self): - self.delete_vipr_volume_type() - - def create_coprhd_volume_type(self): - ctx = context.get_admin_context() - vipr_volume_type = volume_types.create(ctx, - "coprhd-volume-type", - {'CoprHD:VPOOL': 'vpool_vipr'}) - return vipr_volume_type - - def _get_mocked_common_driver(self): - return MockedEMCCoprHDDriverCommon( - protocol="scaleio", - default_backend_name="EMCCoprHDScaleIODriver", - configuration=self.configuration) - - def _get_client_id(self, server_ip, server_port, server_username, - server_password, sdc_ip): - return "bfdf432500000004" - - def delete_vipr_volume_type(self): - ctx = context.get_admin_context() - volume_types.destroy(ctx, self.volume_type_id) - - def test_create_destroy(self): - volume = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume) - self.driver.delete_volume(volume) - - def test_get_volume_stats(self): - vol_stats = self.driver.get_volume_stats(True) - self.assertEqual('unknown', vol_stats['free_capacity_gb']) - - def test_create_volume_clone(self): - - src_volume_data = test_volume_data(self.volume_type_id) - clone_volume_data = test_clone_volume_data(self.volume_type_id) - self.driver.create_volume(src_volume_data) - self.driver.create_cloned_volume(clone_volume_data, src_volume_data) - self.driver.delete_volume(src_volume_data) - self.driver.delete_volume(clone_volume_data) - - def test_create_destroy_snapshot(self): - - volume_data = 
test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data( - source_test_volume_data(self.volume_type_id)) - - self.driver.create_volume(volume_data) - self.driver.create_snapshot(snapshot_data) - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(volume_data) - - def test_create_volume_from_snapshot(self): - src_vol_data = source_test_volume_data(self.volume_type_id) - self.driver.create_volume(src_vol_data) - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data(src_vol_data) - - self.driver.create_snapshot(snapshot_data) - self.driver.create_volume_from_snapshot(volume_data, snapshot_data) - - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(src_vol_data) - self.driver.delete_volume(volume_data) - - def test_extend_volume(self): - volume_data = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume_data) - self.driver.extend_volume(volume_data, 2) - self.driver.delete_volume(volume_data) - - def test_initialize_and_terminate_connection(self): - - connector_data = get_connector_data() - volume_data = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume_data) - res_initiatlize = self.driver.initialize_connection( - volume_data, connector_data) - exp_name = res_initiatlize['data']['scaleIO_volname'] - expected_initialize = {'data': {'bandwidthLimit': None, - 'hostIP': '10.0.0.2', - 'iopsLimit': None, - 'scaleIO_volname': exp_name, - 'scaleIO_volume_id': fake.PROVIDER_ID, - 'serverIP': '10.10.10.11', - 'serverPassword': 'scaleio_password', - 'serverPort': 443, - 'serverToken': None, - 'serverUsername': 'scaleio_username'}, - 'driver_volume_type': 'scaleio'} - self.assertEqual( - expected_initialize, res_initiatlize, 'Unexpected return data') - - self.driver.terminate_connection( - volume_data, connector_data) - self.driver.delete_volume(volume_data) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def 
test_create_delete_empty_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [])) - self.assertEqual([], volumes_model_update, 'Unexpected return data') - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_update_delete_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True, True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - - volume = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume) - - model_update, ret1, ret2 = ( - self.driver.update_group(ctx, group_data, [volume], [])) - - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [volume])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}], - volumes_model_update) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_group_snap(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_snap_data = test_group_snap_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - - model_update, snapshots_model_update = ( - self.driver.create_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({}, model_update, 'Unexpected return data') - 
self.assertEqual([], snapshots_model_update, 'Unexpected return data') diff --git a/cinder/volume/drivers/coprhd/__init__.py b/cinder/volume/drivers/coprhd/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cinder/volume/drivers/coprhd/common.py b/cinder/volume/drivers/coprhd/common.py deleted file mode 100644 index 1c807276422..00000000000 --- a/cinder/volume/drivers/coprhd/common.py +++ /dev/null @@ -1,1512 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import base64 -import binascii -import random -import string - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume import configuration -from cinder.volume.drivers.coprhd.helpers import ( - authentication as coprhd_auth) -from cinder.volume.drivers.coprhd.helpers import ( - commoncoprhdapi as coprhd_utils) -from cinder.volume.drivers.coprhd.helpers import ( - consistencygroup as coprhd_cg) -from cinder.volume.drivers.coprhd.helpers import exportgroup as coprhd_eg -from cinder.volume.drivers.coprhd.helpers import host as coprhd_host -from cinder.volume.drivers.coprhd.helpers import snapshot as coprhd_snap -from cinder.volume.drivers.coprhd.helpers import tag as coprhd_tag - -from cinder.volume.drivers.coprhd.helpers import ( - virtualarray as coprhd_varray) -from cinder.volume.drivers.coprhd.helpers import volume as coprhd_vol -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -MAX_RETRIES = 10 -INTERVAL_10_SEC = 10 - -volume_opts = [ - cfg.StrOpt('coprhd_hostname', - default=None, - help='Hostname for the CoprHD Instance'), - cfg.PortOpt('coprhd_port', - default=4443, - help='Port for the CoprHD Instance'), - cfg.StrOpt('coprhd_username', - default=None, - help='Username for accessing the CoprHD Instance'), - cfg.StrOpt('coprhd_password', - default=None, - help='Password for accessing the CoprHD Instance', - secret=True), - cfg.StrOpt('coprhd_tenant', - default=None, - help='Tenant to utilize within the CoprHD Instance'), - cfg.StrOpt('coprhd_project', - default=None, - help='Project to utilize within the CoprHD Instance'), - cfg.StrOpt('coprhd_varray', - default=None, - help='Virtual Array to utilize within 
the CoprHD Instance'), - cfg.BoolOpt('coprhd_emulate_snapshot', - default=False, - help='True | False to indicate if the storage array ' - 'in CoprHD is VMAX or VPLEX') -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - -URI_VPOOL_VARRAY_CAPACITY = '/block/vpools/{0}/varrays/{1}/capacity' -URI_BLOCK_EXPORTS_FOR_INITIATORS = '/block/exports?initiators={0}' -EXPORT_RETRY_COUNT = 5 -MAX_DEFAULT_NAME_LENGTH = 128 -MAX_SNAPSHOT_NAME_LENGTH = 63 -MAX_CONSISTENCY_GROUP_NAME_LENGTH = 64 -MAX_SIO_LEN = 31 - - -def retry_wrapper(func): - def try_and_retry(*args, **kwargs): - retry = False - try: - return func(*args, **kwargs) - except coprhd_utils.CoprHdError as e: - # if we got an http error and - # the string contains 401 or if the string contains the word cookie - if (e.err_code == coprhd_utils.CoprHdError.HTTP_ERR and - (e.msg.find('401') != -1 or - e.msg.lower().find('cookie') != -1)): - retry = True - args[0].AUTHENTICATED = False - else: - exception_message = (_("\nCoprHD Exception: %(msg)s\n") % - {'msg': e.msg}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - except Exception as exc: - exception_message = (_("\nGeneral Exception: %(exec_info)s\n") % - {'exec_info': - encodeutils.exception_to_unicode(exc)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - if retry: - return func(*args, **kwargs) - - return try_and_retry - - -class EMCCoprHDDriverCommon(object): - - OPENSTACK_TAG = 'OpenStack' - - def __init__(self, protocol, default_backend_name, configuration=None): - self.AUTHENTICATED = False - self.protocol = protocol - self.configuration = configuration - self.configuration.append_config_values(volume_opts) - - self.init_coprhd_api_components() - - self.stats = {'driver_version': '3.0.0.0', - 'free_capacity_gb': 'unknown', - 'reserved_percentage': '0', - 'storage_protocol': protocol, - 
'total_capacity_gb': 'unknown', - 'vendor_name': 'CoprHD', - 'volume_backend_name': - self.configuration.volume_backend_name or - default_backend_name} - - def init_coprhd_api_components(self): - - coprhd_utils.AUTH_TOKEN = None - - # instantiate coprhd api objects for later use - self.volume_obj = coprhd_vol.Volume( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.exportgroup_obj = coprhd_eg.ExportGroup( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.host_obj = coprhd_host.Host( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.varray_obj = coprhd_varray.VirtualArray( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.snapshot_obj = coprhd_snap.Snapshot( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.consistencygroup_obj = coprhd_cg.ConsistencyGroup( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.tag_obj = coprhd_tag.Tag( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - def check_for_setup_error(self): - # validate all of the coprhd_* configuration values - if self.configuration.coprhd_hostname is None: - message = _("coprhd_hostname is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_port is None: - message = _("coprhd_port is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_username is None: - message = _("coprhd_username is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_password is None: - message = _("coprhd_password is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_tenant is None: - message = _("coprhd_tenant is not set in cinder 
configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_project is None: - message = _("coprhd_project is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_varray is None: - message = _("coprhd_varray is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - def authenticate_user(self): - # we should check to see if we are already authenticated before blindly - # doing it again - if self.AUTHENTICATED is False: - obj = coprhd_auth.Authentication( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - username = self.configuration.coprhd_username - password = self.configuration.coprhd_password - - coprhd_utils.AUTH_TOKEN = obj.authenticate_user(username, - password) - self.AUTHENTICATED = True - - def create_volume(self, vol, driver, truncate_name=False): - self.authenticate_user() - name = self._get_resource_name(vol, MAX_DEFAULT_NAME_LENGTH, - truncate_name) - size = int(vol.size) * units.Gi - - vpool = self._get_vpool(vol) - self.vpool = vpool['CoprHD:VPOOL'] - - try: - coprhd_cgid = None - try: - if vol.group_id: - if volume_utils.is_group_a_cg_snapshot_type(vol.group): - coprhd_cgid = self._get_coprhd_cgid(vol.group_id) - except KeyError: - coprhd_cgid = None - except AttributeError: - coprhd_cgid = None - - full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - ) - self.volume_obj.create(full_project_name, name, size, - self.configuration.coprhd_varray, - self.vpool, - # no longer specified in volume creation - sync=True, - # no longer specified in volume creation - consistencygroup=coprhd_cgid) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(name)s: create failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s creation failed" % name) - 
self._raise_or_log_exception( - e.err_code, coprhd_err_msg, log_err_msg) - - @retry_wrapper - def create_consistencygroup(self, context, group, truncate_name=False): - self.authenticate_user() - name = self._get_resource_name(group, - MAX_CONSISTENCY_GROUP_NAME_LENGTH, - truncate_name) - - try: - self.consistencygroup_obj.create( - name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - cg_uri = self.consistencygroup_obj.consistencygroup_query( - name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - self.set_tags_for_resource( - coprhd_cg.ConsistencyGroup.URI_CONSISTENCY_GROUP_TAGS, - cg_uri, group) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Consistency Group %(name)s:" - " create failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Consistency Group : %s creation failed" % - name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def update_consistencygroup(self, group, add_volumes, - remove_volumes): - self.authenticate_user() - model_update = {'status': fields.GroupStatus.AVAILABLE} - cg_uri = self._get_coprhd_cgid(group.id) - add_volnames = [] - remove_volnames = [] - - try: - if add_volumes: - for vol in add_volumes: - vol_name = self._get_coprhd_volume_name(vol) - add_volnames.append(vol_name) - - if remove_volumes: - for vol in remove_volumes: - vol_name = self._get_coprhd_volume_name(vol) - remove_volnames.append(vol_name) - - self.consistencygroup_obj.update( - cg_uri, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant, - add_volnames, remove_volnames, True) - - return model_update, None, None - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Consistency Group %(cg_uri)s:" - " update failed\n%(err)s") % - {'cg_uri': cg_uri, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Consistency Group : %s update failed" % - cg_uri) - self._raise_or_log_exception(e.err_code, 
coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_consistencygroup(self, context, group, volumes, - truncate_name=False): - self.authenticate_user() - name = self._get_resource_name(group, - MAX_CONSISTENCY_GROUP_NAME_LENGTH, - truncate_name) - volumes_model_update = [] - - try: - for vol in volumes: - try: - vol_name = self._get_coprhd_volume_name(vol) - full_project_name = "%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - - self.volume_obj.delete(full_project_name, vol_name, - sync=True, - force_delete=True) - - update_item = {'id': vol.id, - 'status': - fields.GroupStatus.DELETED} - volumes_model_update.append(update_item) - - except exception.VolumeBackendAPIException: - update_item = {'id': vol.id, - 'status': fields.ConsistencyGroupStatus. - ERROR_DELETING} - - volumes_model_update.append(update_item) - - LOG.exception("Failed to delete the volume %s of CG.", - vol.name) - - self.consistencygroup_obj.delete( - name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - model_update = {} - model_update['status'] = group.status - - return model_update, volumes_model_update - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Consistency Group %(name)s:" - " delete failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Consistency Group : %s deletion failed" % - name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def create_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False): - self.authenticate_user() - - snapshots_model_update = [] - cgsnapshot_name = self._get_resource_name(cgsnapshot, - MAX_SNAPSHOT_NAME_LENGTH, - truncate_name) - - cg_id = None - cg_group = None - - try: - cg_id = cgsnapshot.group_id - cg_group = cgsnapshot.group - except AttributeError: - pass - - cg_name = None - coprhd_cgid = None - - if cg_id: - coprhd_cgid = self._get_coprhd_cgid(cg_id) - cg_name = 
self._get_consistencygroup_name(cg_group) - - LOG.info('Start to create cgsnapshot for consistency group' - ': %(group_name)s', - {'group_name': cg_name}) - - try: - self.snapshot_obj.snapshot_create( - 'block', - 'consistency-groups', - coprhd_cgid, - cgsnapshot_name, - False, - True) - - for snapshot in snapshots: - vol_id_of_snap = snapshot.volume_id - - # Finding the volume in CoprHD for this volume id - tagname = "OpenStack:id:" + vol_id_of_snap - rslt = coprhd_utils.search_by_tag( - coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format( - tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if not rslt: - continue - - vol_uri = rslt[0] - - snapshots_of_volume = self.snapshot_obj.snapshot_list_uri( - 'block', - 'volumes', - vol_uri) - - for snapUri in snapshots_of_volume: - snapshot_obj = self.snapshot_obj.snapshot_show_uri( - 'block', - vol_uri, - snapUri['id']) - - if not coprhd_utils.get_node_value(snapshot_obj, - 'inactive'): - - # Creating snapshot for a consistency group. - # When we create a consistency group snapshot on - # coprhd then each snapshot of volume in the - # consistencygroup will be given a subscript. Ex if - # the snapshot name is cgsnap1 and lets say there are - # three vols(a,b,c) in CG. Then the names of snapshots - # of the volumes in cg on coprhd end will be like - # cgsnap1-1 cgsnap1-2 cgsnap1-3. So, we list the - # snapshots of the volume under consideration and then - # split the name using - from the ending as prefix - # and postfix. We compare the prefix to the cgsnapshot - # name and filter our the snapshots that correspond to - # the cgsnapshot - - if '-' in snapshot_obj['name']: - (prefix, postfix) = snapshot_obj[ - 'name'].rsplit('-', 1) - - if cgsnapshot_name == prefix: - self.set_tags_for_resource( - coprhd_snap.Snapshot. 
- URI_BLOCK_SNAPSHOTS_TAG, - snapUri['id'], - snapshot) - - elif cgsnapshot_name == snapshot_obj['name']: - self.set_tags_for_resource( - coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG, - snapUri['id'], - snapshot) - - snapshot['status'] = fields.SnapshotStatus.AVAILABLE - snapshots_model_update.append( - {'id': snapshot.id, 'status': - fields.SnapshotStatus.AVAILABLE}) - - model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - - return model_update, snapshots_model_update - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot for Consistency Group %(cg_name)s:" - " create failed\n%(err)s") % - {'cg_name': cg_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot %(name)s for Consistency" - " Group: %(cg_name)s creation failed" % - {'cg_name': cg_name, - 'name': cgsnapshot_name}) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False): - self.authenticate_user() - cgsnapshot_id = cgsnapshot.id - cgsnapshot_name = self._get_resource_name(cgsnapshot, - MAX_SNAPSHOT_NAME_LENGTH, - truncate_name) - - snapshots_model_update = [] - - cg_id = None - cg_group = None - - try: - cg_id = cgsnapshot.group_id - cg_group = cgsnapshot.group - except AttributeError: - pass - - coprhd_cgid = self._get_coprhd_cgid(cg_id) - cg_name = self._get_consistencygroup_name(cg_group) - - model_update = {} - LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: ' - '%(group_name)s', {'snap_name': cgsnapshot.name, - 'group_name': cg_name}) - - try: - uri = None - try: - uri = self.snapshot_obj.snapshot_query('block', - 'consistency-groups', - coprhd_cgid, - cgsnapshot_name + '-1') - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR: - uri = self.snapshot_obj.snapshot_query( - 'block', - 'consistency-groups', - coprhd_cgid, - cgsnapshot_name) - self.snapshot_obj.snapshot_delete_uri( - 
'block', - coprhd_cgid, - uri, - True, - 0) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, - 'status': fields.SnapshotStatus.DELETED}) - - return model_update, snapshots_model_update - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot %(cgsnapshot_id)s: for" - " Consistency Group %(cg_name)s: delete" - " failed\n%(err)s") % - {'cgsnapshot_id': cgsnapshot_id, - 'cg_name': cg_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot %(name)s for Consistency" - " Group: %(cg_name)s deletion failed" % - {'cg_name': cg_name, - 'name': cgsnapshot_name}) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def set_volume_tags(self, vol, exempt_tags=None, truncate_name=False): - if exempt_tags is None: - exempt_tags = [] - - self.authenticate_user() - name = self._get_resource_name(vol, - MAX_DEFAULT_NAME_LENGTH, - truncate_name) - full_project_name = ("%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - - vol_uri = self.volume_obj.volume_query(full_project_name, - name) - - self.set_tags_for_resource( - coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exempt_tags) - - @retry_wrapper - def set_tags_for_resource(self, uri, resource_id, resource, - exempt_tags=None): - if exempt_tags is None: - exempt_tags = [] - - self.authenticate_user() - - # first, get the current tags that start with the OPENSTACK_TAG - # eyecatcher - formattedUri = uri.format(resource_id) - remove_tags = [] - currentTags = self.tag_obj.list_tags(formattedUri) - for cTag in currentTags: - if cTag.startswith(self.OPENSTACK_TAG): - remove_tags.append(cTag) - - try: - if remove_tags: - self.tag_obj.tag_resource(uri, - resource_id, - None, - remove_tags) - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: - LOG.debug("CoprHdError adding the tag:\n %s", e.msg) - - # now add the tags for the resource - add_tags = [] - 
# put all the openstack resource properties into the CoprHD resource - - try: - for prop, value in vars(resource).items(): - try: - if prop in exempt_tags: - continue - - if prop.startswith("_"): - prop = prop.replace("_", '', 1) - - # don't put the status in, it's always the status before - # the current transaction - if ((not prop.startswith("status") and not - prop.startswith("obj_status") and - prop != "obj_volume") and value): - tag = ("%s:%s:%s" % - (self.OPENSTACK_TAG, prop, - six.text_type(value))) - - if len(tag) > 128: - tag = tag[0:128] - add_tags.append(tag) - except TypeError: - LOG.error( - "Error tagging the resource property %s", prop) - except TypeError: - LOG.error("Error tagging the resource properties") - - try: - self.tag_obj.tag_resource( - uri, - resource_id, - add_tags, - None) - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: - LOG.debug( - "Adding the tag failed. CoprHdError: %s", e.msg) - - return self.tag_obj.list_tags(formattedUri) - - @retry_wrapper - def create_cloned_volume(self, vol, src_vref, truncate_name=False): - """Creates a clone of the specified volume.""" - self.authenticate_user() - name = self._get_resource_name(vol, - MAX_DEFAULT_NAME_LENGTH, - truncate_name) - srcname = self._get_coprhd_volume_name(src_vref) - - try: - if src_vref.group_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Clone can't be taken individually on a volume" - " that is part of a Consistency Group")) - except KeyError as e: - pass - except AttributeError: - pass - try: - (storageres_type, - storageres_typename) = self.volume_obj.get_storageAttributes( - srcname, None, None) - - resource_id = self.volume_obj.storage_resource_query( - storageres_type, - srcname, - None, - None, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - self.volume_obj.clone( - name, - resource_id, - sync=True) - - full_project_name = "%s/%s" % ( - 
self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - - detachable = self.volume_obj.is_volume_detachable( - full_project_name, name) - LOG.debug("Is volume detachable : %s", detachable) - - # detach it from the source volume immediately after creation - if detachable: - self.volume_obj.volume_clone_detach( - "", full_project_name, name, True) - - except IndexError: - LOG.exception("Volume clone detach returned empty task list") - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(name)s: clone failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : {%s} clone failed" % name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - src_vol_size = 0 - dest_vol_size = 0 - - try: - src_vol_size = src_vref.size - except AttributeError: - src_vol_size = src_vref.volume_size - - try: - dest_vol_size = vol.size - except AttributeError: - dest_vol_size = vol.volume_size - - if dest_vol_size > src_vol_size: - size_in_bytes = coprhd_utils.to_bytes("%sG" % dest_vol_size) - try: - self.volume_obj.expand( - ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)), name, - size_in_bytes, - True) - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(volume_name)s: expand failed" - "\n%(err)s") % - {'volume_name': name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s expand failed" % name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def expand_volume(self, vol, new_size): - """expands the volume to new_size specified.""" - self.authenticate_user() - volume_name = self._get_coprhd_volume_name(vol) - size_in_bytes = coprhd_utils.to_bytes("%sG" % new_size) - - try: - self.volume_obj.expand( - ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)), volume_name, - size_in_bytes, - True) - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = 
(_("Volume %(volume_name)s:" - " expand failed\n%(err)s") % - {'volume_name': volume_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s expand failed" % - volume_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def create_volume_from_snapshot(self, snapshot, volume, - truncate_name=False): - """Creates volume from given snapshot ( snapshot clone to volume ).""" - self.authenticate_user() - - if self.configuration.coprhd_emulate_snapshot: - self.create_cloned_volume(volume, snapshot, truncate_name) - return - - try: - if snapshot.group_snapshot_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Volume cannot be created individually from a snapshot " - "that is part of a Consistency Group")) - except AttributeError: - pass - - src_snapshot_name = None - src_vol_ref = snapshot.volume - new_volume_name = self._get_resource_name(volume, - MAX_DEFAULT_NAME_LENGTH, - truncate_name) - - try: - coprhd_vol_info = self._get_coprhd_volume_name( - src_vol_ref, True) - src_snapshot_name = self._get_coprhd_snapshot_name( - snapshot, coprhd_vol_info['volume_uri']) - - (storageres_type, - storageres_typename) = self.volume_obj.get_storageAttributes( - coprhd_vol_info['volume_name'], None, src_snapshot_name) - - resource_id = self.volume_obj.storage_resource_query( - storageres_type, - coprhd_vol_info['volume_name'], - None, - src_snapshot_name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - self.volume_obj.clone( - new_volume_name, - resource_id, - sync=True) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot %(src_snapshot_name)s:" - " clone failed\n%(err)s") % - {'src_snapshot_name': src_snapshot_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot : %s clone failed" % - src_snapshot_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - if volume.size > snapshot.volume_size: - 
size_in_bytes = coprhd_utils.to_bytes("%sG" % volume.size) - - try: - self.volume_obj.expand( - ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)), - new_volume_name, size_in_bytes, True) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(volume_name)s: expand failed" - "\n%(err)s") % - {'volume_name': new_volume_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s expand failed" % - new_volume_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_volume(self, vol): - self.authenticate_user() - name = self._get_coprhd_volume_name(vol) - try: - full_project_name = ("%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - self.volume_obj.delete(full_project_name, name, sync=True) - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR: - LOG.info( - "Volume %s" - " no longer exists; volume deletion is" - " considered successful.", name) - else: - coprhd_err_msg = (_("Volume %(name)s: delete failed" - "\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s delete failed" % name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def create_snapshot(self, snapshot, truncate_name=False): - self.authenticate_user() - - volume = snapshot.volume - - try: - if volume.group_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Snapshot can't be taken individually on a volume" - " that is part of a Consistency Group")) - except KeyError: - LOG.info("No Consistency Group associated with the volume") - - if self.configuration.coprhd_emulate_snapshot: - self.create_cloned_volume(snapshot, volume, truncate_name) - self.set_volume_tags( - snapshot, ['_volume', '_obj_volume_type'], truncate_name) - return - - try: - snapshotname = self._get_resource_name(snapshot, - 
MAX_SNAPSHOT_NAME_LENGTH, - truncate_name) - vol = snapshot.volume - - volumename = self._get_coprhd_volume_name(vol) - projectname = self.configuration.coprhd_project - tenantname = self.configuration.coprhd_tenant - storageres_type = 'block' - storageres_typename = 'volumes' - resource_uri = self.snapshot_obj.storage_resource_query( - storageres_type, - volume_name=volumename, - cg_name=None, - project=projectname, - tenant=tenantname) - inactive = False - sync = True - self.snapshot_obj.snapshot_create( - storageres_type, - storageres_typename, - resource_uri, - snapshotname, - inactive, - sync) - - snapshot_uri = self.snapshot_obj.snapshot_query( - storageres_type, - storageres_typename, - resource_uri, - snapshotname) - - self.set_tags_for_resource( - coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG, - snapshot_uri, snapshot, ['_volume']) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot: %(snapshotname)s, create failed" - "\n%(err)s") % {'snapshotname': snapshotname, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot : %s create failed" % snapshotname) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_snapshot(self, snapshot): - self.authenticate_user() - - vol = snapshot.volume - - try: - if vol.group_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Snapshot delete can't be done individually on a volume" - " that is part of a Consistency Group")) - except KeyError: - LOG.info("No Consistency Group associated with the volume") - - if self.configuration.coprhd_emulate_snapshot: - self.delete_volume(snapshot) - return - - snapshotname = None - try: - volumename = self._get_coprhd_volume_name(vol) - projectname = self.configuration.coprhd_project - tenantname = self.configuration.coprhd_tenant - storageres_type = 'block' - storageres_typename = 'volumes' - resource_uri = self.snapshot_obj.storage_resource_query( - storageres_type, - 
volume_name=volumename, - cg_name=None, - project=projectname, - tenant=tenantname) - if resource_uri is None: - LOG.info( - "Snapshot %s" - " is not found; snapshot deletion" - " is considered successful.", snapshotname) - else: - snapshotname = self._get_coprhd_snapshot_name( - snapshot, resource_uri) - - self.snapshot_obj.snapshot_delete( - storageres_type, - storageres_typename, - resource_uri, - snapshotname, - sync=True) - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot %s : Delete Failed\n") % - snapshotname) - - log_err_msg = ("Snapshot : %s delete failed" % snapshotname) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def initialize_connection(self, volume, protocol, initiator_ports, - hostname): - - try: - self.authenticate_user() - volumename = self._get_coprhd_volume_name(volume) - foundgroupname = self._find_exportgroup(initiator_ports) - foundhostname = None - if foundgroupname is None: - for i in range(len(initiator_ports)): - # check if this initiator is contained in any CoprHD Host - # object - LOG.debug( - "checking for initiator port: %s", initiator_ports[i]) - foundhostname = self._find_host(initiator_ports[i]) - - if foundhostname: - LOG.info("Found host %s", foundhostname) - break - - if not foundhostname: - LOG.error("Auto host creation not supported") - # create an export group for this host - foundgroupname = foundhostname + 'SG' - # create a unique name - foundgroupname = foundgroupname + '-' + ''.join( - random.choice(string.ascii_uppercase + - string.digits) - for x in range(6)) - self.exportgroup_obj.exportgroup_create( - foundgroupname, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant, - self.configuration.coprhd_varray, - 'Host', - foundhostname) - - LOG.debug( - "adding the volume to the exportgroup : %s", volumename) - - self.exportgroup_obj.exportgroup_add_volumes( - True, - foundgroupname, - self.configuration.coprhd_tenant, - None, - None, 
- None, - self.configuration.coprhd_project, - [volumename], - None, - None) - - return self._find_device_info(volume, initiator_ports) - - except coprhd_utils.CoprHdError as e: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - (_("Attach volume (%(name)s) to host" - " (%(hostname)s) initiator (%(initiatorport)s)" - " failed:\n%(err)s") % - {'name': self._get_coprhd_volume_name( - volume), - 'hostname': hostname, - 'initiatorport': initiator_ports[0], - 'err': six.text_type(e.msg)}) - ) - - @retry_wrapper - def terminate_connection(self, volume, protocol, initiator_ports, - hostname): - try: - self.authenticate_user() - volumename = self._get_coprhd_volume_name(volume) - full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - voldetails = self.volume_obj.show(full_project_name, volumename) - volid = voldetails['id'] - - # find the exportgroups - exports = self.volume_obj.get_exports_by_uri(volid) - exportgroups = set() - itls = exports['itl'] - for itl in itls: - itl_port = itl['initiator']['port'] - if itl_port in initiator_ports: - exportgroups.add(itl['export']['id']) - - for exportgroup in exportgroups: - self.exportgroup_obj.exportgroup_remove_volumes_by_uri( - exportgroup, - volid, - True, - None, - None, - None, - None) - else: - LOG.info( - "No export group found for the host: %s" - "; this is considered already detached.", hostname) - - return itls - - except coprhd_utils.CoprHdError as e: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - (_("Detaching volume %(volumename)s from host" - " %(hostname)s failed: %(err)s") % - {'volumename': volumename, - 'hostname': hostname, - 'err': six.text_type(e.msg)}) - ) - - @retry_wrapper - def _find_device_info(self, volume, initiator_ports): - """Returns device_info in list of itls having the matched initiator. 
- - (there could be multiple targets, hence a list): - [ - { - "hlu":9, - "initiator":{...,"port":"20:00:00:25:B5:49:00:22"}, - "export":{...}, - "device":{...,"wwn":"600601602B802D00B62236585D0BE311"}, - "target":{...,"port":"50:06:01:6A:46:E0:72:EF"}, - "san_zone_name":"..." - }, - { - "hlu":9, - "initiator":{...,"port":"20:00:00:25:B5:49:00:22"}, - "export":{...}, - "device":{...,"wwn":"600601602B802D00B62236585D0BE311"}, - "target":{...,"port":"50:06:01:62:46:E0:72:EF"}, - "san_zone_name":"..." - } - ] - """ - volumename = self._get_coprhd_volume_name(volume) - full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - vol_uri = self.volume_obj.volume_query(full_project_name, volumename) - - # The itl info shall be available at the first try since now export is - # a synchronous call. We are trying a few more times to accommodate - # any delay on filling in the itl info after the export task is - # completed. - - itls = [] - for x in range(MAX_RETRIES): - exports = self.volume_obj.get_exports_by_uri(vol_uri) - LOG.debug("Volume exports: ") - LOG.info(vol_uri) - LOG.debug(exports) - for itl in exports['itl']: - itl_port = itl['initiator']['port'] - if itl_port in initiator_ports: - found_device_number = itl['hlu'] - if (found_device_number is not None and - found_device_number != '-1'): - # 0 is a valid number for found_device_number. - # Only loop if it is None or -1 - LOG.debug("Found Device Number: %s", - found_device_number) - itls.append(itl) - - if itls: - break - else: - LOG.debug("Device Number not found yet." 
- " Retrying after 10 seconds...") - eventlet.sleep(INTERVAL_10_SEC) - - if itls is None: - # No device number found after 10 tries; return an empty itl - LOG.info( - "No device number has been found after 10 tries; " - "this likely indicates an unsuccessful attach of " - "volume volumename=%(volumename)s to" - " initiator initiator_ports=%(initiator_ports)s", - {'volumename': volumename, - 'initiator_ports': initiator_ports}) - - return itls - - def _get_coprhd_cgid(self, cgid): - tagname = self.OPENSTACK_TAG + ":id:" + cgid - rslt = coprhd_utils.search_by_tag( - coprhd_cg.ConsistencyGroup.URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG. - format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - # if the result is empty, then search with the tagname as - # "OpenStack:obj_id" the openstack attribute for id can be obj_id - # instead of id. this depends on the version - if rslt is None or len(rslt) == 0: - tagname = self.OPENSTACK_TAG + ":obj_id:" + cgid - rslt = coprhd_utils.search_by_tag( - coprhd_cg.ConsistencyGroup - .URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG. 
- format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if len(rslt) > 0: - rslt_cg = self.consistencygroup_obj.show( - rslt[0], - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - return rslt_cg['id'] - else: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.NOT_FOUND_ERR, - (_("Consistency Group %s not found") % cgid)) - - def _get_consistencygroup_name(self, consisgrp): - return consisgrp.name - - def _get_coprhd_snapshot_name(self, snapshot, resUri): - tagname = self.OPENSTACK_TAG + ":id:" + snapshot['id'] - rslt = coprhd_utils.search_by_tag( - coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - # if the result is empty, then search with the tagname - # as "OpenStack:obj_id" - # as snapshots will be having the obj_id instead of just id. - if not rslt: - tagname = self.OPENSTACK_TAG + ":obj_id:" + snapshot['id'] - rslt = coprhd_utils.search_by_tag( - coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format( - tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if rslt is None or len(rslt) == 0: - return snapshot['name'] - else: - rslt_snap = self.snapshot_obj.snapshot_show_uri( - 'block', - resUri, - rslt[0]) - return rslt_snap['name'] - - def _get_coprhd_volume_name(self, vol, verbose=False): - tagname = self.OPENSTACK_TAG + ":id:" + vol.id - rslt = coprhd_utils.search_by_tag( - coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - # if the result is empty, then search with the tagname - # as "OpenStack:obj_id" - # as snapshots will be having the obj_id instead of just id. 
- if len(rslt) == 0: - tagname = self.OPENSTACK_TAG + ":obj_id:" + vol.id - rslt = coprhd_utils.search_by_tag( - coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if len(rslt) > 0: - rslt_vol = self.volume_obj.show_by_uri(rslt[0]) - - if verbose is True: - return {'volume_name': rslt_vol['name'], 'volume_uri': rslt[0]} - else: - return rslt_vol['name'] - else: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.NOT_FOUND_ERR, - (_("Volume %s not found") % vol['display_name'])) - - def _get_resource_name(self, resource, - max_name_cap=MAX_DEFAULT_NAME_LENGTH, - truncate_name=False): - # 36 refers to the length of UUID and +1 for '-' - permitted_name_length = max_name_cap - (36 + 1) - name = resource.display_name - if not name: - name = resource.name - - ''' - for scaleio, truncate_name will be true. We make sure the - total name is less than or equal to 31 characters. - _id_to_base64 will return a 24 character name''' - if truncate_name: - name = self._id_to_base64(resource.id) - return name - - elif len(name) > permitted_name_length: - ''' - The maximum length of resource name in CoprHD is 128. Hence we use - only first 91 characters of the resource name''' - return name[0:permitted_name_length] + "-" + resource.id - - else: - return name + "-" + resource.id - - def _get_vpool(self, volume): - vpool = {} - ctxt = context.get_admin_context() - type_id = volume.volume_type_id - if type_id is not None: - volume_type = volume_types.get_volume_type(ctxt, type_id) - specs = volume_type.get('extra_specs') - for key, value in specs.items(): - vpool[key] = value - - return vpool - - def _id_to_base64(self, id): - # Base64 encode the id to get a volume name less than 32 characters due - # to ScaleIO limitation. 
- name = six.text_type(id).replace("-", "") - try: - name = base64.b16decode(name.upper()) - except (TypeError, binascii.Error): - pass - encoded_name = name - if isinstance(encoded_name, six.text_type): - encoded_name = encoded_name.encode('utf-8') - encoded_name = base64.b64encode(encoded_name) - if six.PY3: - encoded_name = encoded_name.decode('ascii') - LOG.debug("Converted id %(id)s to scaleio name %(name)s.", - {'id': id, 'name': encoded_name}) - return encoded_name - - def _raise_or_log_exception(self, err_code, coprhd_err_msg, log_err_msg): - - if err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - coprhd_err_msg) - else: - with excutils.save_and_reraise_exception(): - LOG.exception(log_err_msg) - - @retry_wrapper - def _find_exportgroup(self, initiator_ports): - """Find export group with initiator ports same as given initiators.""" - foundgroupname = None - grouplist = self.exportgroup_obj.exportgroup_list( - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - for groupid in grouplist: - groupdetails = self.exportgroup_obj.exportgroup_show( - groupid, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - if groupdetails is not None: - if groupdetails['inactive']: - continue - initiators = groupdetails['initiators'] - if initiators is not None: - inits_eg = set() - for initiator in initiators: - inits_eg.add(initiator['initiator_port']) - - if inits_eg <= set(initiator_ports): - foundgroupname = groupdetails['name'] - if foundgroupname is not None: - # Check the associated varray - if groupdetails['varray']: - varray_uri = groupdetails['varray']['id'] - varray_details = self.varray_obj.varray_show( - varray_uri) - if varray_details['name'] == ( - self.configuration.coprhd_varray): - LOG.debug( - "Found exportgroup %s", - foundgroupname) - break - - # Not the right varray - foundgroupname = None - - return foundgroupname - - 
@retry_wrapper - def _find_host(self, initiator_port): - """Find the host, if exists, to which the given initiator belong.""" - foundhostname = None - hosts = self.host_obj.list_all(self.configuration.coprhd_tenant) - for host in hosts: - initiators = self.host_obj.list_initiators(host['id']) - for initiator in initiators: - if initiator_port == initiator['name']: - foundhostname = host['name'] - break - - if foundhostname is not None: - break - - return foundhostname - - @retry_wrapper - def get_exports_count_by_initiators(self, initiator_ports): - """Fetches ITL map for a given list of initiator ports.""" - comma_delimited_initiator_list = ",".join(initiator_ports) - (s, h) = coprhd_utils.service_json_request( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port, "GET", - URI_BLOCK_EXPORTS_FOR_INITIATORS.format( - comma_delimited_initiator_list), - None) - - export_itl_maps = coprhd_utils.json_decode(s) - - if export_itl_maps is None: - return 0 - - itls = export_itl_maps['itl'] - return itls.__len__() - - @retry_wrapper - def update_volume_stats(self): - """Retrieve stats info.""" - LOG.debug("Updating volume stats") - self.authenticate_user() - - try: - self.stats['consistencygroup_support'] = True - self.stats['consistent_group_snapshot_enabled'] = True - vols = self.volume_obj.list_volumes( - self.configuration.coprhd_tenant + - "/" + - self.configuration.coprhd_project) - - vpairs = set() - if len(vols) > 0: - for vol in vols: - if vol: - vpair = (vol["vpool"]["id"], vol["varray"]["id"]) - if vpair not in vpairs: - vpairs.add(vpair) - - if len(vpairs) > 0: - free_gb = 0.0 - used_gb = 0.0 - for vpair in vpairs: - if vpair: - (s, h) = coprhd_utils.service_json_request( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port, - "GET", - URI_VPOOL_VARRAY_CAPACITY.format(vpair[0], - vpair[1]), - body=None) - capacity = coprhd_utils.json_decode(s) - - free_gb += float(capacity["free_gb"]) - used_gb += float(capacity["used_gb"]) - - 
self.stats['free_capacity_gb'] = free_gb - self.stats['total_capacity_gb'] = free_gb + used_gb - self.stats['reserved_percentage'] = ( - self.configuration.reserved_percentage) - - return self.stats - - except coprhd_utils.CoprHdError: - with excutils.save_and_reraise_exception(): - LOG.exception("Update volume stats failed") - - @retry_wrapper - def retype(self, ctxt, volume, new_type, diff, host): - """changes the vpool type.""" - self.authenticate_user() - volume_name = self._get_coprhd_volume_name(volume) - vpool_name = new_type['extra_specs']['CoprHD:VPOOL'] - - try: - full_project_name = "%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - - task = self.volume_obj.update( - full_project_name, - volume_name, - vpool_name) - - self.volume_obj.check_for_sync(task['task'][0], True) - return True - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(volume_name)s: update failed" - "\n%(err)s") % {'volume_name': volume_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s type update failed" % - volume_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) diff --git a/cinder/volume/drivers/coprhd/fc.py b/cinder/volume/drivers/coprhd/fc.py deleted file mode 100644 index 3347a96525d..00000000000 --- a/cinder/volume/drivers/coprhd/fc.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Driver for EMC CoprHD FC volumes.""" - -import re - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume import utils as volume_utils - -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class EMCCoprHDFCDriver(driver.FibreChannelDriver): - """CoprHD FC Driver.""" - VERSION = "3.0.0.0" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "EMC_CoprHD_CI" - - # TODO(jsbryant) Remove driver in Stein if CI is not fixed - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(EMCCoprHDFCDriver, self).__init__(*args, **kwargs) - self.common = self._get_common_driver() - - def _get_common_driver(self): - return coprhd_common.EMCCoprHDDriverCommon( - protocol='FC', - default_backend_name=self.__class__.__name__, - configuration=self.configuration) - - def check_for_setup_error(self): - self.common.check_for_setup_error() - - def create_volume(self, volume): - """Creates a Volume.""" - self.common.create_volume(volume, self) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned Volume.""" - self.common.create_cloned_volume(volume, src_vref) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common.create_volume_from_snapshot(snapshot, volume) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def extend_volume(self, volume, new_size): - """expands the size of the volume.""" - self.common.expand_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - 
self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector=None): - """Driver entry point to get the export info for a new volume.""" - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - pass - - def create_group(self, context, group): - """Creates a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.create_consistencygroup(context, group) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates volumes in group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - message = _("create group from source is not supported " - "for CoprHD if the group type supports " - "consistent group snapshot.") - raise exception.VolumeBackendAPIException(data=message) - else: - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.delete_consistencygroup(context, group, volumes) - - # If the group is not consistency group snapshot enabled, then 
- # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.create_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.delete_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - - properties = {} - properties['volume_id'] = volume.id - properties['target_discovered'] = False - properties['target_wwn'] = [] - - init_ports = self._build_initport_list(connector) - itls = self.common.initialize_connection(volume, 'FC', init_ports, - connector['host']) - - target_wwns = None - initiator_target_map = None - - if itls: - properties['target_lun'] = itls[0]['hlu'] - target_wwns, initiator_target_map = ( - self._build_initiator_target_map(itls, connector)) - - properties['target_wwn'] = target_wwns - properties['initiator_target_map'] = initiator_target_map - - auth = None - try: - auth = volume.provider_auth - except AttributeError: - pass - - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - LOG.debug('FC properties: %s', 
properties) - conn_info = { - 'driver_volume_type': 'fibre_channel', - 'data': properties, - } - fczm_utils.add_fc_zone(conn_info) - return conn_info - - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to detach a volume from an instance.""" - - init_ports = self._build_initport_list(connector) - itls = self.common.terminate_connection(volume, 'FC', init_ports, - connector['host']) - - volumes_count = self.common.get_exports_count_by_initiators(init_ports) - if volumes_count > 0: - # return empty data - data = {'driver_volume_type': 'fibre_channel', 'data': {}} - else: - target_wwns, initiator_target_map = ( - self._build_initiator_target_map(itls, connector)) - data = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_wwn': target_wwns, - 'initiator_target_map': initiator_target_map}} - fczm_utils.remove_fc_zone(data) - - LOG.debug('Return FC data: %s', data) - return data - - def _build_initiator_target_map(self, itls, connector): - - target_wwns = [] - for itl in itls: - target_wwns.append(itl['target']['port'].replace(':', '').lower()) - - initiator_wwns = connector['wwpns'] - initiator_target_map = {} - for initiator in initiator_wwns: - initiator_target_map[initiator] = target_wwns - - return target_wwns, initiator_target_map - - def _build_initport_list(self, connector): - init_ports = [] - for i in range(len(connector['wwpns'])): - initiator_port = ':'.join(re.findall( - '..', - connector['wwpns'][i])).upper() # Add ":" every two digits - init_ports.append(initiator_port) - - return init_ports - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from virtual pool/virtual array.""" - LOG.debug("Updating volume stats") - self._stats = self.common.update_volume_stats() - - def retype(self, ctxt, volume, new_type, diff, host): - """Change the volume type.""" - return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/cinder/volume/drivers/coprhd/helpers/__init__.py b/cinder/volume/drivers/coprhd/helpers/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cinder/volume/drivers/coprhd/helpers/authentication.py b/cinder/volume/drivers/coprhd/helpers/authentication.py deleted file mode 100644 index c0d9f7c1b6f..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/authentication.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -try: - import cookielib as cookie_lib -except ImportError: - import http.cookiejar as cookie_lib -import socket - -import requests -from requests import exceptions -import six -from six.moves import http_client - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class Authentication(common.CoprHDResource): - - # Commonly used URIs for the 'Authentication' module - URI_SERVICES_BASE = '' - URI_AUTHENTICATION = '/login' - - HEADERS = {'Content-Type': 'application/json', - 'ACCEPT': 'application/json', 'X-EMC-REST-CLIENT': 'TRUE'} - - def authenticate_user(self, username, password): - """Makes REST API call to generate the authentication token. - - Authentication token is generated for the specified user after - validation - - :param username: Name of the user - :param password: Password for the user - :returns: The authtoken - """ - - SEC_REDIRECT = 302 - SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN' - LB_API_PORT = 4443 - # Port on which load-balancer/reverse-proxy listens to all incoming - # requests for CoprHD REST APIs - APISVC_PORT = 8443 # Port on which apisvc listens to incoming requests - - cookiejar = cookie_lib.LWPCookieJar() - - url = ('https://%(ip)s:%(port)d%(uri)s' % - {'ip': self.ipaddr, 'port': self.port, - 'uri': self.URI_AUTHENTICATION}) - - try: - if self.port == APISVC_PORT: - login_response = requests.get( - url, headers=self.HEADERS, verify=False, - auth=(username, password), cookies=cookiejar, - allow_redirects=False, timeout=common.TIMEOUT_SEC) - if login_response.status_code == SEC_REDIRECT: - location = login_response.headers['Location'] - if not location: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, (_("The redirect" - " location of the" - " authentication" - " service is not" - " provided"))) - # Make the second request - login_response = requests.get( - location, headers=self.HEADERS, verify=False, - cookies=cookiejar, allow_redirects=False, - 
timeout=common.TIMEOUT_SEC) - if (login_response.status_code != - http_client.UNAUTHORIZED): - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, (_("The" - " authentication" - " service failed" - " to reply with" - " 401"))) - - # Now provide the credentials - login_response = requests.get( - location, headers=self.HEADERS, - auth=(username, password), verify=False, - cookies=cookiejar, allow_redirects=False, - timeout=common.TIMEOUT_SEC) - if login_response.status_code != SEC_REDIRECT: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("Access forbidden: Authentication required"))) - location = login_response.headers['Location'] - if not location: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("The" - " authentication service failed to provide the" - " location of the service URI when redirecting" - " back"))) - authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER] - if not authtoken: - details_str = self.extract_error_detail(login_response) - raise common.CoprHdError(common.CoprHdError.HTTP_ERR, - (_("The token is not" - " generated by" - " authentication service." 
- "%s") % - details_str)) - # Make the final call to get the page with the token - new_headers = self.HEADERS - new_headers[SEC_AUTHTOKEN_HEADER] = authtoken - login_response = requests.get( - location, headers=new_headers, verify=False, - cookies=cookiejar, allow_redirects=False, - timeout=common.TIMEOUT_SEC) - if login_response.status_code != http_client.OK: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, (_( - "Login failure code: " - "%(statuscode)s Error: %(responsetext)s") % - {'statuscode': six.text_type( - login_response.status_code), - 'responsetext': login_response.text})) - elif self.port == LB_API_PORT: - login_response = requests.get( - url, headers=self.HEADERS, verify=False, - cookies=cookiejar, allow_redirects=False) - - if(login_response.status_code == - http_client.UNAUTHORIZED): - # Now provide the credentials - login_response = requests.get( - url, headers=self.HEADERS, auth=(username, password), - verify=False, cookies=cookiejar, allow_redirects=False) - authtoken = None - if SEC_AUTHTOKEN_HEADER in login_response.headers: - authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER] - else: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("Incorrect port number. Load balanced port is: " - "%(lb_api_port)s, api service port is: " - "%(apisvc_port)s") % - {'lb_api_port': LB_API_PORT, - 'apisvc_port': APISVC_PORT})) - - if not authtoken: - details_str = self.extract_error_detail(login_response) - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("The token is not generated by authentication service." 
- " %s") % details_str)) - - if login_response.status_code != http_client.OK: - error_msg = None - if login_response.status_code == http_client.UNAUTHORIZED: - error_msg = _("Access forbidden: Authentication required") - elif login_response.status_code == http_client.FORBIDDEN: - error_msg = _("Access forbidden: You don't have" - " sufficient privileges to perform" - " this operation") - elif (login_response.status_code == - http_client.INTERNAL_SERVER_ERROR): - error_msg = _("Bourne internal server error") - elif login_response.status_code == http_client.NOT_FOUND: - error_msg = _( - "Requested resource is currently unavailable") - elif (login_response.status_code == - http_client.METHOD_NOT_ALLOWED): - error_msg = (_("GET method is not supported by resource:" - " %s"), - url) - elif (login_response.status_code == - http_client.SERVICE_UNAVAILABLE): - error_msg = _("Service temporarily unavailable:" - " The server is temporarily unable" - " to service your request") - else: - error_msg = login_response.text - raise common.CoprHdError(common.CoprHdError.HTTP_ERR, - (_("HTTP code: %(status_code)s" - ", response: %(reason)s" - " [%(error_msg)s]") % { - 'status_code': six.text_type( - login_response.status_code), - 'reason': six.text_type( - login_response.reason), - 'error_msg': six.text_type( - error_msg) - })) - except (exceptions.SSLError, socket.error, exceptions.ConnectionError, - exceptions.Timeout) as e: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, six.text_type(e)) - - return authtoken - - def extract_error_detail(self, login_response): - details_str = "" - try: - if login_response.content: - json_object = common.json_decode(login_response.content) - if 'details' in json_object: - details_str = json_object['details'] - - return details_str - except common.CoprHdError: - return details_str diff --git a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py b/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py deleted file mode 100644 index 
71577fc463a..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py +++ /dev/null @@ -1,523 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Contains some commonly used utility methods.""" -try: - import cookielib as cookie_lib -except ImportError: - import http.cookiejar as cookie_lib -import json -import re -import socket - -import oslo_serialization -from oslo_utils import timeutils -from oslo_utils import units -import requests -from requests import exceptions -import six -from six.moves import http_client - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import urihelper - - -PROD_NAME = 'storageos' - -TIMEOUT_SEC = 20 # 20 SECONDS - -global AUTH_TOKEN -AUTH_TOKEN = None - -TASK_TIMEOUT = 300 - -URI_TASKS_BY_OPID = '/vdc/tasks/{0}' - - -def _decode_list(data): - rv = [] - for item in data: - if isinstance(item, six.text_type): - item = item.encode('utf-8') - elif isinstance(item, list): - item = _decode_list(item) - elif isinstance(item, dict): - item = _decode_dict(item) - rv.append(item) - return rv - - -def _decode_dict(data): - rv = {} - for key, value in data.items(): - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(value, six.text_type): - value = value.encode('utf-8') - elif isinstance(value, list): - value = _decode_list(value) - elif isinstance(value, dict): - value = 
_decode_dict(value) - rv[key] = value - return rv - - -def json_decode(rsp): - """Used to decode the JSON encoded response.""" - - try: - o = json.loads(rsp, object_hook=_decode_dict) - except ValueError: - raise CoprHdError(CoprHdError.VALUE_ERR, - (_("Failed to recognize JSON payload:\n[%s]") % rsp)) - return o - - -def service_json_request(ip_addr, port, http_method, uri, body, - contenttype='application/json', customheaders=None): - """Used to make an HTTP request and get the response. - - The message body is encoded in JSON format - - :param ip_addr: IP address or host name of the server - :param port: port number of the server on which it - is listening to HTTP requests - :param http_method: one of GET, POST, PUT, DELETE - :param uri: the request URI - :param body: the request payload - :returns: a tuple of two elements: (response body, response headers) - :raises CoprHdError: in case of HTTP errors with err_code 3 - """ - - SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN' - - headers = {'Content-Type': contenttype, - 'ACCEPT': 'application/json, application/octet-stream', - 'X-EMC-REST-CLIENT': 'TRUE'} - - if customheaders: - headers.update(customheaders) - - try: - protocol = "https://" - if port == 8080: - protocol = "http://" - url = protocol + ip_addr + ":" + six.text_type(port) + uri - - cookiejar = cookie_lib.LWPCookieJar() - headers[SEC_AUTHTOKEN_HEADER] = AUTH_TOKEN - - if http_method == 'GET': - response = requests.get(url, headers=headers, verify=False, - cookies=cookiejar) - elif http_method == 'POST': - response = requests.post(url, data=body, headers=headers, - verify=False, cookies=cookiejar) - elif http_method == 'PUT': - response = requests.put(url, data=body, headers=headers, - verify=False, cookies=cookiejar) - elif http_method == 'DELETE': - - response = requests.delete(url, headers=headers, verify=False, - cookies=cookiejar) - else: - raise CoprHdError(CoprHdError.HTTP_ERR, - (_("Unknown/Unsupported HTTP method: %s") % - http_method)) - - if 
(response.status_code == http_client.OK or - response.status_code == http_client.ACCEPTED): - return (response.text, response.headers) - - error_msg = None - if response.status_code == http_client.INTERNAL_SERVER_ERROR: - response_text = json_decode(response.text) - error_details = "" - if 'details' in response_text: - error_details = response_text['details'] - error_msg = (_("CoprHD internal server error. Error details: %s"), - error_details) - elif response.status_code == http_client.UNAUTHORIZED: - error_msg = _("Access forbidden: Authentication required") - elif response.status_code == http_client.FORBIDDEN: - error_msg = "" - error_details = "" - error_description = "" - - response_text = json_decode(response.text) - - if 'details' in response_text: - error_details = response_text['details'] - error_msg = (_("%(error_msg)s Error details:" - " %(error_details)s"), - {'error_msg': error_msg, - 'error_details': error_details - }) - elif 'description' in response_text: - error_description = response_text['description'] - error_msg = (_("%(error_msg)s Error description:" - " %(error_description)s"), - {'error_msg': error_msg, - 'error_description': error_description - }) - else: - error_msg = _("Access forbidden: You don't have" - " sufficient privileges to perform this" - " operation") - - elif response.status_code == http_client.NOT_FOUND: - error_msg = "Requested resource not found" - elif response.status_code == http_client.METHOD_NOT_ALLOWED: - error_msg = six.text_type(response.text) - elif response.status_code == http_client.SERVICE_UNAVAILABLE: - error_msg = "" - error_details = "" - error_description = "" - - response_text = json_decode(response.text) - - if 'code' in response_text: - errorCode = response_text['code'] - error_msg = "Error " + six.text_type(errorCode) - - if 'details' in response_text: - error_details = response_text['details'] - error_msg = error_msg + ": " + error_details - elif 'description' in response_text: - error_description = 
response_text['description'] - error_msg = error_msg + ": " + error_description - else: - error_msg = _("Service temporarily unavailable:" - " The server is temporarily unable to" - " service your request") - else: - error_msg = response.text - if isinstance(error_msg, six.text_type): - error_msg = error_msg.encode('utf-8') - raise CoprHdError(CoprHdError.HTTP_ERR, - (_("HTTP code: %(status_code)s" - ", %(reason)s" - " [%(error_msg)s]") % { - 'status_code': six.text_type( - response.status_code), - 'reason': six.text_type( - response.reason), - 'error_msg': six.text_type( - error_msg) - })) - except (CoprHdError, socket.error, exceptions.SSLError, - exceptions.ConnectionError, exceptions.TooManyRedirects, - exceptions.Timeout) as e: - raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e)) - # TODO(Ravi) : Either following exception should have proper message or - # IOError should just be combined with the above statement - except IOError as e: - raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e)) - - -def is_uri(name): - """Checks whether the name is a URI or not. - - :param name: Name of the resource - :returns: True if name is URI, False otherwise - """ - try: - (urn, prod, trailer) = name.split(':', 2) - return (urn == 'urn' and prod == PROD_NAME) - except Exception: - return False - - -def format_json_object(obj): - """Formats JSON object to make it readable by proper indentation. - - :param obj: JSON object - :returns: a string of formatted JSON object - """ - return oslo_serialization.jsonutils.dumps(obj, sort_keys=True, indent=3) - - -def get_parent_child_from_xpath(name): - """Returns the parent and child elements from XPath.""" - if '/' in name: - (pname, label) = name.rsplit('/', 1) - else: - pname = None - label = name - return (pname, label) - - -def to_bytes(in_str): - """Converts a size to bytes. 
- - :param in_str: a number suffixed with a unit: {number}{unit} - units supported: - K, KB, k or kb - kilobytes - M, MB, m or mb - megabytes - G, GB, g or gb - gigabytes - T, TB, t or tb - terabytes - :returns: number of bytes - None; if input is incorrect - """ - match = re.search('^([0-9]+)([a-zA-Z]{0,2})$', in_str) - - if not match: - return None - - unit = match.group(2).upper() - value = match.group(1) - - size_count = int(value) - if unit in ['K', 'KB']: - multiplier = int(units.Ki) - elif unit in ['M', 'MB']: - multiplier = int(units.Mi) - elif unit in ['G', 'GB']: - multiplier = int(units.Gi) - elif unit in ['T', 'TB']: - multiplier = int(units.Ti) - elif unit == "": - return size_count - else: - return None - - size_in_bytes = int(size_count * multiplier) - return size_in_bytes - - -def get_list(json_object, parent_node_name, child_node_name=None): - """Returns a list of values from child_node_name. - - If child_node is not given, then it will retrieve list from parent node - """ - if not json_object: - return [] - - return_list = [] - if isinstance(json_object[parent_node_name], list): - for detail in json_object[parent_node_name]: - if child_node_name: - return_list.append(detail[child_node_name]) - else: - return_list.append(detail) - else: - if child_node_name: - return_list.append(json_object[parent_node_name][child_node_name]) - else: - return_list.append(json_object[parent_node_name]) - - return return_list - - -def get_node_value(json_object, parent_node_name, child_node_name=None): - """Returns value of given child_node. 
- - If child_node is not given, then value of parent node is returned - - :returns: None If json_object or parent_node is not given, - If child_node is not found under parent_node - """ - if not json_object: - return None - - if not parent_node_name: - return None - - detail = json_object[parent_node_name] - if not child_node_name: - return detail - - return_value = None - - if child_node_name in detail: - return_value = detail[child_node_name] - else: - return_value = None - - return return_value - - -def format_err_msg_and_raise(operation_type, component, - error_message, error_code): - """Method to format error message. - - :param operation_type: create, update, add, etc - :param component: storagesystem, vpool, etc - :param error_code: Error code from the API call - :param error_message: Detailed error message - """ - - formated_err_msg = (_("Error: Failed to %(operation_type)s" - " %(component)s") % - {'operation_type': operation_type, - 'component': component - }) - if error_message.startswith("\"\'") and error_message.endswith("\'\""): - # stripping the first 2 and last 2 characters, which are quotes. - error_message = error_message[2:len(error_message) - 2] - - formated_err_msg = formated_err_msg + "\nReason:" + error_message - raise CoprHdError(error_code, formated_err_msg) - - -def search_by_tag(resource_search_uri, ipaddr, port): - """Fetches the list of resources with a given tag. 
- - :param resource_search_uri: The tag based search uri - Example: '/block/volumes/search?tag=tagexample1' - :param ipaddr: IP address of CoprHD host - :param port: Port number - """ - # check if the URI passed has both project and name parameters - str_uri = six.text_type(resource_search_uri) - if 'search' in str_uri and '?tag=' in str_uri: - # Get the project URI - - (s, h) = service_json_request( - ipaddr, port, "GET", - resource_search_uri, None) - - o = json_decode(s) - if not o: - return None - - resources = get_node_value(o, "resource") - - resource_uris = [] - for resource in resources: - resource_uris.append(resource["id"]) - return resource_uris - else: - raise CoprHdError(CoprHdError.VALUE_ERR, (_("Search URI %s" - " is not in the expected" - " format, it should end" - " with ?tag={0}") - % str_uri)) - - -# Blocks the operation until the task is complete/error out/timeout -def block_until_complete(component_type, - resource_uri, - task_id, - ipaddr, - port, - synctimeout=0): - - if not synctimeout: - synctimeout = TASK_TIMEOUT - t = timeutils.StopWatch(duration=synctimeout) - t.start() - while not t.expired(): - if component_type == 'block': - out = show_task_opid(task_id, ipaddr, port) - else: - out = get_task_by_resourceuri_and_taskId( - component_type, resource_uri, task_id, ipaddr, port) - - if out: - if out["state"] == "ready": - - # stop the timer and return - t.stop() - break - - # if the status of the task is 'error' then stop the timer - # and raise exception - if out["state"] == "error": - # stop the timer - t.stop() - error_message = "Please see logs for more details" - if ("service_error" in out and - "details" in out["service_error"]): - error_message = out["service_error"]["details"] - raise CoprHdError(CoprHdError.VALUE_ERR, - (_("Task: %(task_id)s" - " is failed with" - " error: %(error_message)s") % - {'task_id': task_id, - 'error_message': error_message - })) - - else: - raise CoprHdError(CoprHdError.TIME_OUT, - (_("Task did not 
complete in %d secs." - " Operation timed out. Task in CoprHD" - " will continue") % synctimeout)) - - return - - -def show_task_opid(taskid, ipaddr, port): - (s, h) = service_json_request( - ipaddr, port, - "GET", - URI_TASKS_BY_OPID.format(taskid), - None) - if (not s): - return None - o = json_decode(s) - return o - - -def get_task_by_resourceuri_and_taskId(component_type, resource_uri, - task_id, ipaddr, port): - """Returns the single task details.""" - - task_uri_constant = urihelper.singletonURIHelperInstance.getUri( - component_type, "task") - (s, h) = service_json_request( - ipaddr, port, "GET", - task_uri_constant.format(resource_uri, task_id), None) - if not s: - return None - o = json_decode(s) - return o - - -class CoprHdError(exception.VolumeBackendAPIException): - - """Custom exception class used to report logical errors. - - Attributes: - err_code - String error code - msg - String text - """ - SOS_FAILURE_ERR = 1 - CMD_LINE_ERR = 2 - HTTP_ERR = 3 - VALUE_ERR = 4 - NOT_FOUND_ERR = 1 - ENTRY_ALREADY_EXISTS_ERR = 5 - MAX_COUNT_REACHED = 6 - TIME_OUT = 7 - - def __init__(self, err_code, msg): - self.err_code = err_code - self.msg = msg - - def __str__(self): - return repr(self.msg) - - -class CoprHDResource(object): - - def __init__(self, ipaddr, port): - """Constructor: takes IP address and port of the CoprHD instance. - - These are needed to make http requests for REST API - """ - self.ipaddr = ipaddr - self.port = port diff --git a/cinder/volume/drivers/coprhd/helpers/consistencygroup.py b/cinder/volume/drivers/coprhd/helpers/consistencygroup.py deleted file mode 100644 index e90a166310b..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/consistencygroup.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import project - - -class ConsistencyGroup(common.CoprHDResource): - - URI_CONSISTENCY_GROUP = "/block/consistency-groups" - URI_CONSISTENCY_GROUPS_INSTANCE = URI_CONSISTENCY_GROUP + "/{0}" - URI_CONSISTENCY_GROUPS_DEACTIVATE = (URI_CONSISTENCY_GROUPS_INSTANCE + - "/deactivate") - URI_CONSISTENCY_GROUPS_SEARCH = ( - '/block/consistency-groups/search?project={0}') - URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG = ( - '/block/consistency-groups/search?tag={0}') - URI_CONSISTENCY_GROUP_TAGS = ( - '/block/consistency-groups/{0}/tags') - - def list(self, project_name, tenant): - """This function gives list of comma separated consistency group uris. 
- - :param project_name: Name of the project path - :param tenant: Name of the tenant - :returns: list of consistency group ids separated by comma - """ - if tenant is None: - tenant = "" - projobj = project.Project(self.ipaddr, self.port) - fullproj = tenant + "/" + project_name - projuri = projobj.project_query(fullproj) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - self.URI_CONSISTENCY_GROUPS_SEARCH.format(projuri), None) - o = common.json_decode(s) - if not o: - return [] - - congroups = [] - resources = common.get_node_value(o, "resource") - for resource in resources: - congroups.append(resource["id"]) - - return congroups - - def show(self, name, project, tenant): - """This function will display the consistency group with details. - - :param name: Name of the consistency group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: details of consistency group - """ - uri = self.consistencygroup_query(name, project, tenant) - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - self.URI_CONSISTENCY_GROUPS_INSTANCE.format(uri), None) - o = common.json_decode(s) - if o['inactive']: - return None - return o - - def consistencygroup_query(self, name, project, tenant): - """This function will return consistency group id. 
- - :param name: Name/id of the consistency group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: id of the consistency group - """ - if common.is_uri(name): - return name - - uris = self.list(project, tenant) - for uri in uris: - congroup = self.show(uri, project, tenant) - if congroup and congroup['name'] == name: - return congroup['id'] - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Consistency Group %s: not found") % name)) - - # Blocks the operation until the task is complete/error out/timeout - def check_for_sync(self, result, sync, synctimeout=0): - if len(result["resource"]) > 0: - resource = result["resource"] - return ( - common.block_until_complete("consistencygroup", resource["id"], - result["id"], self.ipaddr, - self.port, synctimeout) - ) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - _("error: task list is empty, no task response found")) - - def create(self, name, project_name, tenant): - """This function will create consistency group with the given name. - - :param name: Name of the consistency group - :param project_name: Name of the project path - :param tenant: Container tenant name - :returns: status of creation - """ - # check for existence of consistency group. 
- try: - status = self.show(name, project_name, tenant) - except common.CoprHdError as e: - if e.err_code == common.CoprHdError.NOT_FOUND_ERR: - if tenant is None: - tenant = "" - fullproj = tenant + "/" + project_name - projobj = project.Project(self.ipaddr, self.port) - projuri = projobj.project_query(fullproj) - - parms = {'name': name, 'project': projuri, } - body = oslo_serialization.jsonutils.dumps(parms) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "POST", - self.URI_CONSISTENCY_GROUP, body) - - o = common.json_decode(s) - return o - else: - raise - if status: - common.format_err_msg_and_raise( - "create", "consistency group", - (_("consistency group with name: %s already exists") % name), - common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR) - - def delete(self, name, project, tenant, coprhdonly=False): - """This function marks a particular consistency group as delete. - - :param name: Name of the consistency group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: status of the delete operation - false, incase it fails to do delete - """ - params = '' - if coprhdonly is True: - params += "?type=" + 'CoprHD_ONLY' - uri = self.consistencygroup_query(name, project, tenant) - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - self.URI_CONSISTENCY_GROUPS_DEACTIVATE.format(uri) + params, - None) - return - - def update(self, uri, project, tenant, add_volumes, remove_volumes, - sync, synctimeout=0): - """Function used to add or remove volumes from consistency group. - - It will update the consistency group with given volumes - - :param uri: URI of the consistency group - :param project: Name of the project path - :param tenant: Container tenant name - :param add_volumes: volumes to be added to the consistency group - :param remove_volumes: volumes to be removed from CG - :param sync: synchronous request - :param synctimeout: Query for task status for 'synctimeout' secs. 
- If the task doesn't complete in synctimeout - secs, an exception is thrown - :returns: status of creation - """ - if tenant is None: - tenant = "" - - parms = [] - add_voluris = [] - remove_voluris = [] - from cinder.volume.drivers.coprhd.helpers.volume import Volume - volobj = Volume(self.ipaddr, self.port) - if add_volumes: - for volname in add_volumes: - full_project_name = tenant + "/" + project - add_voluris.append( - volobj.volume_query(full_project_name, volname)) - volumes = {'volume': add_voluris} - parms = {'add_volumes': volumes} - - if remove_volumes: - for volname in remove_volumes: - full_project_name = tenant + "/" + project - remove_voluris.append( - volobj.volume_query(full_project_name, volname)) - volumes = {'volume': remove_voluris} - parms = {'remove_volumes': volumes} - - body = oslo_serialization.jsonutils.dumps(parms) - (s, h) = common.service_json_request( - self.ipaddr, self.port, "PUT", - self.URI_CONSISTENCY_GROUPS_INSTANCE.format(uri), - body) - - o = common.json_decode(s) - if sync: - return self.check_for_sync(o, sync, synctimeout) - else: - return o diff --git a/cinder/volume/drivers/coprhd/helpers/exportgroup.py b/cinder/volume/drivers/coprhd/helpers/exportgroup.py deleted file mode 100644 index 503758e2403..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/exportgroup.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import host -from cinder.volume.drivers.coprhd.helpers import project -from cinder.volume.drivers.coprhd.helpers import virtualarray -from cinder.volume.drivers.coprhd.helpers import volume - - -class ExportGroup(common.CoprHDResource): - - URI_EXPORT_GROUP = "/block/exports" - URI_EXPORT_GROUPS_SHOW = URI_EXPORT_GROUP + "/{0}" - URI_EXPORT_GROUP_SEARCH = '/block/exports/search' - URI_EXPORT_GROUP_UPDATE = '/block/exports/{0}' - - def exportgroup_remove_volumes_by_uri(self, exportgroup_uri, - volume_id_list, sync=False, - tenantname=None, projectname=None, - cg=None, synctimeout=0): - """Remove volumes from the exportgroup, given the uris of volume.""" - - volume_list = volume_id_list - parms = {} - - parms['volume_changes'] = self._remove_list(volume_list) - o = self.send_json_request(exportgroup_uri, parms) - return self.check_for_sync(o, sync, synctimeout) - - def _remove_list(self, uris): - resChanges = {} - if not isinstance(uris, list): - resChanges['remove'] = [uris] - else: - resChanges['remove'] = uris - return resChanges - - def send_json_request(self, exportgroup_uri, param): - body = oslo_serialization.jsonutils.dumps(param) - (s, h) = common.service_json_request( - self.ipaddr, self.port, "PUT", - self.URI_EXPORT_GROUP_UPDATE.format(exportgroup_uri), body) - return common.json_decode(s) - - def check_for_sync(self, result, sync, synctimeout=0): - if sync: - if len(result["resource"]) > 0: - resource = result["resource"] - return ( - common.block_until_complete("export", resource["id"], - result["id"], self.ipaddr, - self.port, synctimeout) - ) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, _( - "error: task list is empty, no task response found")) - else: - return result - - def exportgroup_list(self, project_name, tenant): - """This function gives 
list of export group uris separated by comma. - - :param project_name: Name of the project path - :param tenant: Name of the tenant - :returns: list of export group ids separated by comma - """ - if tenant is None: - tenant = "" - projobj = project.Project(self.ipaddr, self.port) - fullproj = tenant + "/" + project_name - projuri = projobj.project_query(fullproj) - - uri = self.URI_EXPORT_GROUP_SEARCH - - if '?' in uri: - uri += '&project=' + projuri - else: - uri += '?project=' + projuri - - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - uri, None) - o = common.json_decode(s) - if not o: - return [] - - exportgroups = [] - resources = common.get_node_value(o, "resource") - for resource in resources: - exportgroups.append(resource["id"]) - - return exportgroups - - def exportgroup_show(self, name, project, tenant, varray=None): - """This function displays the Export group with details. - - :param name: Name of the export group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: Details of export group - """ - varrayuri = None - if varray: - varrayObject = virtualarray.VirtualArray( - self.ipaddr, self.port) - varrayuri = varrayObject.varray_query(varray) - uri = self.exportgroup_query(name, project, tenant, varrayuri) - (s, h) = common.service_json_request( - self.ipaddr, - self.port, - "GET", - self.URI_EXPORT_GROUPS_SHOW.format(uri), None) - o = common.json_decode(s) - if o['inactive']: - return None - - return o - - def exportgroup_create(self, name, project_name, tenant, varray, - exportgrouptype, export_destination=None): - """This function creates the Export group with given name. - - :param name: Name of the export group - :param project_name: Name of the project path - :param tenant: Container tenant name - :param varray: Name of the virtual array - :param exportgrouptype: Type of the export group. Ex:Host etc - :returns: status of creation - """ - # check for existence of export group. 
- try: - status = self.exportgroup_show(name, project_name, tenant) - except common.CoprHdError as e: - if e.err_code == common.CoprHdError.NOT_FOUND_ERR: - if tenant is None: - tenant = "" - - fullproj = tenant + "/" + project_name - projObject = project.Project(self.ipaddr, self.port) - projuri = projObject.project_query(fullproj) - - varrayObject = virtualarray.VirtualArray( - self.ipaddr, self.port) - nhuri = varrayObject.varray_query(varray) - - parms = { - 'name': name, - 'project': projuri, - 'varray': nhuri, - 'type': exportgrouptype - } - - if exportgrouptype and export_destination: - host_obj = host.Host(self.ipaddr, self.port) - host_uri = host_obj.query_by_name(export_destination) - parms['hosts'] = [host_uri] - - body = oslo_serialization.jsonutils.dumps(parms) - (s, h) = common.service_json_request(self.ipaddr, - self.port, "POST", - self.URI_EXPORT_GROUP, - body) - - o = common.json_decode(s) - return o - else: - raise - - if status: - raise common.CoprHdError( - common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR, (_( - "Export group with name %s" - " already exists") % name)) - - def exportgroup_query(self, name, project, tenant, varrayuri=None): - """Makes REST API call to query the exportgroup by name. 
- - :param name: Name/id of the export group - :param project: Name of the project - :param tenant: Name of the tenant - :param varrayuri: URI of the virtual array - :returns: id of the export group - """ - if common.is_uri(name): - return name - - uris = self.exportgroup_list(project, tenant) - for uri in uris: - exportgroup = self.exportgroup_show(uri, project, tenant) - if exportgroup and exportgroup['name'] == name: - if varrayuri: - varrayobj = exportgroup['varray'] - if varrayobj['id'] == varrayuri: - return exportgroup['id'] - else: - continue - else: - return exportgroup['id'] - raise common.CoprHdError( - common.CoprHdError.NOT_FOUND_ERR, - (_("Export Group %s: not found") % name)) - - def exportgroup_add_volumes(self, sync, exportgroupname, tenantname, - maxpaths, minpaths, pathsperinitiator, - projectname, volumenames, - cg=None, synctimeout=0, varray=None): - """Add volume to export group. - - :param sync: synchronous request - :param exportgroupname: Name/id of the export group - :param tenantname: tenant name - :param maxpaths: Maximum number of paths - :param minpaths: Minimum number of paths - :param pathsperinitiator: Paths per initiator - :param projectname: name of project - :param volumenames: names of volumes that needs - to be added to exportgroup - :param cg: consistency group - :param synctimeout: Query for task status for 'synctimeout' secs - If the task doesn't complete in synctimeout secs, - an exception is thrown - :param varray: Name of varray - :returns: action result - """ - varrayuri = None - if varray: - varrayObject = virtualarray.VirtualArray( - self.ipaddr, self.port) - varrayuri = varrayObject.varray_query(varray) - - exportgroup_uri = self.exportgroup_query(exportgroupname, - projectname, - tenantname, - varrayuri) - - # get volume uri - if tenantname is None: - tenantname = "" - # List of volumes - volume_list = [] - - if volumenames: - volume_list = self._get_resource_lun_tuple( - volumenames, "volumes", None, tenantname, - 
projectname, None) - - parms = {} - # construct the body - - volChanges = {} - volChanges['add'] = volume_list - parms['volume_changes'] = volChanges - - o = self.send_json_request(exportgroup_uri, parms) - return self.check_for_sync(o, sync, synctimeout) - - def _get_resource_lun_tuple(self, resources, resType, baseResUri, - tenantname, projectname, blockTypeName): - """Function to validate input volumes and return list of ids and luns. - - """ - copyEntries = [] - volumeObject = volume.Volume(self.ipaddr, self.port) - for copy in resources: - copyParam = [] - try: - copyParam = copy.split(":") - except Exception: - raise common.CoprHdError( - common.CoprHdError.CMD_LINE_ERR, - (_("Please provide valid format volume:" - " lun for parameter %s") % - resType)) - copy = dict() - if not len(copyParam): - raise common.CoprHdError( - common.CoprHdError.CMD_LINE_ERR, - (_("Please provide at least one volume for parameter %s") % - resType)) - if resType == "volumes": - full_project_name = tenantname + "/" + projectname - copy['id'] = volumeObject.volume_query( - full_project_name, copyParam[0]) - if len(copyParam) > 1: - copy['lun'] = copyParam[1] - copyEntries.append(copy) - return copyEntries diff --git a/cinder/volume/drivers/coprhd/helpers/host.py b/cinder/volume/drivers/coprhd/helpers/host.py deleted file mode 100644 index 8f6cb4b5476..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/host.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import tenant - - -class Host(common.CoprHDResource): - - # All URIs for the Host operations - URI_HOST_DETAILS = "/compute/hosts/{0}" - URI_HOST_LIST_INITIATORS = "/compute/hosts/{0}/initiators" - URI_COMPUTE_HOST = "/compute/hosts" - - def query_by_name(self, host_name, tenant_name=None): - """Search host matching host_name and tenant if tenant_name provided. - - tenant_name is optional - """ - hostList = self.list_all(tenant_name) - for host in hostList: - hostUri = host['id'] - hostDetails = self.show_by_uri(hostUri) - if hostDetails: - if hostDetails['name'] == host_name: - return hostUri - - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, (_( - "Host with name: %s not found") % host_name)) - - def list_initiators(self, host_name): - """Lists all initiators for the given host. 
- - :param host_name: The name of the host - """ - if not common.is_uri(host_name): - hostUri = self.query_by_name(host_name, None) - else: - hostUri = host_name - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - Host.URI_HOST_LIST_INITIATORS.format(hostUri), - None) - o = common.json_decode(s) - - if not o or "initiator" not in o: - return [] - - return common.get_node_value(o, 'initiator') - - def list_all(self, tenant_name): - """Gets the ids and self links for all compute elements.""" - restapi = self.URI_COMPUTE_HOST - tenant_obj = tenant.Tenant(self.ipaddr, self.port) - if tenant_name is None: - tenant_uri = tenant_obj.tenant_getid() - else: - tenant_uri = tenant_obj.tenant_query(tenant_name) - restapi = restapi + "?tenant=" + tenant_uri - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - restapi, - None) - o = common.json_decode(s) - return o['host'] - - def show_by_uri(self, uri): - """Makes REST API call to retrieve Host details based on its UUID.""" - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - Host.URI_HOST_DETAILS.format(uri), - None) - o = common.json_decode(s) - inactive = common.get_node_value(o, 'inactive') - - if inactive: - return None - return o diff --git a/cinder/volume/drivers/coprhd/helpers/project.py b/cinder/volume/drivers/coprhd/helpers/project.py deleted file mode 100644 index 3cc7b00e1ca..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/project.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import tenant - - -class Project(common.CoprHDResource): - - # Commonly used URIs for the 'Project' module - URI_PROJECT_LIST = '/tenants/{0}/projects' - URI_PROJECT = '/projects/{0}' - - def project_query(self, name): - """Retrieves UUID of project based on its name. - - :param name: name of project - :returns: UUID of project - :raises CoprHdError: - when project name is not found - """ - if common.is_uri(name): - return name - (tenant_name, project_name) = common.get_parent_child_from_xpath(name) - - tenant_obj = tenant.Tenant(self.ipaddr, self.port) - - tenant_uri = tenant_obj.tenant_query(tenant_name) - projects = self.project_list(tenant_uri) - if projects: - for project in projects: - if project: - project_detail = self.project_show_by_uri( - project['id']) - if(project_detail and - project_detail['name'] == project_name): - return project_detail['id'] - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, (_( - "Project: %s not found") % project_name)) - - def project_list(self, tenant_name): - """Makes REST API call and retrieves projects based on tenant UUID. 
- - :param tenant_name: Name of the tenant - :returns: List of project UUIDs in JSON response payload - """ - tenant_obj = tenant.Tenant(self.ipaddr, self.port) - tenant_uri = tenant_obj.tenant_query(tenant_name) - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - Project.URI_PROJECT_LIST.format( - tenant_uri), - None) - o = common.json_decode(s) - - if "project" in o: - return common.get_list(o, 'project') - return [] - - def project_show_by_uri(self, uri): - """Makes REST API call and retrieves project derails based on UUID. - - :param uri: UUID of project - :returns: Project details in JSON response payload - """ - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Project.URI_PROJECT.format(uri), - None) - o = common.json_decode(s) - inactive = common.get_node_value(o, 'inactive') - if inactive: - return None - - return o diff --git a/cinder/volume/drivers/coprhd/helpers/snapshot.py b/cinder/volume/drivers/coprhd/helpers/snapshot.py deleted file mode 100644 index 46a30f40b34..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/snapshot.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import consistencygroup -from cinder.volume.drivers.coprhd.helpers import volume - - -class Snapshot(common.CoprHDResource): - - # Commonly used URIs for the 'Snapshot' module - URI_SNAPSHOTS = '/{0}/snapshots/{1}' - URI_BLOCK_SNAPSHOTS = '/block/snapshots/{0}' - URI_SEARCH_SNAPSHOT_BY_TAG = '/block/snapshots/search?tag={0}' - URI_SNAPSHOT_LIST = '/{0}/{1}/{2}/protection/snapshots' - URI_SNAPSHOT_TASKS_BY_OPID = '/vdc/tasks/{0}' - URI_RESOURCE_DEACTIVATE = '{0}/deactivate' - URI_CONSISTENCY_GROUP = "/block/consistency-groups" - URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE = ( - URI_CONSISTENCY_GROUP + "/{0}/protection/snapshots/{1}") - URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE = ( - URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE + "/deactivate") - URI_BLOCK_SNAPSHOTS_TAG = URI_BLOCK_SNAPSHOTS + '/tags' - - VOLUMES = 'volumes' - CG = 'consistency-groups' - BLOCK = 'block' - - timeout = 300 - - def snapshot_list_uri(self, otype, otypename, ouri): - """Makes REST API call to list snapshots under a volume. - - :param otype: block - :param otypename: either volume or consistency-group should be - provided - :param ouri: uri of volume or consistency-group - :returns: list of snapshots - """ - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - Snapshot.URI_SNAPSHOT_LIST.format(otype, otypename, ouri), None) - o = common.json_decode(s) - return o['snapshot'] - - def snapshot_show_uri(self, otype, resource_uri, suri): - """Retrieves snapshot details based on snapshot Name or Label. - - :param otype: block - :param suri: uri of the Snapshot. 
- :param resource_uri: uri of the source resource - :returns: Snapshot details in JSON response payload - """ - if(resource_uri is not None and - resource_uri.find('BlockConsistencyGroup') > 0): - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE.format( - resource_uri, - suri), - None) - else: - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - Snapshot.URI_SNAPSHOTS.format(otype, suri), None) - - return common.json_decode(s) - - def snapshot_query(self, storageres_type, - storageres_typename, resuri, snapshot_name): - if resuri is not None: - uris = self.snapshot_list_uri( - storageres_type, - storageres_typename, - resuri) - for uri in uris: - snapshot = self.snapshot_show_uri( - storageres_type, - resuri, - uri['id']) - if (False == common.get_node_value(snapshot, 'inactive') and - snapshot['name'] == snapshot_name): - return snapshot['id'] - - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - (_("snapshot with the name: " - "%s Not Found") % snapshot_name)) - - def storage_resource_query(self, - storageres_type, - volume_name, - cg_name, - project, - tenant): - resourcepath = "/" + project - if tenant is not None: - resourcepath = tenant + resourcepath - - resUri = None - resourceObj = None - if Snapshot.BLOCK == storageres_type and volume_name is not None: - resourceObj = volume.Volume(self.ipaddr, self.port) - resUri = resourceObj.volume_query(resourcepath, volume_name) - elif Snapshot.BLOCK == storageres_type and cg_name is not None: - resourceObj = consistencygroup.ConsistencyGroup( - self.ipaddr, - self.port) - resUri = resourceObj.consistencygroup_query( - cg_name, - project, - tenant) - else: - resourceObj = None - - return resUri - - def snapshot_create(self, otype, typename, ouri, - snaplabel, inactive, sync, - readonly=False, synctimeout=0): - """New snapshot is created, for a given volume. 
- - :param otype: block type should be provided - :param typename: either volume or consistency-groups should - be provided - :param ouri: uri of volume - :param snaplabel: name of the snapshot - :param inactive: if true, the snapshot will not activate the - synchronization between source and target volumes - :param sync: synchronous request - :param synctimeout: Query for task status for 'synctimeout' secs. - If the task doesn't complete in synctimeout secs, - an exception is thrown - """ - - # check snapshot is already exist - is_snapshot_exist = True - try: - self.snapshot_query(otype, typename, ouri, snaplabel) - except common.CoprHdError as e: - if e.err_code == common.CoprHdError.NOT_FOUND_ERR: - is_snapshot_exist = False - else: - raise - - if is_snapshot_exist: - raise common.CoprHdError( - common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR, - (_("Snapshot with name %(snaplabel)s" - " already exists under %(typename)s") % - {'snaplabel': snaplabel, - 'typename': typename - })) - - parms = { - 'name': snaplabel, - # if true, the snapshot will not activate the synchronization - # between source and target volumes - 'create_inactive': inactive - } - if readonly is True: - parms['read_only'] = readonly - body = oslo_serialization.jsonutils.dumps(parms) - - # REST api call - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Snapshot.URI_SNAPSHOT_LIST.format(otype, typename, ouri), body) - o = common.json_decode(s) - - task = o["task"][0] - - if sync: - return ( - common.block_until_complete( - otype, - task['resource']['id'], - task["id"], self.ipaddr, self.port, synctimeout) - ) - else: - return o - - def snapshot_delete_uri(self, otype, resource_uri, - suri, sync, synctimeout=0): - """Delete a snapshot by uri. - - :param otype: block - :param resource_uri: uri of the source resource - :param suri: Uri of the Snapshot - :param sync: To perform operation synchronously - :param synctimeout: Query for task status for 'synctimeout' secs. 
If - the task doesn't complete in synctimeout secs, - an exception is thrown - """ - s = None - if resource_uri.find("Volume") > 0: - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Snapshot.URI_RESOURCE_DEACTIVATE.format( - Snapshot.URI_BLOCK_SNAPSHOTS.format(suri)), - None) - elif resource_uri.find("BlockConsistencyGroup") > 0: - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE.format( - resource_uri, - suri), - None) - o = common.json_decode(s) - task = o["task"][0] - - if sync: - return ( - common.block_until_complete( - otype, - task['resource']['id'], - task["id"], self.ipaddr, self.port, synctimeout) - ) - else: - return o - - def snapshot_delete(self, storageres_type, - storageres_typename, resource_uri, - name, sync, synctimeout=0): - snapshotUri = self.snapshot_query( - storageres_type, - storageres_typename, - resource_uri, - name) - self.snapshot_delete_uri( - storageres_type, - resource_uri, - snapshotUri, - sync, synctimeout) diff --git a/cinder/volume/drivers/coprhd/helpers/tag.py b/cinder/volume/drivers/coprhd/helpers/tag.py deleted file mode 100644 index 818c70d922e..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/tag.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Contains tagging related methods.""" - -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class Tag(common.CoprHDResource): - - def tag_resource(self, uri, resource_id, add, remove): - params = { - 'add': add, - 'remove': remove - } - - body = oslo_serialization.jsonutils.dumps(params) - - (s, h) = common.service_json_request(self.ipaddr, self.port, "PUT", - uri.format(resource_id), body) - o = common.json_decode(s) - return o - - def list_tags(self, resource_uri): - if resource_uri.__contains__("tag") is False: - raise common.CoprHdError( - common.CoprHdError.VALUE_ERR, _("URI should end with /tag")) - - (s, h) = common.service_json_request(self.ipaddr, - self.port, - "GET", - resource_uri, - None) - - allTags = [] - o = common.json_decode(s) - allTags = o['tag'] - - return allTags diff --git a/cinder/volume/drivers/coprhd/helpers/tenant.py b/cinder/volume/drivers/coprhd/helpers/tenant.py deleted file mode 100644 index 9fb0f022096..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/tenant.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class Tenant(common.CoprHDResource): - - URI_SERVICES_BASE = '' - URI_TENANT = URI_SERVICES_BASE + '/tenant' - URI_TENANTS = URI_SERVICES_BASE + '/tenants/{0}' - URI_TENANTS_SUBTENANT = URI_TENANTS + '/subtenants' - - def tenant_query(self, label): - """Returns the UID of the tenant specified by the hierarchical name. - - (ex tenant1/tenant2/tenant3) - """ - - if common.is_uri(label): - return label - - tenant_id = self.tenant_getid() - - if not label: - return tenant_id - - subtenants = self.tenant_list(tenant_id) - subtenants.append(self.tenant_show(None)) - - for tenant in subtenants: - if tenant['name'] == label: - rslt = self.tenant_show_by_uri(tenant['id']) - if rslt: - return tenant['id'] - - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Tenant %s: not found") % label)) - - def tenant_show(self, label): - """Returns the details of the tenant based on its name.""" - if label: - tenant_id = self.tenant_query(label) - else: - tenant_id = self.tenant_getid() - - return self.tenant_show_by_uri(tenant_id) - - def tenant_getid(self): - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", Tenant.URI_TENANT, None) - - o = common.json_decode(s) - return o['id'] - - def tenant_list(self, uri=None): - """Returns all the tenants under a parent tenant. 
- - :param uri: The parent tenant name - :returns: JSON payload of tenant list - """ - - if not uri: - uri = self.tenant_getid() - - tenantdtls = self.tenant_show_by_uri(uri) - - if(tenantdtls and not ('parent_tenant' in tenantdtls and - ("id" in tenantdtls['parent_tenant']))): - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", self.URI_TENANTS_SUBTENANT.format(uri), None) - - o = common.json_decode(s) - return o['subtenant'] - - else: - return [] - - def tenant_show_by_uri(self, uri): - """Makes REST API call to retrieve tenant details based on UUID.""" - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - Tenant.URI_TENANTS.format(uri), - None) - - o = common.json_decode(s) - if 'inactive' in o and o['inactive']: - return None - - return o - - def get_tenant_by_name(self, tenant): - uri = None - if not tenant: - uri = self.tenant_getid() - else: - if not common.is_uri(tenant): - uri = self.tenant_query(tenant) - else: - uri = tenant - if not uri: - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Tenant %s: not found") % tenant)) - return uri diff --git a/cinder/volume/drivers/coprhd/helpers/urihelper.py b/cinder/volume/drivers/coprhd/helpers/urihelper.py deleted file mode 100644 index f9b98350934..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/urihelper.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -class URIHelper(object): - - """This map will be a map of maps. - - e.g for project component type, it will hold a map - of its operations vs their uris - """ - COMPONENT_TYPE_VS_URIS_MAP = dict() - """Volume URIs.""" - VOLUME_URIS_MAP = dict() - URI_VOLUMES = '/block/volumes' - URI_VOLUME = URI_VOLUMES + '/{0}' - URI_VOLUME_TASK_LIST = URI_VOLUME + '/tasks' - URI_VOLUME_TASK = URI_VOLUME_TASK_LIST + '/{1}' - - """Consistencygroup URIs.""" - CG_URIS_MAP = dict() - URI_CGS = '/block/consistency-groups' - URI_CG = URI_CGS + '/{0}' - URI_CG_TASK_LIST = URI_CG + '/tasks' - URI_CG_TASK = URI_CG_TASK_LIST + '/{1}' - - """Export Group URIs.""" - # Map to hold all export group uris - EXPORT_GROUP_URIS_MAP = dict() - URI_EXPORT_GROUP_TASKS_LIST = '/block/exports/{0}/tasks' - URI_EXPORT_GROUP_TASK = URI_EXPORT_GROUP_TASKS_LIST + '/{1}' - - def __init__(self): - """During initialization of the class, lets fill all the maps.""" - self.__fillExportGroupMap() - self.__fillVolumeMap() - self.__fillConsistencyGroupMap() - self.__initializeComponentVsUriMap() - - def __call__(self): - return self - - def __initializeComponentVsUriMap(self): - self.COMPONENT_TYPE_VS_URIS_MAP["export"] = self.EXPORT_GROUP_URIS_MAP - self.COMPONENT_TYPE_VS_URIS_MAP[ - "volume"] = self.VOLUME_URIS_MAP - self.COMPONENT_TYPE_VS_URIS_MAP[ - "consistencygroup"] = self.CG_URIS_MAP - - def __fillExportGroupMap(self): - self.EXPORT_GROUP_URIS_MAP["task"] = self.URI_EXPORT_GROUP_TASK - - def __fillVolumeMap(self): - self.VOLUME_URIS_MAP["task"] = self.URI_VOLUME_TASK - - def __fillConsistencyGroupMap(self): - self.CG_URIS_MAP["task"] = self.URI_CG_TASK - - def getUri(self, componentType, operationType): - return ( - self.COMPONENT_TYPE_VS_URIS_MAP.get( - componentType).get( - operationType) - ) - -"""Defining the singleton instance. 
- -Use this instance any time the access is required for this module/class -""" -singletonURIHelperInstance = URIHelper() diff --git a/cinder/volume/drivers/coprhd/helpers/virtualarray.py b/cinder/volume/drivers/coprhd/helpers/virtualarray.py deleted file mode 100644 index de0ec96b1f2..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/virtualarray.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class VirtualArray(common.CoprHDResource): - - # Commonly used URIs for the 'varrays' module - URI_VIRTUALARRAY = '/vdc/varrays' - URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}' - URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}' - - def varray_query(self, name): - """Returns the UID of the varray specified by the name.""" - if common.is_uri(name): - return name - - uris = self.varray_list() - - for uri in uris: - varray = self.varray_show(uri) - if varray and varray['name'] == name: - return varray['id'] - - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("varray %s: not found") % name)) - - def varray_list(self, vdcname=None): - """Returns all the varrays in a vdc. 
- - :param vdcname: Name of the Virtual Data Center - :returns: JSON payload of varray list - """ - vdcrestapi = None - if vdcname is not None: - vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format( - vdcname) - else: - vdcrestapi = VirtualArray.URI_VIRTUALARRAY - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - vdcrestapi, None) - - o = common.json_decode(s) - - returnlst = [] - for item in o['varray']: - returnlst.append(item['id']) - - return returnlst - - def varray_show(self, label): - """Makes REST API call to retrieve varray details based on name.""" - uri = self.varray_query(label) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - VirtualArray.URI_VIRTUALARRAY_URI.format(uri), - None) - - o = common.json_decode(s) - if 'inactive' in o and o['inactive'] is True: - return None - else: - return o diff --git a/cinder/volume/drivers/coprhd/helpers/virtualpool.py b/cinder/volume/drivers/coprhd/helpers/virtualpool.py deleted file mode 100644 index f86917f9cd8..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/virtualpool.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class VirtualPool(common.CoprHDResource): - - URI_VPOOL = "/{0}/vpools" - URI_VPOOL_SHOW = URI_VPOOL + "/{1}" - URI_VPOOL_SEARCH = URI_VPOOL + "/search?name={1}" - - def vpool_show_uri(self, vpooltype, uri): - """Makes REST API call and retrieves vpool details based on UUID. - - This function will take uri as input and returns with - all parameters of VPOOL like label, urn and type. - - :param vpooltype : Type of virtual pool {'block'} - :param uri : unique resource identifier of the vpool - :returns: object containing all the details of vpool - """ - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - self.URI_VPOOL_SHOW.format(vpooltype, uri), None) - - o = common.json_decode(s) - if o['inactive']: - return None - - return o - - def vpool_query(self, name, vpooltype): - """Makes REST API call to query the vpool by name and type. - - This function will take the VPOOL name and type of VPOOL - as input and get uri of the first occurrence of given VPOOL. - - :param name: Name of the VPOOL - :param vpooltype: Type of the VPOOL {'block'} - :returns: uri of the given vpool - """ - if common.is_uri(name): - return name - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - self.URI_VPOOL_SEARCH.format(vpooltype, name), None) - - o = common.json_decode(s) - if len(o['resource']) > 0: - # Get the Active vpool ID. - for vpool in o['resource']: - if self.vpool_show_uri(vpooltype, vpool['id']) is not None: - return vpool['id'] - # Raise not found exception. as we did not find any active vpool. 
- raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("VPool %(name)s ( %(vpooltype)s ) :" - " not found") % - {'name': name, - 'vpooltype': vpooltype - })) diff --git a/cinder/volume/drivers/coprhd/helpers/volume.py b/cinder/volume/drivers/coprhd/helpers/volume.py deleted file mode 100644 index e8a6e2d4911..00000000000 --- a/cinder/volume/drivers/coprhd/helpers/volume.py +++ /dev/null @@ -1,517 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_serialization -from oslo_utils import units -import six - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import consistencygroup -from cinder.volume.drivers.coprhd.helpers import project -from cinder.volume.drivers.coprhd.helpers import virtualarray -from cinder.volume.drivers.coprhd.helpers import virtualpool - - -class Volume(common.CoprHDResource): - - # Commonly used URIs for the 'Volume' module - URI_SEARCH_VOLUMES = '/block/volumes/search?project={0}' - URI_SEARCH_VOLUMES_BY_TAG = '/block/volumes/search?tag={0}' - URI_VOLUMES = '/block/volumes' - URI_VOLUME = URI_VOLUMES + '/{0}' - URI_VOLUME_EXPORTS = URI_VOLUME + '/exports' - URI_BULK_DELETE = URI_VOLUMES + '/deactivate' - URI_DEACTIVATE = URI_VOLUME + '/deactivate' - URI_EXPAND = URI_VOLUME + '/expand' - URI_TAG_VOLUME = URI_VOLUME + "/tags" - URI_VOLUME_CHANGE_VPOOL = URI_VOLUMES + "/vpool-change" - - # Protection REST APIs - clone - URI_VOLUME_PROTECTION_FULLCOPIES = ( - '/block/volumes/{0}/protection/full-copies') - URI_SNAPSHOT_PROTECTION_FULLCOPIES = ( - '/block/snapshots/{0}/protection/full-copies') - - URI_VOLUME_CLONE_DETACH = "/block/full-copies/{0}/detach" - - # New CG URIs - URI_CG_CLONE = "/block/consistency-groups/{0}/protection/full-copies" - URI_CG_CLONE_DETACH = ( - "/block/consistency-groups/{0}/protection/full-copies/{1}/detach") - - VOLUMES = 'volumes' - CG = 'consistency-groups' - BLOCK = 'block' - SNAPSHOTS = 'snapshots' - - # Lists volumes in a project - def list_volumes(self, project): - """Makes REST API call to list volumes under a project. 
- - :param project: name of project - :returns: List of volumes uuids in JSON response payload - """ - - volume_uris = self.search_volumes(project) - volumes = [] - for uri in volume_uris: - volume = self.show_by_uri(uri) - if volume: - volumes.append(volume) - return volumes - - def search_volumes(self, project_name): - - proj = project.Project(self.ipaddr, self.port) - project_uri = proj.project_query(project_name) - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Volume.URI_SEARCH_VOLUMES.format( - project_uri), - None) - o = common.json_decode(s) - if not o: - return [] - - volume_uris = [] - resources = common.get_node_value(o, "resource") - for resource in resources: - volume_uris.append(resource["id"]) - return volume_uris - - # Shows volume information given its uri - def show_by_uri(self, uri): - """Makes REST API call and retrieves volume details based on UUID. - - :param uri: UUID of volume - :returns: Volume details in JSON response payload - """ - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Volume.URI_VOLUME.format(uri), - None) - o = common.json_decode(s) - inactive = common.get_node_value(o, 'inactive') - if inactive: - return None - return o - - # Creates a volume given label, project, vpool and size - def create(self, project_name, label, size, varray, vpool, - sync, consistencygroup, synctimeout=0): - """Makes REST API call to create volume under a project. - - :param project_name: name of the project under which the volume - will be created - :param label: name of volume - :param size: size of volume - :param varray: name of varray - :param vpool: name of vpool - :param sync: synchronous request - :param consistencygroup: To create volume under a consistencygroup - :param synctimeout: Query for task status for 'synctimeout' secs. 
- If the task doesn't complete in synctimeout secs, - an exception is thrown - :returns: Created task details in JSON response payload - """ - - proj_obj = project.Project(self.ipaddr, self.port) - project_uri = proj_obj.project_query(project_name) - - vpool_obj = virtualpool.VirtualPool(self.ipaddr, self.port) - vpool_uri = vpool_obj.vpool_query(vpool, "block") - - varray_obj = virtualarray.VirtualArray(self.ipaddr, self.port) - varray_uri = varray_obj.varray_query(varray) - - request = { - 'name': label, - 'size': size, - 'varray': varray_uri, - 'project': project_uri, - 'vpool': vpool_uri, - 'count': 1 - } - if consistencygroup: - request['consistency_group'] = consistencygroup - - body = oslo_serialization.jsonutils.dumps(request) - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - Volume.URI_VOLUMES, - body) - o = common.json_decode(s) - - if sync: - # check task empty - if len(o["task"]) > 0: - task = o["task"][0] - return self.check_for_sync(task, sync, synctimeout) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - _("error: task list is empty, no task response found")) - else: - return o - - # Blocks the operation until the task is complete/error out/timeout - def check_for_sync(self, result, sync, synctimeout=0): - if sync: - if len(result["resource"]) > 0: - resource = result["resource"] - return ( - common.block_until_complete("volume", resource["id"], - result["id"], self.ipaddr, - self.port, synctimeout) - ) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - _("error: task list is empty, no task response found")) - else: - return result - - # Queries a volume given its name - def volume_query(self, full_project_name, volume_name): - """Makes REST API call to query the volume by name. 
- - :param volume_name: name of volume - :param full_project_name: Full project path - :returns: Volume details in JSON response payload - """ - if common.is_uri(volume_name): - return volume_name - - if not full_project_name: - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - _("Project name not specified")) - uris = self.search_volumes(full_project_name) - for uri in uris: - volume = self.show_by_uri(uri) - if volume and 'name' in volume and volume['name'] == volume_name: - return volume['id'] - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Volume" - "%s: not found") % volume_name)) - - def get_storageAttributes(self, volume_name, cg_name, snapshot_name=None): - storageres_type = None - storageres_typename = None - - if snapshot_name is not None: - storageres_type = Volume.BLOCK - storageres_typename = Volume.SNAPSHOTS - elif volume_name is not None: - storageres_type = Volume.BLOCK - storageres_typename = Volume.VOLUMES - elif cg_name is not None: - storageres_type = Volume.BLOCK - storageres_typename = Volume.CG - else: - storageres_type = None - storageres_typename = None - return (storageres_type, storageres_typename) - - def storage_resource_query(self, - storageres_type, - volume_name, - cg_name, - snapshot_name, - project, - tenant): - resourcepath = "/" + project - if tenant is not None: - resourcepath = tenant + resourcepath - - resUri = None - resourceObj = None - - if Volume.BLOCK == storageres_type and volume_name is not None: - resUri = self.volume_query(resourcepath, volume_name) - if snapshot_name is not None: - from cinder.volume.drivers.coprhd.helpers import snapshot - snapobj = snapshot.Snapshot(self.ipaddr, self.port) - resUri = snapobj.snapshot_query(storageres_type, - Volume.VOLUMES, resUri, - snapshot_name) - - elif Volume.BLOCK == storageres_type and cg_name is not None: - resourceObj = consistencygroup.ConsistencyGroup( - self.ipaddr, self.port) - resUri = resourceObj.consistencygroup_query( - cg_name, - 
project, - tenant) - else: - resourceObj = None - - return resUri - - # Creates volume(s) from given source volume - def clone(self, new_vol_name, resource_uri, - sync, synctimeout=0): - """Makes REST API call to clone volume. - - :param new_vol_name: name of volume - :param resource_uri: uri of source volume - :param sync: synchronous request - :param synctimeout: Query for task status for 'synctimeout' secs. - If the task doesn't complete in synctimeout secs, - an exception is thrown - :returns: Created task details in JSON response payload - """ - is_snapshot_clone = False - clone_full_uri = None - - # consistency group - if resource_uri.find("BlockConsistencyGroup") > 0: - clone_full_uri = Volume.URI_CG_CLONE.format(resource_uri) - elif resource_uri.find("BlockSnapshot") > 0: - is_snapshot_clone = True - clone_full_uri = ( - Volume.URI_SNAPSHOT_PROTECTION_FULLCOPIES.format(resource_uri)) - else: - clone_full_uri = ( - Volume.URI_VOLUME_PROTECTION_FULLCOPIES.format(resource_uri)) - - request = { - 'name': new_vol_name, - 'type': None, - 'count': 1 - } - - request["count"] = 1 - - body = oslo_serialization.jsonutils.dumps(request) - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - clone_full_uri, - body) - o = common.json_decode(s) - - if sync: - task = o["task"][0] - - if is_snapshot_clone: - return ( - common.block_until_complete( - "block", - task["resource"]["id"], - task["id"], self.ipaddr, self.port) - ) - else: - return self.check_for_sync(task, sync, synctimeout) - else: - return o - - # To check whether a cloned volume is in detachable state or not - def is_volume_detachable(self, full_project_name, name): - - volume_uri = self.volume_query(full_project_name, name) - vol = self.show_by_uri(volume_uri) - # Filtering based on "replicaState" attribute value of Cloned volume. - # If "replicaState" value is "SYNCHRONIZED" then only Cloned volume - # would be in detachable state. 
- try: - return vol['protection']['full_copies'][ - 'replicaState'] == 'SYNCHRONIZED' - except TypeError: - return False - - def volume_clone_detach(self, resource_uri, full_project_name, - name, sync, synctimeout=0): - - volume_uri = self.volume_query(full_project_name, name) - - # consistency group - if resource_uri.find("BlockConsistencyGroup") > 0: - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Volume.URI_CG_CLONE_DETACH.format( - resource_uri, - volume_uri), None) - else: - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Volume.URI_VOLUME_CLONE_DETACH.format(volume_uri), None) - - o = common.json_decode(s) - if sync: - task = o["task"][0] - return self.check_for_sync(task, sync, synctimeout) - else: - return o - - # Shows volume information given its name - def show(self, full_project_name, name): - """Retrieves volume details based on volume name. - - :param full_project_name: project path of the volume - :param name: name of the volume. If the volume is under a project, - then full XPath needs to be specified. 
- Example: If VOL1 is a volume under project PROJ1, - then the name of volume is PROJ1/VOL1 - :returns: Volume details in JSON response payload - """ - if common.is_uri(name): - return name - if full_project_name is None: - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Volume %s : not found") % - six.text_type(name))) - - uris = self.search_volumes(full_project_name) - - for uri in uris: - volume = self.show_by_uri(uri) - if volume and 'name' in volume and volume['name'] == name: - return volume - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Volume" - " %s : not found") % six.text_type(name))) - - def expand(self, full_project_name, volume_name, new_size, - sync=False, synctimeout=0): - - volume_detail = self.show(full_project_name, volume_name) - from decimal import Decimal - new_size_in_gb = Decimal(Decimal(new_size) / (units.Gi)) - current_size = Decimal(volume_detail["provisioned_capacity_gb"]) - if new_size_in_gb <= current_size: - raise common.CoprHdError( - common.CoprHdError.VALUE_ERR, - (_("error: Incorrect value of new size: %(new_size_in_gb)s" - " GB\nNew size must be greater than current size: " - "%(current_size)s GB") % {'new_size_in_gb': new_size_in_gb, - 'current_size': current_size})) - - body = oslo_serialization.jsonutils.dumps({ - "new_size": new_size - }) - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - Volume.URI_EXPAND.format( - volume_detail["id"]), - body) - if not s: - return None - o = common.json_decode(s) - - if sync: - return self.check_for_sync(o, sync, synctimeout) - return o - - # Deletes a volume given a volume name - def delete(self, full_project_name, name, sync=False, - force_delete=False, coprhdonly=False, synctimeout=0): - """Deletes a volume based on volume name. 
- - :param full_project_name: project name - :param name: name of volume to be deleted - :param sync: synchronous request - :param force_delete: if true, it will force the delete of internal - volumes that have the SUPPORTS_FORCE flag - :param coprhdonly: to delete volumes from coprHD only - :param synctimeout: Query for task status for 'synctimeout' secs. If - the task doesn't complete in synctimeout secs, - an exception is thrown - """ - volume_uri = self.volume_query(full_project_name, name) - return self.delete_by_uri(volume_uri, sync, force_delete, - coprhdonly, synctimeout) - - # Deletes a volume given a volume uri - def delete_by_uri(self, uri, sync=False, - force_delete=False, coprhdonly=False, synctimeout=0): - """Deletes a volume based on volume uri.""" - params = '' - if force_delete: - params += '&' if ('?' in params) else '?' - params += "force=" + "true" - if coprhdonly is True: - params += '&' if ('?' in params) else '?' - params += "type=" + 'CoprHD_ONLY' - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - Volume.URI_DEACTIVATE.format( - uri) + params, - None) - if not s: - return None - o = common.json_decode(s) - if sync: - return self.check_for_sync(o, sync, synctimeout) - return o - - # Gets the exports info given a volume uri - def get_exports_by_uri(self, uri): - """Makes REST API call to get exports info of a volume. - - :param uri: URI of the volume - :returns: Exports details in JSON response payload - """ - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Volume.URI_VOLUME_EXPORTS.format( - uri), - None) - return common.json_decode(s) - - # Update a volume information - # Changed the volume vpool - def update(self, prefix_path, name, vpool): - """Makes REST API call to update a volume information. 
- - :param name: name of the volume to be updated - :param vpool: name of vpool - :returns: Created task details in JSON response payload - """ - namelist = [] - - if isinstance(name, list): - namelist = name - else: - namelist.append(name) - - volumeurilist = [] - - for item in namelist: - volume_uri = self.volume_query(prefix_path, item) - volumeurilist.append(volume_uri) - - vpool_obj = virtualpool.VirtualPool(self.ipaddr, self.port) - vpool_uri = vpool_obj.vpool_query(vpool, "block") - - params = { - 'vpool': vpool_uri, - 'volumes': volumeurilist - } - - body = oslo_serialization.jsonutils.dumps(params) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "POST", - Volume.URI_VOLUME_CHANGE_VPOOL, - body) - - o = common.json_decode(s) - return o diff --git a/cinder/volume/drivers/coprhd/iscsi.py b/cinder/volume/drivers/coprhd/iscsi.py deleted file mode 100644 index fe48d4d268c..00000000000 --- a/cinder/volume/drivers/coprhd/iscsi.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Driver for EMC CoprHD iSCSI volumes.""" - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class EMCCoprHDISCSIDriver(driver.ISCSIDriver): - """CoprHD iSCSI Driver.""" - VERSION = "3.0.0.0" - - # ThirdPartySystems wiki page name - CI_WIKI_NAME = "EMC_CoprHD_CI" - - # TODO(jsbryant) Remove driver in Stein if CI is not fixed - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(EMCCoprHDISCSIDriver, self).__init__(*args, **kwargs) - self.common = self._get_common_driver() - - def _get_common_driver(self): - return coprhd_common.EMCCoprHDDriverCommon( - protocol='iSCSI', - default_backend_name=self.__class__.__name__, - configuration=self.configuration) - - def check_for_setup_error(self): - self.common.check_for_setup_error() - - def create_volume(self, volume): - """Creates a Volume.""" - self.common.create_volume(volume, self) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned Volume.""" - self.common.create_cloned_volume(volume, src_vref) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common.create_volume_from_snapshot(snapshot, volume) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def extend_volume(self, volume, new_size): - """expands the size of the volume.""" - self.common.expand_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.common.create_snapshot(snapshot) - - def 
delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector=None): - """Driver entry point to get the export info for a new volume.""" - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - pass - - def create_group(self, context, group): - """Creates a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.create_consistencygroup(context, group) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - message = _("create group from source is not supported " - "for CoprHD if the group type supports " - "consistent group snapshot.") - raise exception.VolumeBackendAPIException(data=message) - else: - raise NotImplementedError() - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates volumes in group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.delete_consistencygroup(context, group, volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group 
implementation - raise NotImplementedError() - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - LOG.debug("creating a group snapshot") - return self.common.create_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.delete_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - - initiator_ports = [] - initiator_ports.append(connector['initiator']) - itls = self.common.initialize_connection(volume, - 'iSCSI', - initiator_ports, - connector['host']) - properties = {} - properties['target_discovered'] = False - properties['volume_id'] = volume.id - if itls: - properties['target_iqn'] = itls[0]['target']['port'] - properties['target_portal'] = '%s:%s' % ( - itls[0]['target']['ip_address'], - itls[0]['target']['tcp_port']) - properties['target_lun'] = itls[0]['hlu'] - - auth = None - try: - auth = volume.provider_auth - except AttributeError: - pass - - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - LOG.debug("ISCSI properties: %s", properties) - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def 
terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - - init_ports = [] - init_ports.append(connector['initiator']) - self.common.terminate_connection(volume, - 'iSCSI', - init_ports, - connector['host']) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from virtual pool/virtual array.""" - LOG.debug("Updating volume stats") - self._stats = self.common.update_volume_stats() - - def retype(self, ctxt, volume, new_type, diff, host): - """Change the volume type.""" - return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/cinder/volume/drivers/coprhd/scaleio.py b/cinder/volume/drivers/coprhd/scaleio.py deleted file mode 100644 index c4ddc3b3171..00000000000 --- a/cinder/volume/drivers/coprhd/scaleio.py +++ /dev/null @@ -1,375 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Driver for EMC CoprHD ScaleIO volumes.""" - -from oslo_config import cfg -from oslo_log import log as logging -import requests -import six -from six.moves import http_client -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume import utils as volume_utils - - -LOG = logging.getLogger(__name__) - -scaleio_opts = [ - cfg.StrOpt('coprhd_scaleio_rest_gateway_host', - default='None', - help='Rest Gateway IP or FQDN for Scaleio'), - cfg.PortOpt('coprhd_scaleio_rest_gateway_port', - default=4984, - help='Rest Gateway Port for Scaleio'), - cfg.StrOpt('coprhd_scaleio_rest_server_username', - default=None, - help='Username for Rest Gateway'), - cfg.StrOpt('coprhd_scaleio_rest_server_password', - default=None, - help='Rest Gateway Password', - secret=True), - cfg.BoolOpt('scaleio_verify_server_certificate', - default=False, - help='verify server certificate'), - cfg.StrOpt('scaleio_server_certificate_path', - default=None, - help='Server certificate path') -] - -CONF = cfg.CONF -CONF.register_opts(scaleio_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class EMCCoprHDScaleIODriver(driver.VolumeDriver): - """CoprHD ScaleIO Driver.""" - VERSION = "3.0.0.0" - server_token = None - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "EMC_CoprHD_CI" - - def __init__(self, *args, **kwargs): - super(EMCCoprHDScaleIODriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(scaleio_opts) - self.common = self._get_common_driver() - - def _get_common_driver(self): - return coprhd_common.EMCCoprHDDriverCommon( - protocol='scaleio', - default_backend_name=self.__class__.__name__, - configuration=self.configuration) - - def check_for_setup_error(self): - self.common.check_for_setup_error() - if 
(self.configuration.scaleio_verify_server_certificate is True and - self.configuration.scaleio_server_certificate_path is None): - message = _("scaleio_verify_server_certificate is True but" - " scaleio_server_certificate_path is not provided" - " in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - def create_volume(self, volume): - """Creates a Volume.""" - self.common.create_volume(volume, self, True) - self.common.set_volume_tags(volume, ['_obj_volume_type'], True) - vol_size = self._update_volume_size(int(volume.size)) - return {'size': vol_size} - - def _update_volume_size(self, vol_size): - """update the openstack volume size.""" - default_size = 8 - if (vol_size % default_size) != 0: - return (vol_size / default_size) * default_size + default_size - else: - return vol_size - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned Volume.""" - self.common.create_cloned_volume(volume, src_vref, True) - self.common.set_volume_tags(volume, ['_obj_volume_type'], True) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common.create_volume_from_snapshot(snapshot, volume, True) - self.common.set_volume_tags(volume, ['_obj_volume_type'], True) - - def extend_volume(self, volume, new_size): - """expands the size of the volume.""" - self.common.expand_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes an volume.""" - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.common.create_snapshot(snapshot, True) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector=None): - """Driver entry point to get the export info for a new volume.""" - pass - - 
def remove_export(self, context, volume): - """Driver exntry point to remove an export for a volume.""" - pass - - def create_group(self, context, group): - """Creates a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.create_consistencygroup(context, group, True) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates volumes in group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - message = _("create group from source is not supported " - "for CoprHD if the group type supports " - "consistent group snapshot.") - raise exception.VolumeBackendAPIException(data=message) - else: - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.delete_consistencygroup(context, group, - volumes, True) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - LOG.debug("creating a group snapshot") - return self.common.create_cgsnapshot(group_snapshot, snapshots, - True) - - # If the group is not 
consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.delete_cgsnapshot(group_snapshot, snapshots, - True) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - - volname = self.common._get_resource_name(volume, - coprhd_common.MAX_SIO_LEN, - True) - - properties = {} - properties['scaleIO_volname'] = volname - properties['scaleIO_volume_id'] = volume.provider_id - properties['hostIP'] = connector['ip'] - properties[ - 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host - properties[ - 'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port - properties[ - 'serverUsername'] = ( - self.configuration.coprhd_scaleio_rest_server_username) - properties[ - 'serverPassword'] = ( - self.configuration.coprhd_scaleio_rest_server_password) - properties['iopsLimit'] = None - properties['bandwidthLimit'] = None - properties['serverToken'] = self.server_token - - initiator_ports = [] - initiator_port = self._get_client_id(properties['serverIP'], - properties['serverPort'], - properties['serverUsername'], - properties['serverPassword'], - properties['hostIP']) - initiator_ports.append(initiator_port) - - properties['serverToken'] = self.server_token - self.common.initialize_connection(volume, - 'scaleio', - initiator_ports, - connector['host']) - - dictobj = { - 'driver_volume_type': 'scaleio', - 'data': properties, - } - - return dictobj - - def terminate_connection(self, volume, 
connector, **kwargs): - """Disallow connection from connector.""" - - volname = volume.display_name - properties = {} - properties['scaleIO_volname'] = volname - properties['scaleIO_volume_id'] = volume.provider_id - properties['hostIP'] = connector['ip'] - properties[ - 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host - properties[ - 'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port - properties[ - 'serverUsername'] = ( - self.configuration.coprhd_scaleio_rest_server_username) - properties[ - 'serverPassword'] = ( - self.configuration.coprhd_scaleio_rest_server_password) - properties['serverToken'] = self.server_token - - initiator_port = self._get_client_id(properties['serverIP'], - properties['serverPort'], - properties['serverUsername'], - properties['serverPassword'], - properties['hostIP']) - init_ports = [] - init_ports.append(initiator_port) - self.common.terminate_connection(volume, - 'scaleio', - init_ports, - connector['host']) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from virtual pool/virtual array.""" - LOG.debug("Updating volume stats") - self._stats = self.common.update_volume_stats() - - def _get_client_id(self, server_ip, server_port, server_username, - server_password, sdc_ip): - ip_encoded = urllib.parse.quote(sdc_ip, '') - ip_double_encoded = urllib.parse.quote(ip_encoded, '') - - request = ("https://%s:%s/api/types/Sdc/instances/getByIp::%s/" % - (server_ip, six.text_type(server_port), ip_double_encoded)) - - LOG.info("ScaleIO get client id by ip request: %s", request) - - if self.configuration.scaleio_verify_server_certificate: - verify_cert = self.configuration.scaleio_server_certificate_path - else: - verify_cert = False - - r = requests.get( - request, auth=(server_username, self.server_token), - verify=verify_cert) - r = self._check_response( - r, request, server_ip, server_port, - server_username, server_password) - - sdc_id = r.json() - if not sdc_id: - msg = (_("Client with ip %s wasn't found ") % sdc_ip) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if r.status_code != http_client.OK and "errorCode" in sdc_id: - msg = (_("Error getting sdc id from ip %(sdc_ip)s:" - " %(sdc_id_message)s") % {'sdc_ip': sdc_ip, - 'sdc_id_message': sdc_id[ - 'message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - LOG.info("ScaleIO sdc id is %s", sdc_id) - return sdc_id - - def _check_response(self, response, request, - server_ip, server_port, - server_username, server_password): - if (response.status_code == http_client.UNAUTHORIZED) or ( - response.status_code == http_client.FORBIDDEN): - LOG.info( - "Token is invalid, going to re-login and get a new one") - - login_request = ("https://%s:%s/api/login" % - (server_ip, six.text_type(server_port))) - if self.configuration.scaleio_verify_server_certificate: - verify_cert = ( - 
self.configuration.scaleio_server_certificate_path) - else: - verify_cert = False - - r = requests.get( - login_request, auth=(server_username, server_password), - verify=verify_cert) - - token = r.json() - self.server_token = token - # repeat request with valid token - LOG.info("Going to perform request again %s with valid token", - request) - res = requests.get( - request, auth=(server_username, self.server_token), - verify=verify_cert) - return res - return response - - def retype(self, ctxt, volume, new_type, diff, host): - """Change the volume type.""" - return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/doc/source/configuration/block-storage/drivers/coprhd-driver.rst b/doc/source/configuration/block-storage/drivers/coprhd-driver.rst deleted file mode 100644 index f5230411159..00000000000 --- a/doc/source/configuration/block-storage/drivers/coprhd-driver.rst +++ /dev/null @@ -1,322 +0,0 @@ -===================================== -CoprHD FC, iSCSI, and ScaleIO drivers -===================================== - -CoprHD is an open source software-defined storage controller and API platform. -It enables policy-based management and cloud automation of storage resources -for block, object and file storage providers. -For more details, see `CoprHD `_. - -EMC ViPR Controller is the commercial offering of CoprHD. These same volume -drivers can also be considered as EMC ViPR Controller Block Storage drivers. - - -System requirements -~~~~~~~~~~~~~~~~~~~ - -CoprHD version 3.0 is required. Refer to the CoprHD documentation for -installation and configuration instructions. - -If you are using these drivers to integrate with EMC ViPR Controller, use -EMC ViPR Controller 3.0. - - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The following operations are supported: - -- Create, delete, attach, detach, retype, clone, and extend volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy a volume to an image. 
-- Copy an image to a volume. -- Clone a volume. -- Extend a volume. -- Retype a volume. -- Get volume statistics. -- Create, delete, and update consistency groups. -- Create and delete consistency group snapshots. - - -Driver options -~~~~~~~~~~~~~~ - -The following table contains the configuration options specific to the -CoprHD volume driver. - -.. config-table:: - :config-target: CoprHD - - cinder.volume.drivers.coprhd.common - cinder.volume.drivers.coprhd.scaleio - - -Preparation -~~~~~~~~~~~ - -This involves setting up the CoprHD environment first and then configuring -the CoprHD Block Storage driver. - -CoprHD ------- - -The CoprHD environment must meet specific configuration requirements to -support the OpenStack Block Storage driver. - -- CoprHD users must be assigned a Tenant Administrator role or a Project - Administrator role for the Project being used. CoprHD roles are configured - by CoprHD Security Administrators. Consult the CoprHD documentation for - details. - -- A CorprHD system administrator must execute the following configurations - using the CoprHD UI, CoprHD API, or CoprHD CLI: - - - Create CoprHD virtual array - - Create CoprHD virtual storage pool - - Virtual Array designated for iSCSI driver must have an IP network created - with appropriate IP storage ports - - Designated tenant for use - - Designated project for use - -.. note:: Use each back end to manage one virtual array and one virtual - storage pool. However, the user can have multiple instances of - CoprHD Block Storage driver, sharing the same virtual array and virtual - storage pool. 
- -- A typical CoprHD virtual storage pool will have the following values - specified: - - - Storage Type: Block - - Provisioning Type: Thin - - Protocol: iSCSI/Fibre Channel(FC)/ScaleIO - - Multi-Volume Consistency: DISABLED OR ENABLED - - Maximum Native Snapshots: A value greater than 0 allows the OpenStack user - to take Snapshots - - -CoprHD drivers - Single back end --------------------------------- - -**cinder.conf** - -#. Modify ``/etc/cinder/cinder.conf`` by adding the following lines, - substituting values for your environment: - - .. code-block:: ini - - [coprhd-iscsi] - volume_driver = cinder.volume.drivers.coprhd.iscsi.EMCCoprHDISCSIDriver - volume_backend_name = coprhd-iscsi - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - coprhd_emulate_snapshot = True or False, True if the CoprHD vpool has VMAX or VPLEX as the backing storage - -#. If you use the ScaleIO back end, add the following lines: - - .. code-block:: ini - - coprhd_scaleio_rest_gateway_host = - coprhd_scaleio_rest_gateway_port = 443 - coprhd_scaleio_rest_server_username = - coprhd_scaleio_rest_server_password = - scaleio_verify_server_certificate = True or False - scaleio_server_certificate_path = - -#. Specify the driver using the ``enabled_backends`` parameter:: - - enabled_backends = coprhd-iscsi - - .. note:: To utilize the Fibre Channel driver, replace the - ``volume_driver`` line above with:: - - volume_driver = cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver - - .. note:: To utilize the ScaleIO driver, replace the ``volume_driver`` line - above with:: - - volume_driver = cinder.volume.drivers.coprhd.fc.EMCCoprHDScaleIODriver - - .. note:: Set ``coprhd_emulate_snapshot`` to True if the CoprHD vpool has - VMAX or VPLEX as the back-end storage. For these type of back-end - storages, when a user tries to create a snapshot, an actual volume - gets created in the back end. - -#. 
Modify the ``rpc_response_timeout`` value in ``/etc/cinder/cinder.conf`` to - at least 5 minutes. If this entry does not already exist within the - ``cinder.conf`` file, add it in the ``[DEFAULT]`` section: - - .. code-block:: ini - - [DEFAULT] - # ... - rpc_response_timeout = 300 - -#. Now, restart the ``cinder-volume`` service. - -**Volume type creation and extra specs** - -#. Create OpenStack volume types: - - .. code-block:: console - - $ openstack volume type create - -#. Map the OpenStack volume type to the CoprHD virtual pool: - - .. code-block:: console - - $ openstack volume type set --property CoprHD:VPOOL= - -#. Map the volume type created to appropriate back-end driver: - - .. code-block:: console - - $ openstack volume type set --property volume_backend_name= - - -CoprHD drivers - Multiple back-ends ------------------------------------ - -**cinder.conf** - -#. Add or modify the following entries if you are planning to use multiple - back-end drivers: - - .. code-block:: ini - - enabled_backends = coprhddriver-iscsi,coprhddriver-fc,coprhddriver-scaleio - -#. Add the following at the end of the file: - - .. 
code-block:: ini - - [coprhddriver-iscsi] - volume_driver = cinder.volume.drivers.coprhd.iscsi.EMCCoprHDISCSIDriver - volume_backend_name = EMCCoprHDISCSIDriver - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - - - [coprhddriver-fc] - volume_driver = cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver - volume_backend_name = EMCCoprHDFCDriver - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - - - [coprhddriver-scaleio] - volume_driver = cinder.volume.drivers.coprhd.scaleio.EMCCoprHDScaleIODriver - volume_backend_name = EMCCoprHDScaleIODriver - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - coprhd_scaleio_rest_gateway_host = - coprhd_scaleio_rest_gateway_port = 443 - coprhd_scaleio_rest_server_username = - coprhd_scaleio_rest_server_password = - scaleio_verify_server_certificate = True or False - scaleio_server_certificate_path = - - -#. Restart the ``cinder-volume`` service. - - -**Volume type creation and extra specs** - -Setup the ``volume-types`` and ``volume-type`` to ``volume-backend`` -association: - -.. 
code-block:: console - - $ openstack volume type create "CoprHD High Performance ISCSI" - $ openstack volume type set "CoprHD High Performance ISCSI" --property CoprHD:VPOOL="High Performance ISCSI" - $ openstack volume type set "CoprHD High Performance ISCSI" --property volume_backend_name= EMCCoprHDISCSIDriver - - $ openstack volume type create "CoprHD High Performance FC" - $ openstack volume type set "CoprHD High Performance FC" --property CoprHD:VPOOL="High Performance FC" - $ openstack volume type set "CoprHD High Performance FC" --property volume_backend_name= EMCCoprHDFCDriver - - $ openstack volume type create "CoprHD performance SIO" - $ openstack volume type set "CoprHD performance SIO" --property CoprHD:VPOOL="Scaled Perf" - $ openstack volume type set "CoprHD performance SIO" --property volume_backend_name= EMCCoprHDScaleIODriver - - -ISCSI driver notes -~~~~~~~~~~~~~~~~~~ - -* The compute host must be added to the CoprHD along with its ISCSI - initiator. -* The ISCSI initiator must be associated with IP network on the CoprHD. - - -FC driver notes -~~~~~~~~~~~~~~~ - -* The compute host must be attached to a VSAN or fabric discovered - by CoprHD. -* There is no need to perform any SAN zoning operations. CoprHD will perform - the necessary operations automatically as part of the provisioning process. - - -ScaleIO driver notes -~~~~~~~~~~~~~~~~~~~~ - -* Install the ScaleIO SDC on the compute host. -* The compute host must be added as the SDC to the ScaleIO MDS - using the below commands:: - - /opt/emc/scaleio/sdc/bin/drv_cfg --add_mdm --ip List of MDM IPs - (starting with primary MDM and separated by comma) - Example: - /opt/emc/scaleio/sdc/bin/drv_cfg --add_mdm --ip - 10.247.78.45,10.247.78.46,10.247.78.47 - -This step has to be repeated whenever the SDC (compute host in this case) -is rebooted. 
- - -Consistency group configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the support of consistency group and consistency group snapshot -operations, use a text editor to edit the file ``/etc/cinder/policy.json`` and -change the values of the below fields as specified. Upon editing the file, -restart the ``c-api`` service:: - - "consistencygroup:create" : "", - "consistencygroup:delete": "", - "consistencygroup:get": "", - "consistencygroup:get_all": "", - "consistencygroup:update": "", - "consistencygroup:create_cgsnapshot" : "", - "consistencygroup:delete_cgsnapshot": "", - "consistencygroup:get_cgsnapshot": "", - "consistencygroup:get_all_cgsnapshots": "", - - -Names of resources in back-end storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -All the resources like volume, consistency group, snapshot, and consistency -group snapshot will use the display name in OpenStack for naming in the -back-end storage. diff --git a/doc/source/configuration/block-storage/volume-drivers.rst b/doc/source/configuration/block-storage/volume-drivers.rst index 52d638eb682..259f69f2ce1 100644 --- a/doc/source/configuration/block-storage/volume-drivers.rst +++ b/doc/source/configuration/block-storage/volume-drivers.rst @@ -31,7 +31,6 @@ Driver Configuration Reference drivers/lvm-volume-driver drivers/nfs-volume-driver drivers/sheepdog-driver - drivers/coprhd-driver drivers/datacore-volume-driver drivers/datera-volume-driver drivers/dell-equallogic-driver diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini index 3b6977ccac0..df028eefed1 100644 --- a/doc/source/reference/support-matrix.ini +++ b/doc/source/reference/support-matrix.ini @@ -15,9 +15,6 @@ ##################################################################### # Drivers: -[driver.coprhd] -title=CoprHD Storage Driver (FC, iSCSI, ScaleIO) - [driver.datacore] title=DataCore Storage Driver (FC, iSCSI) @@ -211,7 +208,6 @@ notes=A vendor driver is considered supported if the 
vendor is accurate results. If a vendor doesn't meet this requirement the driver is marked unsupported and is removed if the problem isn't resolved before the end of the subsequent release. -driver.coprhd=missing driver.datacore=missing driver.datera=complete driver.dell_emc_powermax=complete @@ -279,7 +275,6 @@ title=Extend an Attached Volume status=optional notes=Cinder supports the ability to extend a volume that is attached to an instance, but not all drivers are able to do this. -driver.coprhd=complete driver.datacore=complete driver.datera=complete driver.dell_emc_powermax=complete @@ -347,7 +342,6 @@ title=Snapshot Attachment status=optional notes=This is the ability to directly attach a snapshot to an instance like a volume. -driver.coprhd=missing driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=missing @@ -416,7 +410,6 @@ status=optional notes=Vendor drivers that support Quality of Service (QoS) are able to utilize QoS Specs associated with volume extra specs to control QoS settings on a per volume basis. -driver.coprhd=missing driver.datacore=missing driver.datera=complete driver.dell_emc_powermax=complete @@ -486,7 +479,6 @@ notes=Vendor drivers that support volume replication can report this capability to be utilized by the scheduler allowing users to request replicated volumes via extra specs. Such drivers are also then able to take advantage of Cinder's failover and failback commands. -driver.coprhd=missing driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete @@ -557,7 +549,6 @@ notes=Vendor drivers that support consistency groups are able to deletion. Grouping the volumes ensures that operations are only completed on the group of volumes, not individually, enabling the creation of consistent snapshots across a group. 
-driver.coprhd=complete driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete @@ -627,7 +618,6 @@ notes=If a volume driver supports thin provisioning it means that it will allow the scheduler to provision more storage space than physically exists on the backend. This may also be called 'oversubscription'. -driver.coprhd=missing driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete @@ -698,7 +688,6 @@ notes=Storage assisted volume migration is like host assisted volume assistance of the Cinder host. Vendor drivers that implement this can migrate volumes completely through the storage backend's functionality. -driver.coprhd=missing driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete @@ -769,7 +758,6 @@ notes=Vendor drivers that report multi-attach support are able It is important to note that a clustered file system that supports multi-attach functionality is required to use multi- attach functionality otherwise data corruption may occur. -driver.coprhd=missing driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete diff --git a/doc/source/reference/support-matrix.rst b/doc/source/reference/support-matrix.rst index 13d629da261..53f315882bb 100644 --- a/doc/source/reference/support-matrix.rst +++ b/doc/source/reference/support-matrix.rst @@ -55,3 +55,13 @@ matrix we include the list of required functions here for reference. .. support_matrix:: support-matrix.ini +Driver Removal History +~~~~~~~~~~~~~~~~~~~~~~ + +This section will be used to track driver removal starting from the Rocky +release. 
+ +* Rocky + * CoprHD Storage Driver (FC, iSCSI, ScaleIO) + + diff --git a/releasenotes/notes/coprhd-remove-the-driver-00ef2c41f4c7dccd.yaml b/releasenotes/notes/coprhd-remove-the-driver-00ef2c41f4c7dccd.yaml new file mode 100644 index 00000000000..916b4543084 --- /dev/null +++ b/releasenotes/notes/coprhd-remove-the-driver-00ef2c41f4c7dccd.yaml @@ -0,0 +1,12 @@ +--- +upgrade: + - | + With the removal of the CoprHD Volume Driver, any volumes being used by Cinder + within a CoprHD backend should be migrated to a supported storage + backend before upgrade. +other: + - | + After being marked unsupported in the Rocky release, the CoprHD + driver is now being removed in Stein. The vendor has + indicated that this is desired as the CoprHD driver has been + deprecated.