diff --git a/cinder/opts.py b/cinder/opts.py index 91da0da7b84..d87bc0ccf84 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -131,6 +131,8 @@ from cinder.volume.drivers.nexenta import options as \ cinder_volume_drivers_nexenta_options from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs from cinder.volume.drivers import nimble as cinder_volume_drivers_nimble +from cinder.volume.drivers.prophetstor import options as \ + cinder_volume_drivers_prophetstor_options from cinder.volume.drivers import pure as cinder_volume_drivers_pure from cinder.volume.drivers import qnap as cinder_volume_drivers_qnap from cinder.volume.drivers import quobyte as cinder_volume_drivers_quobyte @@ -334,6 +336,7 @@ def list_opts(): cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS, cinder_volume_drivers_nfs.nfs_opts, cinder_volume_drivers_nimble.nimble_opts, + cinder_volume_drivers_prophetstor_options.DPL_OPTS, cinder_volume_drivers_pure.PURE_OPTS, cinder_volume_drivers_qnap.qnap_opts, cinder_volume_drivers_quobyte.volume_opts, diff --git a/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py b/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py new file mode 100644 index 00000000000..f8eeeaa1b1e --- /dev/null +++ b/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py @@ -0,0 +1,931 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
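Editorial note on the cinder/opts.py hunk above: list_opts() is the aggregation point that oslo-config-generator consumes, so importing the ProphetStor options module and appending DPL_OPTS is what makes the driver's options appear in the generated cinder.conf sample. The options module itself is not part of this excerpt; the sketch below shows the shape such a module typically takes, inferred from the dpl_port and dpl_pool configuration attributes used by the tests and drivers later in this patch. Option names match the patch; defaults and help strings are illustrative assumptions only.

```python
# Hypothetical sketch of cinder/volume/drivers/prophetstor/options.py,
# inferred from how the rest of this patch consumes it.  Option names are
# the ones the code below relies on; defaults and help text are invented.
from oslo_config import cfg

d_opts = [
    cfg.StrOpt('dpl_pool',
               default='',
               help='Pool UUID in which DPL volumes are created '
                    '(illustrative help text).'),
    cfg.PortOpt('dpl_port',
                default=8357,  # assumed default; the unit tests below use 8356
                help='DPL port number (illustrative help text).'),
]

# cinder/opts.py refers to this list as
# cinder_volume_drivers_prophetstor_options.DPL_OPTS.
DPL_OPTS = d_opts

cfg.CONF.register_opts(DPL_OPTS)
```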
+ +import errno +import re + +import mock +from oslo_utils import units +from six.moves import http_client + +from cinder import context +from cinder import exception +from cinder.objects import fields +from cinder import test +from cinder.tests.unit import fake_constants +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import utils as test_utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.prophetstor import dpl_iscsi as DPLDRIVER +from cinder.volume.drivers.prophetstor import dplcommon as DPLCOMMON +from cinder.volume import group_types + +POOLUUID = 'ac33fc6e417440d5a1ef27d7231e1cc4' +VOLUMEUUID = 'a000000000000000000000000000001' +INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa' +DATA_IN_CONNECTOR = {'initiator': INITIATOR} +DATA_SERVER_INFO = 0, { + 'metadata': {'vendor': 'ProphetStor', + 'version': '1.5'}} + +DATA_POOLS = 0, { + 'children': [POOLUUID] +} + +DATA_POOLINFO = 0, { + 'capabilitiesURI': '', + 'children': [], + 'childrenrange': '', + 'completionStatus': 'Complete', + 'metadata': {'available_capacity': 4294967296, + 'ctime': 1390551362349, + 'vendor': 'prophetstor', + 'version': '1.5', + 'display_description': 'Default Pool', + 'display_name': 'default_pool', + 'event_uuid': '4f7c4d679a664857afa4d51f282a516a', + 'physical_device': {'cache': [], + 'data': ['disk_uuid_0', + 'disk_uuid_1', + 'disk_uuid_2'], + 'log': [], + 'spare': []}, + 'pool_uuid': POOLUUID, + 'properties': {'raid_level': 'raid0'}, + 'state': 'Online', + 'used_capacity': 0, + 'total_capacity': 4294967296, + 'zpool_guid': '8173612007304181810'}, + 'objectType': 'application/cdmi-container', + 'percentComplete': 100} + +DATA_ASSIGNVDEV = 0, { + 'children': [], + 'childrenrange': '', + 'completionStatus': 'Complete', + 'domainURI': '', + 'exports': {'Network/iSCSI': [ + {'logical_unit_name': '', + 'logical_unit_number': '101', + 'permissions': [INITIATOR], + 'portals': ['172.31.1.210:3260'], + 'target_identifier': + 'iqn.2013-09.com.prophetstor:hypervisor.886423051816' + }]}, + 'metadata': {'ctime': 0, + 'event_uuid': 'c11e90287e9348d0b4889695f1ec4be5', + 'type': 'volume'}, + 'objectID': '', + 'objectName': 'd827e23d403f4f12bb208a6fec208fd8', + 'objectType': 'application/cdmi-container', + 'parentID': '8daa374670af447e8efea27e16bf84cd', + 'parentURI': '/dpl_volume', + 'snapshots': [] +} + +DATA_OUTPUT = 0, None + +MOD_OUTPUT = {'status': 'available'} + +DATA_IN_GROUP = {'id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + 'name': 'group123', + 'description': 'des123', + 'status': ''} + +DATA_IN_VOLUME = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', + 'display_name': 'abc123', + 'display_description': '', + 'size': 10, + 'host': "hostname@backend#%s" % POOLUUID} + +DATA_IN_VOLUME_VG = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee', + 'display_name': 'abc123', + 'display_description': '', + 'size': 10, + 'group_id': + 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + 'status': 'available', + 'host': "hostname@backend#%s" % POOLUUID} + +DATA_IN_REMOVE_VOLUME_VG = { + 'id': 'fe2dbc515810451dab2f8c8a48d15bee', + 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee', + 'display_description': '', + 'size': 10, + 'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + 'status': 'available', + 'host': "hostname@backend#%s" % POOLUUID} + +DATA_IN_VOLUME1 = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4bef', + 'display_name': 'abc456', + 'display_description': '', + 'size': 10, + 'host': "hostname@backend#%s" % POOLUUID} + +DATA_IN_CG_SNAPSHOT = { + 'group_id': 
'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + 'id': 'cgsnapshot1', + 'name': 'cgsnapshot1', + 'description': 'cgsnapshot1', + 'status': ''} + +DATA_IN_SNAPSHOT = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee', + 'volume_id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', + 'display_name': 'snapshot1', + 'display_description': '', + 'volume_size': 5} + +DATA_OUT_SNAPSHOT_CG = { + 'id': 'snapshot1', + 'volume_id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', + 'display_name': 'snapshot1', + 'display_description': '', + 'group_snapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'} + +DATA_OUT_CG = { + "objectType": "application/cdmi-container", + "objectID": "fe2dbc515810451dab2f8c8a48d15bee", + "objectName": "", + "parentURI": "/dpl_volgroup", + "parentID": "fe2dbc515810451dab2f8c8a48d15bee", + "domainURI": "", + "capabilitiesURI": "", + "completionStatus": "Complete", + "percentComplete": 100, + "metadata": + { + "type": "volume|snapshot|replica", + "volume_group_uuid": "", + "origin_uuid": "", + "snapshot_uuid": "", + "display_name": "", + "display_description": "", + "ctime": 12345678, + "total_capacity": 1024, + "snapshot_used_capacity": 0, + "maximum_snapshot": 1024, + "snapshot_quota": 0, + "state": "", + "properties": + { + "snapshot_rotation": True, + } + }, + "childrenrange": "", + "children": + [ + 'fe2dbc515810451dab2f8c8a48d15bee', + ], +} + + +class TestProphetStorDPLVolume(test.TestCase): + + def _gen_snapshot_url(self, vdevid, snapshotid): + snapshot_url = '/%s/%s/%s' % (vdevid, DPLCOMMON.DPL_OBJ_SNAPSHOT, + snapshotid) + return snapshot_url + + def setUp(self): + super(TestProphetStorDPLVolume, self).setUp() + self.dplcmd = DPLCOMMON.DPLVolume('1.1.1.1', 8356, 'admin', 'password') + self.DPL_MOCK = mock.MagicMock() + self.dplcmd.objCmd = self.DPL_MOCK + self.DPL_MOCK.send_cmd.return_value = DATA_OUTPUT + + def test_getserverinfo(self): + self.dplcmd.get_server_info() + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_SYSTEM), + None, + [http_client.OK, http_client.ACCEPTED]) + + def test_createvdev(self): + self.dplcmd.create_vdev(DATA_IN_VOLUME['id'], + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + POOLUUID, + int(DATA_IN_VOLUME['size']) * units.Gi) + + metadata = {} + metadata['display_name'] = DATA_IN_VOLUME['display_name'] + metadata['display_description'] = DATA_IN_VOLUME['display_description'] + metadata['pool_uuid'] = POOLUUID + metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi + metadata['maximum_snapshot'] = 1024 + metadata['properties'] = dict(thin_provision=True) + params = {} + params['metadata'] = metadata + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) + + def test_extendvdev(self): + self.dplcmd.extend_vdev(DATA_IN_VOLUME['id'], + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + int(DATA_IN_VOLUME['size']) * units.Gi) + metadata = {} + metadata['display_name'] = DATA_IN_VOLUME['display_name'] + metadata['display_description'] = DATA_IN_VOLUME['display_description'] + metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi + metadata['maximum_snapshot'] = 1024 + params = {} + params['metadata'] = metadata + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, 
+ [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) + + def test_deletevdev(self): + self.dplcmd.delete_vdev(DATA_IN_VOLUME['id'], True) + metadata = {} + params = {} + metadata['force'] = True + params['metadata'] = metadata + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'DELETE', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND, + http_client.NO_CONTENT]) + + def test_createvdevfromsnapshot(self): + self.dplcmd.create_vdev_from_snapshot( + DATA_IN_VOLUME['id'], + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + DATA_IN_SNAPSHOT['id'], + POOLUUID) + metadata = {} + params = {} + metadata['snapshot_operation'] = 'copy' + metadata['display_name'] = DATA_IN_VOLUME['display_name'] + metadata['display_description'] = DATA_IN_VOLUME['display_description'] + metadata['pool_uuid'] = POOLUUID + metadata['maximum_snapshot'] = 1024 + metadata['properties'] = dict(thin_provision=True) + params['metadata'] = metadata + params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) + + def test_getpool(self): + self.dplcmd.get_pool(POOLUUID) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL, + POOLUUID), + None, + [http_client.OK, http_client.ACCEPTED]) + + def test_clonevdev(self): + self.dplcmd.clone_vdev( + DATA_IN_VOLUME['id'], + DATA_IN_VOLUME1['id'], + POOLUUID, + DATA_IN_VOLUME['display_name'], + DATA_IN_VOLUME['display_description'], + int(DATA_IN_VOLUME['size']) * units.Gi + ) + metadata = {} + params = {} + metadata["snapshot_operation"] = "clone" + metadata["display_name"] = DATA_IN_VOLUME['display_name'] + metadata["display_description"] = DATA_IN_VOLUME['display_description'] + metadata["pool_uuid"] = POOLUUID + metadata["total_capacity"] = int(DATA_IN_VOLUME['size']) * units.Gi + metadata['maximum_snapshot'] = 1024 + metadata['properties'] = dict(thin_provision=True) + params["metadata"] = metadata + params["copy"] = DATA_IN_VOLUME['id'] + + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME1['id']), + params, + [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) + + def test_createvdevsnapshot(self): + self.dplcmd.create_vdev_snapshot( + DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id'], + DATA_IN_SNAPSHOT['display_name'], + DATA_IN_SNAPSHOT['display_description'] + ) + metadata = {} + params = {} + metadata['display_name'] = DATA_IN_SNAPSHOT['display_name'] + metadata['display_description'] = ( + DATA_IN_SNAPSHOT['display_description']) + params['metadata'] = metadata + params['snapshot'] = DATA_IN_SNAPSHOT['id'] + + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) + + def test_getvdev(self): + self.dplcmd.get_vdev(DATA_IN_VOLUME['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + None, + [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND]) + 
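Editorial note: the assertions above all exercise the same wire convention, a versioned CDMI-style URL of the form /v1/dpl_volume/<id>/ plus a params dict whose metadata member carries the operation details. As a compact reference, this is the request that test_createvdev expects create_vdev() to hand to send_cmd, restated with the literal values from the test data at the top of this module (no new behaviour, just the same call spelled out):

```python
# The send_cmd() call asserted by test_createvdev above, with the test
# module's own constants substituted in.
method = 'PUT'
url = '/v1/dpl_volume/c11e902-87e9-348d-0b48-89695f1ec4be5/'
params = {
    'metadata': {
        'display_name': 'abc123',
        'display_description': '',
        'pool_uuid': 'ac33fc6e417440d5a1ef27d7231e1cc4',
        'total_capacity': 10 * 1024 ** 3,   # DATA_IN_VOLUME['size'] in bytes
        'maximum_snapshot': 1024,
        'properties': {'thin_provision': True},
    },
}
expected_status = [200, 202, 201]  # http_client.OK, ACCEPTED, CREATED
```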
+ def test_getvdevstatus(self): + self.dplcmd.get_vdev_status(DATA_IN_VOLUME['id'], '123456') + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id'], + '123456'), + None, + [http_client.OK, http_client.NOT_FOUND]) + + def test_getpoolstatus(self): + self.dplcmd.get_pool_status(POOLUUID, '123456') + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_POOL, + POOLUUID, + '123456'), + None, + [http_client.OK, http_client.NOT_FOUND]) + + def test_assignvdev(self): + self.dplcmd.assign_vdev( + DATA_IN_VOLUME['id'], + 'iqn.1993-08.org.debian:01:test1', + '', + '1.1.1.1:3260', + 0 + ) + params = {} + metadata = {} + exports = {} + metadata['export_operation'] = 'assign' + exports['Network/iSCSI'] = {} + target_info = {} + target_info['logical_unit_number'] = 0 + target_info['logical_unit_name'] = '' + permissions = [] + portals = [] + portals.append('1.1.1.1:3260') + permissions.append('iqn.1993-08.org.debian:01:test1') + target_info['permissions'] = permissions + target_info['portals'] = portals + exports['Network/iSCSI'] = target_info + + params['metadata'] = metadata + params['exports'] = exports + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) + + def test_unassignvdev(self): + self.dplcmd.unassign_vdev(DATA_IN_VOLUME['id'], + 'iqn.1993-08.org.debian:01:test1', + '') + params = {} + metadata = {} + exports = {} + metadata['export_operation'] = 'unassign' + params['metadata'] = metadata + + exports['Network/iSCSI'] = {} + exports['Network/iSCSI']['target_identifier'] = '' + permissions = [] + permissions.append('iqn.1993-08.org.debian:01:test1') + exports['Network/iSCSI']['permissions'] = permissions + + params['exports'] = exports + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'PUT', + '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id']), + params, + [http_client.OK, http_client.ACCEPTED, + http_client.NO_CONTENT, http_client.NOT_FOUND]) + + def test_deletevdevsnapshot(self): + self.dplcmd.delete_vdev_snapshot(DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id']) + params = {} + params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], + DATA_IN_SNAPSHOT['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'DELETE', + '/%s/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id'], + DPLCOMMON.DPL_OBJ_SNAPSHOT, + DATA_IN_SNAPSHOT['id']), + None, + [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT, + http_client.NOT_FOUND]) + + def test_listvdevsnapshots(self): + self.dplcmd.list_vdev_snapshots(DATA_IN_VOLUME['id']) + self.DPL_MOCK.send_cmd.assert_called_once_with( + 'GET', + '/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, + DPLCOMMON.DPL_OBJ_VOLUME, + DATA_IN_VOLUME['id'], + DPLCOMMON.DPL_OBJ_SNAPSHOT), + None, + [http_client.OK]) + + +class TestProphetStorDPLDriver(test.TestCase): + + def __init__(self, method): + super(TestProphetStorDPLDriver, self).__init__(method) + + def _conver_uuid2hex(self, strID): + return strID.replace('-', '') + + def setUp(self): + super(TestProphetStorDPLDriver, self).setUp() + self.configuration = mock.Mock(conf.Configuration) + self.configuration.san_ip = '1.1.1.1' + self.configuration.dpl_port = 8356 + 
self.configuration.san_login = 'admin' + self.configuration.san_password = 'password' + self.configuration.dpl_pool = POOLUUID + self.configuration.target_port = 3260 + self.configuration.san_is_local = False + self.configuration.san_thin_provision = True + self.configuration.driver_ssl_cert_verify = False + self.configuration.driver_ssl_cert_path = None + self.context = context.get_admin_context() + self.DPL_MOCK = mock.MagicMock() + self.DB_MOCK = mock.MagicMock() + self.dpldriver = DPLDRIVER.DPLISCSIDriver( + configuration=self.configuration) + self.dpldriver.dpl = self.DPL_MOCK + self.dpldriver.db = self.DB_MOCK + self.dpldriver.do_setup(self.context) + + def test_get_volume_stats(self): + self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO + self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO + res = self.dpldriver.get_volume_stats(True) + self.assertEqual('ProphetStor', res['vendor_name']) + self.assertEqual('1.5', res['driver_version']) + pool = res["pools"][0] + self.assertEqual(4, pool['total_capacity_gb']) + self.assertEqual(4, pool['free_capacity_gb']) + self.assertEqual(0, pool['reserved_percentage']) + self.assertFalse(pool['QoS_support']) + + def test_create_volume(self): + volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME['id'], + display_name=DATA_IN_VOLUME['display_name'], + size=DATA_IN_VOLUME['size'], + host=DATA_IN_VOLUME['host']) + self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT + self.dpldriver.create_volume(volume) + self.DPL_MOCK.create_vdev.assert_called_once_with( + self._conver_uuid2hex(volume.id), + volume.display_name, + volume.display_description, + self.configuration.dpl_pool, + int(volume.size) * units.Gi, + True) + + def test_create_volume_without_pool(self): + volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME['id'], + display_name=DATA_IN_VOLUME['display_name'], + size=DATA_IN_VOLUME['size'], + host=DATA_IN_VOLUME['host']) + self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT + self.configuration.dpl_pool = "" + volume.host = "host@backend" # missing pool + self.assertRaises(exception.InvalidHost, self.dpldriver.create_volume, + volume=volume) + + def test_create_volume_with_configuration_pool(self): + volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME['id'], + display_name=DATA_IN_VOLUME['display_name'], + size=DATA_IN_VOLUME['size'], + host="host@backend") + self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT + self.dpldriver.create_volume(volume) + self.DPL_MOCK.create_vdev.assert_called_once_with( + self._conver_uuid2hex(volume.id), + volume.display_name, volume.display_description, + self.configuration.dpl_pool, int(volume.size) * units.Gi, True) + + def test_create_volume_of_group(self): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + group = test_utils.create_group( + self.context, + id=fake_constants.CONSISTENCY_GROUP_ID, + host='host@backend#unit_test_pool', + group_type_id=group_type.id) + self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT + self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT + volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME_VG['id'], + display_name=DATA_IN_VOLUME_VG['display_name'], + size=DATA_IN_VOLUME_VG['size'], + group_id=group.id, + host=DATA_IN_VOLUME_VG['host']) + self.dpldriver.create_volume(volume) + self.DPL_MOCK.create_vdev.assert_called_once_with( + self._conver_uuid2hex(volume.id), + volume.display_name, + volume.display_description, + 
self.configuration.dpl_pool, + int(volume.size) * units.Gi, + True) + self.DPL_MOCK.join_vg.assert_called_once_with( + self._conver_uuid2hex(volume.id), + self._conver_uuid2hex(volume.group_id)) + + def test_delete_volume(self): + volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME['id'], + display_name=DATA_IN_VOLUME['display_name'], + size=DATA_IN_VOLUME['size'], + host=DATA_IN_VOLUME['host']) + self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT + self.dpldriver.delete_volume(volume) + self.DPL_MOCK.delete_vdev.assert_called_once_with( + self._conver_uuid2hex(volume.id)) + + def test_delete_volume_of_group(self): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + group = test_utils.create_group( + self.context, + id=fake_constants.CONSISTENCY_GROUP_ID, + host='host@backend#unit_test_pool', + group_type_id=group_type.id) + volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME_VG['id'], + display_name=DATA_IN_VOLUME_VG['display_name'], + size=DATA_IN_VOLUME_VG['size'], + group_id=group.id, + host=DATA_IN_VOLUME_VG['host']) + self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT + self.DPL_MOCK.leave_vg.return_volume = DATA_OUTPUT + self.dpldriver.delete_volume(volume) + self.DPL_MOCK.leave_vg.assert_called_once_with( + self._conver_uuid2hex(volume.id), + self._conver_uuid2hex(volume.group_id) + ) + self.DPL_MOCK.delete_vdev.assert_called_once_with( + self._conver_uuid2hex(volume.id)) + + def test_create_volume_from_snapshot(self): + self.DPL_MOCK.create_vdev_from_snapshot.return_value = DATA_OUTPUT + self.DPL_MOCK.extend_vdev.return_value = DATA_OUTPUT + volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME_VG['id'], + display_name=DATA_IN_VOLUME_VG['display_name'], + size=DATA_IN_VOLUME_VG['size'], + host=DATA_IN_VOLUME_VG['host']) + self.dpldriver.create_volume_from_snapshot( + volume, DATA_IN_SNAPSHOT) + self.DPL_MOCK.create_vdev_from_snapshot.assert_called_once_with( + self._conver_uuid2hex(volume.id), + volume.display_name, + volume.display_description, + self._conver_uuid2hex(volume.id), + self.configuration.dpl_pool, + True) + self.DPL_MOCK.extend_vdev.assert_called_once_with( + self._conver_uuid2hex(volume.id), + volume.display_name, + volume.display_description, + volume.size * units.Gi) + + def test_create_cloned_volume(self): + new_volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME1['id'], + display_name=DATA_IN_VOLUME1['display_name'], + size=DATA_IN_VOLUME1['size'], + host=DATA_IN_VOLUME1['host']) + src_volume = test_utils.create_volume( + self.context, + id=DATA_IN_VOLUME['id']) + self.DPL_MOCK.clone_vdev.return_value = DATA_OUTPUT + self.dpldriver.create_cloned_volume(new_volume, src_volume) + self.DPL_MOCK.clone_vdev.assert_called_once_with( + self._conver_uuid2hex(src_volume.id), + self._conver_uuid2hex(new_volume.id), + self.configuration.dpl_pool, + new_volume.display_name, + new_volume.display_description, + int(new_volume.size) * + units.Gi, + True) + + def test_create_snapshot(self): + self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT + self.dpldriver.create_snapshot(DATA_IN_SNAPSHOT) + self.DPL_MOCK.create_vdev_snapshot.assert_called_once_with( + self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), + self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']), + DATA_IN_SNAPSHOT['display_name'], + DATA_IN_SNAPSHOT['display_description']) + + def test_delete_snapshot(self): + self.DPL_MOCK.delete_vdev_snapshot.return_value = 
DATA_OUTPUT + self.dpldriver.delete_snapshot(DATA_IN_SNAPSHOT) + self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( + self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), + self._conver_uuid2hex(DATA_IN_SNAPSHOT['id'])) + + def test_initialize_connection(self): + self.DPL_MOCK.assign_vdev.return_value = DATA_ASSIGNVDEV + self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV + res = self.dpldriver.initialize_connection(DATA_IN_VOLUME, + DATA_IN_CONNECTOR) + self.assertEqual('iscsi', res['driver_volume_type']) + self.assertEqual(101, res['data']['target_lun']) + self.assertTrue(res['data']['target_discovered']) + self.assertEqual('172.31.1.210:3260', res['data']['target_portal']) + self.assertEqual( + 'iqn.2013-09.com.prophetstor:hypervisor.886423051816', + res['data']['target_iqn']) + + def test_terminate_connection(self): + self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT + self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) + self.DPL_MOCK.unassign_vdev.assert_called_once_with( + self._conver_uuid2hex(DATA_IN_VOLUME['id']), + DATA_IN_CONNECTOR['initiator']) + + def test_terminate_connection_volume_detached(self): + self.DPL_MOCK.unassign_vdev.return_value = errno.ENODATA, None + self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) + self.DPL_MOCK.unassign_vdev.assert_called_once_with( + self._conver_uuid2hex(DATA_IN_VOLUME['id']), + DATA_IN_CONNECTOR['initiator']) + + def test_terminate_connection_failed(self): + self.DPL_MOCK.unassign_vdev.return_value = errno.EFAULT, None + ex = self.assertRaises( + exception.VolumeBackendAPIException, + self.dpldriver.terminate_connection, + volume=DATA_IN_VOLUME, connector=DATA_IN_CONNECTOR) + self.assertIsNotNone( + re.match(r".*Flexvisor failed", ex.msg)) + + def test_get_pool_info(self): + self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO + _, res = self.dpldriver._get_pool_info(POOLUUID) + self.assertEqual(4294967296, res['metadata']['available_capacity']) + self.assertEqual(1390551362349, res['metadata']['ctime']) + self.assertEqual('Default Pool', + res['metadata']['display_description']) + self.assertEqual('default_pool', + res['metadata']['display_name']) + self.assertEqual('4f7c4d679a664857afa4d51f282a516a', + res['metadata']['event_uuid']) + self.assertEqual( + {'cache': [], + 'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'], + 'log': [], + 'spare': []}, + res['metadata']['physical_device']) + self.assertEqual(POOLUUID, res['metadata']['pool_uuid']) + self.assertEqual( + {'raid_level': 'raid0'}, + res['metadata']['properties']) + self.assertEqual('Online', res['metadata']['state']) + self.assertEqual(4294967296, res['metadata']['total_capacity']) + self.assertEqual('8173612007304181810', res['metadata']['zpool_guid']) + + def test_create_group(self): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + group = test_utils.create_group( + self.context, + id=fake_constants.CONSISTENCY_GROUP_ID, + host='host@backend#unit_test_pool', + group_type_id=group_type.id) + self.DPL_MOCK.create_vg.return_value = DATA_OUTPUT + model_update = self.dpldriver.create_group(self.context, group) + self.DPL_MOCK.create_vg.assert_called_once_with( + self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID), + 'test_group', + 'this is a test group') + self.assertDictEqual({'status': ( + fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) + + def test_delete_group(self): + group_type = group_types.create( + self.context, + 'group', + 
{'consistent_group_snapshot_enabled': ' True'} + ) + group = test_utils.create_group( + self.context, + id=fake_constants.CONSISTENCY_GROUP_ID, + host='host@backend#unit_test_pool', + group_type_id=group_type.id) + self.DB_MOCK.volume_get_all_by_group.return_value = ( + [DATA_IN_VOLUME_VG]) + self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT + self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT + model_update, volumes = self.dpldriver.delete_group( + self.context, group, []) + self.DPL_MOCK.delete_vg.assert_called_once_with( + self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID)) + self.DPL_MOCK.delete_vdev.assert_called_once_with( + self._conver_uuid2hex((DATA_IN_VOLUME_VG['id']))) + self.assertDictEqual({'status': ( + fields.ConsistencyGroupStatus.DELETED)}, model_update) + + def test_update_group(self): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) + self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT + self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT + group = test_utils.create_group( + self.context, + id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + host='host@backend#unit_test_pool', + group_type_id=group_type.id) + vol_add = test_utils.create_volume( + self.context, + id=fake_constants.VOLUME2_ID, + display_name=DATA_IN_VOLUME_VG['display_name'], + size=DATA_IN_VOLUME_VG['size'], + group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + host=DATA_IN_VOLUME_VG['host']) + vol_del = test_utils.create_volume( + self.context, + id=DATA_IN_REMOVE_VOLUME_VG['id'], + display_name=DATA_IN_REMOVE_VOLUME_VG['display_name'], + size=DATA_IN_REMOVE_VOLUME_VG['size'], + group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + host=DATA_IN_REMOVE_VOLUME_VG['host']) + (model_update, add_vols, remove_vols) = ( + self.dpldriver.update_group( + self.context, group, [vol_add], [vol_del])) + self.DPL_MOCK.join_vg.assert_called_once_with( + self._conver_uuid2hex(vol_add.id), + self._conver_uuid2hex(group.id)) + self.DPL_MOCK.leave_vg.assert_called_once_with( + self._conver_uuid2hex(vol_del.id), + self._conver_uuid2hex(group.id)) + self.assertDictEqual({'status': ( + fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) + + def test_update_group_exception_join(self): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) + self.DPL_MOCK.join_vg.return_value = -1, None + self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT + volume = test_utils.create_volume( + self.context, + id=fake_constants.VOLUME2_ID, + display_name=DATA_IN_VOLUME_VG['display_name'], + size=DATA_IN_VOLUME_VG['size'], + host=DATA_IN_VOLUME_VG['host']) + group = test_utils.create_group( + self.context, + id=fake_constants.CONSISTENCY_GROUP_ID, + host='host@backend#unit_test_pool', + group_type_id=group_type.id) + self.assertRaises(exception.VolumeBackendAPIException, + self.dpldriver.update_group, + context=None, + group=group, + add_volumes=[volume], + remove_volumes=None) + + def test_update_group_exception_leave(self): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) + self.DPL_MOCK.leave_vg.return_value = -1, None + volume = test_utils.create_volume( + self.context, + id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', + display_name=DATA_IN_VOLUME_VG['display_name'], + 
size=DATA_IN_VOLUME_VG['size'], + host=DATA_IN_VOLUME_VG['host']) + group = test_utils.create_group( + self.context, + id=fake_constants.CONSISTENCY_GROUP_ID, + host='host@backend#unit_test_pool', + group_type_id=group_type.id) + self.assertRaises(exception.VolumeBackendAPIException, + self.dpldriver.update_group, + context=None, + group=group, + add_volumes=None, + remove_volumes=[volume]) + + @mock.patch( + 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') + def test_create_group_snapshot(self, get_all_for_group_snapshot): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) + snapshot_obj.group_id = \ + DATA_IN_CG_SNAPSHOT['group_id'] + snapshot_obj.group_type_id = group_type.id + get_all_for_group_snapshot.return_value = [snapshot_obj] + self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT + model_update, snapshots = self.dpldriver.create_group_snapshot( + self.context, snapshot_obj, []) + self.assertDictEqual({'status': 'available'}, model_update) + + @mock.patch( + 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') + def test_delete_group_snapshot(self, get_all_for_group_snapshot): + group_type = group_types.create( + self.context, + 'group', + {'consistent_group_snapshot_enabled': ' True'} + ) + snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) + snapshot_obj.group_id = \ + DATA_IN_CG_SNAPSHOT['group_id'] + snapshot_obj.group_type_id = group_type.id + get_all_for_group_snapshot.return_value = [snapshot_obj] + self.DPL_MOCK.delete_group_snapshot.return_value = DATA_OUTPUT + model_update, snapshots = self.dpldriver.delete_group_snapshot( + self.context, snapshot_obj, []) + self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( + self._conver_uuid2hex(snapshot_obj.group_id), + self._conver_uuid2hex(snapshot_obj.id), + True) + self.assertDictEqual({'status': 'deleted'}, model_update) diff --git a/cinder/volume/drivers/prophetstor/__init__.py b/cinder/volume/drivers/prophetstor/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/prophetstor/dpl_fc.py b/cinder/volume/drivers/prophetstor/dpl_fc.py new file mode 100644 index 00000000000..14824117a7e --- /dev/null +++ b/cinder/volume/drivers/prophetstor/dpl_fc.py @@ -0,0 +1,413 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
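Editorial note on the FC driver that follows: it builds its view of the backend from two structures. _get_fc_channel() keeps only the storage_adapter entries of type 'fc' from get_server_info(), and _get_targets() flattens the [uuid, name, address] triples returned by get_target_list('target'); initialize_connection() then prefers targets whose address matches an FC adapter's hardware_address. The sketch below illustrates those shapes and the matching step with invented identifiers; it is an illustration, not code from this patch.

```python
# Illustrative only: invented identifiers showing the shapes that
# _get_fc_channel() and _get_targets() (below) extract, and how
# initialize_connection() pairs them up.
server_info = {
    'metadata': {
        'storage_adapter': {
            'adapter-uuid-1': {
                'display_name': 'fc0',
                'display_description': 'FC port 0',
                'hardware_address': '21:00:00:24:ff:45:fb:22',
                'type': 'fc',          # only 'fc' adapters are kept
                'speed': '8Gb',
                'state': 'online',
            },
        },
    },
}

target_list = {
    # get_target_list('target') children: [uuid, name, address]
    'children': [
        ['target-uuid-1', 'tgt0', '21:00:00:24:ff:45:fb:22'],
    ],
}

fc_adapters = {
    uuid: info
    for uuid, info in server_info['metadata']['storage_adapter'].items()
    if info['type'] == 'fc'
}
targets = {t[0]: {'targetUuid': t[0], 'targetName': t[1], 'targetAddr': t[2]}
           for t in target_list['children']}

# Preferred targets are those whose address matches an FC adapter.
prefer = {uuid: tgt for uuid, tgt in targets.items()
          if any(fc['hardware_address'] == tgt['targetAddr']
                 for fc in fc_adapters.values())}
```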
+ +import errno + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.prophetstor import dplcommon +from cinder.zonemanager import utils as fczm_utils + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class DPLFCDriver(dplcommon.DPLCOMMONDriver, + driver.FibreChannelDriver): + def __init__(self, *args, **kwargs): + super(DPLFCDriver, self).__init__(*args, **kwargs) + + def _get_fc_channel(self): + """Get FibreChannel info. + + :returns: fcInfos[uuid] + fcInfo[uuid]['display_name'] + fcInfo[uuid]['display_description'] + fcInfo[uuid]['hardware_address'] + fcInfo[uuid]['type'] + fcInfo[uuid]['speed'] + fcInfo[uuid]['state'] + """ + output = None + fcInfos = {} + try: + retCode, output = self.dpl.get_server_info() + if retCode == 0 and output: + fcUuids = output.get('metadata', + {}).get('storage_adapter', {}).keys() + for fcUuid in fcUuids: + fcInfo = output.get('metadata', + {}).get('storage_adapter', + {}).get(fcUuid) + if fcInfo['type'] == 'fc': + fcInfos[fcUuid] = fcInfo + except Exception as e: + LOG.error("Failed to get fiber channel info from storage " + "due to %(stat)s", {'stat': e}) + return fcInfos + + def _get_targets(self): + """Get targets. + + :returns: targetInfos[uuid] = targetInfo + targetInfo['targetUuid'] + targetInfo['targetName'] + targetInfo['targetAddr'] + """ + output = None + targetInfos = {} + try: + retCode, output = self.dpl.get_target_list('target') + if retCode == 0 and output: + for targetInfo in output.get('children', []): + targetI = {} + targetI['targetUuid'] = targetInfo[0] + targetI['targetName'] = targetInfo[1] + targetI['targetAddr'] = targetInfo[2] + targetInfos[str(targetInfo[0])] = targetI + except Exception as e: + targetInfos = {} + LOG.error("Failed to get fiber channel target from " + "storage server due to %(stat)s", + {'stat': e}) + return targetInfos + + def _get_targetwpns(self, volumeid, initiatorWwpns): + lstargetWwpns = [] + try: + ret, output = self.dpl.get_vdev(volumeid) + if ret == 0 and output: + exports = output.get('exports', {}) + fc_infos = exports.get('Network/FC', {}) + for fc_info in fc_infos: + for p in fc_info.get('permissions', []): + if p.get(initiatorWwpns, None): + targetWwpns = fc_info.get('target_identifier', '') + lstargetWwpns.append(targetWwpns) + except Exception as e: + LOG.error("Failed to get target wwpns from storage due " + "to %(stat)s", {'stat': e}) + lstargetWwpns = [] + return lstargetWwpns + + def _is_initiator_wwpn_active(self, targetWwpn, initiatorWwpn): + fActive = False + output = None + try: + retCode, output = self.dpl.get_sns_table(targetWwpn) + if retCode == 0 and output: + for fdwwpn, fcport in output.get('metadata', + {}).get('sns_table', + []): + if fdwwpn == initiatorWwpn: + fActive = True + break + except Exception: + LOG.error('Failed to get sns table') + return fActive + + def _convertHex2String(self, wwpns): + szwwpns = '' + if len(str(wwpns)) == 16: + szwwpns = '%2s:%2s:%2s:%2s:%2s:%2s:%2s:%2s' % ( + str(wwpns)[0:2], + str(wwpns)[2:4], + str(wwpns)[4:6], + str(wwpns)[6:8], + str(wwpns)[8:10], + str(wwpns)[10:12], + str(wwpns)[12:14], + str(wwpns)[14:16]) + return szwwpns + + def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename): + ret = 0 + output = '' + LOG.debug('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s', + {'volume': volumeid, 'wwpns': targetwwpns, + 'iqn': initiatorwwpns, 'volumename': volumename}) + 
try: + ret, output = self.dpl.assign_vdev_fc( + self._conver_uuid2hex(volumeid), targetwwpns, + initiatorwwpns, volumename) + except Exception: + LOG.error('Volume %(volumeid)s failed to send assign command, ' + 'ret: %(status)s output: %(output)s', + {'volumeid': volumeid, 'status': ret, 'output': output}) + ret = errno.EFAULT + + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if len(event_uuid): + ret = 0 + status = self._wait_event( + self.dpl.get_vdev_status, + self._conver_uuid2hex(volumeid), event_uuid) + if status['state'] == 'error': + ret = errno.EFAULT + msg = _('Flexvisor failed to assign volume %(id)s: ' + '%(status)s.') % {'id': volumeid, + 'status': status} + raise exception.VolumeBackendAPIException(data=msg) + else: + ret = errno.EFAULT + msg = _('Flexvisor failed to assign volume %(id)s due to ' + 'unable to query status by event ' + 'id.') % {'id': volumeid} + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor assign volume failed:%(id)s:' + '%(status)s.') % {'id': volumeid, 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + + return ret + + def _delete_export_fc(self, volumeid, targetwwpns, initiatorwwpns): + ret = 0 + output = '' + ret, output = self.dpl.unassign_vdev_fc( + self._conver_uuid2hex(volumeid), + targetwwpns, initiatorwwpns) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0 and len(event_uuid): + status = self._wait_event( + self.dpl.get_vdev_status, volumeid, event_uuid) + if status['state'] == 'error': + ret = errno.EFAULT + msg = _('Flexvisor failed to unassign volume %(id)s:' + ' %(status)s.') % {'id': volumeid, + 'status': status} + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to unassign volume (get event) ' + '%(id)s.') % {'id': volumeid} + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor unassign volume failed:%(id)s:' + '%(status)s.') % {'id': volumeid, 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + LOG.info('Flexvisor succeeded to unassign volume %(id)s.', + {'id': volumeid}) + + return ret + + def _build_initiator_target_map(self, connector, tgtwwns): + """Build the target_wwns and the initiator target map.""" + init_targ_map = {} + initiator_wwns = connector['wwpns'] + for initiator in initiator_wwns: + init_targ_map[initiator] = tgtwwns + + return init_targ_map + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + """ + connector = {'ip': CONF.my_ip, + 'host': CONF.host, + 'initiator': self._initiator, + 'wwnns': self._fc_wwnns, + 'wwpns': self._fc_wwpns} + + """ + dc_fc = {} + dc_target = {} + lsTargetWwpn = [] + output = None + properties = {} + preferTargets = {} + ret = 0 + targetIdentifier = [] + szwwpns = [] + LOG.info('initialize_connection volume: %(volume)s, connector:' + ' %(connector)s', + {"volume": volume, "connector": connector}) + # Get Storage Fiber channel controller + dc_fc = self._get_fc_channel() + + # Get existed FC target list to decide target wwpn + dc_target = self._get_targets() + if len(dc_target) == 0: + msg = _('Backend storage did not configure fiber channel ' + 'target.') + raise exception.VolumeBackendAPIException(data=msg) + + for keyFc in dc_fc: + for targetuuid in dc_target: + if dc_fc[keyFc]['hardware_address'] == \ + dc_target[targetuuid]['targetAddr']: + preferTargets[targetuuid] = dc_target[targetuuid] 
+ break + # Confirm client wwpn is existed in sns table + # Covert wwwpns to 'xx:xx:xx:xx:xx:xx:xx:xx' format + for dwwpn in connector['wwpns']: + szwwpn = self._convertHex2String(dwwpn) + if len(szwwpn) == 0: + msg = _('Invalid wwpns format %(wwpns)s') % \ + {'wwpns': connector['wwpns']} + raise exception.VolumeBackendAPIException(data=msg) + szwwpns.append(szwwpn) + + if len(szwwpns): + for targetUuid in preferTargets: + targetWwpn = '' + targetWwpn = preferTargets.get(targetUuid, + {}).get('targetAddr', '') + lsTargetWwpn.append(targetWwpn) + # Use wwpns to assign volume. + LOG.info('Prefer use target wwpn %(wwpn)s', + {'wwpn': lsTargetWwpn}) + # Start to create export in all FC target node. + assignedTarget = [] + for pTarget in lsTargetWwpn: + try: + ret = self._export_fc(volume['id'], str(pTarget), szwwpns, + volume['name']) + if ret: + break + else: + assignedTarget.append(pTarget) + except Exception as e: + LOG.error('Failed to export fiber channel target ' + 'due to %s', e) + ret = errno.EFAULT + break + if ret == 0: + ret, output = self.dpl.get_vdev(self._conver_uuid2hex( + volume['id'])) + nLun = -1 + if ret == 0: + try: + for p in output['exports']['Network/FC']: + # check initiator wwpn existed in target initiator list + for initI in p.get('permissions', []): + for szwpn in szwwpns: + if initI.get(szwpn, None): + nLun = initI[szwpn] + break + if nLun != -1: + break + + if nLun != -1: + targetIdentifier.append( + str(p['target_identifier']).replace(':', '')) + + except Exception: + msg = _('Invalid connection initialization response of ' + 'volume %(name)s: ' + '%(output)s') % {'name': volume['name'], + 'output': output} + raise exception.VolumeBackendAPIException(data=msg) + + if nLun != -1: + init_targ_map = self._build_initiator_target_map(connector, + targetIdentifier) + properties['target_discovered'] = True + properties['target_wwn'] = targetIdentifier + properties['target_lun'] = int(nLun) + properties['volume_id'] = volume['id'] + properties['initiator_target_map'] = init_targ_map + LOG.info('%(volume)s assign type fibre_channel, properties ' + '%(properties)s', + {'volume': volume['id'], 'properties': properties}) + else: + msg = _('Invalid connection initialization response of ' + 'volume %(name)s') % {'name': volume['name']} + raise exception.VolumeBackendAPIException(data=msg) + LOG.info('Connect initialization info: ' + '{driver_volume_type: fibre_channel, ' + 'data: %(properties)s', {'properties': properties}) + conn_info = {'driver_volume_type': 'fibre_channel', + 'data': properties} + fczm_utils.add_fc_zone(conn_info) + return conn_info + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + """ + connector = {'ip': CONF.my_ip, + 'host': CONF.host, + 'initiator': self._initiator, + 'wwnns': self._fc_wwnns, + 'wwpns': self._fc_wwpns} + """ + lstargetWwpns = [] + lsTargets = [] + szwwpns = [] + ret = 0 + info = {'driver_volume_type': 'fibre_channel', 'data': {}} + LOG.info('terminate_connection volume: %(volume)s, ' + 'connector: %(con)s', + {'volume': volume, 'con': connector}) + # Query targetwwpns. + # Get all target list of volume. 
+ for dwwpn in connector['wwpns']: + szwwpn = self._convertHex2String(dwwpn) + if len(szwwpn) == 0: + msg = _('Invalid wwpns format %(wwpns)s') % \ + {'wwpns': connector['wwpns']} + raise exception.VolumeBackendAPIException(data=msg) + szwwpns.append(szwwpn) + + if len(szwwpns) == 0: + ret = errno.EFAULT + msg = _('Invalid wwpns format %(wwpns)s') % \ + {'wwpns': connector['wwpns']} + raise exception.VolumeBackendAPIException(data=msg) + else: + for szwwpn in szwwpns: + lstargetWwpns = self._get_targetwpns( + self._conver_uuid2hex(volume['id']), szwwpn) + lsTargets = list(set(lsTargets + lstargetWwpns)) + + # Remove all export target + try: + for ptarget in lsTargets: + ret = self._delete_export_fc(volume['id'], ptarget, szwwpns) + if ret: + break + except Exception: + ret = errno.EFAULT + finally: + if ret: + msg = _('Faield to unassign %(volume)s') % (volume['id']) + raise exception.VolumeBackendAPIException(data=msg) + + # Failed to delete export with fibre channel + if ret: + init_targ_map = self._build_initiator_target_map(connector, + lsTargets) + info['data'] = {'target_wwn': lsTargets, + 'initiator_target_map': init_targ_map} + fczm_utils.remove_fc_zone(info) + + return info + + def get_volume_stats(self, refresh=False): + if refresh: + data = super(DPLFCDriver, self).get_volume_stats(refresh) + if data: + data['storage_protocol'] = 'FC' + backend_name = \ + self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = (backend_name or 'DPLFCDriver') + self._stats = data + return self._stats diff --git a/cinder/volume/drivers/prophetstor/dpl_iscsi.py b/cinder/volume/drivers/prophetstor/dpl_iscsi.py new file mode 100644 index 00000000000..f922e5dca57 --- /dev/null +++ b/cinder/volume/drivers/prophetstor/dpl_iscsi.py @@ -0,0 +1,155 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
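Editorial note: both initialize_connection() paths in this patch, the FC driver above and the iSCSI driver below, receive the connector dictionary described in the FC driver's docstrings; the FC path additionally reformats each 16-hex-character wwpn with _convertHex2String() before looking it up in the backend's SNS table and permission lists. A small, self-contained illustration follows; every value is invented, and to_colon_wwpn is a stand-in with the same behaviour as the driver helper.

```python
# Example connector with the keys the docstrings above list; all values
# here are invented for illustration.
connector = {
    'ip': '192.0.2.10',
    'host': 'compute-1',
    'initiator': 'iqn.1993-08.org.debian:01:test1',
    'wwnns': ['200000051e55a100'],
    'wwpns': ['210000051e55a100'],
}


def to_colon_wwpn(wwpn):
    """Equivalent of _convertHex2String: 16 hex chars -> colon-separated."""
    s = str(wwpn)
    if len(s) != 16:
        return ''
    return ':'.join(s[i:i + 2] for i in range(0, 16, 2))


print(to_colon_wwpn(connector['wwpns'][0]))  # 21:00:00:05:1e:55:a1:00
```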
+ +import errno + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder import interface +import cinder.volume.driver +from cinder.volume.drivers.prophetstor import dplcommon + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class DPLISCSIDriver(dplcommon.DPLCOMMONDriver, + cinder.volume.driver.ISCSIDriver): + def __init__(self, *args, **kwargs): + super(DPLISCSIDriver, self).__init__(*args, **kwargs) + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + properties = {} + properties['target_lun'] = None + properties['target_discovered'] = True + properties['target_portal'] = '' + properties['target_iqn'] = None + properties['volume_id'] = volume['id'] + + dpl_server = self.configuration.san_ip + dpl_iscsi_port = self.configuration.target_port + ret, output = self.dpl.assign_vdev(self._conver_uuid2hex( + volume['id']), connector['initiator'].lower(), volume['id'], + '%s:%d' % (dpl_server, dpl_iscsi_port), 0) + + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if len(event_uuid): + ret = 0 + status = self._wait_event( + self.dpl.get_vdev_status, self._conver_uuid2hex( + volume['id']), event_uuid) + if status['state'] == 'error': + ret = errno.EFAULT + msg = _('Flexvisor failed to assign volume %(id)s: ' + '%(status)s.') % {'id': volume['id'], + 'status': status} + raise exception.VolumeBackendAPIException(data=msg) + else: + ret = errno.EFAULT + msg = _('Flexvisor failed to assign volume %(id)s due to ' + 'unable to query status by event ' + 'id.') % {'id': volume['id']} + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor assign volume failed.:%(id)s:' + '%(status)s.') % {'id': volume['id'], 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + + if ret == 0: + ret, output = self.dpl.get_vdev( + self._conver_uuid2hex(volume['id'])) + if ret == 0: + for tgInfo in output['exports']['Network/iSCSI']: + if tgInfo['permissions'] and \ + isinstance(tgInfo['permissions'][0], dict): + for assign in tgInfo['permissions']: + if connector['initiator'].lower() in assign.keys(): + for tgportal in tgInfo.get('portals', {}): + properties['target_portal'] = tgportal + break + properties['target_lun'] = \ + int(assign[connector['initiator'].lower()]) + break + + if properties['target_portal'] != '': + properties['target_iqn'] = tgInfo['target_identifier'] + break + else: + if connector['initiator'].lower() in tgInfo['permissions']: + for tgportal in tgInfo.get('portals', {}): + properties['target_portal'] = tgportal + break + + if properties['target_portal'] != '': + properties['target_lun'] = int( + tgInfo['logical_unit_number']) + properties['target_iqn'] = tgInfo['target_identifier'] + break + + if not (ret == 0 or properties['target_portal']): + msg = _('Flexvisor failed to assign volume %(volume)s ' + 'iqn %(iqn)s.') % {'volume': volume['id'], + 'iqn': connector['initiator']} + raise exception.VolumeBackendAPIException(data=msg) + + return {'driver_volume_type': 'iscsi', 'data': properties} + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + ret, output = self.dpl.unassign_vdev( + self._conver_uuid2hex(volume['id']), + connector['initiator']) + + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event( + self.dpl.get_vdev_status, volume['id'], event_uuid) + if 
status['state'] == 'error': + ret = errno.EFAULT + msg = _('Flexvisor failed to unassign volume %(id)s:' + ' %(status)s.') % {'id': volume['id'], + 'status': status} + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to unassign volume (get event) ' + '%(id)s.') % {'id': volume['id']} + raise exception.VolumeBackendAPIException(data=msg) + elif ret == errno.ENODATA: + LOG.info('Flexvisor already unassigned volume %(id)s.', + {'id': volume['id']}) + elif ret != 0: + msg = _('Flexvisor failed to unassign volume:%(id)s:' + '%(status)s.') % {'id': volume['id'], 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + + def get_volume_stats(self, refresh=False): + if refresh: + try: + data = super(DPLISCSIDriver, self).get_volume_stats(refresh) + if data: + data['storage_protocol'] = 'iSCSI' + backend_name = \ + self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = \ + (backend_name or 'DPLISCSIDriver') + self._stats = data + except Exception as exc: + LOG.warning('Cannot get volume status %(exc)s.', {'exc': exc}) + return self._stats diff --git a/cinder/volume/drivers/prophetstor/dplcommon.py b/cinder/volume/drivers/prophetstor/dplcommon.py new file mode 100644 index 00000000000..22618b06f21 --- /dev/null +++ b/cinder/volume/drivers/prophetstor/dplcommon.py @@ -0,0 +1,1522 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Implementation of the class of ProphetStor DPL storage adapter of Federator. 
+ # v2.0.1 Consistency group support + # v2.0.2 Pool aware scheduler + # v2.0.3 Consistency group modification support + # v2.0.4 Port ProphetStor driver to use new driver model + # v2.0.5 Move from httplib to requests +""" + +import base64 +import errno +import json +import random +import time + +from oslo_log import log as logging +from oslo_service import loopingcall +from oslo_utils import units +import requests +import six +from six.moves import http_client + +from cinder import exception +from cinder.i18n import _ +from cinder import objects +from cinder.objects import fields +from cinder.volume import driver +from cinder.volume.drivers.prophetstor import options +from cinder.volume.drivers.san import san +from cinder.volume import volume_utils + +LOG = logging.getLogger(__name__) + +CONNECTION_RETRY = 10 +MAXSNAPSHOTS = 1024 +DISCOVER_SERVER_TYPE = 'dpl' +DPL_BLOCKSTOR = '/dpl_blockstor' +DPL_SYSTEM = '/dpl_system' + +DPL_VER_V1 = 'v1' +DPL_OBJ_POOL = 'dpl_pool' +DPL_OBJ_DISK = 'dpl_disk' +DPL_OBJ_VOLUME = 'dpl_volume' +DPL_OBJ_VOLUMEGROUP = 'dpl_volgroup' +DPL_OBJ_SNAPSHOT = 'cdmi_snapshots' +DPL_OBJ_EXPORT = 'dpl_export' + +DPL_OBJ_REPLICATION = 'cdmi_replication' +DPL_OBJ_TARGET = 'dpl_target' +DPL_OBJ_SYSTEM = 'dpl_system' +DPL_OBJ_SNS = 'sns_table' + + +class DPLCommand(object): + """DPL command interface.""" + + def __init__(self, ip, port, username, password, cert_verify=False, + cert_path=None): + self.ip = ip + self.port = port + self.username = username + self.password = password + self.cert_verify = cert_verify + self.cert_path = cert_path + + def send_cmd(self, method, url, params, expected_status): + """Send command to DPL.""" + retcode = 0 + data = {} + header = {'Content-Type': 'application/cdmi-container', + 'Accept': 'application/cdmi-container', + 'x-cdmi-specification-version': '1.0.2'} + # base64 encode the username and password + auth = base64.encodestring('%s:%s' + % (self.username, + self.password)).replace('\n', '') + header['Authorization'] = 'Basic %s' % auth + + if not params: + payload = None + else: + try: + payload = json.dumps(params, ensure_ascii=False) + payload.encode('utf-8') + except Exception as e: + LOG.error('JSON encode params %(param)s error:' + ' %(status)s.', {'param': params, 'status': e}) + retcode = errno.EINVAL + + retry = CONNECTION_RETRY + func = getattr(requests, method.lower()) + + cert_path = False + if self.cert_verify: + cert_path = self.cert_path + else: + cert_path = False + + while (retry): + try: + r = func( + url="https://%s:%s%s" % (self.ip, self.port, url), + data=payload, headers=header, verify=cert_path) + + if r.status_code == http_client.SERVICE_UNAVAILABLE: + LOG.error("The flexvisor service is unavailable.") + continue + else: + break + except Exception as e: + msg = (_("failed to %(method)s due to %(error)s") + % {"method": method, "error": six.text_type(e)}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if (r.status_code in expected_status and + r.status_code == http_client.NOT_FOUND): + retcode = errno.ENODATA + elif r.status_code not in expected_status: + LOG.error('%(method)s %(url)s unexpected response status: ' + '%(response)s (expects: %(expects)s).', + {'method': method, + 'url': url, + 'response': http_client.responses[r.status_code], + 'expects': expected_status}) + if r.status_code == http_client.UNAUTHORIZED: + raise exception.NotAuthorized + else: + retcode = errno.EIO + elif r.status_code is http_client.NOT_FOUND: + retcode = errno.ENODATA + elif r.status_code is 
http_client.ACCEPTED: + retcode = errno.EAGAIN + try: + data = r.json() + except (TypeError, ValueError) as e: + LOG.error('Call to json.loads() raised an exception: %s.', + e) + retcode = errno.ENOEXEC + except Exception as e: + LOG.error('Read response raised an exception: %s.', + e) + retcode = errno.ENOEXEC + elif (r.status_code in [http_client.OK, http_client.CREATED] and + http_client.NO_CONTENT not in expected_status): + try: + data = r.json() + except (TypeError, ValueError) as e: + LOG.error('Call to json.loads() raised an exception: %s.', + e) + retcode = errno.ENOEXEC + except Exception as e: + LOG.error('Read response raised an exception: %s.', + e) + retcode = errno.ENOEXEC + + return retcode, data + + +class DPLVolume(object): + + def __init__(self, dplServer, dplPort, dplUser, dplPassword, + cert_verify=False, cert_path=None): + self.objCmd = DPLCommand( + dplServer, dplPort, dplUser, dplPassword, cert_verify=cert_verify, + cert_path=cert_path) + + def _execute(self, method, url, params, expected_status): + if self.objCmd: + return self.objCmd.send_cmd(method, url, params, expected_status) + else: + return -1, None + + def _gen_snapshot_url(self, vdevid, snapshotid): + snapshot_url = '/%s/%s/%s' % (vdevid, DPL_OBJ_SNAPSHOT, snapshotid) + return snapshot_url + + def get_server_info(self): + method = 'GET' + url = ('/%s/%s/' % (DPL_VER_V1, DPL_OBJ_SYSTEM)) + return self._execute(method, url, None, + [http_client.OK, http_client.ACCEPTED]) + + def create_vdev(self, volumeID, volumeName, volumeDesc, poolID, volumeSize, + fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS, + snapshot_quota=None): + method = 'PUT' + metadata = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) + + if volumeName is None or volumeName == '': + metadata['display_name'] = volumeID + else: + metadata['display_name'] = volumeName + metadata['display_description'] = volumeDesc + metadata['pool_uuid'] = poolID + metadata['total_capacity'] = volumeSize + metadata['maximum_snapshot'] = maximum_snapshot + if snapshot_quota is not None: + metadata['snapshot_quota'] = int(snapshot_quota) + metadata['properties'] = dict(thin_provision=fthinprovision) + params['metadata'] = metadata + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.CREATED]) + + def extend_vdev(self, volumeID, volumeName, volumeDesc, volumeSize, + maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): + method = 'PUT' + metadata = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) + + if volumeName is None or volumeName == '': + metadata['display_name'] = volumeID + else: + metadata['display_name'] = volumeName + metadata['display_description'] = volumeDesc + metadata['total_capacity'] = int(volumeSize) + metadata['maximum_snapshot'] = maximum_snapshot + if snapshot_quota is not None: + metadata['snapshot_quota'] = snapshot_quota + params['metadata'] = metadata + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.CREATED]) + + def delete_vdev(self, volumeID, force=True): + method = 'DELETE' + metadata = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) + + metadata['force'] = force + params['metadata'] = metadata + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.NOT_FOUND, http_client.NO_CONTENT]) + + def create_vdev_from_snapshot(self, vdevID, vdevDisplayName, vdevDesc, + snapshotID, poolID, fthinprovision=True, + 
maximum_snapshot=MAXSNAPSHOTS, + snapshot_quota=None): + method = 'PUT' + metadata = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevID) + metadata['snapshot_operation'] = 'copy' + if vdevDisplayName is None or vdevDisplayName == "": + metadata['display_name'] = vdevID + else: + metadata['display_name'] = vdevDisplayName + metadata['display_description'] = vdevDesc + metadata['pool_uuid'] = poolID + metadata['properties'] = {} + metadata['maximum_snapshot'] = maximum_snapshot + if snapshot_quota: + metadata['snapshot_quota'] = snapshot_quota + metadata['properties'] = dict(thin_provision=fthinprovision) + + params['metadata'] = metadata + params['copy'] = self._gen_snapshot_url(vdevID, snapshotID) + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.CREATED]) + + def spawn_vdev_from_snapshot(self, new_vol_id, src_vol_id, + vol_display_name, description, snap_id): + method = 'PUT' + params = {} + metadata = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, new_vol_id) + + metadata['snapshot_operation'] = 'spawn' + if vol_display_name is None or vol_display_name == '': + metadata['display_name'] = new_vol_id + else: + metadata['display_name'] = vol_display_name + metadata['display_description'] = description + params['metadata'] = metadata + params['copy'] = self._gen_snapshot_url(src_vol_id, snap_id) + + return self._execute(method, url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.CREATED]) + + def get_pools(self): + method = 'GET' + url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL) + return self._execute(method, url, None, [http_client.OK]) + + def get_pool(self, poolid): + method = 'GET' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid) + return self._execute(method, url, None, + [http_client.OK, http_client.ACCEPTED]) + + def clone_vdev(self, SourceVolumeID, NewVolumeID, poolID, volumeName, + volumeDesc, volumeSize, fthinprovision=True, + maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): + method = 'PUT' + params = {} + metadata = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, NewVolumeID) + metadata["snapshot_operation"] = "clone" + if volumeName is None or volumeName == '': + metadata["display_name"] = NewVolumeID + else: + metadata["display_name"] = volumeName + metadata["display_description"] = volumeDesc + metadata["pool_uuid"] = poolID + metadata["total_capacity"] = volumeSize + metadata["maximum_snapshot"] = maximum_snapshot + if snapshot_quota: + metadata["snapshot_quota"] = snapshot_quota + metadata["properties"] = dict(thin_provision=fthinprovision) + params["metadata"] = metadata + params["copy"] = SourceVolumeID + + return self._execute(method, + url, params, + [http_client.OK, http_client.CREATED, + http_client.ACCEPTED]) + + def create_vdev_snapshot(self, vdevid, snapshotid, snapshotname='', + snapshotdes='', isgroup=False): + method = 'PUT' + metadata = {} + params = {} + if isgroup: + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid) + else: + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + if not snapshotname: + metadata['display_name'] = snapshotid + else: + metadata['display_name'] = snapshotname + metadata['display_description'] = snapshotdes + + params['metadata'] = metadata + params['snapshot'] = snapshotid + + return self._execute(method, + url, params, + [http_client.OK, http_client.CREATED, + http_client.ACCEPTED]) + + def get_vdev(self, vdevid): + method = 'GET' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + 
return self._execute(method, + url, None, + [http_client.OK, http_client.ACCEPTED, + http_client.NOT_FOUND]) + + def get_vdev_status(self, vdevid, eventid): + method = 'GET' + url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_VOLUME, + vdevid, eventid)) + + return self._execute(method, + url, None, + [http_client.OK, http_client.NOT_FOUND]) + + def get_pool_status(self, poolid, eventid): + method = 'GET' + url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_POOL, + poolid, eventid)) + + return self._execute(method, + url, None, + [http_client.OK, http_client.NOT_FOUND]) + + def assign_vdev(self, vdevid, iqn, lunname, portal, lunid=0): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + metadata['export_operation'] = 'assign' + exports['Network/iSCSI'] = {} + target_info = {} + target_info['logical_unit_number'] = 0 + target_info['logical_unit_name'] = lunname + permissions = [] + portals = [] + portals.append(portal) + permissions.append(iqn) + target_info['permissions'] = permissions + target_info['portals'] = portals + exports['Network/iSCSI'] = target_info + + params['metadata'] = metadata + params['exports'] = exports + + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.CREATED]) + + def assign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpn, lunname, + lunid=-1): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + metadata['export_operation'] = 'assign' + exports['Network/FC'] = {} + target_info = {} + target_info['target_identifier'] = targetwwpn + target_info['logical_unit_number'] = lunid + target_info['logical_unit_name'] = lunname + target_info['permissions'] = initiatorwwpn + exports['Network/FC'] = target_info + + params['metadata'] = metadata + params['exports'] = exports + + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.CREATED]) + + def unassign_vdev(self, vdevid, initiatorIqn, targetIqn=''): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + metadata['export_operation'] = 'unassign' + params['metadata'] = metadata + + exports['Network/iSCSI'] = {} + exports['Network/iSCSI']['target_identifier'] = targetIqn + permissions = [] + permissions.append(initiatorIqn) + exports['Network/iSCSI']['permissions'] = permissions + + params['exports'] = exports + + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.NO_CONTENT, http_client.NOT_FOUND]) + + def unassign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpns): + method = 'PUT' + metadata = {} + exports = {} + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + metadata['export_operation'] = 'unassign' + params['metadata'] = metadata + + exports['Network/FC'] = {} + exports['Network/FC']['target_identifier'] = targetwwpn + permissions = initiatorwwpns + exports['Network/FC']['permissions'] = permissions + + params['exports'] = exports + + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.NO_CONTENT, http_client.NOT_FOUND]) + + def delete_vdev_snapshot(self, objID, snapshotID, isGroup=False): + method = 'DELETE' + if isGroup: + url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, + DPL_OBJ_VOLUMEGROUP, + objID, + DPL_OBJ_SNAPSHOT, snapshotID)) + else: + url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, + 
DPL_OBJ_VOLUME, objID, + DPL_OBJ_SNAPSHOT, snapshotID)) + + return self._execute(method, + url, None, + [http_client.OK, http_client.ACCEPTED, + http_client.NO_CONTENT, http_client.NOT_FOUND]) + + def rollback_vdev(self, vdevid, snapshotid): + method = 'PUT' + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) + + params['copy'] = self._gen_snapshot_url(vdevid, snapshotid) + + return self._execute(method, + url, params, + [http_client.OK, http_client.ACCEPTED]) + + def list_vdev_snapshots(self, vdevid, isGroup=False): + method = 'GET' + if isGroup: + url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid, + DPL_OBJ_SNAPSHOT)) + else: + url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, + vdevid, DPL_OBJ_SNAPSHOT)) + + return self._execute(method, + url, None, + [http_client.OK]) + + def query_vdev_snapshot(self, vdevid, snapshotID, isGroup=False): + method = 'GET' + if isGroup: + url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, + vdevid, DPL_OBJ_SNAPSHOT, snapshotID)) + else: + url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, + DPL_OBJ_SNAPSHOT, snapshotID)) + + return self._execute(method, + url, None, + [http_client.OK]) + + def create_target(self, targetID, protocol, displayName, targetAddress, + description=''): + method = 'PUT' + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) + params['metadata'] = {} + metadata = params['metadata'] + metadata['type'] = 'target' + metadata['protocol'] = protocol + if displayName is None or displayName == '': + metadata['display_name'] = targetID + else: + metadata['display_name'] = displayName + metadata['display_description'] = description + metadata['address'] = targetAddress + return self._execute(method, url, params, [http_client.OK]) + + def get_target(self, targetID): + method = 'GET' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) + return self._execute(method, url, None, [http_client.OK]) + + def delete_target(self, targetID): + method = 'DELETE' + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) + return self._execute(method, + url, None, + [http_client.OK, http_client.ACCEPTED, + http_client.NOT_FOUND]) + + def get_target_list(self, type='target'): + # type = target/initiator + method = 'GET' + if type is None: + url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT) + else: + url = '/%s/%s/?type=%s' % (DPL_VER_V1, DPL_OBJ_EXPORT, type) + return self._execute(method, url, None, [http_client.OK]) + + def get_sns_table(self, wwpn): + method = 'PUT' + params = {} + url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, DPL_OBJ_SNS) + params['metadata'] = {} + params['metadata']['protocol'] = 'fc' + params['metadata']['address'] = str(wwpn) + return self._execute(method, url, params, [http_client.OK]) + + def create_vg(self, groupID, groupName, groupDesc='', listVolume=None, + maxSnapshots=MAXSNAPSHOTS, rotationSnapshot=True): + method = 'PUT' + metadata = {} + params = {} + properties = {} + url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) + if listVolume: + metadata['volume'] = listVolume + else: + metadata['volume'] = [] + metadata['display_name'] = groupName + metadata['display_description'] = groupDesc + metadata['maximum_snapshot'] = maxSnapshots + properties['snapshot_rotation'] = rotationSnapshot + metadata['properties'] = properties + params['metadata'] = metadata + return self._execute(method, url, params, + [http_client.OK, http_client.ACCEPTED, + http_client.CREATED]) + + def get_vg_list(self, vgtype=None): + method = 'GET' + if 
vgtype: + url = '/%s/?volume_group_type=%s' % (DPL_OBJ_VOLUMEGROUP, vgtype) + else: + url = '/%s/' % (DPL_OBJ_VOLUMEGROUP) + return self._execute(method, url, None, [http_client.OK]) + + def get_vg(self, groupID): + method = 'GET' + url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) + return self._execute(method, url, None, [http_client.OK]) + + def delete_vg(self, groupID, force=True): + method = 'DELETE' + metadata = {} + params = {} + url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) + metadata['force'] = force + params['metadata'] = metadata + return self._execute(method, url, params, + [http_client.NO_CONTENT, http_client.NOT_FOUND]) + + def join_vg(self, volumeID, groupID): + method = 'PUT' + metadata = {} + params = {} + url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) + metadata['volume_group_operation'] = 'join' + metadata['volume'] = [] + metadata['volume'].append(volumeID) + params['metadata'] = metadata + return self._execute(method, url, params, + [http_client.OK, http_client.ACCEPTED]) + + def leave_vg(self, volumeID, groupID): + method = 'PUT' + metadata = {} + params = {} + url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) + metadata['volume_group_operation'] = 'leave' + metadata['volume'] = [] + metadata['volume'].append(volumeID) + params['metadata'] = metadata + return self._execute(method, url, params, + [http_client.OK, http_client.ACCEPTED]) + + +class DPLCOMMONDriver(driver.CloneableImageVD, + driver.BaseVD): + """Class of dpl storage adapter.""" + VERSION = '2.0.5' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "ProphetStor_CI" + + # TODO(jsbryant) Remove driver in the 'U' release if CI is not fixed. + SUPPORTED = False + + def __init__(self, *args, **kwargs): + cert_path = None + cert_verify = False + super(DPLCOMMONDriver, self).__init__(*args, **kwargs) + if self.configuration: + self.configuration.append_config_values(options.DPL_OPTS) + self.configuration.append_config_values(san.san_opts) + cert_verify = self.configuration.driver_ssl_cert_verify + cert_path = self.configuration.driver_ssl_cert_path + + if cert_verify: + if not cert_path: + LOG.warning( + "Flexvisor: cert_verify is enabled but required cert_path" + " option is missing.") + cert_path = None + else: + cert_path = None + + self.dpl = DPLVolume(self.configuration.san_ip, + self.configuration.dpl_port, + self.configuration.san_login, + self.configuration.san_password, + cert_verify=cert_verify, + cert_path=cert_path) + self._stats = {} + + @staticmethod + def get_driver_options(): + return options.DPL_OPTS + + def _convert_size_GB(self, size): + s = round(float(size) / units.Gi, 2) + if s > 0: + return s + else: + return 0 + + def _conver_uuid2hex(self, strID): + if strID: + return strID.replace('-', '') + else: + return None + + def _get_event_uuid(self, output): + ret = 0 + event_uuid = "" + + if (type(output) is dict and + output.get("metadata") and output["metadata"]): + if (output["metadata"].get("event_uuid") and + output["metadata"]["event_uuid"]): + event_uuid = output["metadata"]["event_uuid"] + else: + ret = errno.EINVAL + else: + ret = errno.EINVAL + return ret, event_uuid + + def _wait_event(self, callFun, objuuid, eventid=None): + nRetry = 30 + fExit = False + status = {} + status['state'] = 'error' + status['output'] = {} + while nRetry: + try: + if eventid: + ret, output = callFun( + self._conver_uuid2hex(objuuid), + self._conver_uuid2hex(eventid)) + else: + ret, output = callFun(self._conver_uuid2hex(objuuid)) + + if ret == 0: + if output['completionStatus'] == 'Complete': + fExit = 
True + status['state'] = 'available' + status['output'] = output + elif output['completionStatus'] == 'Error': + fExit = True + status['state'] = 'error' + raise loopingcall.LoopingCallDone(retvalue=False) + else: + nsleep = random.randint(0, 10) + value = round(float(nsleep) / 10, 2) + time.sleep(value) + elif ret == errno.ENODATA: + status['state'] = 'deleted' + fExit = True + else: + nRetry -= 1 + time.sleep(3) + continue + + except Exception as e: + LOG.error('Flexvisor failed to get event %(volume)s ' + '(%(status)s).', + {'volume': eventid, 'status': e}) + raise loopingcall.LoopingCallDone(retvalue=False) + + if fExit is True: + break + return status + + def _join_volume_group(self, volume, cgId): + # Join volume group if consistency group id not empty + msg = '' + try: + ret, output = self.dpl.join_vg( + self._conver_uuid2hex(volume['id']), + self._conver_uuid2hex(cgId)) + except Exception as e: + ret = errno.EFAULT + msg = _('Fexvisor failed to add volume %(id)s ' + 'due to %(reason)s.') % {"id": volume['id'], + "reason": six.text_type(e)} + if ret: + if not msg: + msg = _('Flexvisor failed to add volume %(id)s ' + 'to group %(cgid)s.') % {'id': volume['id'], + 'cgid': cgId} + raise exception.VolumeBackendAPIException(data=msg) + else: + LOG.info('Flexvisor succeeded to add volume %(id)s to ' + 'group %(cgid)s.', + {'id': volume['id'], 'cgid': cgId}) + + def _leave_volume_group(self, volume, cgId): + # Leave volume group if consistency group id not empty + msg = '' + try: + ret, output = self.dpl.leave_vg( + self._conver_uuid2hex(volume['id']), + self._conver_uuid2hex(cgId)) + except Exception as e: + ret = errno.EFAULT + msg = _('Fexvisor failed to remove volume %(id)s ' + 'due to %(reason)s.') % {"id": volume['id'], + "reason": six.text_type(e)} + if ret: + if not msg: + msg = _('Flexvisor failed to remove volume %(id)s ' + 'from group %(cgid)s.') % {'id': volume['id'], + 'cgid': cgId} + raise exception.VolumeBackendAPIException(data=msg) + else: + LOG.info('Flexvisor succeeded to remove volume %(id)s from ' + 'group %(cgid)s.', + {'id': volume['id'], 'cgid': cgId}) + + def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID): + snapshotID = None + ret, out = self.dpl.query_vdev_snapshot(vgID, vgsnapshotID, True) + if ret == 0: + volumes = out.get('metadata', {}).get('member', {}) + if volumes: + snapshotID = volumes.get(volumeID, None) + else: + msg = _('Flexvisor failed to get snapshot id of volume ' + '%(id)s from group %(vgid)s.') % {'id': volumeID, + 'vgid': vgID} + raise exception.VolumeBackendAPIException(data=msg) + if not snapshotID: + msg = _('Flexvisor could not find volume %(id)s snapshot in' + ' the group %(vgid)s snapshot ' + '%(vgsid)s.') % {'id': volumeID, 'vgid': vgID, + 'vgsid': vgsnapshotID} + raise exception.VolumeBackendAPIException(data=msg) + return snapshotID + + def create_export(self, context, volume, connector): + pass + + def ensure_export(self, context, volume): + pass + + def remove_export(self, context, volume): + pass + + def _create_consistencygroup(self, context, group): + """Creates a consistencygroup.""" + LOG.info('Start to create consistency group: %(group_name)s ' + 'id: %(id)s', + {'group_name': group.name, 'id': group.id}) + model_update = {'status': fields.GroupStatus.AVAILABLE} + try: + ret, output = self.dpl.create_vg( + self._conver_uuid2hex(group.id), + group.name, + group.description) + if ret: + msg = _('Failed to create consistency group ' + '%(id)s:%(ret)s.') % {'id': group.id, + 'ret': ret} + raise 
exception.VolumeBackendAPIException(data=msg) + else: + return model_update + except Exception as e: + msg = _('Failed to create consistency group ' + '%(id)s due to %(reason)s.') % {'id': group.id, + 'reason': six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + + def _delete_consistencygroup(self, context, group, volumes): + """Delete a consistency group.""" + ret = 0 + volumes = self.db.volume_get_all_by_group( + context, group.id) + model_update = {} + model_update['status'] = group.status + LOG.info('Start to delete consistency group: %(cg_name)s', + {'cg_name': group.id}) + try: + self.dpl.delete_vg(self._conver_uuid2hex(group.id)) + except Exception as e: + msg = _('Failed to delete consistency group %(id)s ' + 'due to %(reason)s.') % {'id': group.id, + 'reason': six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + + for volume_ref in volumes: + try: + self.dpl.delete_vdev(self._conver_uuid2hex(volume_ref['id'])) + volume_ref['status'] = 'deleted' + except Exception: + ret = errno.EFAULT + volume_ref['status'] = 'error_deleting' + model_update['status'] = ( + fields.GroupStatus.ERROR_DELETING) + if ret == 0: + model_update['status'] = fields.GroupStatus.DELETED + return model_update, volumes + + def _create_cgsnapshot(self, context, cgsnapshot, snapshots): + """Creates a cgsnapshot.""" + snapshots = objects.SnapshotList().get_all_for_group_snapshot( + context, cgsnapshot.id) + model_update = {} + LOG.info('Start to create cgsnapshot for consistency group' + ': %(group_name)s', + {'group_name': cgsnapshot.group_id}) + try: + self.dpl.create_vdev_snapshot( + self._conver_uuid2hex(cgsnapshot.group_id), + self._conver_uuid2hex(cgsnapshot.id), + cgsnapshot.name, + '', + True) + for snapshot in snapshots: + snapshot.status = fields.SnapshotStatus.AVAILABLE + except Exception as e: + msg = _('Failed to create cg snapshot %(id)s ' + 'due to %(reason)s.') % {'id': cgsnapshot.id, + 'reason': six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + + model_update['status'] = 'available' + + return model_update, snapshots + + def _delete_cgsnapshot(self, context, cgsnapshot, snapshots): + """Deletes a cgsnapshot.""" + snapshots = objects.SnapshotList().get_all_for_group_snapshot( + context, cgsnapshot.id) + model_update = {} + model_update['status'] = cgsnapshot.status + LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: ' + '%(group_name)s', + {'snap_name': cgsnapshot.id, + 'group_name': cgsnapshot.group_id}) + try: + self.dpl.delete_vdev_snapshot( + self._conver_uuid2hex(cgsnapshot.group_id), + self._conver_uuid2hex(cgsnapshot.id), True) + for snapshot in snapshots: + snapshot.status = fields.SnapshotStatus.DELETED + except Exception as e: + msg = _('Failed to delete cgsnapshot %(id)s due to ' + '%(reason)s.') % {'id': cgsnapshot.id, + 'reason': six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + + model_update['status'] = 'deleted' + return model_update, snapshots + + def update_group(self, context, group, add_volumes=None, + remove_volumes=None): + addvollist = [] + removevollist = [] + cgid = group.id + vid = '' + model_update = {'status': fields.GroupStatus.AVAILABLE} + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + # Get current group info in backend storage. 
+ ret, output = self.dpl.get_vg(self._conver_uuid2hex(cgid)) + if ret == 0: + group_members = output.get('children', []) + + if add_volumes: + addvollist = add_volumes + if remove_volumes: + removevollist = remove_volumes + + # Process join volumes. + try: + for volume in addvollist: + vid = volume['id'] + # Verify the volume exists in the group or not. + if self._conver_uuid2hex(vid) in group_members: + continue + self._join_volume_group(volume, cgid) + except Exception as e: + msg = _("Fexvisor failed to join the volume %(vol)s in the " + "group %(group)s due to " + "%(ret)s.") % {"vol": vid, "group": cgid, + "ret": six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + # Process leave volumes. + try: + for volume in removevollist: + vid = volume['id'] + if self._conver_uuid2hex(vid) in group_members: + self._leave_volume_group(volume, cgid) + except Exception as e: + msg = _("Fexvisor failed to remove the volume %(vol)s in the " + "group %(group)s due to " + "%(ret)s.") % {"vol": vid, "group": cgid, + "ret": six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + return model_update, None, None + + def create_group(self, context, group): + if volume_utils.is_group_a_cg_snapshot_type(group): + return self._create_consistencygroup(context, group) + raise NotImplementedError() + + def delete_group(self, context, group, volumes): + if volume_utils.is_group_a_cg_snapshot_type(group): + return self._delete_consistencygroup(context, group, volumes) + raise NotImplementedError() + + def create_group_snapshot(self, context, group_snapshot, snapshots): + if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): + return self._create_cgsnapshot(context, group_snapshot, snapshots) + raise NotImplementedError() + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): + return self._delete_cgsnapshot(context, group_snapshot, snapshots) + raise NotImplementedError() + + def create_group_from_src(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + err_msg = _("Prophet Storage doesn't support create_group_from_src.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def create_volume(self, volume): + """Create a volume.""" + pool = volume_utils.extract_host(volume['host'], + level='pool') + if not pool: + if not self.configuration.dpl_pool: + msg = _("Pool is not available in the volume host fields.") + raise exception.InvalidHost(reason=msg) + else: + pool = self.configuration.dpl_pool + + ret, output = self.dpl.create_vdev( + self._conver_uuid2hex(volume['id']), + volume.get('display_name', ''), + volume.get('display_description', ''), + pool, + int(volume['size']) * units.Gi, + self.configuration.san_thin_provision) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to create volume %(volume)s: ' + '%(status)s.') % {'volume': volume['id'], + 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to create volume (get event) ' + '%s.') % (volume['id']) + raise exception.VolumeBackendAPIException( + data=msg) + elif ret != 0: + msg = _('Flexvisor create volume failed.:%(volumeid)s:' + '%(status)s.') % {'volumeid': volume['id'], + 'status': ret} + raise 
exception.VolumeBackendAPIException( + data=msg) + else: + LOG.info('Flexvisor succeeded to create volume %(id)s.', + {'id': volume['id']}) + + if volume.group_id: + group = volume_utils.group_get_by_id(volume.group_id) + if volume_utils.is_group_a_cg_snapshot_type(group): + try: + self._join_volume_group(volume, volume.group_id) + except Exception: + # Delete volume if volume failed to join group. + self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) + msg = _('Flexvisor failed to create volume %(id)s in the ' + 'group %(vgid)s.') % { + 'id': volume['id'], + 'vgid': volume.group_id} + raise exception.VolumeBackendAPIException(data=msg) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + src_volume = None + vgID = None + # Detect whether a member of the group. + snapshotID = snapshot['id'] + # Try to get cgid if volume belong in the group. + src_volumeID = snapshot['volume_id'] + cgsnapshotID = snapshot.get('group_snapshot_id', None) + if cgsnapshotID: + try: + src_volume = self.db.volume_get(src_volumeID) + except Exception: + msg = _("Flexvisor unable to find the source volume " + "%(id)s info.") % {'id': src_volumeID} + raise exception.VolumeBackendAPIException(data=msg) + if src_volume: + vgID = src_volume.group_id + + # Get the volume origin snapshot id if the source snapshot is group + # snapshot. + if vgID: + snapshotID = self._get_snapshotid_of_vgsnapshot( + self._conver_uuid2hex(vgID), + self._conver_uuid2hex(cgsnapshotID), + self._conver_uuid2hex(src_volumeID)) + + pool = volume_utils.extract_host(volume['host'], + level='pool') + if not pool: + if not self.configuration.dpl_pool: + msg = _("Pool is not available in the volume host fields.") + raise exception.InvalidHost(reason=msg) + else: + pool = self.configuration.dpl_pool + + ret, output = self.dpl.create_vdev_from_snapshot( + self._conver_uuid2hex(volume['id']), + volume.get('display_name', ''), + volume.get('display_description', ''), + self._conver_uuid2hex(snapshotID), + pool, + self.configuration.san_thin_provision) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to create volume from ' + 'snapshot %(id)s:' + '%(status)s.') % {'id': snapshot['id'], + 'status': ret} + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = _('Flexvisor failed to create volume from snapshot ' + '(failed to get event) ' + '%(id)s.') % {'id': snapshot['id']} + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor failed to create volume from snapshot ' + '%(id)s: %(status)s.') % {'id': snapshot['id'], + 'status': ret} + raise exception.VolumeBackendAPIException( + data=msg) + else: + LOG.info('Flexvisor succeeded to create volume %(id)s ' + 'from snapshot.', {'id': volume['id']}) + + if volume['size'] > snapshot['volume_size']: + self.extend_volume(volume, volume['size']) + + if volume.group_id: + group = volume_utils.group_get_by_id(volume.group_id) + if volume_utils.is_group_a_cg_snapshot_type(group): + try: + self._join_volume_group(volume, volume.group_id) + except Exception: + # Delete volume if volume failed to join group. 
+ self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) + raise + + def spawn_volume_from_snapshot(self, volume, snapshot): + """Spawn a REFERENCED volume from a snapshot.""" + ret, output = self.dpl.spawn_vdev_from_snapshot( + self._conver_uuid2hex(volume['id']), + self._conver_uuid2hex(snapshot['volume_id']), + volume.get('display_name', ''), + volume.get('display_description', ''), + self._conver_uuid2hex(snapshot['id'])) + + if ret == errno.EAGAIN: + # its an async process + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to spawn volume from snapshot ' + '%(id)s:%(status)s.') % {'id': snapshot['id'], + 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to spawn volume from snapshot ' + '(failed to get event) ' + '%(id)s.') % {'id': snapshot['id']} + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor failed to create volume from snapshot ' + '%(id)s: %(status)s.') % {'id': snapshot['id'], + 'status': ret} + + raise exception.VolumeBackendAPIException( + data=msg) + else: + LOG.info('Flexvisor succeeded to create volume %(id)s ' + 'from snapshot.', {'id': volume['id']}) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + pool = volume_utils.extract_host(volume['host'], + level='pool') + if not pool: + if not self.configuration.dpl_pool: + msg = _("Pool is not available in the volume host fields.") + raise exception.InvalidHost(reason=msg) + else: + pool = self.configuration.dpl_pool + + ret, output = self.dpl.clone_vdev( + self._conver_uuid2hex(src_vref['id']), + self._conver_uuid2hex(volume['id']), + pool, + volume.get('display_name', ''), + volume.get('display_description', ''), + int(volume['size']) * units.Gi, + self.configuration.san_thin_provision) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to clone volume %(id)s: ' + '%(status)s.') % {'id': src_vref['id'], + 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to clone volume (failed to' + ' get event) %(id)s.') % {'id': src_vref['id']} + raise exception.VolumeBackendAPIException( + data=msg) + elif ret != 0: + msg = _('Flexvisor failed to clone volume %(id)s: ' + '%(status)s.') % {'id': src_vref['id'], 'status': ret} + raise exception.VolumeBackendAPIException( + data=msg) + else: + LOG.info('Flexvisor succeeded to clone volume %(id)s.', + {'id': volume['id']}) + + if volume.group_id: + group = volume_utils.group_get_by_id(volume.group_id) + if volume_utils.is_group_a_cg_snapshot_type(group): + try: + self._join_volume_group(volume, volume.group_id) + except Exception: + # Delete volume if volume failed to join group. 
+ self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) + msg = _('Flexvisor volume %(id)s failed to join group ' + '%(vgid)s.') % {'id': volume['id'], + 'vgid': volume.group_id} + raise exception.VolumeBackendAPIException(data=msg) + + def delete_volume(self, volume): + """Deletes a volume.""" + ret = 0 + if volume.group_id: + group = volume_utils.group_get_by_id(volume.group_id) + if group and volume_utils.is_group_a_cg_snapshot_type(group): + msg = '' + try: + ret, out = self.dpl.leave_vg( + self._conver_uuid2hex(volume['id']), + self._conver_uuid2hex(volume.group_id)) + if ret: + LOG.warning('Flexvisor failed to delete volume ' + '%(id)s from the group %(vgid)s.', + {'id': volume['id'], + 'vgid': volume.group_id}) + except Exception as e: + LOG.warning('Flexvisor failed to delete volume %(id)s ' + 'from group %(vgid)s due to %(status)s.', + {'id': volume['id'], + 'vgid': volume.group_id, + 'status': e}) + + if ret: + ret = 0 + + ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) + if ret == errno.EAGAIN: + status = self._wait_event(self.dpl.get_vdev, volume['id']) + if status['state'] == 'error': + msg = _('Flexvisor failed deleting volume %(id)s: ' + '%(status)s.') % {'id': volume['id'], 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + elif ret == errno.ENODATA: + ret = 0 + LOG.info('Flexvisor volume %(id)s does not ' + 'exist.', {'id': volume['id']}) + elif ret != 0: + msg = _('Flexvisor failed to delete volume %(id)s: ' + '%(status)s.') % {'id': volume['id'], 'status': ret} + raise exception.VolumeBackendAPIException( + data=msg) + + def extend_volume(self, volume, new_size): + ret, output = self.dpl.extend_vdev(self._conver_uuid2hex(volume['id']), + volume.get('display_name', ''), + volume.get('display_description', + ''), + new_size * units.Gi) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + volume['id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to extend volume ' + '%(id)s:%(status)s.') % {'id': volume, + 'status': ret} + raise exception.VolumeBackendAPIException( + data=msg) + else: + msg = _('Flexvisor failed to extend volume ' + '(failed to get event) ' + '%(id)s.') % {'id': volume['id']} + raise exception.VolumeBackendAPIException(data=msg) + elif ret != 0: + msg = _('Flexvisor failed to extend volume ' + '%(id)s: %(status)s.') % {'id': volume['id'], + 'status': ret} + raise exception.VolumeBackendAPIException( + data=msg) + else: + LOG.info('Flexvisor succeeded to extend volume' + ' %(id)s.', {'id': volume['id']}) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + ret, output = self.dpl.create_vdev_snapshot( + self._conver_uuid2hex(snapshot['volume_id']), + self._conver_uuid2hex(snapshot['id']), + snapshot.get('display_name', ''), + snapshot.get('display_description', '')) + + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + snapshot['volume_id'], + event_uuid) + if status['state'] != 'available': + msg = (_('Flexvisor failed to create snapshot for volume ' + '%(id)s: %(status)s.') % + {'id': snapshot['volume_id'], 'status': ret}) + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = (_('Flexvisor failed to create snapshot for volume ' + '(failed to get event) %(id)s.') % + {'id': snapshot['volume_id']}) + raise exception.VolumeBackendAPIException(data=msg) + elif 
ret != 0: + msg = _('Flexvisor failed to create snapshot for volume %(id)s: ' + '%(status)s.') % {'id': snapshot['volume_id'], + 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + ret, output = self.dpl.delete_vdev_snapshot( + self._conver_uuid2hex(snapshot['volume_id']), + self._conver_uuid2hex(snapshot['id'])) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_vdev_status, + snapshot['volume_id'], + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to delete snapshot %(id)s: ' + '%(status)s.') % {'id': snapshot['id'], + 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _('Flexvisor failed to delete snapshot (failed to ' + 'get event) %(id)s.') % {'id': snapshot['id']} + raise exception.VolumeBackendAPIException(data=msg) + elif ret == errno.ENODATA: + LOG.info('Flexvisor snapshot %(id)s not existed.', + {'id': snapshot['id']}) + elif ret != 0: + msg = _('Flexvisor failed to delete snapshot %(id)s: ' + '%(status)s.') % {'id': snapshot['id'], 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + LOG.info('Flexvisor succeeded to delete snapshot %(id)s.', + {'id': snapshot['id']}) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self._update_volume_stats() + + return self._stats + + def _get_pools(self): + pools = [] + qpools = [] + # Defined access pool by cinder configuration. + defined_pool = self.configuration.dpl_pool + if defined_pool: + qpools.append(defined_pool) + else: + try: + ret, output = self.dpl.get_pools() + if ret == 0: + for poolUuid, poolName in output.get('children', []): + qpools.append(poolUuid) + else: + LOG.error("Flexvisor failed to get pool list." + " (Error: %d)", ret) + except Exception as e: + LOG.error("Flexvisor failed to get pool list due to " + "%s.", e) + + # Query pool detail information + for poolid in qpools: + ret, output = self._get_pool_info(poolid) + if ret == 0: + pool = {} + pool['pool_name'] = output['metadata']['pool_uuid'] + pool['total_capacity_gb'] = ( + self._convert_size_GB( + int(output['metadata']['total_capacity']))) + pool['free_capacity_gb'] = ( + self._convert_size_GB( + int(output['metadata']['available_capacity']))) + pool['QoS_support'] = False + pool['reserved_percentage'] = 0 + pools.append(pool) + else: + LOG.warning("Failed to query pool %(id)s status " + "%(ret)d.", {'id': poolid, 'ret': ret}) + continue + return pools + + def _update_volume_stats(self, refresh=False): + """Return the current state of the volume service. + + If 'refresh' is True, run the update first. 
+ """ + data = {} + pools = self._get_pools() + data['volume_backend_name'] = ( + self.configuration.safe_get('volume_backend_name')) + location_info = '%(driver)s:%(host)s:%(volume)s' % { + 'driver': self.__class__.__name__, + 'host': self.configuration.san_ip, + 'volume': self.configuration.dpl_pool + } + try: + ret, output = self.dpl.get_server_info() + if ret == 0: + data['vendor_name'] = output['metadata']['vendor'] + data['driver_version'] = output['metadata']['version'] + data['storage_protocol'] = 'iSCSI' + data['location_info'] = location_info + data['consistencygroup_support'] = True + data['consistent_group_snapshot_enabled'] = True + data['pools'] = pools + self._stats = data + except Exception as e: + LOG.error('Failed to get server info due to ' + '%(state)s.', {'state': e}) + return self._stats + + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + self.context = context + LOG.info('Activate Flexvisor cinder volume driver.') + + def check_for_setup_error(self): + """Check DPL can connect properly.""" + pass + + def _get_pool_info(self, poolid): + """Query pool information.""" + ret, output = self.dpl.get_pool(poolid) + if ret == errno.EAGAIN: + ret, event_uuid = self._get_event_uuid(output) + if ret == 0: + status = self._wait_event(self.dpl.get_pool_status, poolid, + event_uuid) + if status['state'] != 'available': + msg = _('Flexvisor failed to get pool info %(id)s: ' + '%(status)s.') % {'id': poolid, 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + ret = 0 + output = status.get('output', {}) + else: + LOG.error('Flexvisor failed to get pool %(id)s info.', + {'id': poolid}) + raise exception.VolumeBackendAPIException( + data="failed to get event") + elif ret != 0: + msg = _('Flexvisor failed to get pool info %(id)s: ' + '%(status)s.') % {'id': poolid, 'status': ret} + raise exception.VolumeBackendAPIException(data=msg) + else: + LOG.debug('Flexvisor succeeded to get pool info.') + return ret, output diff --git a/cinder/volume/drivers/prophetstor/options.py b/cinder/volume/drivers/prophetstor/options.py new file mode 100644 index 00000000000..e5eb051e079 --- /dev/null +++ b/cinder/volume/drivers/prophetstor/options.py @@ -0,0 +1,31 @@ +# Copyright (c) 2014 ProphetStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from oslo_config import cfg
+
+from cinder.volume import configuration
+
+DPL_OPTS = [
+    cfg.StrOpt('dpl_pool',
+               default='',
+               help='DPL pool uuid in which DPL volumes are stored.'),
+    cfg.PortOpt('dpl_port',
+                default=8357,
+                help='DPL port number.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(DPL_OPTS, group=configuration.SHARED_CONF_GROUP)
diff --git a/doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst b/doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst
new file mode 100644
index 00000000000..9d5a2bc8d9a
--- /dev/null
+++ b/doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst
@@ -0,0 +1,104 @@
+===========================================
+ProphetStor Fibre Channel and iSCSI drivers
+===========================================
+
+ProphetStor Fibre Channel and iSCSI drivers add support for
+ProphetStor Flexvisor through the Block Storage service.
+ProphetStor Flexvisor enables commodity x86 hardware as software-defined
+storage leveraging well-proven ZFS for disk management to provide
+enterprise grade storage services such as snapshots, data protection
+with different RAID levels, replication, and deduplication.
+
+The ``DPLFCDriver`` and ``DPLISCSIDriver`` drivers run volume operations
+by communicating with the ProphetStor storage system over HTTPS.
+
+Supported operations
+~~~~~~~~~~~~~~~~~~~~
+
+* Create, delete, attach, and detach volumes.
+
+* Create, list, and delete volume snapshots.
+
+* Create a volume from a snapshot.
+
+* Copy an image to a volume.
+
+* Copy a volume to an image.
+
+* Clone a volume.
+
+* Extend a volume.
+
+Enable the Fibre Channel or iSCSI drivers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``DPLFCDriver`` and ``DPLISCSIDriver`` are installed with the OpenStack
+software.
+
+#. Query the storage pool id to configure ``dpl_pool`` in the ``cinder.conf``
+   file.
+
+   a. Log on to the storage system with administrator access.
+
+      .. code-block:: console
+
+         $ ssh root@STORAGE_IP_ADDRESS
+
+   b. View the current usable pool id.
+
+      .. code-block:: console
+
+         $ flvcli show pool list
+         - d5bd40b58ea84e9da09dcf25a01fdc07 : default_pool_dc07
+
+   c. Use ``d5bd40b58ea84e9da09dcf25a01fdc07`` to configure the ``dpl_pool``
+      option in the ``/etc/cinder/cinder.conf`` file (see the note after
+      these steps for how the driver chooses a pool).
+
+      .. note::
+
+         Other management commands can be referenced with the help command
+         :command:`flvcli -h`.
+
+#. Make the following changes in the ``/etc/cinder/cinder.conf`` file on the
+   volume node.
+
+   .. code-block:: ini
+
+      # IP address of SAN controller (string value)
+      san_ip=STORAGE IP ADDRESS
+
+      # Username for SAN controller (string value)
+      san_login=USERNAME
+
+      # Password for SAN controller (string value)
+      san_password=PASSWORD
+
+      # Use thin provisioning for SAN volumes? (boolean value)
+      san_thin_provision=true
+
+      # The port that the iSCSI daemon is listening on. (integer value)
+      iscsi_port=3260
+
+      # DPL pool uuid in which DPL volumes are stored. (string value)
+      dpl_pool=d5bd40b58ea84e9da09dcf25a01fdc07
+
+      # DPL port number. (integer value)
+      dpl_port=8357
+
+      # Uncomment one of the next two options to enable Fibre Channel or iSCSI
+      # FIBRE CHANNEL (uncomment the next line to enable the FC driver)
+      #volume_driver=cinder.volume.drivers.prophetstor.dpl_fc.DPLFCDriver
+      # iSCSI (uncomment the next line to enable the iSCSI driver)
+      #volume_driver=cinder.volume.drivers.prophetstor.dpl_iscsi.DPLISCSIDriver
+
+#. Save the changes to the ``/etc/cinder/cinder.conf`` file and
+   restart the ``cinder-volume`` service.
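+
+.. note::
+
+   ``dpl_pool`` acts as a fallback. When the scheduler reports a pool in the
+   volume's host field (``hostname@backend#<pool uuid>``), the driver uses
+   that pool and only falls back to the ``dpl_pool`` option otherwise. The
+   sketch below illustrates that selection logic; ``choose_pool`` is an
+   illustrative helper for this guide, not part of the driver code.
+
+   .. code-block:: python
+
+      def choose_pool(volume_host, dpl_pool_option):
+          """Return the pool uuid to place a new volume in."""
+          # 'hostname@backend#<pool uuid>' -- the part after '#' wins.
+          _, _, pool = volume_host.partition('#')
+          if pool:
+              return pool
+          # Otherwise fall back to the configured dpl_pool value.
+          if not dpl_pool_option:
+              raise ValueError(
+                  'Pool is not available in the volume host fields.')
+          return dpl_pool_option
+
+      # choose_pool('host@backend#d5bd40b58ea84e9da09dcf25a01fdc07', '')
+      # returns 'd5bd40b58ea84e9da09dcf25a01fdc07'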
+
+The ProphetStor Fibre Channel or iSCSI drivers are now enabled on your
+OpenStack system. If you experience problems, review the Block Storage
+service log files for errors.
+
+The following table contains the options supported by the ProphetStor
+storage driver.
+
+.. include:: ../../tables/cinder-prophetstor_dpl.inc
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index 100515156b4..f9e5b0ba3b2 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -138,6 +138,9 @@ title=Generic NFS Reference Driver (NFS)
 [driver.nimble]
 title=Nimble Storage Driver (iSCSI)
 
+[driver.prophetstor]
+title=ProphetStor Flexvisor Driver (iSCSI, NFS)
+
 [driver.pure]
 title=Pure Storage Driver (iSCSI, FC)
 
@@ -231,6 +234,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=complete
 driver.nfs=complete
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=complete
 driver.qnap=complete
 driver.quobyte=complete
@@ -292,6 +296,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=complete
 driver.nfs=complete
 driver.nimble=complete
+driver.prophetstor=complete
 driver.pure=complete
 driver.qnap=complete
 driver.quobyte=complete
@@ -353,6 +358,7 @@ driver.netapp_solidfire=missing
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=missing
 driver.qnap=missing
 driver.quobyte=missing
@@ -417,6 +423,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=missing
 driver.qnap=missing
 driver.quobyte=missing
@@ -480,6 +487,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=complete
 driver.qnap=missing
 driver.quobyte=missing
@@ -544,6 +552,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=complete
 driver.pure=complete
 driver.qnap=missing
 driver.quobyte=missing
@@ -607,6 +616,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=missing
 driver.nfs=complete
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=complete
 driver.qnap=missing
 driver.quobyte=missing
@@ -671,6 +681,7 @@ driver.netapp_solidfire=missing
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=missing
 driver.qnap=missing
 driver.quobyte=missing
@@ -735,6 +746,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=complete
 driver.qnap=missing
 driver.quobyte=missing
@@ -796,6 +808,7 @@ driver.netapp_solidfire=complete
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=missing
 driver.qnap=missing
 driver.quobyte=missing
@@ -861,6 +874,7 @@ driver.netapp_solidfire=missing
 driver.nexenta=missing
 driver.nfs=missing
 driver.nimble=missing
+driver.prophetstor=missing
 driver.pure=missing
 driver.qnap=missing
 driver.quobyte=missing
diff --git a/doc/source/reference/support-matrix.rst b/doc/source/reference/support-matrix.rst
index 0a2aec3b16f..2310640f3fd 100644
--- a/doc/source/reference/support-matrix.rst
+++ b/doc/source/reference/support-matrix.rst
@@ -88,5 +88,4 @@ release.
 * Ussuri
 
   * HPE Lefthand Driver (iSCSI)
-  * ProphetStor Flexvisor Driver
   * Sheepdog Driver
diff --git a/releasenotes/notes/prophetstor-driver-removal-f6b9911d1f84b48f.yaml b/releasenotes/notes/prophetstor-driver-removal-f6b9911d1f84b48f.yaml
deleted file mode 100644
index ff64b4323db..00000000000
--- a/releasenotes/notes/prophetstor-driver-removal-f6b9911d1f84b48f.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-upgrade:
-  - |
-    The ProphetStor Flexvisor driver was marked unsupported in the
-    Train release and has now been removed. All data on
-    ProphetStor Flexvisor backends should be migrated to a supported
-    storage backend before upgrading your Cinder installation.
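
A note on the asynchronous control flow in the driver code above: the
Flexvisor REST API returns HTTP 202 Accepted for long-running operations.
``DPLCommand.send_cmd`` maps that status to ``errno.EAGAIN``, the caller
reads the ``event_uuid`` from the response metadata (``_get_event_uuid``),
and ``_wait_event`` polls the matching status call until
``completionStatus`` becomes ``Complete``. The following is a condensed
sketch of that pattern, assuming ``get_status`` behaves like
``DPLVolume.get_vdev_status``; ``wait_for_flexvisor_event`` itself is
illustrative and not part of the driver.

.. code-block:: python

   import errno
   import time


   def wait_for_flexvisor_event(get_status, vdev_id, response,
                                retries=30, interval=1):
       """Poll a Flexvisor asynchronous operation until it finishes."""
       # The event uuid arrives in the metadata of the EAGAIN response.
       event_uuid = response.get('metadata', {}).get('event_uuid')
       if not event_uuid:
           raise RuntimeError('EAGAIN response without an event uuid')

       for _ in range(retries):
           # The driver strips dashes from uuids before calling the API.
           ret, output = get_status(vdev_id.replace('-', ''),
                                    event_uuid.replace('-', ''))
           if ret == 0 and output.get('completionStatus') == 'Complete':
               return output
           if ret == 0 and output.get('completionStatus') == 'Error':
               raise RuntimeError('Flexvisor reported the operation failed')
           if ret == errno.ENODATA:
               # The object is already gone (e.g. a completed delete).
               return None
           time.sleep(interval)
       raise RuntimeError('Timed out waiting for the Flexvisor event')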