From 8c04a9a92263d771741f03f003b1a6ee6d4be73c Mon Sep 17 00:00:00 2001 From: Woojay Poynter Date: Mon, 10 Dec 2018 14:16:35 -0800 Subject: [PATCH] Add new LINSTOR volume driver LINSTOR is a storage orchestrator for DRBD. Change-Id: Ic3a4f85c10a2432d4128fce08604e9868722e8f7 --- cinder/opts.py | 3 + .../unit/volume/drivers/test_linstordrv.py | 1137 +++++++++++++++++ cinder/volume/drivers/linstordrv.py | 1088 ++++++++++++++++ .../block-storage/drivers/linstor-driver.rst | 29 + .../block-storage/volume-drivers.rst | 3 +- doc/source/reference/support-matrix.ini | 12 + driver-requirements.txt | 5 + ...instor-volume-driver-20273a9ad3783cf5.yaml | 4 + 8 files changed, 2280 insertions(+), 1 deletion(-) create mode 100644 cinder/tests/unit/volume/drivers/test_linstordrv.py create mode 100644 cinder/volume/drivers/linstordrv.py create mode 100644 doc/source/configuration/block-storage/drivers/linstor-driver.rst create mode 100644 releasenotes/notes/drbd-linstor-volume-driver-20273a9ad3783cf5.yaml diff --git a/cinder/opts.py b/cinder/opts.py index cf25d618c84..6c24c1eaf8f 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -127,6 +127,8 @@ from cinder.volume.drivers.kaminario import kaminario_common as \ cinder_volume_drivers_kaminario_kaminariocommon from cinder.volume.drivers.lenovo import lenovo_common as \ cinder_volume_drivers_lenovo_lenovocommon +from cinder.volume.drivers import linstordrv as \ + cinder_volume_drivers_linstordrv from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm from cinder.volume.drivers.netapp import options as \ cinder_volume_drivers_netapp_options @@ -310,6 +312,7 @@ def list_opts(): kaminario_opts, cinder_volume_drivers_lenovo_lenovocommon.common_opts, cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts, + cinder_volume_drivers_linstordrv.linstor_opts, cinder_volume_drivers_lvm.volume_opts, cinder_volume_drivers_netapp_options.netapp_proxy_opts, cinder_volume_drivers_netapp_options.netapp_connection_opts, diff --git a/cinder/tests/unit/volume/drivers/test_linstordrv.py b/cinder/tests/unit/volume/drivers/test_linstordrv.py new file mode 100644 index 00000000000..c6c5eda412c --- /dev/null +++ b/cinder/tests/unit/volume/drivers/test_linstordrv.py @@ -0,0 +1,1137 @@ +# Copyright (c) 2018 LINBIT HA Solutions GmbH +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from oslo_config import cfg +from oslo_utils import timeutils + +from cinder import exception as cinder_exception +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers import linstordrv as drv + +CONF = cfg.CONF + +CINDER_UNKNOWN = 'unknown' +LVM = 'Lvm' +LVMTHIN = 'LvmThin' +DRIVER = 'cinder.volume.drivers.linstordrv.' 
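+# DRIVER is the prefix used to build mock.patch targets in the tests
+# below, e.g. mock.patch(DRIVER + 'LinstorBaseDriver._ping').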
+ +RESOURCE = { + 'name': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'volume': { + 'device_path': '/dev/drbd1000', + }, +} + +RESOURCE_LIST = { + 'resourceStates': [ + { + 'rscName': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'nodeName': 'node_one', + 'inUse': False, + 'vlmStates': [ + { + 'vlmNr': 0, + 'diskState': 'Diskless', + } + ], + }, + { + 'rscName': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'nodeName': 'node_two', + 'inUse': False, + 'vlmStates': [ + { + 'vlmNr': 0, + 'diskState': 'UpToDate', + } + ], + }, + ], + 'resources': [ + { + 'name': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'nodeId': 0, + 'nodeName': 'node_one', + 'nodeUuid': '67939f68-2b26-41b7-b32e-a20b77664aef', + 'props': [{'key': 'PeerSlots', 'value': '7'}], + 'rscDfnUuid': '03623665-35a3-4caa-aa92-0c8badbda84a', + 'uuid': '559a229e-2b97-4d20-8f6d-87778bbe2f9e', + 'vlms': [ + { + 'backingDisk': '/dev/vg-35/f1_00000', + 'devicePath': '/dev/drbd1000', + 'metaDisk': 'internal', + 'storPoolName': 'DfltStorPool', + 'storPoolUuid': 'd2f293f5-5d73-4447-a14b-70efe01302be', + 'vlmDfnUuid': '0eedabe4-3c20-4eff-af74-b2ec2304ab0c', + 'vlmMinorNr': 1000, + 'vlmNr': 0, + 'vlmUuid': '38e48fb8-e0af-4317-8aab-aabb46db4cf8' + } + ] + }, + { + 'name': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'nodeId': 1, + 'nodeName': 'node_two', + 'nodeUuid': '82c4c5a5-8290-481e-9e35-1c71094b0cab', + 'props': [{'key': 'PeerSlots', 'value': '7'}], + 'rscDfnUuid': '03623665-35a3-4caa-aa92-0c8badbda84a', + 'rscFlags': ['DISKLESS'], + 'uuid': '23d3d331-ad0c-43f3-975b-d1048e09dc23', + 'vlms': [ + { + 'backingDisk': 'none', + 'devicePath': '/dev/drbd1000', + 'metaDisk': 'internal', + 'storPoolName': 'DfltStorPool', + 'storPoolUuid': '85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'vlmDfnUuid': '0eedabe4-3c20-4eff-af74-b2ec2304ab0c', + 'vlmMinorNr': 1000, + 'vlmNr': 0, + 'vlmUuid': 'd25b6c91-680f-4aa6-97c3-533e4bf4e659' + } + ] + } + ] +} + +RESOURCE_LIST_RESP = ['node_two', 'node_one'] + +SNAPSHOT_LIST_RESP = ['node_two'] + +RESOURCE_DFN_LIST = { + 'rscDfns': [ + { + 'rscDfnPort': 7002, + 'rscDfnProps': [{'key': u'DrbdPrimarySetOn', + 'value': u'NODE_TWO'}], + 'rscDfnSecret': u'syxflfoMqj84cUUcsqta', + 'rscDfnUuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', + 'rscName': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'vlmDfns': [ + { + 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + 'vlmMinor': 1001, + 'vlmNr': 0, + 'vlmProps': [{'key': u'DrbdCurrentGi', + 'value': u'2286D24524D26AA'}], + 'vlmSize': '1044480'} + ] + }, + ] +} + +RESOURCE_DFN_LIST_RESP = [ + { + 'rd_name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'rd_port': 7002, + 'rd_size': 1.0, + 'rd_uuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', + 'vlm_dfn_uuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b' + } +] + +NODES_LIST = { + 'nodes': [ + { + 'connectionStatus': 2, + 'name': u'node_two', + 'netInterfaces': [ + { + 'address': u'192.168.66.113', + 'name': u'default', + 'stltEncryptionType': u'PLAIN', + 'stltPort': 3366, + 'uuid': u'224e50c3-09a8-4cf8-b701-13663a66aecd' + } + ], + 'props': [{'key': u'CurStltConnName', 'value': u'default'}], + 'type': u'COMBINED', + 'uuid': u'67939f68-2b26-41b7-b32e-a20b77664aef' + }, + { + 'connectionStatus': 2, + 'name': u'node_one', + 'netInterfaces': [ + { + 'address': u'192.168.66.115', + 'name': u'default', + 'stltEncryptionType': u'PLAIN', + 'stltPort': 3366, + 'uuid': u'36f42ec9-9999-4ad7-a889-8d7dbb498163' + } + ], + 'props': [{'key': u'CurStltConnName', 'value': u'default'}], + 'type': u'COMBINED', + 'uuid': 
u'82c4c5a5-8290-481e-9e35-1c71094b0cab' + } + ] +} + +NODES_RESP = [ + { + 'node_address': u'192.168.66.113', + 'node_name': u'node_two', + 'node_uuid': u'67939f68-2b26-41b7-b32e-a20b77664aef' + }, + { + 'node_address': u'192.168.66.115', + 'node_name': u'node_one', + 'node_uuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab' + } +] + +STORAGE_POOL_DEF = { + 'storPoolDfns': [ + { + 'storPoolName': u'DfltStorPool', + 'uuid': u'f51611c6-528f-4793-a87a-866d09e6733a' + } + ] +} + +STORAGE_POOL_DEF_RESP = [ + { + 'spd_name': u'DfltStorPool', + 'spd_uuid': u'f51611c6-528f-4793-a87a-866d09e6733a' + } +] + +STORAGE_POOL_LIST = { + 'storPools': [ + { + 'driver': u'LvmThinDriver', + 'freeSpace': { + 'freeCapacity': '36700160', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', + 'totalCapacity': '36700160' + }, + 'nodeName': u'node_two', + 'nodeUuid': u'67939f68-2b26-41b7-b32e-a20b77664aef', + 'props': [{'key': u'StorDriver/LvmVg', 'value': u'vg-35'}, + {'key': u'StorDriver/ThinPool', + 'value': u'thinpool'}], + 'staticTraits': [{'key': u'Provisioning', 'value': u'Thin'}, + {'key': u'SupportsSnapshots', + 'value': u'true'}], + 'storPoolDfnUuid': u'f51611c6-528f-4793-a87a-866d09e6733a', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', + 'vlms': [ + { + 'backingDisk': + u'/dev/vg-35/CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'devicePath': u'/dev/drbd1001', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', + 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + 'vlmMinorNr': 1001, + 'vlmNr': 0, + 'vlmUuid': u'b91392ae-904a-4bc6-862f-9c7aca629b35' + }, + { + 'backingDisk': u'/dev/vg-35/f1_00000', + 'devicePath': u'/dev/drbd1000', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', + 'vlmDfnUuid': u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c', + 'vlmMinorNr': 1000, + 'vlmNr': 0, + 'vlmUuid': u'38e48fb8-e0af-4317-8aab-aabb46db4cf8' + } + ] + }, + { + 'driver': u'DisklessDriver', + 'freeSpace': { + 'freeCapacity': '9223372036854775807', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'totalCapacity': '9223372036854775807' + }, + 'nodeName': u'node_one', + 'nodeUuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab', + 'staticTraits': [{'key': u'SupportsSnapshots', + 'value': u'false'}], + 'storPoolDfnUuid': u'f51611c6-528f-4793-a87a-866d09e6733a', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'vlms': [ + { + 'backingDisk': u'none', + 'devicePath': u'/dev/drbd1001', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + 'vlmMinorNr': 1001, + 'vlmNr': 0, + 'vlmUuid': u'4c63ee46-acb0-4aa5-8758-8fa8f65fdd5a' + }, + { + 'backingDisk': u'none', + 'devicePath': u'/dev/drbd1000', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'vlmDfnUuid': u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c', + 'vlmMinorNr': 1000, + 'vlmNr': 0, + 'vlmUuid': u'd25b6c91-680f-4aa6-97c3-533e4bf4e659' + } + ] + } + ] +} + +STORAGE_POOL_LIST_RESP = [ + { + 'driver_name': 'LvmThin', + 'node_name': u'node_two', + 'node_uuid': u'67939f68-2b26-41b7-b32e-a20b77664aef', + 'sp_cap': 35.0, + 'sp_free': 35.0, + 'sp_name': u'DfltStorPool', + 'sp_uuid': 
u'd2f293f5-5d73-4447-a14b-70efe01302be', + 'sp_vlms_uuid': [u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c'] + }, + { + 'driver_name': u'DisklessDriver', + 'node_name': u'node_one', + 'node_uuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab', + 'sp_cap': 0.0, + 'sp_free': -1.0, + 'sp_name': u'DfltStorPool', + 'sp_uuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'sp_vlms_uuid': [u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c'] + } +] + +VOLUME_LIST = { + 'resourceStates': [ + { + 'inUse': False, + 'nodeName': u'wp-u16-cinder-dev-lg', + 'rscName': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'vlmStates': [{'diskState': u'Diskless', 'vlmNr': 0}] + }, + { + 'nodeName': u'wp-u16-cinder-dev-1', 'rscName': u'foo' + }, + { + 'inUse': False, + 'nodeName': u'wp-u16-cinder-dev-1', + 'rscName': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'vlmStates': [{'diskState': u'UpToDate', 'vlmNr': 0}] + } + ], + 'resources': [ + { + 'name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'nodeId': 0, + 'nodeName': u'wp-u16-cinder-dev-1', + 'nodeUuid': u'67939f68-2b26-41b7-b32e-a20b77664aef', + 'props': [{'key': u'PeerSlots', 'value': u'7'}], + 'rscDfnUuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', + 'uuid': u'2da61a7a-83b7-41d1-8a96-3a1a118dfba2', + 'vlms': [ + { + 'backingDisk': + u'/dev/vg-35/CV_bc3015e6-695f-4688-91f2-' + + u'1deb4321e4f0_00000', + 'devicePath': u'/dev/drbd1001', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', + 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + 'vlmMinorNr': 1001, + 'vlmNr': 0, + 'vlmUuid': u'b91392ae-904a-4bc6-862f-9c7aca629b35' + } + ] + }, + { + 'name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'nodeId': 1, + 'nodeName': u'wp-u16-cinder-dev-lg', + 'nodeUuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab', + 'props': [{'key': u'PeerSlots', 'value': u'7'}], + 'rscDfnUuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', + 'rscFlags': [u'DISKLESS'], + 'uuid': u'bd6472d1-dc3c-4d41-a5f0-f44271c05680', + 'vlms': [ + { + 'backingDisk': u'none', + 'devicePath': u'/dev/drbd1001', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + 'vlmMinorNr': 1001, + 'vlmNr': 0, + 'vlmUuid': u'4c63ee46-acb0-4aa5-8758-8fa8f65fdd5a' + } + ] + } + ] +} + +VOLUME_LIST_RESP = [ + { + 'node_name': u'wp-u16-cinder-dev-1', + 'rd_name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'volume': [ + { + 'backingDisk': u'/dev/vg-35/CV_bc3015e6-695f-4688-91f2-' + + u'1deb4321e4f0_00000', + 'devicePath': u'/dev/drbd1001', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', + 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + 'vlmMinorNr': 1001, + 'vlmNr': 0, + 'vlmUuid': u'b91392ae-904a-4bc6-862f-9c7aca629b35' + } + ] + }, + { + 'node_name': u'wp-u16-cinder-dev-lg', + 'rd_name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'volume': [ + { + 'backingDisk': u'none', + 'devicePath': u'/dev/drbd1001', + 'metaDisk': u'internal', + 'storPoolName': u'DfltStorPool', + 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', + 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', + 'vlmMinorNr': 1001, + 'vlmNr': 0, + 'vlmUuid': u'4c63ee46-acb0-4aa5-8758-8fa8f65fdd5a' + } + ] + } +] + +VOLUME_STATS_RESP = { + 'driver_version': '0.0.7', + 'pools': [ + { + 'QoS_support': False, + 
'backend_state': 'up',
+            'filter_function': None,
+            'free_capacity_gb': 35.0,
+            'goodness_function': None,
+            'location_info': 'linstor://localhost',
+            'max_over_subscription_ratio': 0,
+            'multiattach': False,
+            'pool_name': 'lin-test-driver',
+            'provisioned_capacity_gb': 1.0,
+            'reserved_percentage': 0,
+            'thick_provisioning_support': False,
+            'thin_provisioning_support': True,
+            'total_capacity_gb': 35.0,
+            'total_volumes': 1,
+        }
+    ],
+    'vendor_name': 'LINBIT',
+    'volume_backend_name': 'lin-test-driver'
+}
+
+CINDER_VOLUME = {
+    'id': 'bc3015e6-695f-4688-91f2-1deb4321e4f0',
+    'name': 'test-lin-vol',
+    'size': 1,
+    'volume_type_id': 'linstor',
+    'created_at': timeutils.utcnow()
+}
+
+SNAPSHOT = {
+    'id': 'bc3015e6-695f-4688-91f2-1deb4321e4f0',
+    'volume_id': 'bc3015e6-695f-4688-91f2-1deb4321e4f0',
+    'volume_size': 1
+}
+
+VOLUME_NAMES = {
+    'linstor': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0',
+    'cinder': 'bc3015e6-695f-4688-91f2-1deb4321e4f0',
+    'snap': 'SN_bc3015e6-695f-4688-91f2-1deb4321e4f0',
+}
+
+
+class LinstorAPIFakeDriver(object):
+
+    def fake_api_ping(self):
+        return 1234
+
+    def fake_api_resource_list(self):
+        return RESOURCE_LIST
+
+    def fake_api_node_list(self):
+        return NODES_LIST
+
+    def fake_api_storage_pool_dfn_list(self):
+        return STORAGE_POOL_DEF
+
+    def fake_api_storage_pool_list(self):
+        return STORAGE_POOL_LIST
+
+    def fake_api_volume_list(self):
+        return VOLUME_LIST
+
+    def fake_api_resource_dfn_list(self):
+        return RESOURCE_DFN_LIST
+
+    def fake_api_snapshot_list(self):
+        return SNAPSHOT_LIST_RESP
+
+
+class LinstorBaseDriverTestCase(test.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(LinstorBaseDriverTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(LinstorBaseDriverTestCase, self).setUp()
+
+        if drv is None:
+            return
+
+        self._mock = mock.Mock()
+        self._fake_driver = LinstorAPIFakeDriver()
+
+        self.configuration = mock.Mock(conf.Configuration)
+
+        self.driver = drv.LinstorBaseDriver(
+            configuration=self.configuration)
+        self.driver.VERSION = '0.0.7'
+        self.driver.default_rsc_size = 1
+        self.driver.default_vg_name = 'vg-1'
+        self.driver.default_downsize_factor = 4096
+        self.driver.default_pool = STORAGE_POOL_DEF_RESP[0]['spd_name']
+        self.driver.host_name = 'node_one'
+        self.driver.diskless = True
+        self.driver.default_uri = 'linstor://localhost'
+        self.driver.default_backend_name = 'lin-test-driver'
+        self.driver.configuration.reserved_percentage = 0
+        self.driver.configuration.max_over_subscription_ratio = 0
+
+    @mock.patch(DRIVER + 'LinstorBaseDriver._ping')
+    def test_ping(self, m_ping):
+        m_ping.return_value = self._fake_driver.fake_api_ping()
+
+        val = self.driver._ping()
+        expected = 1234
+        self.assertEqual(expected, val)
+
+    @mock.patch('uuid.uuid4')
+    def test_clean_uuid(self, m_uuid):
+        m_uuid.return_value = u'bd6472d1-dc3c-4d41-a5f0-f44271c05680'
+
+        val = self.driver._clean_uuid()
+        expected = u'bd6472d1-dc3c-4d41-a5f0-f44271c05680'
+        self.assertEqual(expected, val)
+
+    # Test volume size conversions
+    def test_unit_conversions_to_linstor(self):
+        val = self.driver._vol_size_to_linstor(1)
+        expected = 1044480  # 1048576 - 4096
+        self.assertEqual(expected, val)
+
+    def test_unit_conversions_to_cinder(self):
+        val = self.driver._vol_size_to_cinder(1048576)
+        expected = 1
+        self.assertEqual(expected, val)
+
+    def test_is_clean_volume_name(self):
+        val = self.driver._is_clean_volume_name(VOLUME_NAMES['cinder'],
+                                                drv.DM_VN_PREFIX)
+        expected = VOLUME_NAMES['linstor']
+        self.assertEqual(expected, val)
+
+    def 
test_snapshot_name_from_cinder_snapshot(self): + val = self.driver._snapshot_name_from_cinder_snapshot( + SNAPSHOT) + expected = VOLUME_NAMES['snap'] + self.assertEqual(expected, val) + + def test_cinder_volume_name_from_drbd_resource(self): + val = self.driver._cinder_volume_name_from_drbd_resource( + VOLUME_NAMES['linstor']) + expected = VOLUME_NAMES['cinder'] + self.assertEqual(expected, val) + + def test_drbd_resource_name_from_cinder_snapshot(self): + val = self.driver._drbd_resource_name_from_cinder_snapshot( + SNAPSHOT) + expected = VOLUME_NAMES['linstor'] + self.assertEqual(expected, val) + + def test_drbd_resource_name_from_cinder_volume(self): + val = self.driver._drbd_resource_name_from_cinder_volume( + CINDER_VOLUME) + expected = VOLUME_NAMES['linstor'] + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_get_rcs_path(self, m_rsc_list): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + + val = self.driver._get_rsc_path(VOLUME_NAMES['linstor']) + expected = '/dev/vg-35/f1_00000' + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_get_local_path(self, m_rsc_list): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + + val = self.driver._get_local_path(CINDER_VOLUME) + expected = '/dev/vg-35/f1_00000' + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') + def test_get_spd(self, m_spd_list): + m_spd_list.return_value = ( + self._fake_driver.fake_api_storage_pool_dfn_list()) + + val = self.driver._get_spd() + expected = STORAGE_POOL_DEF_RESP + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + def test_get_storage_pool(self, m_sp_list): + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + + val = self.driver._get_storage_pool() + expected = STORAGE_POOL_LIST_RESP + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') + def test_get_resource_definitions(self, m_rscd_list): + m_rscd_list.return_value = ( + self._fake_driver.fake_api_resource_dfn_list()) + + val = self.driver._get_resource_definitions() + expected = RESOURCE_DFN_LIST_RESP + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') + def test_get_snapshot_nodes(self, m_rsc_list): + m_rsc_list.return_value = self._fake_driver.fake_api_snapshot_list() + + val = self.driver._get_snapshot_nodes(VOLUME_NAMES['linstor']) + expected = SNAPSHOT_LIST_RESP + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + def test_get_linstor_nodes(self, m_node_list): + m_node_list.return_value = self._fake_driver.fake_api_node_list() + + val = self.driver._get_linstor_nodes() + expected = RESOURCE_LIST_RESP + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + def test_get_nodes(self, m_node_list): + m_node_list.return_value = self._fake_driver.fake_api_node_list() + + val = self.driver._get_nodes() + expected = NODES_RESP + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver.get_goodness_function') + @mock.patch(DRIVER + 'LinstorBaseDriver.get_filter_function') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + def 
test_get_volume_stats(self, + m_sp_list, + m_rscd_list, + m_filter, + m_goodness): + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + m_rscd_list.return_value = ( + self._fake_driver.fake_api_resource_dfn_list()) + m_filter.return_value = None + m_goodness.return_value = None + + val = self.driver._get_volume_stats() + expected = VOLUME_STATS_RESP + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_create') + def test_create_snapshot_fail(self, + m_snap_create, + m_api_reply, + m_rsc_list): + m_snap_create.return_value = None + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_api_reply.return_value = False + + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.create_snapshot, SNAPSHOT) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_create') + def test_create_snapshot_success(self, + m_snap_create, + m_api_reply, + m_rsc_list): + m_snap_create.return_value = None + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_api_reply.return_value = True + + # No exception should be raised + self.assertIsNone(self.driver.create_snapshot(SNAPSHOT)) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_delete') + def test_delete_snapshot_fail(self, + m_snap_delete, + m_api_reply, + m_rsc_list, + m_rsc_dfn_list): + m_snap_delete.return_value = None + m_api_reply.return_value = False + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_rsc_dfn_list.return_value = ( + self._fake_driver.fake_api_resource_dfn_list()) + + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.delete_snapshot, SNAPSHOT) + + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_delete') + def test_delete_snapshot_success(self, + m_snap_delete, + m_api_reply, + m_rsc_list, + m_rsc_dfn_delete): + m_snap_delete.return_value = None + m_api_reply.return_value = True + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_rsc_dfn_delete.return_value = True + + # No exception should be raised + self.driver.delete_snapshot(SNAPSHOT) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_set_sp') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_linstor_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_volume_dfn_restore') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') + def test_create_volume_from_snapshot(self, + m_rsc_dfn_create, + m_api_reply, + m_snap_vd_restore, + m_lin_nodes, + m_snap_rsc_restore, + m_rsc_create, + 
m_vol_extend, + m_vol_dfn, + m_sp_list): + m_rsc_dfn_create.return_value = True + m_api_reply.return_value = True + m_snap_vd_restore.return_value = True + m_nodes = [] + m_nodes.append('for test') + m_nodes.remove('for test') + m_lin_nodes.return_value = m_nodes + m_snap_rsc_restore.return_value = True + m_rsc_create.return_value = True + m_vol_extend.return_value = True + m_vol_dfn.return_value = True + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + + self.assertIsNone(self.driver.create_volume_from_snapshot( + CINDER_VOLUME, SNAPSHOT)) + + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + def test_create_volume_fail_no_linstor_nodes(self, + m_sp_list, + m_node_list, + m_spd_list, + m_sp_create, + m_rsc_dfn_create, + m_vol_dfn_create, + m_rsc_create, + m_api_reply): + m_sp_list.return_value = [] + m_node_list.return_value = [] + m_spd_list.return_value = ( + self._fake_driver.fake_api_storage_pool_dfn_list()) + m_sp_create.return_value = True + m_rsc_dfn_create.return_value = True + m_vol_dfn_create.return_value = True + m_rsc_create.return_value = True + m_api_reply.return_value = True + + test_volume = CINDER_VOLUME + test_volume['migration_status'] = ('migrating:', + str(VOLUME_NAMES['cinder'])) + test_volume['display_name'] = 'test_volume' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.create_volume, test_volume) + + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + def test_create_volume_fail_rsc_create(self, + m_sp_list, + m_node_list, + m_spd_list, + m_sp_create, + m_rsc_dfn_create, + m_vol_dfn_create, + m_rsc_create, + m_api_reply): + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + m_node_list.return_value = self._fake_driver.fake_api_node_list() + m_spd_list.return_value = ( + self._fake_driver.fake_api_storage_pool_dfn_list()) + m_sp_create.return_value = True + m_rsc_dfn_create.return_value = True + m_vol_dfn_create.return_value = True + m_rsc_create.return_value = True + m_api_reply.return_value = False + + test_volume = CINDER_VOLUME + test_volume['migration_status'] = ('migrating:', + str(VOLUME_NAMES['cinder'])) + test_volume['display_name'] = 'test_volume' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.create_volume, test_volume) + + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') + @mock.patch(DRIVER + 
'LinstorBaseDriver._api_volume_dfn_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + def test_create_volume(self, + m_sp_list, + m_node_list, + m_spd_list, + m_sp_create, + m_rsc_dfn_create, + m_vol_dfn_create, + m_rsc_create, + m_api_reply): + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + m_node_list.return_value = self._fake_driver.fake_api_node_list() + m_spd_list.return_value = ( + self._fake_driver.fake_api_storage_pool_dfn_list()) + m_sp_create.return_value = True + m_rsc_dfn_create.return_value = True + m_vol_dfn_create.return_value = True + m_rsc_create.return_value = True + m_api_reply.return_value = True + + test_volume = CINDER_VOLUME + test_volume['migration_status'] = ('migrating:', + str(VOLUME_NAMES['cinder'])) + test_volume['display_name'] = 'test_volume' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + val = self.driver.create_volume(test_volume) + expected = {} + self.assertEqual(val, expected) + + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_delete_volume_fail_incomplete(self, + m_rsc_list, + m_rsc_delete, + m_vol_dfn_delete, + m_rsc_dfn_delete, + m_api_reply): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_rsc_delete.return_value = True + m_vol_dfn_delete.return_value = True + m_rsc_dfn_delete.return_value = True + m_api_reply.return_value = False + + test_volume = CINDER_VOLUME + test_volume['display_name'] = 'linstor_test' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.delete_volume, test_volume) + + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_delete_volume(self, + m_rsc_list, + m_rsc_delete, + m_vol_dfn_delete, + m_rsc_dfn_delete, + m_api_reply): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_rsc_delete.return_value = True + m_vol_dfn_delete.return_value = True + m_rsc_dfn_delete.return_value = True + m_api_reply.return_value = True + + test_volume = CINDER_VOLUME + test_volume['display_name'] = 'linstor_test' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + val = self.driver.delete_volume(test_volume) + expected = True + self.assertEqual(val, expected) + + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') + def test_extend_volume_success(self, m_vol_extend, m_api_reply): + m_vol_extend.return_value = True + m_api_reply.return_value = True + + # No exception should be raised + self.driver.extend_volume(CINDER_VOLUME, 2) + + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 
'LinstorBaseDriver._get_api_volume_extend') + def test_extend_volume_fail(self, m_vol_extend, m_api_reply): + m_vol_extend.return_value = False + m_api_reply.return_value = False + + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.extend_volume, + CINDER_VOLUME, + 2) + + def test_migrate_volume(self): + m_ctxt = {} + m_volume = {} + m_host = '' + + val = self.driver.migrate_volume(m_ctxt, m_volume, m_host) + expected = (False, None) + self.assertEqual(val, expected) + + +class LinstorIscsiDriverTestCase(test.TestCase): + + def __init__(self, *args, **kwargs): + super(LinstorIscsiDriverTestCase, self).__init__(*args, **kwargs) + + def setUp(self): + super(LinstorIscsiDriverTestCase, self).setUp() + + self._mock = mock.Mock() + self._fake_driver = LinstorAPIFakeDriver() + + self.configuration = mock.Mock(conf.Configuration) + self.configuration.iscsi_helper = 'tgtadm' + self.driver = drv.LinstorIscsiDriver( + configuration=self.configuration, h_name='tgtadm') + + self.driver.VERSION = '0.0.7' + self.driver.default_rsc_size = 1 + self.driver.default_vg_name = 'vg-1' + self.driver.default_downsize_factor = int('4096') + self.driver.default_pool = STORAGE_POOL_DEF_RESP[0]['spd_name'] + self.driver.host_name = 'node_one' + self.driver.diskless = True + self.driver.location_info = 'LinstorIscsi:linstor://localhost' + self.driver.default_backend_name = 'lin-test-driver' + self.driver.configuration.reserved_percentage = int('0') + self.driver.configuration.max_over_subscription_ratio = int('0') + + @mock.patch(DRIVER + 'LinstorIscsiDriver._get_volume_stats') + def test_iscsi_get_volume_stats(self, m_vol_stats): + + m_vol_stats.return_value = VOLUME_STATS_RESP + + val = self.driver.get_volume_stats() + + expected = VOLUME_STATS_RESP + expected["storage_protocol"] = 'iSCSI' + self.assertEqual(val, expected) + + @mock.patch(DRIVER + 'proto') + @mock.patch(DRIVER + 'linstor') + def test_iscsi_check_for_setup_error_pass(self, m_linstor, m_proto): + m_linstor.return_value = True + m_proto.return_value = True + + # No exception should be raised + self.driver.check_for_setup_error() + + +class LinstorDrbdDriverTestCase(test.TestCase): + + def __init__(self, *args, **kwargs): + super(LinstorDrbdDriverTestCase, self).__init__(*args, **kwargs) + + def setUp(self): + super(LinstorDrbdDriverTestCase, self).setUp() + + self._mock = mock.Mock() + self._fake_driver = LinstorAPIFakeDriver() + + self.configuration = mock.Mock(conf.Configuration) + self.driver = drv.LinstorDrbdDriver( + configuration=self.configuration) + + self.driver.VERSION = '0.0.7' + self.driver.default_rsc_size = 1 + self.driver.default_vg_name = 'vg-1' + self.driver.default_downsize_factor = int('4096') + self.driver.default_pool = STORAGE_POOL_DEF_RESP[0]['spd_name'] + self.driver.host_name = 'node_one' + self.driver.diskless = True + self.driver.location_info = 'LinstorDrbd:linstor://localhost' + self.driver.default_backend_name = 'lin-test-driver' + self.driver.configuration.reserved_percentage = int('0') + self.driver.configuration.max_over_subscription_ratio = int('0') + + @mock.patch(DRIVER + 'LinstorDrbdDriver._get_rsc_path') + def test_drbd_return_drbd_config(self, m_rsc_path): + m_rsc_path.return_value = '/dev/drbd1000' + + val = self.driver._return_drbd_config(CINDER_VOLUME) + + expected = { + 'driver_volume_type': 'local', + 'data': { + "device_path": str(m_rsc_path.return_value) + } + } + self.assertEqual(val, expected) + + @mock.patch(DRIVER + 'LinstorDrbdDriver._get_api_storage_pool_list') + def 
test_drbd_node_in_sp(self, m_sp_list): + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + + val = self.driver._node_in_sp('node_two') + self.assertTrue(val) + + @mock.patch(DRIVER + 'LinstorDrbdDriver._get_volume_stats') + def test_drbd_get_volume_stats(self, m_vol_stats): + m_vol_stats.return_value = VOLUME_STATS_RESP + + val = self.driver.get_volume_stats() + expected = VOLUME_STATS_RESP + expected["storage_protocol"] = 'DRBD' + self.assertEqual(val, expected) + + @mock.patch(DRIVER + 'proto') + @mock.patch(DRIVER + 'linstor') + def test_drbd_check_for_setup_error_pass(self, m_linstor, m_proto): + m_linstor.return_value = True + m_proto.return_value = True + + # No exception should be raised + self.driver.check_for_setup_error() + + @mock.patch(DRIVER + 'LinstorDrbdDriver._get_rsc_path') + @mock.patch(DRIVER + 'LinstorDrbdDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorDrbdDriver._api_rsc_create') + @mock.patch(DRIVER + 'LinstorDrbdDriver._node_in_sp') + def test_drbd_initialize_connection_pass(self, + m_node_sp, + m_rsc_create, + m_check, + m_rsc_path): + m_node_sp.return_value = True + m_rsc_create.return_value = True + m_check.return_value = True + m_rsc_path.return_value = '/dev/drbd1000' + + connector = {} + connector["host"] = 'wp-u16-cinder-dev-lg' + + val = self.driver.initialize_connection(CINDER_VOLUME, connector) + + expected = { + 'driver_volume_type': 'local', + 'data': { + "device_path": str(m_rsc_path.return_value) + } + } + self.assertEqual(val, expected) + + @mock.patch(DRIVER + 'LinstorDrbdDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorDrbdDriver._api_rsc_delete') + @mock.patch(DRIVER + 'LinstorDrbdDriver._node_in_sp') + def test_drbd_terminate_connection_pass(self, + m_node_sp, + m_rsc_create, + m_check): + m_node_sp.return_value = True + m_rsc_create.return_value = True + m_check.return_value = True + + connector = {} + connector["host"] = 'wp-u16-cinder-dev-lg' + + # No exception should be raised + self.driver.terminate_connection(CINDER_VOLUME, connector) diff --git a/cinder/volume/drivers/linstordrv.py b/cinder/volume/drivers/linstordrv.py new file mode 100644 index 00000000000..de2f2464414 --- /dev/null +++ b/cinder/volume/drivers/linstordrv.py @@ -0,0 +1,1088 @@ +# Copyright (c) 2014-2018 LINBIT HA Solutions GmbH +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""This driver connects Cinder to an installed LINSTOR instance. + +See https://docs.linbit.com/docs/users-guide-9.0/#ch-openstack +for more details. 
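
The driver communicates with the LINSTOR controller through the
``linstor`` python client library; API replies are protobuf messages
that are converted to dicts with google.protobuf.json_format.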
+""" + +import socket +import uuid + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import importutils +from oslo_utils import units + +from cinder import exception +from cinder.i18n import _ +from cinder.image import image_utils +from cinder import interface +from cinder.volume import configuration +from cinder.volume import driver + +try: + import google.protobuf.json_format as proto +except ImportError: + proto = None + +try: + import linstor + lin_drv = linstor.Linstor +except ImportError: + linstor = None + lin_drv = None + +# To override these values, update cinder.conf in /etc/cinder/ +linstor_opts = [ + cfg.StrOpt('linstor_default_volume_group_name', + default='drbd-vg', + help='Default Volume Group name for LINSTOR. ' + 'Not Cinder Volume.'), + + cfg.StrOpt('linstor_default_uri', + default='linstor://localhost', + help='Default storage URI for LINSTOR.'), + + cfg.StrOpt('linstor_default_storage_pool_name', + default='DfltStorPool', + help='Default Storage Pool name for LINSTOR.'), + + cfg.FloatOpt('linstor_volume_downsize_factor', + default=4096, + help='Default volume downscale size in KiB = 4 MiB.'), + + cfg.IntOpt('linstor_default_blocksize', + default=4096, + help='Default Block size for Image restoration. ' + 'When using iSCSI transport, this option ' + 'specifies the block size'), + + cfg.BoolOpt('linstor_controller_diskless', + default=True, + help='True means Cinder node is a diskless LINSTOR node.') +] + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.register_opts(linstor_opts, group=configuration.SHARED_CONF_GROUP) + +CINDER_UNKNOWN = 'unknown' +DM_VN_PREFIX = 'CV_' +DM_SN_PREFIX = 'SN_' +LVM = 'Lvm' +LVMTHIN = 'LvmThin' + + +class LinstorBaseDriver(driver.VolumeDriver): + """Cinder driver that uses Linstor for storage.""" + + VERSION = '1.0.0' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = 'LINBIT_LINSTOR_CI' + + def __init__(self, *args, **kwargs): + super(LinstorBaseDriver, self).__init__(*args, **kwargs) + LOG.debug('START: Base Init Linstor') + + self.configuration.append_config_values(linstor_opts) + self.default_pool = self.configuration.safe_get( + 'linstor_default_storage_pool_name') + self.default_uri = self.configuration.safe_get( + 'linstor_default_uri') + self.default_downsize_factor = self.configuration.safe_get( + 'linstor_volume_downsize_factor') + self.default_vg_name = self.configuration.safe_get( + 'linstor_default_volume_group_name') + self.default_blocksize = self.configuration.safe_get( + 'linstor_default_blocksize') + self.diskless = self.configuration.safe_get( + 'linstor_controller_diskless') + self.default_backend_name = self.configuration.safe_get( + 'volume_backend_name') + self.host_name = socket.gethostname() + + def _ping(self): + with lin_drv(self.default_uri) as lin: + return lin.ping() + + def _clean_uuid(self): + """Returns a UUID string, WITHOUT braces.""" + # Some uuid library versions put braces around the result. + # We don't want them, just a plain [0-9a-f-]+ string. + uuid_str = str(uuid.uuid4()) + uuid_str = uuid_str.replace("{", "") + uuid_str = uuid_str.replace("}", "") + return uuid_str + + # LINSTOR works in kiB units; Cinder uses GiB. 
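+    # For example, a 1 GiB Cinder volume becomes
+    # 1 * units.Mi - 4096 = 1044480 KiB in LINSTOR: the configured
+    # downsize factor is subtracted from the converted size.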
+ def _vol_size_to_linstor(self, size): + return int(size * units.Mi - self.default_downsize_factor) + + def _vol_size_to_cinder(self, size): + return int(size / units.Mi) + + def _is_clean_volume_name(self, name, prefix): + try: + if (name.startswith(CONF.volume_name_template % "") and + uuid.UUID(name[7:]) is not None): + return prefix + name[7:] + except ValueError: + return None + + try: + if uuid.UUID(name) is not None: + return prefix + name + except ValueError: + return None + + def _snapshot_name_from_cinder_snapshot(self, snapshot): + sn_name = self._is_clean_volume_name(snapshot['id'], DM_SN_PREFIX) + return sn_name + + def _cinder_volume_name_from_drbd_resource(self, rsc_name): + cinder_volume_name = rsc_name.split(DM_VN_PREFIX)[1] + return cinder_volume_name + + def _drbd_resource_name_from_cinder_snapshot(self, snapshot): + drbd_resource_name = '{}{}'.format(DM_VN_PREFIX, + snapshot['volume_id']) + return drbd_resource_name + + def _drbd_resource_name_from_cinder_volume(self, volume): + drbd_resource_name = '{}{}'.format(DM_VN_PREFIX, volume['id']) + return drbd_resource_name + + def _get_api_resource_list(self): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + response = proto.MessageToDict(lin.resource_list()[0].proto_msg) + return response + + def _get_api_resource_dfn_list(self): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + response = proto.MessageToDict( + lin.resource_dfn_list()[0].proto_msg) + return response + + def _get_api_nodes_list(self): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + response = proto.MessageToDict(lin.node_list()[0].proto_msg) + return response + + def _get_api_storage_pool_dfn_list(self): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + response = proto.MessageToDict( + lin.storage_pool_dfn_list()[0].proto_msg) + return response + + def _get_api_storage_pool_list(self): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + response = proto.MessageToDict( + lin.storage_pool_list()[0].proto_msg) + return response + + def _get_api_volume_extend(self, rsc_target_name, new_size): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + vol_reply = lin.volume_dfn_modify( + rsc_name=rsc_target_name, + volume_nr=0, + size=self._vol_size_to_linstor(new_size)) + return vol_reply + + def _api_snapshot_create(self, node_names, rsc_name, snapshot_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + snap_reply = lin.snapshot_create(node_names=node_names, + rsc_name=rsc_name, + snapshot_name=snapshot_name, + async_msg=False) + return snap_reply + + def _api_snapshot_delete(self, drbd_rsc_name, snap_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + snap_reply = lin.snapshot_delete(rsc_name=drbd_rsc_name, + snapshot_name=snap_name) + return snap_reply + + def _api_rsc_dfn_delete(self, drbd_rsc_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + snap_reply = lin.resource_dfn_delete(drbd_rsc_name) + return snap_reply + + def _api_storage_pool_create(self, + node_name, + storage_pool_name, + storage_driver, + driver_pool_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + sp_reply = lin.storage_pool_create( + node_name=node_name, + storage_pool_name=storage_pool_name, + storage_driver=storage_driver, + 
driver_pool_name=driver_pool_name) + return sp_reply + + def _api_rsc_dfn_create(self, rsc_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + rsc_dfn_reply = lin.resource_dfn_create(rsc_name) + return rsc_dfn_reply + + def _api_volume_dfn_create(self, rsc_name, size): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + vol_dfn_reply = lin.volume_dfn_create( + rsc_name=rsc_name, + storage_pool=self.default_pool, + size=size) + return vol_dfn_reply + + def _api_volume_dfn_set_sp(self, rsc_target_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + snap_reply = lin.volume_dfn_modify( + rsc_name=rsc_target_name, + volume_nr=0, + set_properties={ + 'StorPoolName': self.default_pool + }) + return snap_reply + + def _api_rsc_create(self, rsc_name, node_name, diskless=False): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + if diskless: + storage_pool = None + else: + storage_pool = self.default_pool + + new_rsc = linstor.ResourceData(rsc_name=rsc_name, + node_name=node_name, + storage_pool=storage_pool, + diskless=diskless) + + rsc_reply = lin.resource_create([new_rsc], async_msg=False) + return rsc_reply + + def _api_rsc_delete(self, rsc_name, node_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + rsc_reply = lin.resource_delete(node_name=node_name, + rsc_name=rsc_name) + return rsc_reply + + def _api_volume_dfn_delete(self, rsc_name, volume_nr): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + rsc_reply = lin.volume_dfn_delete(rsc_name=rsc_name, + volume_nr=volume_nr) + return rsc_reply + + def _api_snapshot_volume_dfn_restore(self, + src_rsc_name, + src_snap_name, + new_vol_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + vol_reply = lin.snapshot_volume_definition_restore( + from_resource=src_rsc_name, + from_snapshot=src_snap_name, + to_resource=new_vol_name) + return vol_reply + + def _api_snapshot_resource_restore(self, + nodes, + src_rsc_name, + src_snap_name, + new_vol_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + rsc_reply = lin.snapshot_resource_restore( + node_names=nodes, + from_resource=src_rsc_name, + from_snapshot=src_snap_name, + to_resource=new_vol_name) + return rsc_reply + + def _get_rsc_path(self, rsc_name): + rsc_list_reply = self._get_api_resource_list() + + for rsc in rsc_list_reply['resources']: + if rsc['name'] == rsc_name and rsc['nodeName'] == self.host_name: + for volume in rsc['vlms']: + if volume['vlmNr'] == 0: + return volume['backingDisk'] + + def _get_local_path(self, volume): + try: + full_rsc_name = ( + self._drbd_resource_name_from_cinder_volume(volume)) + + return self._get_rsc_path(full_rsc_name) + + except Exception: + message = _('Local Volume not found.') + raise exception.VolumeBackendAPIException(data=message) + + def _get_spd(self): + # Storage Pool Definition List + spd_list_reply = self._get_api_storage_pool_dfn_list() + + spd_list = [] + for node in spd_list_reply['storPoolDfns']: + spd_item = {} + spd_item['spd_uuid'] = node['uuid'] + spd_item['spd_name'] = node['storPoolName'] + spd_list.append(spd_item) + + return spd_list + + def _get_storage_pool(self): + # Fetch Storage Pool List + sp_list_reply = self._get_api_storage_pool_list() + + # Fetch Resource Definition List + sp_list = [] + + # Separate the diskless nodes + sp_diskless_list 
= [] + node_count = 0 + + if sp_list_reply: + for node in sp_list_reply['storPools']: + if node['storPoolName'] == self.default_pool: + sp_node = {} + sp_node['node_uuid'] = node['nodeUuid'] + sp_node['node_name'] = node['nodeName'] + sp_node['sp_uuid'] = node['storPoolUuid'] + sp_node['sp_name'] = node['storPoolName'] + sp_node['sp_vlms_uuid'] = [] + if 'vlms' in node: + for vlm in node['vlms']: + sp_node['sp_vlms_uuid'].append(vlm['vlmDfnUuid']) + + if 'Diskless' in node['driver']: + diskless = True + sp_node['sp_free'] = -1.0 + sp_node['sp_cap'] = 0.0 + else: + diskless = False + if 'freeSpace' in node: + sp_node['sp_free'] = round( + int(node['freeSpace']['freeCapacity']) / + units.Mi, + 2) + sp_node['sp_cap'] = round( + int(node['freeSpace']['totalCapacity']) / + units.Mi, + 2) + + # Driver + if node['driver'] == "LvmDriver": + sp_node['driver_name'] = LVM + elif node['driver'] == "LvmThinDriver": + sp_node['driver_name'] = LVMTHIN + else: + sp_node['driver_name'] = node['driver'] + + if diskless: + sp_diskless_list.append(sp_node) + else: + sp_list.append(sp_node) + node_count += 1 + + # Add the diskless nodes to the end of the list + if sp_diskless_list: + sp_list.extend(sp_diskless_list) + + return sp_list + + def _get_volume_stats(self): + + data = {} + data["volume_backend_name"] = self.default_backend_name + data["vendor_name"] = 'LINBIT' + data["driver_version"] = self.VERSION + data["pools"] = [] + + sp_data = self._get_storage_pool() + rd_list = self._get_resource_definitions() + + # Total volumes and capacity + num_vols = 0 + for rd in rd_list: + num_vols += 1 + + allocated_sizes_gb = [] + free_capacity_gb = [] + total_capacity_gb = [] + thin_enabled = False + + # Free capacity for Local Node + single_pool = {} + for sp in sp_data: + if 'Diskless' not in sp['driver_name']: + if 'LvmThin' in sp['driver_name']: + thin_enabled = True + if 'sp_cap' in sp: + if sp['sp_cap'] >= 0.0: + total_capacity_gb.append(sp['sp_cap']) + if 'sp_free' in sp: + if sp['sp_free'] >= 0.0: + free_capacity_gb.append(sp['sp_free']) + sp_allocated_size_gb = 0 + for vlm_uuid in sp['sp_vlms_uuid']: + for rd in rd_list: + if 'vlm_dfn_uuid' in rd: + if rd['vlm_dfn_uuid'] == vlm_uuid: + sp_allocated_size_gb += rd['rd_size'] + allocated_sizes_gb.append(sp_allocated_size_gb) + + single_pool["pool_name"] = data["volume_backend_name"] + single_pool["free_capacity_gb"] = min(free_capacity_gb) + single_pool["total_capacity_gb"] = min(total_capacity_gb) + single_pool['provisioned_capacity_gb'] = max(allocated_sizes_gb) + single_pool["reserved_percentage"] = ( + self.configuration.reserved_percentage) + single_pool['thin_provisioning_support'] = thin_enabled + single_pool['thick_provisioning_support'] = not thin_enabled + single_pool['max_over_subscription_ratio'] = ( + self.configuration.max_over_subscription_ratio) + single_pool["location_info"] = self.default_uri + single_pool["total_volumes"] = num_vols + single_pool["filter_function"] = self.get_filter_function() + single_pool["goodness_function"] = self.get_goodness_function() + single_pool["QoS_support"] = False + single_pool["multiattach"] = False + single_pool["backend_state"] = 'up' + + data["pools"].append(single_pool) + + return data + + def _get_resource_definitions(self): + + rd_list = [] + + rd_list_reply = self._get_api_resource_dfn_list() + + # Only if resource definition present + if 'rscDfns' in rd_list_reply: + for node in rd_list_reply['rscDfns']: + + # Count only Cinder volumes + if DM_VN_PREFIX in node['rscName']: + rd_node = {} + 
rd_node['rd_uuid'] = node['rscDfnUuid'] + rd_node['rd_name'] = node['rscName'] + rd_node['rd_port'] = node['rscDfnPort'] + + if 'vlmDfns' in node: + for vol in node['vlmDfns']: + if vol['vlmNr'] == 0: + rd_node['vlm_dfn_uuid'] = vol['vlmDfnUuid'] + rd_node['rd_size'] = round( + float(vol['vlmSize']) / units.Mi, 2) + break + + rd_list.append(rd_node) + + return rd_list + + def _get_snapshot_nodes(self, resource): + """Returns all available resource nodes for snapshot. + + However, it excludes diskless nodes. + """ + + rsc_list_reply = self._get_api_resource_list() # reply in dict + + snap_list = [] + for rsc in rsc_list_reply['resources']: + if rsc['name'] != resource: + continue + + # Diskless nodes are not available for snapshots + diskless = False + if 'rscFlags' in rsc: + if 'DISKLESS' in rsc['rscFlags']: + diskless = True + if not diskless: + snap_list.append(rsc['nodeName']) + + return snap_list + + def _get_linstor_nodes(self): + # Returns all available DRBD nodes + node_list_reply = self._get_api_nodes_list() + + node_list = [] + for node in node_list_reply['nodes']: + node_list.append(node['name']) + + return node_list + + def _get_nodes(self): + # Get Node List + node_list_reply = self._get_api_nodes_list() + + node_list = [] + if node_list_reply: + for node in node_list_reply['nodes']: + node_item = {} + node_item['node_name'] = node['name'] + node_item['node_uuid'] = node['uuid'] + node_item['node_address'] = ( + node['netInterfaces'][0]['address']) + node_list.append(node_item) + + return node_list + + def _check_api_reply(self, api_response, noerror_only=False): + if noerror_only: + # Checks if none of the replies has an error + return lin_drv.all_api_responses_no_error(api_response) + else: + # Check if all replies are success + return lin_drv.all_api_responses_success(api_response) + + def _copy_vol_to_image(self, context, image_service, image_meta, rsc_path): + + return image_utils.upload_volume(context, + image_service, + image_meta, + rsc_path) + + # + # Snapshot + # + def create_snapshot(self, snapshot): + snap_name = self._snapshot_name_from_cinder_snapshot(snapshot) + drbd_rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) + node_names = self._get_snapshot_nodes(drbd_rsc_name) + + snap_reply = self._api_snapshot_create(node_names=node_names, + rsc_name=drbd_rsc_name, + snapshot_name=snap_name) + + if not self._check_api_reply(snap_reply, noerror_only=True): + msg = 'ERROR creating a LINSTOR snapshot {}'.format(snap_name) + LOG.error(msg) + raise exception.VolumeBackendAPIException(msg) + + def delete_snapshot(self, snapshot): + snap_name = self._snapshot_name_from_cinder_snapshot(snapshot) + drbd_rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) + + snap_reply = self._api_snapshot_delete(drbd_rsc_name, snap_name) + + if not self._check_api_reply(snap_reply, noerror_only=True): + msg = 'ERROR deleting a LINSTOR snapshot {}'.format(snap_name) + LOG.error(msg) + raise exception.VolumeBackendAPIException(msg) + + # Delete RD if no other RSC are found + if not self._get_snapshot_nodes(drbd_rsc_name): + self._api_rsc_dfn_delete(drbd_rsc_name) + + def create_volume_from_snapshot(self, volume, snapshot): + src_rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) + src_snap_name = self._snapshot_name_from_cinder_snapshot(snapshot) + new_vol_name = self._drbd_resource_name_from_cinder_volume(volume) + + # New RD + rsc_reply = self._api_rsc_dfn_create(new_vol_name) + + if not self._check_api_reply(rsc_reply): + msg = _('Error on 
creating LINSTOR Resource Definition') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # New VD from Snap + reply = self._api_snapshot_volume_dfn_restore(src_rsc_name, + src_snap_name, + new_vol_name) + if not self._check_api_reply(reply, noerror_only=True): + msg = _('Error on restoring LINSTOR Volume Definition') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Set StorPoolName property on VD + reply = self._api_volume_dfn_set_sp(new_vol_name) + if not self._check_api_reply(reply): + msg = _('Error on restoring LINSTOR Volume StorPoolName property') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # New RSC from Snap + # Assumes restoring to all the nodes containing the storage pool + # unless diskless + nodes = [] + for node in self._get_storage_pool(): + + if 'Diskless' in node['driver_name']: + continue + + # Filter out controller node if LINSTOR is diskless + if self.diskless and node['node_name'] == self.host_name: + continue + else: + nodes.append(node['node_name']) + + reply = self._api_snapshot_resource_restore(nodes, + src_rsc_name, + src_snap_name, + new_vol_name) + if not self._check_api_reply(reply, noerror_only=True): + msg = _('Error on restoring LINSTOR resources') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Manually add the controller node as a resource if diskless + if self.diskless: + reply = self._api_rsc_create(rsc_name=new_vol_name, + node_name=self.host_name, + diskless=self.diskless) + + # Upsize if larger volume than original snapshot + src_rsc_size = int(snapshot['volume_size']) + new_vol_size = int(volume['size']) + + if new_vol_size > src_rsc_size: + + upsize_target_name = self._is_clean_volume_name(volume['id'], + DM_VN_PREFIX) + reply = self._get_api_volume_extend( + rsc_target_name=upsize_target_name, + new_size=new_vol_size) + + if not self._check_api_reply(reply, noerror_only=True): + # Delete failed volume + failed_volume = [] + failed_volume['id'] = volume['id'] + self.delete_volume(failed_volume) + + msg = _('Error on extending LINSTOR resource size') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def create_volume(self, volume): + # Check for Storage Pool List + sp_data = self._get_storage_pool() + rsc_size = 1 + rsc_size = volume['size'] + + # No existing Storage Pools found + if not sp_data: + + # Check for Nodes + node_list = self._get_nodes() + + if not node_list: + msg = _('No LINSTOR resource nodes available / configured') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Create Storage Pool (definition is implicit) + spd_list = self._get_spd() + + if spd_list: + spd_name = spd_list[0]['spd_name'] + + for node in node_list: + + node_driver = None + for sp in sp_data: + if sp['node_name'] == node['node_name']: + node_driver = sp['driver_name'] + + sp_reply = self._api_storage_pool_create( + node_name=node['node_name'], + storage_pool_name=spd_name, + storage_driver=node_driver, + driver_pool_name=self.default_vg_name) + + if not self._check_api_reply(sp_reply): + msg = _('Could not create a LINSTOR storage pool') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # # Check for RD + # If Retyping from another volume, use parent/origin uuid + # as a name source + if (volume['migration_status'] is not None and + str(volume['migration_status']).find('success') == -1): + src_name = str(volume['migration_status']).split(':')[1] + rsc_name = 
+    def delete_volume(self, volume):
+        drbd_rsc_name = self._drbd_resource_name_from_cinder_volume(volume)
+        rsc_list_reply = self._get_api_resource_list()
+
+        if rsc_list_reply:
+            # Delete Resources
+            for rsc in rsc_list_reply['resources']:
+                if rsc['name'] != drbd_rsc_name:
+                    continue
+
+                rsc_reply = self._api_rsc_delete(
+                    node_name=rsc['nodeName'],
+                    rsc_name=drbd_rsc_name)
+                if not self._check_api_reply(rsc_reply, noerror_only=True):
+                    msg = _('Error deleting a LINSTOR resource')
+                    LOG.error(msg)
+                    raise exception.VolumeBackendAPIException(data=msg)
+
+            # Delete VD
+            vd_reply = self._api_volume_dfn_delete(drbd_rsc_name, 0)
+            if not self._check_api_reply(vd_reply):
+                msg = _('Error deleting a LINSTOR volume definition')
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            # Delete RD.
+            # This is expected to fail if a snapshot of the resource
+            # still exists.
+            self._api_rsc_dfn_delete(drbd_rsc_name)
+
+        return True
+
+    def extend_volume(self, volume, new_size):
+        rsc_target_name = self._is_clean_volume_name(volume['id'],
+                                                     DM_VN_PREFIX)
+
+        extend_reply = self._get_api_volume_extend(rsc_target_name, new_size)
+
+        if not self._check_api_reply(extend_reply, noerror_only=True):
+            msg = _('Error extending a LINSTOR volume')
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
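+    # Cloning has no dedicated LINSTOR primitive here: it is a temporary
+    # snapshot of the source volume, restored into the new volume and
+    # deleted again afterwards.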
+    def create_cloned_volume(self, volume, src_vref):
+        temp_id = self._clean_uuid()
+        snapshot = {}
+        snapshot['id'] = temp_id
+        snapshot['volume_id'] = src_vref['id']
+        snapshot['volume_size'] = src_vref['size']
+
+        self.create_snapshot(snapshot)
+
+        self.create_volume_from_snapshot(volume, snapshot)
+
+        self.delete_snapshot(snapshot)
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        # self.create_volume(volume) has already been called by Cinder
+        # and succeeded; its return values still need checking.
+        full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume)
+
+        # This writes the image onto the LINSTOR volume, which was
+        # created at the original size.
+        image_utils.fetch_to_raw(context,
+                                 image_service,
+                                 image_id,
+                                 str(self._get_rsc_path(full_rsc_name)),
+                                 self.default_blocksize,
+                                 size=volume['size'])
+        return {}
+
+    def copy_volume_to_image(self, context, volume, image_service,
+                             image_meta):
+        full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume)
+        rsc_path = str(self._get_rsc_path(full_rsc_name))
+
+        self._copy_vol_to_image(context,
+                                image_service,
+                                image_meta,
+                                rsc_path)
+        return {}
+
+    # Not supported currently
+    def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
+        return (False, None)
+
+    def check_for_setup_error(self):
+        msg = None
+        if linstor is None:
+            msg = _('LINSTOR python package not found')
+
+        if proto is None:
+            msg = _('Protobuf python package not found')
+
+        if msg is not None:
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    def create_export(self, context, volume, connector):
+        pass
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def initialize_connection(self, volume, connector, **kwargs):
+        pass
+
+    def remove_export(self, context, volume):
+        pass
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        pass
+
+
+# Class with iSCSI interface methods
+@interface.volumedriver
+class LinstorIscsiDriver(LinstorBaseDriver):
+    """Cinder iSCSI driver that uses LINSTOR for storage."""
+
+    def __init__(self, *args, **kwargs):
+        super(LinstorIscsiDriver, self).__init__(*args, **kwargs)
+
+        # iSCSI target_helper
+        if 'h_name' in kwargs:
+            self.helper_name = kwargs.get('h_name')
+            self.helper_driver = self.helper_name
+            self.target_driver = None
+        else:
+            self.helper_name = self.configuration.safe_get('iscsi_helper')
+            self.helper_driver = self.target_mapping[self.helper_name]
+            self.target_driver = importutils.import_object(
+                self.helper_driver,
+                configuration=self.configuration,
+                db=self.db,
+                executor=self._execute)
+
+        LOG.info('START: LINSTOR DRBD driver %s', self.helper_name)
+
+    def get_volume_stats(self, refresh=False):
+        data = self._get_volume_stats()
+        data["storage_protocol"] = 'iSCSI'
+        data["pools"][0]["location_info"] = (
+            'LinstorIscsiDriver:' + data["pools"][0]["location_info"])
+
+        return data
+
+    def ensure_export(self, context, volume):
+        volume_path = self._get_local_path(volume)
+
+        return self.target_driver.ensure_export(
+            context,
+            volume,
+            volume_path)
+
+    def create_export(self, context, volume, connector):
+        volume_path = self._get_local_path(volume)
+
+        export_info = self.target_driver.create_export(
+            context,
+            volume,
+            volume_path)
+
+        return {'provider_location': export_info['location'],
+                'provider_auth': export_info['auth'], }
+
+    def remove_export(self, context, volume):
+        return self.target_driver.remove_export(context, volume)
+
+    def initialize_connection(self, volume, connector, **kwargs):
+        return self.target_driver.initialize_connection(volume, connector)
+
+    def validate_connector(self, connector):
+        return self.target_driver.validate_connector(connector)
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        return self.target_driver.terminate_connection(volume,
+                                                       connector,
+                                                       **kwargs)
+
+
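+# Unlike the iSCSI driver above, the DRBD transport hands the local DRBD
+# device path straight to the consumer, so no separate target driver is
+# involved.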
storage.""" + + def __init__(self, *args, **kwargs): + super(LinstorDrbdDriver, self).__init__(*args, **kwargs) + + def _return_drbd_config(self, volume): + full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) + rsc_path = self._get_rsc_path(full_rsc_name) + return { + 'driver_volume_type': 'local', + 'data': { + "device_path": str(rsc_path) + } + } + + def _node_in_sp(self, node_name): + for pool in self._get_storage_pool(): + if pool['node_name'] == node_name: + return True + return False + + def get_volume_stats(self, refresh=False): + data = self._get_volume_stats() + data["storage_protocol"] = 'DRBD' + data["pools"][0]["location_info"] = 'LinstorDrbdDriver:{}'.format( + data["pools"][0]["location_info"]) + + return data + + def initialize_connection(self, volume, connector, **kwargs): + node_name = connector['host'] + if not self._node_in_sp(connector['host']): + + full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) + rsc_reply = self._api_rsc_create(rsc_name=full_rsc_name, + node_name=node_name, + diskless=True) + if not self._check_api_reply(rsc_reply, noerror_only=True): + msg = _('Error on creating LINSTOR Resource') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return self._return_drbd_config(volume) + + def terminate_connection(self, volume, connector, **kwargs): + if connector: + node_name = connector['host'] + if not self._node_in_sp(connector['host']): + rsc_name = self._drbd_resource_name_from_cinder_volume(volume) + rsc_reply = self._api_rsc_delete(rsc_name=rsc_name, + node_name=node_name) + if not self._check_api_reply(rsc_reply, noerror_only=True): + msg = _('Error on deleting LINSTOR Resource') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def create_export(self, context, volume, connector): + + return self._return_drbd_config(volume) + + def ensure_export(self, context, volume): + + return self._return_drbd_config(volume) + + def remove_export(self, context, volume): + pass diff --git a/doc/source/configuration/block-storage/drivers/linstor-driver.rst b/doc/source/configuration/block-storage/drivers/linstor-driver.rst new file mode 100644 index 00000000000..c628c78a593 --- /dev/null +++ b/doc/source/configuration/block-storage/drivers/linstor-driver.rst @@ -0,0 +1,29 @@ +============== +LINSTOR driver +============== + +The LINSTOR driver allows Cinder to use DRBD/LINSTOR instances. + +Configuration +~~~~~~~~~~~~~ + +Set the following option in the ``cinder.conf`` file for the DRBD transport: + +.. code-block:: ini + + volume_driver = cinder.volume.drivers.linstordrv.LinstorDrbdDriver + +Or use the following for iSCSI transport: + +.. code-block:: ini + + volume_driver = cinder.volume.drivers.linstordrv.LinstorIscsiDriver + + +The following table contains the configuration options supported by the +LINSTOR driver: + +.. 
+The following table lists the configuration options supported by the
+LINSTOR driver:
+
+.. config-table::
+   :config-target: LINSTOR
+
+   cinder.volume.drivers.linstordrv
diff --git a/doc/source/configuration/block-storage/volume-drivers.rst b/doc/source/configuration/block-storage/volume-drivers.rst
index 313034de689..52374fa2a70 100644
--- a/doc/source/configuration/block-storage/volume-drivers.rst
+++ b/doc/source/configuration/block-storage/volume-drivers.rst
@@ -39,8 +39,8 @@ Driver Configuration Reference
    drivers/dell-emc-vnx-driver
    drivers/dell-emc-vmax-driver
    drivers/dell-emc-vxflex-driver
-   drivers/emc-xtremio-driver
    drivers/drbd-driver
+   drivers/emc-xtremio-driver
    drivers/fujitsu-eternus-dx-driver
    drivers/hpe-3par-driver
    drivers/hpe-lefthand-driver
@@ -54,6 +54,7 @@ Driver Configuration Reference
    drivers/inspur-instorage-driver
    drivers/kaminario-driver
    drivers/lenovo-driver
+   drivers/linstor-driver
    drivers/nec-storage-m-series-driver
    drivers/netapp-volume-driver
    drivers/nimble-volume-driver
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index 6cfddc84eb2..bfc870371c7 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -111,6 +111,9 @@ title=Lenovo Storage Driver (FC, iSCSI)
 [driver.linbit_drbd]
 title=LinBit DRDB Driver (DRBD)
 
+[driver.linbit_linstor]
+title=LINBIT DRBD/LINSTOR Driver (DRBD)
+
 [driver.lvm]
 title=Logical Volume Manager (LVM) Reference Driver (iSCSI)
@@ -231,6 +234,7 @@ driver.inspur=complete
 driver.kaminario=complete
 driver.lenovo=complete
 driver.linbit_drbd=complete
+driver.linbit_linstor=complete
 driver.lvm=complete
 driver.nec=complete
 driver.netapp_ontap=complete
@@ -295,6 +299,7 @@ driver.inspur=complete
 driver.kaminario=complete
 driver.lenovo=complete
 driver.linbit_drbd=complete
+driver.linbit_linstor=complete
 driver.lvm=complete
 driver.nec=complete
 driver.netapp_ontap=missing
@@ -359,6 +364,7 @@ driver.inspur=complete
 driver.kaminario=missing
 driver.lenovo=missing
 driver.linbit_drbd=missing
+driver.linbit_linstor=missing
 driver.lvm=missing
 driver.nec=complete
 driver.netapp_ontap=missing
@@ -424,6 +430,7 @@ driver.inspur=complete
 driver.kaminario=missing
 driver.lenovo=missing
 driver.linbit_drbd=missing
+driver.linbit_linstor=missing
 driver.lvm=missing
 driver.nec=complete
 driver.netapp_ontap=complete
@@ -490,6 +497,7 @@ driver.inspur=complete
 driver.kaminario=complete
 driver.lenovo=missing
 driver.linbit_drbd=missing
+driver.linbit_linstor=missing
 driver.lvm=missing
 driver.nec=missing
 driver.netapp_ontap=complete
@@ -557,6 +565,7 @@ driver.inspur=complete
 driver.kaminario=missing
 driver.lenovo=missing
 driver.linbit_drbd=missing
+driver.linbit_linstor=missing
 driver.lvm=missing
 driver.nec=missing
 driver.netapp_ontap=complete
@@ -623,6 +632,7 @@ driver.inspur=missing
 driver.kaminario=complete
 driver.lenovo=missing
 driver.linbit_drbd=missing
+driver.linbit_linstor=missing
 driver.lvm=complete
 driver.nec=missing
 driver.netapp_ontap=complete
@@ -690,6 +700,7 @@ driver.inspur=missing
 driver.kaminario=missing
 driver.lenovo=missing
 driver.linbit_drbd=missing
+driver.linbit_linstor=missing
 driver.lvm=missing
 driver.nec=missing
 driver.netapp_ontap=missing
@@ -757,6 +768,7 @@ driver.inspur=missing
 driver.kaminario=missing
 driver.lenovo=missing
 driver.linbit_drbd=missing
+driver.linbit_linstor=missing
 driver.lvm=complete
 driver.nec=missing
 driver.netapp_ontap=complete
diff --git a/driver-requirements.txt b/driver-requirements.txt
index 8adca4296b9..362ec805452 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -27,6 +27,11 @@ pywbem>=0.7.0 # LGPLv2.1+
 # IBM XIV
 pyxcli>=1.1.5 # Apache-2.0
 
+# LINSTOR
+protobuf>=3.6.1 # BSD
+eventlet>=0.24.1 # MIT
+python-linstor>=0.6.2 # GPLv3
+
 # RBD
 rados # LGPLv2.1
 rbd # LGPLv2.1
diff --git a/releasenotes/notes/drbd-linstor-volume-driver-20273a9ad3783cf5.yaml b/releasenotes/notes/drbd-linstor-volume-driver-20273a9ad3783cf5.yaml
new file mode 100644
index 00000000000..bb3750a094a
--- /dev/null
+++ b/releasenotes/notes/drbd-linstor-volume-driver-20273a9ad3783cf5.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added a new Cinder volume driver for LINBIT LINSTOR resources.