diff --git a/cinder/opts.py b/cinder/opts.py index e31e1093d1c..157dc1ea46d 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -130,6 +130,8 @@ from cinder.volume.drivers.inspur.instorage import instorage_iscsi as \ cinder_volume_drivers_inspur_instorage_instorageiscsi from cinder.volume.drivers.kaminario import kaminario_common as \ cinder_volume_drivers_kaminario_kaminariocommon +from cinder.volume.drivers.kioxia import kumoscale as \ + cinder_volume_drivers_kioxia_kumoscale from cinder.volume.drivers.lenovo import lenovo_common as \ cinder_volume_drivers_lenovo_lenovocommon from cinder.volume.drivers import linstordrv as \ @@ -278,6 +280,7 @@ def list_opts(): instorage_mcs_opts, cinder_volume_drivers_inspur_instorage_instorageiscsi. instorage_mcs_iscsi_opts, + cinder_volume_drivers_kioxia_kumoscale.KUMOSCALE_OPTS, cinder_volume_drivers_open_e_options.jdss_connection_opts, cinder_volume_drivers_open_e_options.jdss_iscsi_opts, cinder_volume_drivers_open_e_options.jdss_volume_opts, diff --git a/cinder/tests/unit/volume/drivers/test_kioxia.py b/cinder/tests/unit/volume/drivers/test_kioxia.py new file mode 100644 index 00000000000..7910a1a9c17 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/test_kioxia.py @@ -0,0 +1,768 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import unittest +from unittest import mock + +from oslo_utils.secretutils import md5 + +from cinder import exception +from cinder.tests.unit import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.kioxia import entities +from cinder.volume.drivers.kioxia import kumoscale as kioxia +from cinder.volume.drivers.kioxia import rest_client + +VOL_BACKEND_NAME = 'kioxia_kumoscale_1' +VOL_NAME = 'volume-c2fd04e3-320e-44eb-b-2' +VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba' +VOL_SIZE = 10 +VOL_PROTOCOL = 'NVMeoF' +SNAP_UUID = 'c9ef9d49-0d26-44cb-b609-0b8bd2d3db77' +CONN_UUID = '34206309-3733-4cc6-a7d5-9d4dbbe377da' +CONN_HOST_NAME = 'devstack' +CONN_NQN = 'nqn.2014-08.org.nvmexpress:uuid:' \ + 'beaae2de-3a97-4be1-a739-6ac4bc5bf138' +success_prov_response = entities.ProvisionerResponse(None, None, "Success", + "Success") +fail_prov_response = entities.ProvisionerResponse(None, None, "Failure", + "Failure") +prov_backend1 = entities.Backend(None, None, None, None, 'dummy-pid-1') +prov_backend2 = entities.Backend(None, None, None, None, 'dummy-pid-2') +prov_location1 = entities.Location(VOL_UUID, prov_backend1) +prov_location2 = entities.Location(VOL_UUID, prov_backend2) +prov_volume = entities.VolumeProv(VOL_UUID, None, None, None, + None, None, None, None, None, None, + None, True, None, [prov_location1, + prov_location2]) +prov_volumes_response = entities.ProvisionerResponse([prov_volume]) +no_entities_prov_response = entities.ProvisionerResponse([], None, "Success") + + +class KioxiaVolumeTestCase(test.TestCase): + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') + @mock.patch.object(kioxia.KumoScaleBaseVolumeDriver, '_get_kumoscale') + def setUp(self, mock_kumoscale, mock_get_info): + mock_get_info.return_value = success_prov_response + mock_kumoscale.return_value = \ + rest_client.KioxiaProvisioner(['1.2.3.4'], 'cert', 'token') + super(KioxiaVolumeTestCase, self).setUp() + self.cfg = mock.Mock(spec=conf.Configuration) + self.cfg.volume_backend_name = VOL_BACKEND_NAME + self.cfg.url = 'dummyURL' + self.cfg.token = 'dummy.dummy.Rf-dummy-dummy-lE' + self.cfg.cafile = 'dummy' + self.cfg.num_replicas = 1 + self.cfg.block_size = 512 + self.cfg.max_iops_per_gb = 1000 + self.cfg.desired_iops_per_gb = 1000 + self.cfg.max_bw_per_gb = 1000 + self.cfg.desired_bw_per_gb = 1000 + self.cfg.same_rack_allowed = False + self.cfg.max_replica_down_time = 5 + self.cfg.span_allowed = True + self.cfg.vol_reserved_space_percentage = 20 + self.cfg.provisioning_type = 'THIN' + self.driver = kioxia.KumoScaleBaseVolumeDriver(configuration=self.cfg) + self.driver.configuration.get = lambda *args, **kwargs: {} + self.driver.num_replicas = 2 + self.expected_stats = { + 'volume_backend_name': VOL_BACKEND_NAME, + 'vendor_name': 'KIOXIA', + 'driver_version': self.driver.VERSION, + 'storage_protocol': 'NVMeOF', + 'consistencygroup_support': False, + 'thin_provisioning_support': True, + 'multiattach': False, + 'total_capacity_gb': 1000, + 'free_capacity_gb': 600 + } + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') + def test_get_kumoscale(self, mock_get_info): + mock_get_info.return_value = success_prov_response + result = self.driver._get_kumoscale('https://1.2.3.4:8090', 'token', + 'cert') + self.assertEqual(result.mgmt_ips, ['1.2.3.4']) + self.assertEqual(result.port, '8090') + self.assertEqual(result.token, 'token') + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') + def test_volume_create_success(self, mock_create_volume): + testvol = 
_stub_volume() + mock_create_volume.return_value = success_prov_response + result = self.driver.create_volume(testvol) + args, kwargs = mock_create_volume.call_args + mock_call = args[0] + self.assertEqual(mock_call.alias, testvol['name'][:27]) + self.assertEqual(mock_call.capacity, testvol['size']) + self.assertEqual(mock_call.uuid, testvol['id']) + self.assertEqual(mock_call.protocol, VOL_PROTOCOL) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') + def test_volume_create_failure(self, mock_create_volume): + testvol = _stub_volume() + mock_create_volume.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, testvol) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') + def test_volume_create_exception(self, mock_create_volume): + testvol = _stub_volume() + mock_create_volume.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, testvol) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') + def test_delete_volume_success(self, mock_delete_volume): + testvol = _stub_volume() + mock_delete_volume.return_value = success_prov_response + result = self.driver.delete_volume(testvol) + mock_delete_volume.assert_any_call(testvol['id']) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') + def test_delete_volume_failure(self, mock_delete_volume): + testvol = _stub_volume() + mock_delete_volume.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_volume, testvol) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') + def test_delete_volume_exception(self, mock_delete_volume): + testvol = _stub_volume() + mock_delete_volume.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_volume, testvol) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target1 = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + prov_targets_response = entities.ProvisionerResponse([prov_target1]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + result = self.driver.initialize_connection(testvol, testconn) + mock_host_probe.assert_any_call(testconn['nqn'], + testconn['uuid'], + testconn['host'], + 'Agent', 'cinder-driver-0.1', 30) + mock_publish.assert_any_call(testconn['uuid'], testvol['id']) + mock_get_volumes_by_uuid.assert_any_call(testvol['id']) + mock_get_targets.assert_any_call(testconn['uuid'], testvol['id']) + mock_get_backend_by_id.assert_any_call('dummy-pid-1') + expected_replica = {'portals': 
[('1.2.3.4', '4420', 'TCP')], + 'target_nqn': 'target.nqn', + 'vol_uuid': testvol['id']} + expected_data = { + 'vol_uuid': testvol['id'], + 'alias': testvol['name'], + 'writable': True, + 'volume_replicas': [expected_replica] + } + expected_result = { + 'driver_volume_type': 'nvmeof', + 'data': expected_data + } + self.assertDictEqual(result, expected_result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_host_probe_failure(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = fail_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_host_probe_exception( + self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, + mock_get_targets, mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.side_effect = Exception() + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_publish_failure(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + prov_targets_response = 
entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = fail_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_publish_exception(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.side_effect = Exception() + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_volumes_failure(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = fail_prov_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_no_volumes(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + 
backend = BackendEntity([prov_portal]) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = no_entities_prov_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_volumes_exception(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.side_effect = Exception() + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_targets_failure(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = fail_prov_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_no_targets(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + mock_publish.return_value = success_prov_response + 
mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = no_entities_prov_response + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_targets_exception(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') + backend = BackendEntity([prov_portal]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.side_effect = Exception() + mock_get_backend_by_id.return_value = \ + entities.ProvisionerResponse([backend]) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_backend_failure(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_no_backend(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.return_value = no_entities_prov_response + 
self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') + @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') + @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') + def test_initialize_connection_backend_exception(self, mock_host_probe, + mock_publish, + mock_get_volumes_by_uuid, + mock_get_targets, + mock_get_backend_by_id): + testvol = _stub_volume() + testconn = _stub_connector() + prov_target = TargetEntity('target.nqn', prov_backend1) + prov_targets_response = entities.ProvisionerResponse([prov_target]) + mock_publish.return_value = success_prov_response + mock_host_probe.return_value = success_prov_response + mock_get_volumes_by_uuid.return_value = prov_volumes_response + mock_get_targets.return_value = prov_targets_response + mock_get_backend_by_id.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') + def test_terminate_connection(self, mock_unpublish): + testvol = _stub_volume() + testconn = _stub_connector() + mock_unpublish.return_value = success_prov_response + result = self.driver.terminate_connection(testvol, testconn) + mock_unpublish.assert_any_call(testconn['uuid'], testvol['id']) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') + def test_terminate_connection_unpublish_failure(self, mock_unpublish): + testvol = _stub_volume() + testconn = _stub_connector() + mock_unpublish.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.terminate_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') + def test_terminate_connection_unpublish_exception(self, mock_unpublish): + testvol = _stub_volume() + testconn = _stub_connector() + mock_unpublish.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.terminate_connection, testvol, testconn) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') + def test_get_volume_stats(self, mock_get_tenants): + tenant = TenantEntity(1000, 400) + mock_get_tenants.return_value = entities.ProvisionerResponse([tenant]) + result = self.driver.get_volume_stats(True) + mock_get_tenants.assert_any_call() + self.assertDictEqual(result, self.expected_stats) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') + def test_get_volume_stats_tenants_failure(self, mock_get_tenants): + mock_get_tenants.return_value = fail_prov_response + self.expected_stats['total_capacity_gb'] = 'unknown' + self.expected_stats['free_capacity_gb'] = 'unknown' + self.assertDictEqual( + self.driver.get_volume_stats(True), self.expected_stats) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') + def test_get_volume_stats_no_tenants(self, mock_get_tenants): + mock_get_tenants.return_value = no_entities_prov_response + self.expected_stats['total_capacity_gb'] = 'unknown' + self.expected_stats['free_capacity_gb'] = 'unknown' + self.assertDictEqual( + self.driver.get_volume_stats(True), self.expected_stats) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') + def 
test_get_volume_stats_tenants_exception(self, mock_get_tenants): + mock_get_tenants.side_effect = Exception() + self.expected_stats['total_capacity_gb'] = 'unknown' + self.expected_stats['free_capacity_gb'] = 'unknown' + self.assertDictEqual( + self.driver.get_volume_stats(True), self.expected_stats) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') + def test_create_snapshot_success(self, mock_create_snapshot): + testsnap = _stub_snapshot() + mock_create_snapshot.return_value = success_prov_response + result = self.driver.create_snapshot(testsnap) + args, kwargs = mock_create_snapshot.call_args + mock_call = args[0] + self.assertEqual(mock_call.alias, testsnap['name']) + self.assertEqual(mock_call.volumeID, testsnap['volume_id']) + self.assertEqual(mock_call.snapshotID, testsnap['id']) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') + def test_create_snapshot_failure(self, mock_create_snapshot): + testsnap = _stub_snapshot() + mock_create_snapshot.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, testsnap) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') + def test_create_snapshot_exception(self, mock_create_snapshot): + testsnap = _stub_snapshot() + mock_create_snapshot.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, testsnap) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') + def test_delete_snapshot_success(self, mock_delete_snapshot): + testsnap = _stub_snapshot() + mock_delete_snapshot.return_value = success_prov_response + result = self.driver.delete_snapshot(testsnap) + mock_delete_snapshot.assert_any_call(testsnap['id']) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') + def test_delete_snapshot_failure(self, mock_delete_snapshot): + testsnap = _stub_snapshot() + mock_delete_snapshot.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_snapshot, testsnap) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') + def test_delete_snapshot_exception(self, mock_delete_snapshot): + testsnap = _stub_snapshot() + mock_delete_snapshot.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_snapshot, testsnap) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') + def test_create_volume_from_snapshot_success(self, + mock_create_snapshot_volume): + testsnap = _stub_snapshot() + testvol = _stub_volume() + mock_create_snapshot_volume.return_value = success_prov_response + result = self.driver.create_volume_from_snapshot(testvol, testsnap) + args, kwargs = mock_create_snapshot_volume.call_args + mock_call = args[0] + self.assertEqual(mock_call.alias, testvol['name']) + self.assertEqual(mock_call.volumeID, testsnap['volume_id']) + self.assertEqual(mock_call.snapshotID, testsnap['id']) + self.assertEqual(mock_call.protocol, VOL_PROTOCOL) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') + def test_create_volume_from_snapshot_failure(self, + mock_create_snapshot_volume): + testsnap = _stub_snapshot() + testvol = _stub_volume() + mock_create_snapshot_volume.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + 
self.driver.create_volume_from_snapshot, testvol, + testsnap) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') + def test_create_volume_from_snapshot_exception( + self, mock_create_snapshot_volume): + testsnap = _stub_snapshot() + testvol = _stub_volume() + mock_create_snapshot_volume.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, testvol, + testsnap) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') + def test_extend_volume_success(self, mock_expand_volume): + testvol = _stub_volume() + mock_expand_volume.return_value = success_prov_response + new_size = VOL_SIZE + 2 + result = self.driver.extend_volume(testvol, new_size) + mock_expand_volume.assert_any_call(new_size, testvol['id']) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') + def test_extend_volume_failure(self, mock_expand_volume): + testvol = _stub_volume() + mock_expand_volume.return_value = fail_prov_response + new_size = VOL_SIZE + 2 + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, testvol, new_size) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') + def test_extend_volume_exception(self, mock_expand_volume): + testvol = _stub_volume() + mock_expand_volume.side_effect = Exception() + new_size = VOL_SIZE + 2 + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, testvol, new_size) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') + def test_create_cloned_volume_success(self, mock_clone_volume): + testvol = _stub_volume() + mock_clone_volume.return_value = success_prov_response + result = self.driver.create_cloned_volume(testvol, testvol) + args, kwargs = mock_clone_volume.call_args + mock_call = args[0] + self.assertEqual(mock_call.alias, testvol['name']) + self.assertEqual(mock_call.capacity, testvol['size']) + self.assertEqual(mock_call.volumeId, testvol['id']) + self.assertEqual(mock_call.sourceVolumeId, testvol['id']) + self.assertIsNone(result) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') + def test_create_cloned_volume_failure(self, mock_clone_volume): + testvol = _stub_volume() + mock_clone_volume.return_value = fail_prov_response + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, testvol, testvol) + + @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') + def test_create_cloned_volume_exception(self, mock_clone_volume): + testvol = _stub_volume() + mock_clone_volume.side_effect = Exception() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, testvol, testvol) + + def test_convert_host_name(self): + name = 'ks-node3-000c2960a794-000c2960a797' + result = self.driver._convert_host_name(name) + expected = md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() + self.assertEqual(result, expected) + + def test_create_export(self): + result = self.driver.create_export(None, None, None) + self.assertIsNone(result) + + def test_ensure_export(self): + result = self.driver.ensure_export(None, None) + self.assertIsNone(result) + + def test_remove_export(self): + result = self.driver.remove_export(None, None) + self.assertIsNone(result) + + def test_check_for_setup_error(self): + result = self.driver.check_for_setup_error() + self.assertIsNone(result) + + +def _stub_volume(*args, **kwargs): + volume = {'id': 
kwargs.get('id', VOL_UUID), + 'name': kwargs.get('name', VOL_NAME), + 'project_id': "test-project", + 'display_name': kwargs.get('display_name', VOL_NAME), + 'size': kwargs.get('size', VOL_SIZE), + 'provider_location': kwargs.get('provider_location', None), + 'volume_type_id': kwargs.get('volume_type_id', None)} + return volume + + +def _stub_connector(*args, **kwargs): + connector = {'uuid': kwargs.get('uuid', CONN_UUID), + 'nqn': kwargs.get('nqn', CONN_NQN), + 'host': kwargs.get('host', CONN_HOST_NAME)} + return connector + + +def _stub_snapshot(*args, **kwargs): + volume = {'id': kwargs.get('id', SNAP_UUID), + 'name': kwargs.get('name', 'snap2000'), + 'volume_id': kwargs.get('id', VOL_UUID)} + return volume + + +class TenantEntity: + def __init__(self, capacity, consumed): + self.tenantId = '0' + self.capacity = capacity + self.consumedCapacity = consumed + + +class TargetEntity: + def __init__(self, name, backend): + self.targetName = name + self.backend = backend + + +class BackendEntity: + def __init__(self, portals): + self.portals = portals + + +class PortalEntity: + def __init__(self, ip, port, transport): + self.ip = ip + self.port = port + self.transport = transport + + +if __name__ == '__main__': + unittest.main() diff --git a/cinder/volume/drivers/kioxia/entities.py b/cinder/volume/drivers/kioxia/entities.py new file mode 100644 index 00000000000..f67d5328df7 --- /dev/null +++ b/cinder/volume/drivers/kioxia/entities.py @@ -0,0 +1,467 @@ +# (c) Copyright Kioxia Corporation 2021 All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json + + +class JsonClass(object): + + def __init__(self): + pass + + def to_json(self): + return json.dumps( + self, + default=lambda o: o.__dict__, + sort_keys=True, + indent=4) + + def __str__(self): + return ', '.join(['{key}={value}'.format( + key=key, value=self.__dict__.get(key)) for key in self.__dict__]) + + def __getattr__(self, item): + return "N/A" + + def set_items(self, json_object): + json_keys = json_object.keys() + for key in json_keys: + if not isinstance(json_object[key], 'dict'): + self.__dict__[key] = json_object[key] + + +class ProvisionerResponse(JsonClass): + # + # Provisioner response data + # + + def __init__( + self, + prov_entities, + res_id=None, + status=None, + description=None, + path=None): + JsonClass.__init__(self) + self.prov_entities = prov_entities + self.resID = res_id + self.status = "Success" if status is None else status + self.description = self.status if description is None else description + self.path = path + + def __str__(self): + items = "" + if self.prov_entities: + num_of_entities = len(self.prov_entities) + if num_of_entities == 1: + items = self.prov_entities[0] + else: + items = num_of_entities + return "(" + str(items) + ", " + str(self.resID) + ", " + \ + str(self.status) + ", " + str(self.description) + ")" + + +class ProvisionerInfo(JsonClass): + # + # Provisioner Info data + # + + def __init__(self, totalFreeSpace, version, syslogsBackend=None): + self.totalFreeSpace = totalFreeSpace + self.version = version + self.syslogsBackend = syslogsBackend + + +class Backend(JsonClass): + # + # Backend data + # + + def __init__( + self, + mgmt_ips=None, + rack=None, + region=None, + zone=None, + persistentID=None, + inUse=None, + hostId=None, + state=None, + totalCapacity=None, + availableCapacity=None, + lastProbTime=None, + probeInterval=None, + totalBW=None, + availableBW=None, + totalIOPS=None, + availableIOPS=None): + self.mgmtIPs = mgmt_ips + self.rack = rack + self.region = region + self.zone = zone + self.persistentID = persistentID + self.inUse = inUse + self.state = state + self.totalCapacity = totalCapacity + self.availableCapacity = availableCapacity + self.lastProbTime = lastProbTime + self.probeInterval = probeInterval + self.totalBW = totalBW + self.availableBW = availableBW + self.totalIOPS = totalIOPS + self.availableIOPS = availableIOPS + self.hostId = hostId + + +class Replica(JsonClass): + # + # Backend data + # + + def __init__(self, sameRackAllowed, racks, regions, zones): + self.sameRackAllowed = sameRackAllowed + self.racks = racks + self.regions = regions + self.zones = zones + + +class Location(JsonClass): + # + # Location data + # + + def __init__( + self, + uuid=None, + backend=None, + replicaState=None, + currentStateTime=None): + self.uuid = uuid + self.backend = backend + self.replicaState = replicaState + self.currentStateTime = currentStateTime + + +class VolumeProv(JsonClass): + # + # Provisioner Volume data + # + + def __init__( + self, + uuid=None, + alias=None, + capacity=None, + numReplicas=None, + maxIOPS=None, + desiredIOPS=None, + maxBW=None, + desiredBW=None, + blockSize=None, + maxReplicaDownTime=None, + snapshotID=None, + writable=None, + reservedSpace=None, + location=None): + self.uuid = uuid + self.alias = alias + self.capacity = capacity + self.numReplicas = numReplicas + self.maxIOPS = maxIOPS + self.desiredIOPS = desiredIOPS + self.maxBW = maxBW + self.desiredBW = desiredBW + self.blockSize = blockSize + self.maxReplicaDownTime = maxReplicaDownTime + self.snapshotID = snapshotID + 
self.writable = writable + self.reservedSpacePercentage = reservedSpace + self.location = location + + +class StorageClass(JsonClass): + # + # Provisioner Storage Class + # + + def __init__( + self, + replicas, + racks=None, + regions=None, + zones=None, + blockSize=None, + maxIOPSPerGB=None, + desiredIOPSPerGB=None, + maxBWPerGB=None, + desiredBWPerGB=None, + sameRackAllowed=None, + maxReplicaDownTime=None, + hostId=None, + spanAllowed=None, + name=None, + shareSSDBetweenVolumes=None): + self.numReplicas = replicas + if racks is not None: + self.racks = racks + if regions is not None: + self.regions = regions + if zones is not None: + self.zones = zones + if blockSize is not None: + self.blockSize = blockSize + if maxIOPSPerGB is not None: + self.maxIOPSPerGB = maxIOPSPerGB + if desiredIOPSPerGB is not None: + self.desiredIOPSPerGB = desiredIOPSPerGB + if maxBWPerGB is not None: + self.maxBWPerGB = maxBWPerGB + if desiredBWPerGB is not None: + self.desiredBWPerGB = desiredBWPerGB + if sameRackAllowed is not None: + self.sameRackAllowed = sameRackAllowed + if maxReplicaDownTime is not None: + self.maxReplicaDownTime = maxReplicaDownTime + if hostId is not None: + self.hostId = hostId + if spanAllowed is not None: + self.allowSpan = spanAllowed + if name is not None: + self.name = name + if shareSSDBetweenVolumes is not None: + self.shareSSDBetweenVolumes = shareSSDBetweenVolumes + + +class VolumeCreate(JsonClass): + # + # Provisioner Volume data for Create operation + # + + def __init__( + self, + alias, + capacity, + storage_class, + prov_type, + reserved_space=None, + protocol=None, + uuid=None): + self.alias = alias + self.capacity = capacity + self.storageClass = storage_class + self.provisioningType = prov_type + if reserved_space is not None: + self.reservedSpacePercentage = reserved_space + if protocol is not None: + self.protocol = protocol + if uuid is not None: + self.uuid = uuid + + +class SyslogEntity(JsonClass): + # + # Syslog Entity object + # + + def __init__( + self, + name=None, + url=None, + state=None, + useTls=None, + certFileName=None): + self.name = name + self.url = url + self.state = state + self.useTls = useTls + self.certFileName = certFileName + + +class SnapshotCreate(JsonClass): + # + # Provisioner Snapshot data for Create operation + # + + def __init__( + self, + alias, + volumeID, + reservedSpacePercentage=None, + snapshotID=None): + self.alias = alias + self.volumeID = volumeID + if reservedSpacePercentage is not None: + self.reservedSpacePercentage = reservedSpacePercentage + if snapshotID is not None: + self.snapshotID = snapshotID + + +class SnapshotEntity(JsonClass): + # + # Provisioner Snapshot Entity data for Show operation + # + + def __init__( + self, + alias=None, + snapshotID=None, + reservedSpace=None, + volumeID=None, + capacity=None, + timestamp=None): + self.alias = alias + self.volumeID = volumeID + self.reservedSpace = reservedSpace + self.snapshotID = snapshotID + self.capacity = capacity + self.timestamp = timestamp + + +class SnapshotVolumeCreate(JsonClass): + # + # Provisioner Snapshot Volume data for Create operation + # + + def __init__( + self, + alias, + snapshotID, + writable, + reservedSpacePercentage=None, + volumeID=None, + maxIOPSPerGB=None, + maxBWPerGB=None, + protocol=None, + spanAllowed=None, + storageClassName=None): + self.alias = alias + self.snapshotID = snapshotID + self.writable = writable + if reservedSpacePercentage is not None: + self.reservedSpacePercentage = reservedSpacePercentage + if volumeID is not None: + 
self.volumeID = volumeID + if maxIOPSPerGB is not None: + self.maxIOPSPerGB = maxIOPSPerGB + if maxBWPerGB is not None: + self.maxBWPerGB = maxBWPerGB + if protocol is not None: + self.protocol = protocol + if spanAllowed is not None: + self.allowSpan = spanAllowed + if storageClassName is not None: + self.storageClassName = storageClassName + + +class ForwardEntity(JsonClass): + # + # Provisioner Forward Entity data + # + + def __init__( + self, + loggingType, + level, + host, + appName, + message, + parametersList): + self.loggingType = loggingType + self.level = level + self.host = host + self.appName = appName + self.message = message + self.parametersList = parametersList + + +class LicenseEntity(JsonClass): + # + # Provisioner License Entity data + # + + def __init__( + self, + license_type=None, + expirationDate=None, + maxBackends=None): + self.type = license_type + self.expirationDate = expirationDate + self.maxBackends = maxBackends + + +class HostEntity(JsonClass): + # + # Provisioner Host Entity data + # + + def __init__( + self, + nqn=None, + uuid=None, + name=None, + clientType=None, + version=None, + state=None, + lastProbeTime=None, + duration=None): + self.nqn = nqn + self.uuid = uuid + self.name = name + self.clientType = clientType + self.version = version + self.state = state + self.lastProbeTime = lastProbeTime + self.duration = duration + + +class TargetEntity(JsonClass): + # + # Provisioner Target Entity data for Show operation + # + + def __init__(self, alias=None): + self.alias = alias + + +class TenantEntity(JsonClass): + # + # Provisioner Tenant Entity data for Show operation + # + + def __init__(self, capacity, iops, bw, uuid=None, name=None): + self.capacity = capacity + self.totalIOPS = iops + self.totalBW = bw + if uuid is not None: + self.tenantId = uuid + if name is not None: + self.name = name + + +class CloneEntity(JsonClass): + # + # Provisioner Clone Entity data + # + + def __init__(self, sourceVolumeId, alias, volumeId=None, + reservedSpacePercentage=None, + capacity=None): + self.sourceVolumeId = sourceVolumeId + self.alias = alias + if volumeId is not None: + self.volumeId = volumeId + if reservedSpacePercentage is not None: + self.reservedSpacePercentage = reservedSpacePercentage + if capacity is not None: + self.capacity = capacity diff --git a/cinder/volume/drivers/kioxia/kumoscale.py b/cinder/volume/drivers/kioxia/kumoscale.py new file mode 100644 index 00000000000..379b22ceb42 --- /dev/null +++ b/cinder/volume/drivers/kioxia/kumoscale.py @@ -0,0 +1,490 @@ +# (c) Copyright Kioxia Corporation 2021 All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Volume driver for KIOXIA KumoScale NVMeOF storage system.""" + + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils.secretutils import md5 + +from cinder import exception +from cinder.i18n import _ +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.kioxia import entities +from cinder.volume.drivers.kioxia import rest_client + +LOG = logging.getLogger(__name__) + +KUMOSCALE_OPTS = [ + cfg.StrOpt("kioxia_url", help="KumoScale provisioner REST API URL"), + cfg.StrOpt("kioxia_cafile", help="Cert for provisioner REST API SSL"), + cfg.StrOpt("kioxia_token", help="KumoScale Provisioner auth token."), + cfg.IntOpt( + "kioxia_num_replicas", default=1, + help="Number of volume replicas."), + cfg.IntOpt( + "kioxia_max_iops_per_gb", default=0, help="Upper limit for IOPS/GB."), + cfg.IntOpt( + "kioxia_desired_iops_per_gb", default=0, help="Desired IOPS/GB."), + cfg.IntOpt( + "kioxia_max_bw_per_gb", default=0, + help="Upper limit for bandwidth in B/s per GB."), + cfg.IntOpt( + "kioxia_desired_bw_per_gb", default=0, + help="Desired bandwidth in B/s per GB."), + cfg.BoolOpt( + "kioxia_same_rack_allowed", default=False, + help="Can more than one replica be allocated to same rack."), + cfg.IntOpt( + "kioxia_block_size", default=4096, + help="Volume block size in bytes - 512 or 4096 (Default)."), + cfg.BoolOpt( + "kioxia_writable", default=False, + help="Volumes from snapshot writeable or not."), + cfg.StrOpt( + "kioxia_provisioning_type", default="THICK", + choices=[ + ('THICK', 'Thick provisioning'), ('THIN', 'Thin provisioning')], + help="Thin or thick volume, Default thick."), + cfg.IntOpt( + "kioxia_vol_reserved_space_percentage", default=0, + help="Thin volume reserved capacity allocation percentage."), + cfg.IntOpt( + "kioxia_snap_reserved_space_percentage", default=0, + help="Percentage of the parent volume to be used for log."), + cfg.IntOpt( + "kioxia_snap_vol_reserved_space_percentage", default=0, + help="Writable snapshot percentage of parent volume used for log."), + cfg.IntOpt( + "kioxia_max_replica_down_time", default=0, + help="Replicated volume max downtime for replica in minutes."), + cfg.BoolOpt( + "kioxia_span_allowed", default=True, + help="Allow span - Default True."), + cfg.BoolOpt( + "kioxia_snap_vol_span_allowed", default=True, + help="Allow span in snapshot volume - Default True.") +] + +CONF = cfg.CONF +CONF.register_opts(KUMOSCALE_OPTS) + + +@interface.volumedriver +class KumoScaleBaseVolumeDriver(driver.BaseVD): + """Performs volume management on KumoScale Provisioner. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver version. 
+ """ + + VERSION = '1.0.0' + CI_WIKI_NAME = 'KIOXIA_CI' + SUPPORTED_REST_API_VERSIONS = ['1.0', '1.1'] + + def __init__(self, *args, **kwargs): + super(KumoScaleBaseVolumeDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(KUMOSCALE_OPTS) + self._backend_name = ( + self.configuration.volume_backend_name or self.__class__.__name__) + self.kumoscale = self._get_kumoscale( + self.configuration.safe_get("kioxia_url"), + self.configuration.safe_get("kioxia_token"), + self.configuration.safe_get("kioxia_cafile")) + + self.num_replicas = self.configuration.safe_get("kioxia_num_replicas") + self.same_rack_allowed = self.configuration.safe_get( + "kioxia_same_rack_allowed") + self.max_iops_per_gb = self.configuration.safe_get( + "kioxia_max_iops_per_gb") + self.desired_iops_per_gb = self.configuration.safe_get( + "kioxia_desired_iops_per_gb") + self.max_bw_per_gb = self.configuration.safe_get( + "kioxia_max_bw_per_gb") + self.desired_bw_per_gb = self.configuration.safe_get( + "kioxia_desired_bw_per_gb") + self.block_size = self.configuration.safe_get("kioxia_block_size") + self.writable = self.configuration.safe_get("kioxia_writable") + self.provisioning_type = self.configuration.safe_get( + "kioxia_provisioning_type") + self.vol_reserved_space_percentage = self.configuration.safe_get( + "kioxia_vol_reserved_space_percentage") + self.snap_vol_reserved_space_percentage = self.configuration.safe_get( + "kioxia_snap_vol_reserved_space_percentage") + self.snap_reserved_space_percentage = self.configuration.safe_get( + "kioxia_snap_reserved_space_percentage") + self.max_replica_down_time = self.configuration.safe_get( + "kioxia_max_replica_down_time") + self.span_allowed = self.configuration.safe_get("kioxia_span_allowed") + self.snap_vol_span_allowed = self.configuration.safe_get( + "kioxia_snap_vol_span_allowed") + + @staticmethod + def get_driver_options(): + return KUMOSCALE_OPTS + + def _get_kumoscale(self, url, token, cert): + """Returns an initialized rest client""" + url_strs = url.split(":") + ip_str = url_strs[1] + ip_strs = ip_str.split("//") + ip = ip_strs[1] + port = url_strs[2] + kumoscale = rest_client.KioxiaProvisioner([ip], cert, token, port) + return kumoscale + + def create_volume(self, volume): + """Create the volume""" + volume_name = volume["name"] + volume_uuid = volume["id"] + volume_size = volume["size"] + zone_list = None if 'availability_zone' not in volume else [ + volume['availability_zone']] + + if self.num_replicas > 1 and len(volume_name) > 27: + volume_name = volume_name[:27] # workaround for limitation + storage_class = entities.StorageClass( + self.num_replicas, None, None, zone_list, self.block_size, + self.max_iops_per_gb, self.desired_iops_per_gb, self.max_bw_per_gb, + self.desired_bw_per_gb, self.same_rack_allowed, + self.max_replica_down_time, None, self.span_allowed) + ks_volume = entities.VolumeCreate( + volume_name, volume_size, storage_class, self.provisioning_type, + self.vol_reserved_space_percentage, 'NVMeoF', volume_uuid) + + try: + result = self.kumoscale.create_volume(ks_volume) + except Exception as e: + msg = (_("Volume %(volname)s creation exception: %(txt)s") % + {'volname': volume_name, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status != 'Success': + raise exception.VolumeBackendAPIException(data=result.description) + + def delete_volume(self, volume): + """Delete the volume""" + volume_uuid = volume["id"] + + try: + result = self.kumoscale.delete_volume(volume_uuid) + except 
Exception as e: + msg = (_("Volume %(voluuid)s deletion exception: %(txt)s") % + {'voluuid': volume_uuid, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status not in ('Success', 'DeviceNotFound', 'NotExists'): + raise exception.VolumeBackendAPIException(data=result.description) + + def create_snapshot(self, snapshot): + + snapshot_name = snapshot['name'] + snapshot_uuid = snapshot['id'] + volume_uuid = snapshot['volume_id'] + ks_snapshot = entities.SnapshotCreate( + snapshot_name, volume_uuid, + self.snap_reserved_space_percentage, snapshot_uuid) + + try: + result = self.kumoscale.create_snapshot(ks_snapshot) + except Exception as e: + msg = (_("Snapshot %(snapname)s creation exception: %(txt)s") % + {'snapname': snapshot_name, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status != 'Success': + raise exception.VolumeBackendAPIException(data=result.description) + + def delete_snapshot(self, snapshot): + + snapshot_uuid = snapshot['id'] + + try: + result = self.kumoscale.delete_snapshot(snapshot_uuid) + except Exception as e: + msg = (_("Snapshot %(snapuuid)s deletion exception: %(txt)s") % + {'snapuuid': snapshot_uuid, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status not in ('Success', 'DeviceNotFound', 'NotExists'): + raise exception.VolumeBackendAPIException(data=result.description) + + def create_volume_from_snapshot(self, volume, snapshot): + + volume_name = volume["name"] + volume_uuid = volume["id"] + snapshot_uuid = snapshot["id"] + if self.writable: + reserved_space_percentage = self.snap_vol_reserved_space_percentage + else: + reserved_space_percentage = 0 + + ks_snapshot_volume = entities.SnapshotVolumeCreate( + volume_name, snapshot_uuid, self.writable, + reserved_space_percentage, volume_uuid, + self.max_iops_per_gb, self.max_bw_per_gb, 'NVMeoF', + self.snap_vol_span_allowed) + + try: + result = self.kumoscale.create_snapshot_volume(ks_snapshot_volume) + except Exception as e: + msg = (_("Volume %(volname)s from snapshot exception: %(txt)s") % + {'volname': volume_name, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status != 'Success': + raise exception.VolumeBackendAPIException(data=result.description) + + def initialize_connection(self, volume, connector, initiator_data=None): + """Connect the initiator to a volume""" + host_uuid = connector['uuid'] + ks_volume = None + targets = [] + volume_replicas = [] + volume_uuid = volume['id'] + volume_name = volume['name'] + + try: + result = self.kumoscale.host_probe( + connector['nqn'], connector['uuid'], + KumoScaleBaseVolumeDriver._convert_host_name( + connector['host']), + 'Agent', 'cinder-driver-0.1', 30) + except Exception as e: + msg = (_("Host %(uuid)s host_probe exception: %(txt)s") % + {'uuid': connector['uuid'], 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status != 'Success': + msg = (_("host_probe for %(uuid)s failed with %(txt)s") % + {'uuid': connector['uuid'], 'txt': result.description}) + raise exception.VolumeBackendAPIException(data=msg) + + try: + result = self.kumoscale.publish(host_uuid, volume_uuid) + except Exception as e: + msg = (_("Volume %(voluuid)s publish exception: %(txt)s") % + {'voluuid': volume_uuid, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status != "Success" and result.status != 'AlreadyPublished': + raise 
exception.VolumeBackendAPIException(data=result.description) + + try: + result = self.kumoscale.get_volumes_by_uuid(volume_uuid) + except Exception as e: + msg = (_("Volume %(voluuid)s fetch exception: %(txt)s") % + {'voluuid': volume_uuid, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status == "Success": + if len(result.prov_entities) == 0: + raise exception.VolumeBackendAPIException( + data=_("Volume %s not found") % volume_uuid) + else: + ks_volume = result.prov_entities[0] + else: + msg = (_("get_volumes_by_uuid for %(uuid)s failed with %(txt)s") % + {'uuid': volume_uuid, 'txt': result.description}) + raise exception.VolumeBackendAPIException(data=msg) + + try: + result = self.kumoscale.get_targets(host_uuid, ks_volume.uuid) + except Exception as e: + msg = (_("Volume %(voluuid)s get targets exception: %(txt)s") % + {'voluuid': volume_uuid, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status == "Success": + if len(result.prov_entities) == 0: + raise exception.VolumeBackendAPIException( + data=_("Volume %s targets not found") % ks_volume.uuid) + else: + targets = result.prov_entities + + ks_volume_replicas = ks_volume.location + for i in range(len(targets)): + persistent_id = str(targets[i].backend.persistentID) + + try: + result = self.kumoscale.get_backend_by_id(persistent_id) + except Exception as e: + msg = (_("Backend %(backpid)s exception: %(txt)s") % + {'backpid': persistent_id, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status == "Success": + if len(result.prov_entities) == 0: + raise exception.VolumeBackendAPIException( + data=_("Backend %s not found") % persistent_id) + else: + backend = result.prov_entities[0] + else: + msg = (_("get_backend_by_id for %(pid)s failed with %(txt)s") % + {'pid': persistent_id, 'txt': result.description}) + raise exception.VolumeBackendAPIException(data=msg) + + str_portals = [] + for p in range(len(backend.portals)): + portal = backend.portals[p] + portal_ip = str(portal.ip) + portal_port = str(portal.port) + portal_transport = str(portal.transport) + str_portals.append( + (portal_ip, portal_port, portal_transport)) + + for j in range(len(ks_volume_replicas)): + ks_replica = ks_volume_replicas[j] + if str(ks_replica.backend.persistentID) == persistent_id: + break + + replica = dict() + replica['vol_uuid'] = ks_replica.uuid + replica['target_nqn'] = str(targets[i].targetName) + replica['portals'] = str_portals + + volume_replicas.append(replica) + + if len(volume_replicas) > 1: # workaround for limitation + volume_name = volume_name[:27] + + data = { + 'vol_uuid': volume_uuid, + 'alias': volume_name, + 'writable': ks_volume.writable, + 'volume_replicas': volume_replicas + } + + if result.status != 'Success': + raise exception.VolumeBackendAPIException(data=result.description) + + return { + 'driver_volume_type': 'nvmeof', + 'data': data + } + + @staticmethod + def _convert_host_name(name): + if name is None: + return "" + if len(name) > 32: + name = md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() + else: + name = name.replace('.', '-').lower() + return name + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate connection.""" + volume_uuid = volume['id'] + if connector: + host_uuid = connector['uuid'] + else: + host_uuid = None + + try: + result = self.kumoscale.unpublish(host_uuid, volume_uuid) + except Exception as e: + msg = (_("Volume %(voluuid)s unpublish exception: %(txt)s") % + 
{'voluuid': volume_uuid, 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + + if result.status != 'Success' and ( + result.status != 'VolumeNotPublished'): + raise exception.VolumeBackendAPIException(data=result.description) + + def _update_volume_stats(self): + data = dict( + volume_backend_name=self._backend_name, + vendor_name='KIOXIA', + driver_version=self.VERSION, + storage_protocol='NVMeOF', + ) + data['total_capacity_gb'] = 'unknown' + data['free_capacity_gb'] = 'unknown' + data['consistencygroup_support'] = False + data['thin_provisioning_support'] = True + data['multiattach'] = False + + result = None + tenants = [] + try: + result = self.kumoscale.get_tenants() + except Exception as e: + msg = _("Get tenants exception: %s") % str(e) + LOG.exception(msg) + + if result and result.status == "Success": + if len(result.prov_entities) == 0: + LOG.error("No kumoscale tenants") + else: + tenants = result.prov_entities + elif result: + LOG.error("Get tenants API error: %s", result.description) + + default_tenant = None + for i in range(len(tenants)): + if tenants[i].tenantId == "0": + default_tenant = tenants[i] + break + + if default_tenant: + total_capacity = default_tenant.capacity + consumed_capacity = default_tenant.consumedCapacity + free_capacity = total_capacity - consumed_capacity + data['total_capacity_gb'] = total_capacity + data['free_capacity_gb'] = free_capacity + + self._stats = data + + def extend_volume(self, volume, new_size): + try: + result = self.kumoscale.expand_volume( + new_size, volume["id"]) + except Exception as e: + msg = (_("Volume %(volid)s expand exception: %(txt)s") % + {'volid': volume["id"], 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + if result.status != 'Success': + raise exception.VolumeBackendAPIException(data=result.description) + + def create_cloned_volume(self, volume, src_vref): + clone_entity = entities.CloneEntity( + src_vref['id'], volume['name'], + volumeId=volume['id'], + capacity=volume['size']) + try: + result = self.kumoscale.clone_volume(clone_entity) + except Exception as e: + msg = (_("Volume %(volid)s clone exception: %(txt)s") % + {'volid': volume["id"], 'txt': str(e)}) + raise exception.VolumeBackendAPIException(data=msg) + if result.status != 'Success': + raise exception.VolumeBackendAPIException(data=result.description) + + def create_export(self, context, volume, connector): + pass + + def ensure_export(self, context, volume): + pass + + def remove_export(self, context, volume): + pass + + def check_for_setup_error(self): + pass diff --git a/cinder/volume/drivers/kioxia/rest_client.py b/cinder/volume/drivers/kioxia/rest_client.py new file mode 100644 index 00000000000..e5ab9ed3cbf --- /dev/null +++ b/cinder/volume/drivers/kioxia/rest_client.py @@ -0,0 +1,1055 @@ +# (c) Copyright Kioxia Corporation 2021 All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import json +import os +import ssl + +import urllib3 + +from cinder.volume.drivers.kioxia import entities + +urllib3.disable_warnings() +RUN_COMMAND_TRIALS = 20 +RUN_COMMAND_SLEEP = 0.5 + + +class ClassBuilder(object): + def __init__(self, **kwargs): + for key, value in kwargs.items(): + if value is not None: + self.__dict__[key] = value + + def to_json(self): + return json.dumps( + self, + default=lambda o: o.__dict__, + sort_keys=True, + indent=4) + + +class JsonToClass(object): + + def __init__(self, json_object, first=False): + if isinstance(json_object, list): + self.records = [] + for list_index in range(len(json_object)): + list_item = JsonToClass(json_object[list_index]) + self.records.append(list_item) + else: + if first: + self.records = None + self.build_class(json_object) + if first: + if 'status' not in json_object: + self.status = "Success" + if 'description' not in json_object: + self.description = "Success." + pass + + def __getattr__(self, item): + return "N/A" + + def to_json(self): + return json.dumps( + self, + default=lambda o: o.__dict__, + sort_keys=True, + indent=4) + + def __str__(self): + return json.dumps(self, default=lambda o: o.__dict__) + + def is_exist(self, item): + if item in self.__dict__.keys() and self.__dict__[item] is not None: + return True + return False + + def build_class(self, json_object): + json_keys = json_object.keys() + for key in json_keys: + if isinstance(json_object[key], list): + self.__dict__[key] = [] + for i in range(len(json_object[key])): + if isinstance(json_object[key][i], dict): + sub_object = JsonToClass(json_object[key][i]) + else: + sub_object = json_object[key][i] + self.__dict__[key].append(sub_object) + continue + if not isinstance(json_object[key], dict): + self.__dict__[key] = json_object[key] + continue + self.__dict__[key] = {} + sub_object = JsonToClass(json_object[key]) + self.__dict__[key] = sub_object + + +class ProvisionerVisitor(object): + # + # Provisioner Visitor + # + + def __init__(self, http, command_str): + self.http = http + self.command_str = command_str + + @abc.abstractmethod + def visit(self, url): + return + + +class ProvisionerGetVisitor(ProvisionerVisitor): + # + # Provisioner Get Visitor + # + + def visit(self, url, token=None): + r = self.http.request( + 'GET', url, headers={ + "Authorization": "Bearer " + token}) + return r + + +class ProvisionerPostVisitor(ProvisionerVisitor): + # + # Provisioner Post Visitor + # + + def __init__(self, http, command_str, json_body): + ProvisionerVisitor.__init__(self, http, command_str) + self.json_body = json_body + + def visit(self, url, token=None): + r = self.http.request( + 'POST', + url, + body=self.json_body, + headers={ + 'Content-Type': 'application/json', + "Authorization": "Bearer " + + token}) + return r + + +class ProvisionerDeleteVisitor(ProvisionerVisitor): + # + # Provisioner Delete Visitor + # + + def __init__(self, http, command_str): + ProvisionerVisitor.__init__(self, http, command_str) + + def visit(self, url, token=None): + r = self.http.request( + 'DELETE', + url, + body=None, + headers={ + 'Content-Type': 'application/json', + "Authorization": "Bearer " + + token}) + return r + + +class ProvisionerPatchVisitor(ProvisionerVisitor): + # + # Provisioner Patch Visitor + # + + def __init__(self, http, command_str, json_body=None): + ProvisionerVisitor.__init__(self, http, command_str) + self.json_body = json_body + + def visit(self, url, token=None): + r = self.http.request( + 'PATCH', + url, + body=self.json_body, + headers={ + 
'Content-Type': 'application/json',
+                "Authorization": "Bearer " + token})
+        return r
+
+
+class ProvisionerPutVisitor(ProvisionerVisitor):
+    #
+    # Provisioner Put Visitor
+    #
+
+    def __init__(self, http, command_str, json_body):
+        ProvisionerVisitor.__init__(self, http, command_str)
+        self.json_body = json_body
+
+    def visit(self, url, token=None):
+        r = self.http.request(
+            'PUT',
+            url,
+            body=self.json_body,
+            headers={
+                'Content-Type': 'application/json',
+                "Authorization": "Bearer " + token})
+        return r
+
+
+class ProvisionerPostDataVisitor(ProvisionerVisitor):
+    #
+    # Provisioner Post Data Visitor
+    #
+
+    def __init__(self, http, command_str, path):
+        ProvisionerVisitor.__init__(self, http, command_str)
+        self.path = path
+        self.timeout = 90
+
+    def visit(self, url, token=None):
+        binary_data = open(self.path, 'rb').read()
+        disposition = "inline; filename=" + os.path.basename(self.path)
+
+        r = self.http.request(
+            'POST',
+            url,
+            body=binary_data,
+            headers={
+                'Content-Type': 'application/x-gtar',
+                'Content-Disposition': disposition,
+                "Authorization": "Bearer " + token},
+            timeout=self.timeout)
+        return r
+
+
+class ProvisionerConnector(object):
+    #
+    # Provisioner Connector
+    #
+
+    def __init__(self, ips, port, visitor):
+        self.visitor = visitor
+        self.ips = ips
+        self.port = port
+
+    def visit_provisioner(self, token=None):
+        r = None
+        if self.ips:
+            num_of_ips = len(self.ips)
+            if num_of_ips > 0:
+                for i in range(num_of_ips):
+                    ip = self.ips[i]
+                    url = 'https://' + ip + ':' + \
+                        str(self.port) + '/' + self.visitor.command_str
+                    try:
+                        if token is None:
+                            token = "Unknown"
+                        r = self.visitor.visit(url, token)
+                        if r:
+                            if i != 0:
+                                KioxiaProvisioner.switch_path(i)
+                            return r
+                    except BaseException:
+                        continue
+                return r
+            return r
+        return r
+
+
+class KioxiaProvisioner(object):
+    #
+    # REST client class that interacts with a specific Provisioner
+    # :type ips: str array
+    # :param ips: Provisioner management IPs
+    # :type cert: str
+    # :param cert: KumoScale keystore pem file full path
+    #
+
+    mgmt_ips = []
+
+    def __init__(self, ips, cert, token, port=8090):
+        self.mgmt_ips = ips
+        self.port = port
+        self.user = None
+        self.token = token
+        if cert is None:
+            cert = '/etc/kioxia/ssdtoolbox.pem'
+        KioxiaProvisioner.mgmt_ips = ips
+        self.http = urllib3.PoolManager(
+            cert_reqs=ssl.CERT_NONE,
+            cert_file=cert,
+            assert_hostname=False,
+            timeout=urllib3.Timeout(
+                connect=5.0,
+                read=60.0))
+
+    def set_token(self, user, token):
+        self.user = user
+        self.token = token
+
+    def result_support(self, result):
+        if result is not None:
+            if result.data is not None:
+                if "Status 401" in str(result.data):
+                    return entities.ProvisionerResponse(
+                        None, None, "Bad credentials")
+                if "Status 403" in str(result.data):
+                    return entities.ProvisionerResponse(
+                        None, None, "Access is denied")
+                if str(result.data) == "":
+                    return entities.ProvisionerResponse([], None, "Success")
+                try:
+                    result_data = json.loads(result.data)
+                    if ('status' in result_data and
+                            result_data['status'] != "Success"):
+                        return entities.ProvisionerResponse(
+                            result_data, None, result_data['status'],
+                            result_data['description'])
+                    return entities.ProvisionerResponse(result_data)
+                except Exception as e:
+                    return entities.ProvisionerResponse(
+                        None, None, type(e).__name__, str(e))
+        return entities.ProvisionerResponse(
+            None,
+            None,
+            "Provisioner Communication Error",
+            "Provisioner Communication Error")
+
+    # Called to move the last successfully connected IP to the front
+    @staticmethod
+    def switch_path(ip_idx):
+        temp = KioxiaProvisioner.mgmt_ips[0]
+        KioxiaProvisioner.mgmt_ips[0] = KioxiaProvisioner.mgmt_ips[ip_idx]
+        KioxiaProvisioner.mgmt_ips[ip_idx] = temp
+
+    # Call Provisioner with get request
+    def provisioner_get_request(self, api_name):
+        get_visitor = ProvisionerGetVisitor(self.http, api_name)
+        provisioner_connector = ProvisionerConnector(
+            self.mgmt_ips, self.port, get_visitor)
+        r = provisioner_connector.visit_provisioner(self.token)
+        return self.result_support(r)
+
+    # Call Provisioner with delete request
+    def provisioner_delete_request(self, api_name):
+        delete_visitor = ProvisionerDeleteVisitor(self.http, api_name)
+        provisioner_connector = ProvisionerConnector(
+            self.mgmt_ips, self.port, delete_visitor)
+        r = provisioner_connector.visit_provisioner(self.token)
+        return self.result_support(r)
+
+    # Call Provisioner with patch request
+    def provisioner_patch_request(self, api_name, json_body=None):
+        patch_visitor = ProvisionerPatchVisitor(self.http, api_name, json_body)
+        provisioner_connector = ProvisionerConnector(
+            self.mgmt_ips, self.port, patch_visitor)
+        r = provisioner_connector.visit_provisioner(self.token)
+        return self.result_support(r)
+
+    # Call Provisioner with update request
+    def provisioner_put_request(self, api_name, json_body):
+        put_visitor = ProvisionerPutVisitor(self.http, api_name, json_body)
+        provisioner_connector = ProvisionerConnector(
+            self.mgmt_ips, self.port, put_visitor)
+        r = provisioner_connector.visit_provisioner(self.token)
+        return self.result_support(r)
+
+    # Call Provisioner with post request
+    def provisioner_post_request(self, api_name, json_body, password=None):
+        post_visitor = ProvisionerPostVisitor(self.http, api_name, json_body)
+        provisioner_connector = ProvisionerConnector(
+            KioxiaProvisioner.mgmt_ips, self.port, post_visitor)
+        r = provisioner_connector.visit_provisioner(self.token)
+        return self.result_support(r)
+
+    def get_info(self):
+        # Call to Get Info API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data contains Provisioner information
+        #
+
+        result_response = self.provisioner_get_request('info')
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity)
+        return result_response
+
+    def get_provisioner_info(self):
+        # Call to Get Info API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data contains Provisioner information
+        #
+
+        result_response = self.provisioner_get_request('info')
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return result_entity
+        return result_response
+
+    def add_backend(self, backend_entity):
+        # Call to Add Backend API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        j = backend_entity.to_json()
+        result_response = self.provisioner_post_request('backends', j)
+        return result_response
+
+    def update_backend(self, backend_entity, persistent_id):
+        # Call to Update Backend API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        j = backend_entity.to_json()
+        result_response = self.provisioner_put_request(
+            'backends/' + persistent_id, j)
+        return result_response
+
+    def delete_backend(self, persistent_id):
+        # Call to Delete Backend API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        result_response = self.provisioner_delete_request(
+            'backends/' + persistent_id)
+        return
result_response + + def get_backends(self): + # Call to List of Backends API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data contain List of Backends + # + + result_response = self.provisioner_get_request('backends') + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity.records) + return result_response + + def get_backend_by_id(self, uuid): + # Call to List of Backends API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data contain List of Backends + # + + result_response = self.provisioner_get_request('backends/' + uuid) + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity.records) + return result_response + + def get_volumes(self, tenant_uuid=None): + # Call to List of Volumes API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data contain List of Volumes + # + + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + result_response = self.provisioner_get_request(tenant_id + 'volumes') + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity.records) + return result_response + + def get_volumes_by_alias(self, alias, tenant_uuid=None): + # Call to List of Volumes API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data contain List of Volumes + # + + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + result_response = self.provisioner_get_request( + tenant_id + 'volumes_by_alias/' + alias) + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity.records) + return result_response + + def get_volumes_by_uuid( + self, + volume_uuid, + tenant_uuid=None): + # Call to List of Volumes API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data contain List of Volumes + # + + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + result_response = self.provisioner_get_request( + tenant_id + 'volumes/' + volume_uuid) + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity.records) + return result_response + + def add_replica( + self, + replica_entity, + volume_uuid, + tenant_uuid=None): + # Call to Add Replica API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + j = replica_entity.to_json() + result_response = self.provisioner_post_request( + tenant_id + 'replica/' + volume_uuid, j) + return result_response + + def delete_replica( + self, + volume_uuid, + replica_uuid, + tenant_uuid=None): + # Call to Delete Replica API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + result_response = self.provisioner_patch_request( + tenant_id + 'replica/' + volume_uuid + "/" + replica_uuid) + return result_response + + def delete_replica_confirm( + self, + volume_uuid, + replica_uuid, + tenant_uuid=None): + # Call to Delete Replica Confirm API + + # @rtype: ProvisionerResponse + # @returns: 
Provisioner response data
+        #
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        result_response = self.provisioner_delete_request(
+            tenant_id + 'replica/' + volume_uuid + "/" + replica_uuid)
+        return result_response
+
+    def create_volume(self, volume_entity, tenant_uuid=None):
+        # Call to Create Volume API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        j = volume_entity.to_json()
+        result_response = self.provisioner_post_request(
+            tenant_id + 'volumes', j)
+        return result_response
+
+    def delete_volume(self, volume_uuid, tenant_uuid=None):
+        # Call to Delete Volume API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        result_response = self.provisioner_delete_request(
+            tenant_id + 'volumes/' + volume_uuid)
+        return result_response
+
+    def expand_volume(
+            self,
+            new_capacity,
+            volume_uuid,
+            tenant_uuid=None):
+        # Call to Expand Volume API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        entity = ClassBuilder(newCapacity=str(new_capacity))
+        j = entity.to_json()
+        result_response = self.provisioner_patch_request(
+            tenant_id + 'volumes/' + volume_uuid, j)
+        return result_response
+
+    def set_replica_state(
+            self,
+            volume_uuid,
+            replica_uuid,
+            state,
+            tenant_uuid=None):
+        # Call to Set Replica State API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        result_response = self.provisioner_patch_request(
+            tenant_id + 'replica/' + volume_uuid + "/" +
+            replica_uuid + "/" + str(state))
+        return result_response
+
+    def get_snapshots(
+            self,
+            snapshot_uuid=None,
+            tenant_uuid=None):
+        # Call to List of Snapshots API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data contains a list of snapshots
+        #
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        if snapshot_uuid is None:
+            result_response = self.provisioner_get_request(
+                tenant_id + 'snapshots')
+        else:
+            result_response = self.provisioner_get_request(
+                tenant_id + 'snapshots/' + snapshot_uuid)
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity.records)
+        return result_response
+
+    def get_snapshots_by_vol(
+            self,
+            volume_uuid,
+            tenant_uuid=None):
+        # Call to Get Snapshot Information via Volume UUID API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data contains a list of snapshots
+        #
+
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        result_response = self.provisioner_get_request(
+            tenant_id + 'snapshots_by_vol/' + volume_uuid)
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity.records)
+        return result_response
+
+    def get_snapshots_by_alias(self, alias, tenant_uuid=None):
+        # Call to Get Snapshot Information via alias API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data contains a list of snapshots
+        #
+
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        result_response =
self.provisioner_get_request( + tenant_id + 'snapshots_by_alias/' + alias) + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity.records) + return result_response + + def set_license(self, license_key): + # Call to Set License API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + entity = ClassBuilder(license=license_key) + j = entity.to_json() + result_response = self.provisioner_post_request('license', j) + return result_response + + def get_license(self): + # Call to Get License API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + result_response = self.provisioner_get_request('license') + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity) + return result_response + + def get_inventory(self): + # Call to Get Inventory API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + result_response = self.provisioner_get_request('inventory') + return result_response + + def reset_inventory(self): + # Call to Reset Inventory API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + result_response = self.provisioner_delete_request('reset_inventory') + return result_response + + def get_syslogs(self): + # Call to Get Syslogs API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + result_response = self.provisioner_get_request('syslog') + if result_response.status == "Success": + result_entity = JsonToClass(result_response.prov_entities, True) + return entities.ProvisionerResponse(result_entity.records) + return result_response + + def create_snapshot( + self, + snapshot_entity, + tenant_uuid=None): + # Call to Create Snapshot API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + j = snapshot_entity.to_json() + result_response = self.provisioner_post_request( + tenant_id + 'snapshots', j) + return result_response + + def delete_snapshot( + self, + snapshot_uuid, + tenant_uuid=None): + # Call to Delete Snapshot API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + result_response = self.provisioner_delete_request( + tenant_id + 'snapshots/' + snapshot_uuid) + return result_response + + def create_snapshot_volume( + self, + snapshot_volume_entity, + tenant_uuid=None): + # Call to Create Snapshot Volume API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + tenant_id = "" + if tenant_uuid is not None: + tenant_id = tenant_uuid + "/" + j = snapshot_volume_entity.to_json() + result_response = self.provisioner_post_request( + tenant_id + 'snapshot_volumes', j) + return result_response + + def forward_log(self, forward_entity): + # Call to Forward Log API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + j = forward_entity.to_json() + result_response = self.provisioner_post_request('forward_log', j) + return result_response + + def get_hosts(self): + # Call to Get Hosts API + + # @rtype: ProvisionerResponse + # @returns: Provisioner response data + # + result_response = self.provisioner_get_request('hosts') + if result_response.status == "Success": + result_entity = 
JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity.records)
+        return result_response
+
+    def get_hosts_by_name(self, host_name):
+        # Call to Get Hosts API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        result_response = self.provisioner_get_request(
+            'hosts?hostName=' + host_name)
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity.records)
+        return result_response
+
+    def delete_host(self, host_uuid):
+        # Call to Delete Host API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        result_response = self.provisioner_delete_request('hosts/' + host_uuid)
+        return result_response
+
+    def get_targets(self, host_uuid, volume_uuid):
+        # Call to Get Targets API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        if host_uuid is None and volume_uuid is None:
+            return entities.ProvisionerResponse(
+                None, None, "ParametersError", "All parameters missing")
+        if host_uuid is not None:
+            request = "?hostId=" + host_uuid
+        else:
+            request = "?volId=" + volume_uuid
+        if host_uuid is not None and volume_uuid is not None:
+            request += "&volId=" + volume_uuid
+        result_response = self.provisioner_get_request('targets' + request)
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity.records)
+        return result_response
+
+    def publish(
+            self,
+            host_uuid,
+            volume_uuid,
+            tenant_uuid=None):
+        # Call to Publish API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        entity = ClassBuilder(hostId=host_uuid, volId=volume_uuid)
+        j = entity.to_json()
+        result_response = self.provisioner_post_request(
+            tenant_id + 'publish', j)
+        return result_response
+
+    def unpublish(
+            self,
+            host_uuid,
+            volume_uuid,
+            tenant_uuid=None):
+        # Call to Unpublish API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        entity = ClassBuilder(hostId=host_uuid, volId=volume_uuid)
+        j = entity.to_json()
+        result_response = self.provisioner_post_request(
+            tenant_id + 'unpublish', j)
+        return result_response
+
+    def host_probe(self, host_nqn, host_uuid, host_name,
+                   client_type, sw_version, duration_in_sec):
+        # Call to Host Probe API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        entity = ClassBuilder(
+            hostNqn=host_nqn,
+            hostId=host_uuid,
+            name=host_name,
+            clientType=client_type,
+            version=sw_version,
+            duration=duration_in_sec)
+        j = entity.to_json()
+        result_response = self.provisioner_post_request('host_probe', j)
+        return result_response
+
+    def migrate_volume(
+            self,
+            volume_uuid,
+            replica_uuid,
+            tenant_uuid=None):
+        # Call to Migrate Volume API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        entity = ClassBuilder(volId=volume_uuid, repId=replica_uuid)
+        j = entity.to_json()
+        result_response = self.provisioner_post_request(
+            tenant_id + 'migrate_volume', j)
+        return result_response
+
+    def get_tasks(self, task_id=None, host_id=None):
+        # Call to Get Tasks API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        if task_id is not None:
+            cmd = "tasks?taskId=" + str(task_id)
+        elif host_id is not None:
+            cmd = "tasks?hostId=" + str(host_id)
+        else:
+            cmd = "tasks"
+        result_response = self.provisioner_get_request(cmd)
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity.records)
+        return result_response
+
+    def remove_task(self, task_id, host_id=None):
+        # Call to Remove Task API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        cmd = 'tasks?taskId=' + task_id
+        if host_id is not None:
+            cmd += "&hostId=" + host_id
+        result_response = self.provisioner_delete_request(cmd)
+        return result_response
+
+    def update_task(self, task_id, host_id, state=None, progress=None,
+                    status=None, description=None, tags=None):
+        # Call to Update Task API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        entity = ClassBuilder(
+            taskId=task_id,
+            hostId=host_id,
+            state=state,
+            progress=progress,
+            taskStatus=status,
+            statusDescription=description,
+            taskConfiguration=tags)
+        j = entity.to_json()
+        result_response = self.provisioner_put_request('tasks', j)
+        return result_response
+
+    def create_tenant(self, tenant_entity):
+        # Call to Create Tenant API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        j = tenant_entity.to_json()
+        result_response = self.provisioner_post_request('tenants', j)
+        return result_response
+
+    def delete_tenant(self, tenant_uuid):
+        # Call to Delete Tenant API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        result_response = self.provisioner_delete_request(
+            'tenants/' + tenant_uuid)
+        return result_response
+
+    def modify_tenant(self, tenant_entity, tenant_uuid):
+        # Call to Modify Tenant API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        j = tenant_entity.to_json()
+        result_response = self.provisioner_put_request(
+            'tenants/' + tenant_uuid, j)
+        return result_response
+
+    def get_tenants(self):
+        # Call to List of Tenants API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data contains a list of tenants
+        #
+
+        result_response = self.provisioner_get_request('tenants')
+        if result_response.status == "Success":
+            result_entity = JsonToClass(result_response.prov_entities, True)
+            return entities.ProvisionerResponse(result_entity.records)
+        return result_response
+
+    def clone_volume(self, clone_entity, tenant_uuid=None):
+        # Call to Clone Volume API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+
+        tenant_id = ""
+        if tenant_uuid is not None:
+            tenant_id = tenant_uuid + "/"
+        j = clone_entity.to_json()
+        result_response = self.provisioner_post_request(
+            tenant_id + 'clone_volume', j)
+        return result_response
+
+    def get_non_implemented(self, param1=None, param2=None):
+        # Call to Get Not Implemented Answer API
+
+        # @rtype: ProvisionerResponse
+        # @returns: Provisioner response data
+        #
+        return entities.ProvisionerResponse(None, None, "Not implemented")
diff --git a/doc/source/configuration/block-storage/drivers/kioxia-kumoscale-driver.rst b/doc/source/configuration/block-storage/drivers/kioxia-kumoscale-driver.rst
new file mode 100644
index 00000000000..a35c22a03e1
--- /dev/null
+++ b/doc/source/configuration/block-storage/drivers/kioxia-kumoscale-driver.rst
@@ -0,0 +1,66 @@
+==============================
+KIOXIA Kumoscale NVMeOF Driver
+==============================
+
+The KIOXIA Kumoscale volume driver provides OpenStack Compute instances
+with access to KIOXIA Kumoscale NVMeOF storage systems.
+
+This documentation explains how to configure Cinder for use with the
+KIOXIA Kumoscale storage backend system.
+
+Driver options
+~~~~~~~~~~~~~~
+
+The following table contains the configuration options supported by the
+KIOXIA Kumoscale NVMeOF driver.
+
+.. config-table::
+   :config-target: KIOXIA Kumoscale
+
+   cinder.volume.drivers.kioxia.kumoscale
+
+Supported operations
+~~~~~~~~~~~~~~~~~~~~
+
+- Create, list, delete, attach and detach volumes
+- Create, list and delete volume snapshots
+- Create a volume from a snapshot
+- Copy an image to a volume
+- Copy a volume to an image
+- Clone a volume
+- Extend a volume
+
+Configure KIOXIA Kumoscale NVMeOF backend
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section details the steps required to configure the KIOXIA Kumoscale
+storage Cinder driver.
+
+#. In the ``cinder.conf`` configuration file under the ``[DEFAULT]``
+   section, set the ``enabled_backends`` parameter.
+
+   .. code-block:: ini
+
+      [DEFAULT]
+      enabled_backends = kumoscale-1
+
+
+#. Add a backend group section for the backend group specified
+   in the ``enabled_backends`` parameter.
+
+#. In the newly created backend group section, set the
+   following configuration options:
+
+   .. code-block:: ini
+
+      [kumoscale-1]
+      # Backend name
+      volume_backend_name=kumoscale-1
+      # The driver path
+      volume_driver=cinder.volume.drivers.kioxia.kumoscale.KumoScaleBaseVolumeDriver
+      # Kumoscale provisioner URL
+      kioxia_url=https://70.0.0.13:30100
+      # Kumoscale provisioner cert file
+      kioxia_cafile=/etc/kioxia/ssdtoolbox.pem
+      # Kumoscale provisioner token
+      kioxia_token=eyJhbGciOiJIUzI1NiJ9...
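
A quick way to sanity-check the new backend, after restarting the
``cinder-volume`` service, is to map it to a volume type and create a small
test volume. This flow is not part of the patch itself; the type and volume
names below are placeholders, and it assumes a working ``openstack`` client:

.. code-block:: console

   $ openstack volume type create kumoscale
   $ openstack volume type set --property volume_backend_name=kumoscale-1 kumoscale
   $ openstack volume create --type kumoscale --size 1 ks-smoke-test
   $ openstack volume show ks-smoke-test -f value -c status
   available
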
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index 6150f9da038..77b52e91751 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -117,6 +117,9 @@ title=Inspur AS13000 Storage Driver (iSCSI)
 [driver.kaminario]
 title=Kaminario Storage Driver (iSCSI, FC)
 
+[driver.kioxia_kumoscale]
+title=KIOXIA Kumoscale Driver (NVMeOF)
+
 [driver.lenovo]
 title=Lenovo Storage Driver (FC, iSCSI)
 
@@ -245,6 +248,7 @@ driver.infortrend=complete
 driver.inspur=complete
 driver.inspur_as13000=complete
 driver.kaminario=complete
+driver.kioxia_kumoscale=complete
 driver.lenovo=complete
 driver.linbit_linstor=complete
 driver.lvm=complete
@@ -313,6 +317,7 @@ driver.infortrend=complete
 driver.inspur=complete
 driver.inspur_as13000=complete
 driver.kaminario=complete
+driver.kioxia_kumoscale=complete
 driver.lenovo=complete
 driver.linbit_linstor=complete
 driver.lvm=complete
@@ -381,6 +386,7 @@ driver.infortrend=missing
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=missing
+driver.kioxia_kumoscale=missing
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=missing
@@ -452,6 +458,7 @@ driver.infortrend=missing
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=missing
+driver.kioxia_kumoscale=missing
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=missing
@@ -522,6 +529,7 @@ driver.infortrend=complete
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=complete
+driver.kioxia_kumoscale=missing
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=missing
@@ -593,6 +601,7 @@ driver.infortrend=missing
 driver.inspur=complete
 driver.inspur_as13000=missing
 driver.kaminario=missing
+driver.kioxia_kumoscale=missing
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=missing
@@ -663,6 +672,7 @@ driver.infortrend=complete
 driver.inspur=missing
 driver.inspur_as13000=complete
 driver.kaminario=complete
+driver.kioxia_kumoscale=complete
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=complete
@@ -734,6 +744,7 @@ driver.infortrend=complete
 driver.inspur=missing
 driver.inspur_as13000=missing
 driver.kaminario=missing
+driver.kioxia_kumoscale=missing
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=missing
@@ -805,6 +816,7 @@ driver.infortrend=complete
 driver.inspur=missing
 driver.inspur_as13000=complete
 driver.kaminario=missing
+driver.kioxia_kumoscale=missing
 driver.lenovo=complete
 driver.linbit_linstor=missing
 driver.lvm=complete
@@ -873,6 +885,7 @@ driver.infortrend=missing
 driver.inspur=missing
 driver.inspur_as13000=missing
 driver.kaminario=missing
+driver.kioxia_kumoscale=missing
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=complete
@@ -945,6 +958,7 @@ driver.infortrend=missing
 driver.inspur=missing
 driver.inspur_as13000=missing
 driver.kaminario=missing
+driver.kioxia_kumoscale=missing
 driver.lenovo=missing
 driver.linbit_linstor=missing
 driver.lvm=missing
diff --git a/releasenotes/notes/bp-kumoscale-driver-3a01460f1aa83939.yaml b/releasenotes/notes/bp-kumoscale-driver-3a01460f1aa83939.yaml
new file mode 100644
index 00000000000..cb35bc67226
--- /dev/null
+++ b/releasenotes/notes/bp-kumoscale-driver-3a01460f1aa83939.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    New Cinder volume driver for KIOXIA Kumoscale storage systems.
+    The driver supports the NVMeOF protocol.
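
To exercise the new REST client outside of the driver, a minimal standalone
sketch could look like the following. It assumes a reachable KumoScale
provisioner; the management IP and token are placeholders, not values from
this patch. The capacity logic mirrors what _update_volume_stats() does with
the default tenant:

    # Illustrative sketch only -- not part of this patch.
    from cinder.volume.drivers.kioxia import rest_client

    # cert=None falls back to /etc/kioxia/ssdtoolbox.pem; port defaults
    # to 8090.
    ks = rest_client.KioxiaProvisioner(['10.0.0.5'], None,
                                       '<provisioner-token>')

    info = ks.get_info()
    if info.status != 'Success':
        raise SystemExit('provisioner unreachable: %s' % info.description)

    # The driver reports capacity from the default tenant (tenantId == "0").
    tenants = ks.get_tenants()
    if tenants.status == 'Success':
        for tenant in tenants.prov_entities:
            if tenant.tenantId == '0':
                free = tenant.capacity - tenant.consumedCapacity
                print('total=%s GB, free=%s GB' % (tenant.capacity, free))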