From c981616d59576afb36afe602d09f081a6d37edfd Mon Sep 17 00:00:00 2001 From: Alex Deiter Date: Thu, 26 Jul 2018 07:13:05 -0700 Subject: [PATCH] Refactored NexentaStor5 driver - Failover support for both NFS and iSCSI. - Host groups for iSCSI. - Control of ZFS parent/child dependencies causing datasets to remain on NexentaStor while being removed from cinder. - Support for iSCSI multipath. - Revert to snapshot for both NFS and iSCSI. - Fix for race condition on delete volume/snapshot. Change-Id: I1a756101dbca583584de4c478c612009fa9f4596 --- .../drivers/nexenta/test_nexenta5_iscsi.py | 1146 ++++++++++-- .../drivers/nexenta/test_nexenta5_jsonrpc.py | 1350 ++++++++++++-- .../drivers/nexenta/test_nexenta5_nfs.py | 1234 +++++++++++-- cinder/volume/drivers/nexenta/ns5/iscsi.py | 1638 +++++++++++++---- cinder/volume/drivers/nexenta/ns5/jsonrpc.py | 720 ++++++-- cinder/volume/drivers/nexenta/ns5/nfs.py | 1518 +++++++++++---- cinder/volume/drivers/nexenta/options.py | 76 +- cinder/volume/drivers/nexenta/utils.py | 58 +- ...astor5-driver-update-937d2a1ba76a504a.yaml | 34 + 9 files changed, 6307 insertions(+), 1467 deletions(-) create mode 100644 releasenotes/notes/nexentastor5-driver-update-937d2a1ba76a504a.yaml diff --git a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py index a94e7f0bac7..83a0a3a26bc 100644 --- a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py @@ -1,4 +1,4 @@ -# Copyright 2016 Nexenta Systems, Inc. +# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,235 +15,385 @@ """ Unit tests for OpenStack Cinder volume driver """ +import uuid import mock -from mock import patch from oslo_utils import units from cinder import context from cinder import db -from cinder import exception from cinder import test +from cinder.tests.unit.consistencygroup.fake_cgsnapshot import ( + fake_cgsnapshot_obj as fake_cgsnapshot) +from cinder.tests.unit.consistencygroup.fake_consistencygroup import ( + fake_consistencyobject_obj as fake_cgroup) +from cinder.tests.unit import fake_constants as fake +from cinder.tests.unit.fake_snapshot import fake_snapshot_obj as fake_snapshot +from cinder.tests.unit.fake_volume import fake_volume_obj as fake_volume from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import iscsi from cinder.volume.drivers.nexenta.ns5 import jsonrpc class TestNexentaISCSIDriver(test.TestCase): - TEST_VOLUME_NAME = 'volume1' - TEST_VOLUME_NAME2 = 'volume2' - TEST_VOLUME_NAME3 = 'volume3' - TEST_SNAPSHOT_NAME = 'snapshot1' - TEST_VOLUME_REF = { - 'name': TEST_VOLUME_NAME, - 'size': 1, - 'id': '1', - 'status': 'available' - } - TEST_VOLUME_REF2 = { - 'name': TEST_VOLUME_NAME2, - 'size': 1, - 'id': '2', - 'status': 'in-use' - } - TEST_VOLUME_REF3 = { - 'name': TEST_VOLUME_NAME3, - 'size': 2, - 'id': '2', - 'status': 'in-use' - } - TEST_SNAPSHOT_REF = { - 'name': TEST_SNAPSHOT_NAME, - 'volume_name': TEST_VOLUME_NAME, - 'volume_id': '1', - 'volume_size': 1 - } - - def __init__(self, method): - super(TestNexentaISCSIDriver, self).__init__(method) def setUp(self): super(TestNexentaISCSIDriver, self).setUp() - self.cfg = mock.Mock(spec=conf.Configuration) self.ctxt = context.get_admin_context() + self.cfg = mock.Mock(spec=conf.Configuration) + self.cfg.volume_backend_name = 'nexenta_iscsi' + 
self.cfg.nexenta_group_snapshot_template = 'group-snapshot-%s' + self.cfg.nexenta_origin_snapshot_template = 'origin-snapshot-%s' self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_host = '1.1.1.1' self.cfg.nexenta_user = 'admin' self.cfg.nexenta_password = 'nexenta' self.cfg.nexenta_volume = 'cinder' - self.cfg.nexenta_rest_port = 2000 + self.cfg.nexenta_rest_port = 8443 self.cfg.nexenta_use_https = False - self.cfg.nexenta_iscsi_target_portal_port = 8080 - self.cfg.nexenta_target_prefix = 'iqn:' - self.cfg.nexenta_target_group_prefix = 'cinder/' + self.cfg.nexenta_iscsi_target_portal_port = 3260 + self.cfg.nexenta_target_prefix = 'iqn:cinder' + self.cfg.nexenta_target_group_prefix = 'cinder' self.cfg.nexenta_ns5_blocksize = 32 self.cfg.nexenta_sparse = True + self.cfg.nexenta_lu_writebackcache_disabled = True self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.reserved_percentage = 20 + self.cfg.nexenta_host_group_prefix = 'hg' self.cfg.nexenta_volume = 'pool' - self.cfg.nexenta_volume_group = 'dsg' + self.cfg.driver_ssl_cert_verify = False + self.cfg.nexenta_luns_per_target = 20 + self.cfg.driver_ssl_cert_verify = False + self.cfg.nexenta_iscsi_target_portals = '1.1.1.1:3260,2.2.2.2:3260' + self.cfg.nexenta_iscsi_target_host_group = 'all' + self.cfg.nexenta_rest_address = '1.1.1.1' + self.cfg.nexenta_rest_backoff_factor = 1 + self.cfg.nexenta_rest_retry_count = 3 + self.cfg.nexenta_rest_connect_timeout = 1 + self.cfg.nexenta_rest_read_timeout = 1 + self.cfg.nexenta_volume_group = 'vg' + self.cfg.safe_get = self.fake_safe_get self.nef_mock = mock.Mock() - self.mock_object(jsonrpc, 'NexentaJSONProxy', + self.mock_object(jsonrpc, 'NefRequest', return_value=self.nef_mock) self.drv = iscsi.NexentaISCSIDriver( configuration=self.cfg) self.drv.db = db - self.drv._fetch_volumes = lambda: None self.drv.do_setup(self.ctxt) - def _create_volume_db_entry(self): - vol = { - 'id': '1', - 'size': 1, - 'status': 'available', - 'provider_location': self.TEST_VOLUME_NAME - } - return db.volume_create(self.ctxt, vol)['id'] + def fake_safe_get(self, key): + try: + value = getattr(self.cfg, key) + except AttributeError: + value = None + return value + + def fake_uuid4(): + return uuid.UUID('38d18a48-b791-4046-b523-a84aad966310') def test_do_setup(self): - self.nef_mock.post.side_effect = exception.NexentaException( - 'Could not create volume group') - self.assertRaises( - exception.NexentaException, - self.drv.do_setup, self.ctxt) - - self.nef_mock.post.side_effect = exception.NexentaException( - '{"code": "EEXIST"}') self.assertIsNone(self.drv.do_setup(self.ctxt)) - def test_check_for_setup_error(self): - self.nef_mock.get.return_value = { - 'data': [{'name': 'iscsit', 'state': 'offline'}]} - self.assertRaises( - exception.NexentaException, self.drv.check_for_setup_error) + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefServices.get') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumeGroups.create') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefVolumeGroups.get') + def test_check_for_setup_error(self, volume_group_get, + volume_group_create, + service_get): + path = self.drv.root_path + bs = self.cfg.nexenta_ns5_blocksize * units.Ki + name = 'iscsit' + state = 'online' + volume_group_get.return_value = {'path': path} + service_get.return_value = {'name': name, 'state': state} + self.assertIsNone(self.drv.check_for_setup_error()) + volume_group_get.assert_called_with(path) + service_get.assert_called_with(name) - self.nef_mock.get.side_effect = exception.NexentaException( - 'fake_exception') - self.assertRaises(LookupError, self.drv.check_for_setup_error) + volume_group_get.side_effect = jsonrpc.NefException({ + 'message': 'Failed to open dataset', + 'code': 'ENOENT' + }) + volume_group_create.return_value = {} + self.assertIsNone(self.drv.check_for_setup_error()) + volume_group_get.assert_called_with(path) + payload = {'path': path, 'volumeBlockSize': bs} + volume_group_create.assert_called_with(payload) + service_get.assert_called_with(name) - def test_create_volume(self): - self.drv.create_volume(self.TEST_VOLUME_REF) - url = 'storage/pools/pool/volumeGroups/dsg/volumes' - self.nef_mock.post.assert_called_with(url, { - 'name': self.TEST_VOLUME_REF['name'], - 'volumeSize': 1 * units.Gi, - 'volumeBlockSize': 32768, - 'sparseVolume': self.cfg.nexenta_sparse}) + state = 'offline' + volume_group_get.return_value = {'path': path} + service_get.return_value = {'name': name, 'state': state} + self.assertRaises(jsonrpc.NefException, + self.drv.check_for_setup_error) - def test_delete_volume(self): - self.drv.collect_zfs_garbage = lambda x: None - self.nef_mock.delete.side_effect = exception.NexentaException( - 'Failed to destroy snapshot') - self.assertIsNone(self.drv.delete_volume(self.TEST_VOLUME_REF)) - url = 'storage/pools/pool/volumeGroups' - data = {'name': 'dsg', 'volumeBlockSize': 32768} - self.nef_mock.post.assert_called_with(url, data) - - def test_extend_volume(self): - self.drv.extend_volume(self.TEST_VOLUME_REF, 2) - url = ('storage/pools/pool/volumeGroups/dsg/volumes/%(name)s') % { - 'name': self.TEST_VOLUME_REF['name']} - self.nef_mock.put.assert_called_with(url, { - 'volumeSize': 2 * units.Gi}) - - def test_delete_snapshot(self): - self._create_volume_db_entry() - url = ('storage/pools/pool/volumeGroups/dsg/' - 'volumes/volume-1/snapshots/snapshot1') - - self.nef_mock.delete.side_effect = exception.NexentaException('EBUSY') - self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) - self.nef_mock.delete.assert_called_with(url) - - self.nef_mock.delete.side_effect = exception.NexentaException('Error') - self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) - self.nef_mock.delete.assert_called_with(url) - - @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' - 'NexentaISCSIDriver.create_snapshot') - @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' - 'NexentaISCSIDriver.delete_snapshot') - @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 
- 'NexentaISCSIDriver.create_volume_from_snapshot') - def test_create_cloned_volume(self, crt_vol, dlt_snap, crt_snap): - self._create_volume_db_entry() - vol = self.TEST_VOLUME_REF2 - src_vref = self.TEST_VOLUME_REF - crt_vol.side_effect = exception.NexentaException('fake_exception') - dlt_snap.side_effect = exception.NexentaException('fake_exception') - self.assertRaises( - exception.NexentaException, - self.drv.create_cloned_volume, vol, src_vref) - - def test_create_snapshot(self): - self._create_volume_db_entry() - self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) - url = 'storage/pools/pool/volumeGroups/dsg/volumes/volume-1/snapshots' - self.nef_mock.post.assert_called_with( - url, {'name': 'snapshot1'}) - - def test_create_larger_volume_from_snapshot(self): - self._create_volume_db_entry() - vol = self.TEST_VOLUME_REF3 - src_vref = self.TEST_SNAPSHOT_REF - - self.drv.create_volume_from_snapshot(vol, src_vref) - - # make sure the volume get extended! - url = ('storage/pools/pool/volumeGroups/dsg/volumes/%(name)s') % { - 'name': self.TEST_VOLUME_REF3['name']} - self.nef_mock.put.assert_called_with(url, { - 'volumeSize': 2 * units.Gi}) - - def test_do_export(self): - target_name = 'new_target' - lun = 0 - - class GetSideEffect(object): - def __init__(self): - self.lm_counter = -1 - - def __call__(self, *args, **kwargs): - # Find out whether the volume is exported - if 'san/lunMappings?volume=' in args[0]: - self.lm_counter += 1 - # a value for the first call - if self.lm_counter == 0: - return {'data': []} - else: - return {'data': [{'lun': lun}]} - # Get the name of just created target - elif 'san/iscsi/targets' in args[0]: - return {'data': [{'name': target_name}]} - - def post_side_effect(*args, **kwargs): - if 'san/iscsi/targets' in args[0]: - return {'data': [{'name': target_name}]} - - self.nef_mock.get.side_effect = GetSideEffect() - self.nef_mock.post.side_effect = post_side_effect - res = self.drv._do_export(self.ctxt, self.TEST_VOLUME_REF) - provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { - 'host': self.cfg.nexenta_host, - 'port': self.cfg.nexenta_iscsi_target_portal_port, - 'name': target_name, - 'lun': lun, + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumes.create') + def test_create_volume(self, create_volume): + volume = fake_volume(self.ctxt) + self.assertIsNone(self.drv.create_volume(volume)) + path = self.drv._get_volume_path(volume) + size = volume['size'] * units.Gi + bs = self.cfg.nexenta_ns5_blocksize * units.Ki + payload = { + 'path': path, + 'volumeSize': size, + 'volumeBlockSize': bs, + 'compressionMode': self.cfg.nexenta_dataset_compression, + 'sparseVolume': self.cfg.nexenta_sparse } - expected = {'provider_location': provider_location} - self.assertEqual(expected, res) + create_volume.assert_called_with(payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumes.delete') + def test_delete_volume(self, delete_volume): + volume = fake_volume(self.ctxt) + self.assertIsNone(self.drv.delete_volume(volume)) + path = self.drv._get_volume_path(volume) + payload = {'snapshots': True} + delete_volume.assert_called_with(path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefVolumes.set') + def test_extend_volume(self, extend_volume): + volume = fake_volume(self.ctxt) + size = volume['size'] * 2 + self.assertIsNone(self.drv.extend_volume(volume, size)) + path = self.drv._get_volume_path(volume) + size = size * units.Gi + payload = {'volumeSize': size} + extend_volume.assert_called_with(path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.delete') + def test_delete_snapshot(self, delete_snapshot): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + delete_snapshot.return_value = {} + self.assertIsNone(self.drv.delete_snapshot(snapshot)) + path = self.drv._get_snapshot_path(snapshot) + payload = {'defer': True} + delete_snapshot.assert_called_with(path, payload) + + def test_snapshot_revert_use_temp_snapshot(self): + result = self.drv.snapshot_revert_use_temp_snapshot() + expected = False + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumes.rollback') + def test_revert_to_snapshot(self, rollback_volume): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + rollback_volume.return_value = {} + self.assertIsNone( + self.drv.revert_to_snapshot(self.ctxt, volume, snapshot) + ) + path = self.drv._get_volume_path(volume) + payload = {'snapshot': snapshot['name']} + rollback_volume.assert_called_with(path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' + 'NexentaISCSIDriver.delete_snapshot') + @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' + 'NexentaISCSIDriver.create_volume_from_snapshot') + @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' + 'NexentaISCSIDriver.create_snapshot') + def test_create_cloned_volume(self, create_snapshot, create_volume, + delete_snapshot): + volume = fake_volume(self.ctxt) + clone_spec = {'id': fake.VOLUME2_ID} + clone = fake_volume(self.ctxt, **clone_spec) + create_snapshot.return_value = {} + create_volume.return_value = {} + delete_snapshot.return_value = {} + self.assertIsNone(self.drv.create_cloned_volume(clone, volume)) + snapshot = { + 'name': self.drv.origin_snapshot_template % clone['id'], + 'volume_id': volume['id'], + 'volume_name': volume['name'], + 'volume_size': volume['size'] + } + create_snapshot.assert_called_with(snapshot) + create_volume.assert_called_with(clone, snapshot) + create_volume.side_effect = jsonrpc.NefException({ + 'message': 'Failed to create volume', + 'code': 'EBUSY' + }) + self.assertRaises(jsonrpc.NefException, + self.drv.create_cloned_volume, + clone, volume) + create_snapshot.side_effect = jsonrpc.NefException({ + 'message': 'Failed to open dataset', + 'code': 'ENOENT' + }) + self.assertRaises(jsonrpc.NefException, + self.drv.create_cloned_volume, + clone, volume) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.create') + def test_create_snapshot(self, create_snapshot): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + create_snapshot.return_value = {} + self.assertIsNone(self.drv.create_snapshot(snapshot)) + path = self.drv._get_snapshot_path(snapshot) + payload = {'path': path} + create_snapshot.assert_called_with(payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver.extend_volume') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefSnapshots.clone') + def test_create_volume_from_snapshot(self, clone_snapshot, + extend_volume): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + clone_size = 10 + clone_spec = { + 'id': fake.VOLUME2_ID, + 'size': clone_size + } + clone = fake_volume(self.ctxt, **clone_spec) + snapshot_path = self.drv._get_snapshot_path(snapshot) + clone_path = self.drv._get_volume_path(clone) + clone_snapshot.return_value = {} + extend_volume.return_value = None + self.assertIsNone( + self.drv.create_volume_from_snapshot(clone, snapshot) + ) + clone_payload = {'targetPath': clone_path} + clone_snapshot.assert_called_with(snapshot_path, clone_payload) + extend_volume.assert_called_with(clone, clone_size) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefLunMappings.list') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver._create_target_group') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver._create_target') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver._target_group_props') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver._get_host_portals') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver._get_host_group') + @mock.patch('uuid.uuid4', fake_uuid4) + def test_initialize_connection(self, get_host_group, get_host_portals, + get_target_group_props, create_target, + create_target_group, list_mappings): + volume = fake_volume(self.ctxt) + host_iqn = 'iqn:cinder-client' + target_iqn = 'iqn:cinder-target' + connector = {'initiator': host_iqn, 'multipath': True} + host_group = 'cinder-host-group' + target_group = 'cinder-target-group' + target_portals = self.cfg.nexenta_iscsi_target_portals.split(',') + get_host_group.return_value = host_group + get_host_portals.return_value = { + target_iqn: target_portals + } + list_mappings.return_value = [{ + 'id': '309F9B9013CF627A00000000', + 'lun': 0, + 'hostGroup': host_group, + 'targetGroup': target_group + }] + get_target_group_props.return_value = { + target_iqn: target_portals + } + create_target.return_value = {} + create_target_group.return_value = {} + result = self.drv.initialize_connection(volume, connector) + expected = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': False, + 'encrypted': False, + 'qos_specs': None, + 'target_luns': [0] * len(target_portals), + 'access_mode': 'rw', + 'volume_id': volume['id'], + 'target_portals': target_portals, + 'target_iqns': [target_iqn] * len(target_portals) + } + } + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefLunMappings.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefLunMappings.list') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'iscsi.NexentaISCSIDriver._get_host_group') + def test_terminate_connection(self, get_host_group, + list_mappings, delete_mapping): + volume = fake_volume(self.ctxt) + host_group = 'cinder-host-group' + target_group = 'cinder-target-group' + connector = {'initiator': 'iqn:test'} + get_host_group.return_value = host_group + list_mappings.return_value = [{ + 'id': '309F9B9013CF627A00000000', + 'lun': 0, + 'hostGroup': host_group, + 'targetGroup': target_group + }] + delete_mapping.return_value = {} + expected = {'driver_volume_type': 'iscsi', 'data': {}} + result = self.drv.terminate_connection(volume, connector) + self.assertEqual(expected, result) + + def test_create_export(self): + volume = fake_volume(self.ctxt) + connector = {'initiator': 'iqn:test'} + self.assertIsNone( + self.drv.create_export(self.ctxt, volume, connector) + ) + + def test_ensure_export(self): + volume = fake_volume(self.ctxt) + self.assertIsNone( + self.drv.ensure_export(self.ctxt, volume) + ) def test_remove_export(self): - mapping_id = '1234567890' - self.nef_mock.get.return_value = {'data': [{'id': mapping_id}]} - self.drv.remove_export(self.ctxt, self.TEST_VOLUME_REF) - url = 'san/lunMappings/%s' % mapping_id - self.nef_mock.delete.assert_called_with(url) + volume = fake_volume(self.ctxt) + self.assertIsNone( + self.drv.remove_export(self.ctxt, volume) + ) - def test_update_volume_stats(self): - self.nef_mock.get.return_value = { - 'bytesAvailable': 10 * units.Gi, - 'bytesUsed': 2 * units.Gi + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumeGroups.get') + def test_get_volume_stats(self, get_volume_group): + available = 100 + used = 75 + get_volume_group.return_value = { + 'bytesAvailable': available * units.Gi, + 'bytesUsed': used * units.Gi + } + result = self.drv.get_volume_stats(True) + payload = {'fields': 'bytesAvailable,bytesUsed'} + get_volume_group.assert_called_with(self.drv.root_path, payload) + self.assertEqual(self.drv._stats, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefVolumeGroups.get') + def test_update_volume_stats(self, get_volume_group): + available = 8 + used = 2 + get_volume_group.return_value = { + 'bytesAvailable': available * units.Gi, + 'bytesUsed': used * units.Gi } location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % { 'driver': self.drv.__class__.__name__, @@ -251,22 +401,638 @@ class TestNexentaISCSIDriver(test.TestCase): 'pool': self.cfg.nexenta_volume, 'group': self.cfg.nexenta_volume_group, } - stats = { + expected = { 'vendor_name': 'Nexenta', 'dedup': self.cfg.nexenta_dataset_dedup, 'compression': self.cfg.nexenta_dataset_compression, 'description': self.cfg.nexenta_dataset_description, 'driver_version': self.drv.VERSION, 'storage_protocol': 'iSCSI', - 'total_capacity_gb': 10, - 'free_capacity_gb': 8, + 'sparsed_volumes': self.cfg.nexenta_sparse, + 'total_capacity_gb': used + available, + 'free_capacity_gb': available, 'reserved_percentage': self.cfg.reserved_percentage, 'QoS_support': False, - 'volume_backend_name': self.drv.backend_name, + 'multiattach': True, + 'consistencygroup_support': True, + 'consistent_group_snapshot_enabled': True, + 'volume_backend_name': self.cfg.volume_backend_name, 'location_info': location_info, 'iscsi_target_portal_port': ( self.cfg.nexenta_iscsi_target_portal_port), - 'nef_url': self.drv.nef.url + 'nef_url': self.cfg.nexenta_rest_address, + 'nef_port': self.cfg.nexenta_rest_port } - self.drv._update_volume_stats() - self.assertEqual(stats, self.drv._stats) + self.assertIsNone(self.drv._update_volume_stats()) + self.assertEqual(expected, self.drv._stats) + + def test__get_volume_path(self): + volume = fake_volume(self.ctxt) + result = self.drv._get_volume_path(volume) + expected = '%s/%s/%s' % (self.cfg.nexenta_volume, + self.cfg.nexenta_volume_group, + volume['name']) + self.assertEqual(expected, result) + + def test__get_snapshot_path(self): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + result = self.drv._get_snapshot_path(snapshot) + expected = '%s/%s/%s@%s' % (self.cfg.nexenta_volume, + self.cfg.nexenta_volume_group, + snapshot['volume_name'], + snapshot['name']) + self.assertEqual(expected, result) + + def test__get_target_group_name(self): + target_iqn = '%s-test' % self.cfg.nexenta_target_prefix + result = self.drv._get_target_group_name(target_iqn) + expected = '%s-test' % self.cfg.nexenta_target_group_prefix + self.assertEqual(expected, result) + + def test__get_target_name(self): + target_group = '%s-test' % self.cfg.nexenta_target_group_prefix + result = self.drv._get_target_name(target_group) + expected = '%s-test' % self.cfg.nexenta_target_prefix + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefNetAddresses.list') + def test__get_host_addresses(self, list_addresses): + expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] + return_value = [] + for address in expected: + return_value.append({ + 'addressType': 'static', + 'address': '%s/24' % address + }) + list_addresses.return_value = return_value + result = self.drv._get_host_addresses() + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver._get_host_addresses') + def test__get_host_portals(self, list_addresses): + list_addresses.return_value = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] + expected = ['1.1.1.1:3260', '2.2.2.2:3260'] + result = self.drv._get_host_portals() + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefTargets.list') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefTargetsGroups.list') + def test__target_group_props(self, list_target_groups, list_targets): + host_portals = ['1.1.1.1:3260', '2.2.2.2:3260'] + target_group = 'cinder-test' + list_target_groups.return_value = [{ + 'name': target_group, + 'members': [ + 'iqn:cinder-test' + ] + }] + list_targets.return_value = [{ + 'name': 'iqn:cinder-test', + 'portals': [ + { + 'address': '1.1.1.1', + 'port': 3260 + }, + { + 'address': '2.2.2.2', + 'port': 3260 + } + ] + }] + expected = {'iqn:cinder-test': host_portals} + result = self.drv._target_group_props(target_group, host_portals) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefTargetsGroups.create') + def test__create_target_group(self, create_target_group): + name = 'name' + members = ['a', 'b', 'c'] + create_target_group.return_value = {} + self.assertIsNone(self.drv._create_target_group(name, members)) + payload = {'name': name, 'members': members} + create_target_group.assert_called_with(payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefTargetsGroups.set') + def test__update_target_group(self, update_target_group): + name = 'name' + members = ['a', 'b', 'c'] + update_target_group.return_value = {} + self.assertIsNone(self.drv._update_target_group(name, members)) + payload = {'members': members} + update_target_group.assert_called_with(name, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefLunMappings.delete') + def test__delete_lun_mapping(self, delete_mapping): + name = 'name' + delete_mapping.return_value = {} + self.assertIsNone(self.drv._delete_lun_mapping(name)) + delete_mapping.assert_called_with(name) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefTargets.create') + def test__create_target(self, create_target): + name = 'name' + portals = ['1.1.1.1:3260', '2.2.2.2:3260'] + create_target.return_value = {} + self.assertIsNone(self.drv._create_target(name, portals)) + payload = { + 'name': name, + 'portals': [ + { + 'address': '1.1.1.1', + 'port': 3260 + }, + { + 'address': '2.2.2.2', + 'port': 3260 + } + ] + } + create_target.assert_called_with(payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefHostGroups.list') + def test__get_host_group(self, get_hostgroup): + member = 'member1' + get_hostgroup.return_value = [ + { + 'name': 'name1', + 'members': [ + 'member1', + 'member2', + 'member3' + ] + }, + { + 'name': 'name2', + 'members': [ + 'member4', + 'member5', + 'member6' + ] + } + ] + expected = 'name1' + result = self.drv._get_host_group(member) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefHostGroups.create') + def test__create_host_group(self, create_host_group): + name = 'name' + members = ['a', 'b', 'c'] + create_host_group.return_value = {} + self.assertIsNone(self.drv._create_host_group(name, members)) + payload = {'name': name, 'members': members} + create_host_group.assert_called_with(payload) + + def test__s2d(self): + portals = ['1.1.1.1:3260', '2.2.2.2:3260'] + expected = [ + { + 'address': '1.1.1.1', + 'port': 3260 + }, + { + 'address': '2.2.2.2', + 'port': 3260 + } + ] + result = self.drv._s2d(portals) + self.assertEqual(expected, result) + + def test__d2s(self): + portals = [ + { + 'address': '1.1.1.1', + 'port': 3260 + }, + { + 'address': '2.2.2.2', + 'port': 3260 + } + ] + expected = ['1.1.1.1:3260', '2.2.2.2:3260'] + result = self.drv._d2s(portals) + self.assertEqual(expected, result) + + def test_create_consistencygroup(self): + cgroup = fake_cgroup(self.ctxt) + result = self.drv.create_consistencygroup(self.ctxt, cgroup) + expected = {} + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver.delete_volume') + def test_delete_consistencygroup(self, delete_volume): + cgroup = fake_cgroup(self.ctxt) + volume1 = fake_volume(self.ctxt) + volume2_spec = {'id': fake.VOLUME2_ID} + volume2 = fake_volume(self.ctxt, **volume2_spec) + volumes = [volume1, volume2] + delete_volume.return_value = {} + result = self.drv.delete_consistencygroup(self.ctxt, + cgroup, + volumes) + expected = ({}, []) + self.assertEqual(expected, result) + + def test_update_consistencygroup(self): + cgroup = fake_cgroup(self.ctxt) + volume1 = fake_volume(self.ctxt) + volume2_spec = {'id': fake.VOLUME2_ID} + volume2 = fake_volume(self.ctxt, **volume2_spec) + volume3_spec = {'id': fake.VOLUME3_ID} + volume3 = fake_volume(self.ctxt, **volume3_spec) + volume4_spec = {'id': fake.VOLUME4_ID} + volume4 = fake_volume(self.ctxt, **volume4_spec) + add_volumes = [volume1, volume2] + remove_volumes = [volume3, volume4] + result = self.drv.update_consistencygroup(self.ctxt, + cgroup, + add_volumes, + remove_volumes) + expected = ({}, [], []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.rename') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefSnapshots.create') + def test_create_cgsnapshot(self, create_snapshot, + rename_snapshot, + delete_snapshot): + cgsnapshot = fake_cgsnapshot(self.ctxt) + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + cgsnapshot_name = ( + self.cfg.nexenta_group_snapshot_template % cgsnapshot['id']) + cgsnapshot_path = '%s@%s' % (self.drv.root_path, cgsnapshot_name) + snapshot_path = '%s/%s@%s' % (self.drv.root_path, + snapshot['volume_name'], + cgsnapshot_name) + create_snapshot.return_value = {} + rename_snapshot.return_value = {} + delete_snapshot.return_value = {} + result = self.drv.create_cgsnapshot(self.ctxt, + cgsnapshot, + snapshots) + create_payload = {'path': cgsnapshot_path, 'recursive': True} + create_snapshot.assert_called_with(create_payload) + rename_payload = {'newName': snapshot['name']} + rename_snapshot.assert_called_with(snapshot_path, rename_payload) + delete_payload = {'defer': True, 'recursive': True} + delete_snapshot.assert_called_with(cgsnapshot_path, delete_payload) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver.delete_snapshot') + def test_delete_cgsnapshot(self, delete_snapshot): + cgsnapshot = fake_cgsnapshot(self.ctxt) + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + delete_snapshot.return_value = {} + result = self.drv.delete_cgsnapshot(self.ctxt, + cgsnapshot, + snapshots) + delete_snapshot.assert_called_with(snapshot) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver.create_volume_from_snapshot') + def test_create_consistencygroup_from_src_snapshots(self, create_volume): + cgroup = fake_cgroup(self.ctxt) + cgsnapshot = fake_cgsnapshot(self.ctxt) + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + clone_spec = {'id': fake.VOLUME2_ID} + clone = fake_volume(self.ctxt, **clone_spec) + clones = [clone] + create_volume.return_value = {} + result = self.drv.create_consistencygroup_from_src(self.ctxt, cgroup, + clones, cgsnapshot, + snapshots, None, + None) + create_volume.assert_called_with(clone, snapshot) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver.create_volume_from_snapshot') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefSnapshots.create') + def test_create_consistencygroup_from_src_volumes(self, + create_snapshot, + create_volume, + delete_snapshot): + src_cgroup = fake_cgroup(self.ctxt) + dst_cgroup_spec = {'id': fake.CONSISTENCY_GROUP2_ID} + dst_cgroup = fake_cgroup(self.ctxt, **dst_cgroup_spec) + src_volume = fake_volume(self.ctxt) + src_volumes = [src_volume] + dst_volume_spec = {'id': fake.VOLUME2_ID} + dst_volume = fake_volume(self.ctxt, **dst_volume_spec) + dst_volumes = [dst_volume] + create_snapshot.return_value = {} + create_volume.return_value = {} + delete_snapshot.return_value = {} + result = self.drv.create_consistencygroup_from_src(self.ctxt, + dst_cgroup, + dst_volumes, + None, None, + src_cgroup, + src_volumes) + snapshot_name = ( + self.cfg.nexenta_origin_snapshot_template % dst_cgroup['id']) + snapshot_path = '%s@%s' % (self.drv.root_path, snapshot_name) + create_payload = {'path': snapshot_path, 'recursive': True} + create_snapshot.assert_called_with(create_payload) + snapshot = { + 'name': snapshot_name, + 'volume_id': src_volume['id'], + 'volume_name': src_volume['name'], + 'volume_size': src_volume['size'] + } + create_volume.assert_called_with(dst_volume, snapshot) + delete_payload = {'defer': True, 'recursive': True} + delete_snapshot.assert_called_with(snapshot_path, delete_payload) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumes.list') + def test__get_existing_volume(self, list_volumes): + volume = fake_volume(self.ctxt) + parent = self.drv.root_path + name = volume['name'] + size = volume['size'] + path = self.drv._get_volume_path(volume) + list_volumes.return_value = [{ + 'name': name, + 'path': path, + 'volumeSize': size * units.Gi + }] + result = self.drv._get_existing_volume({'source-name': name}) + payload = { + 'parent': parent, + 'fields': 'name,path,volumeSize', + 'name': name + } + list_volumes.assert_called_with(payload) + expected = { + 'name': name, + 'path': path, + 'size': size + } + self.assertEqual(expected, result) + + def test__check_already_managed_snapshot(self): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + result = self.drv._check_already_managed_snapshot(snapshot) + expected = False + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.list') + def test__get_existing_snapshot(self, list_snapshots): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + name = snapshot['name'] + path = self.drv._get_snapshot_path(snapshot) + parent = self.drv._get_volume_path(volume) + list_snapshots.return_value = [{ + 'name': name, + 'path': path + }] + payload = {'source-name': name} + result = self.drv._get_existing_snapshot(snapshot, payload) + payload = { + 'parent': parent, + 'fields': 'name,path', + 'recursive': False, + 'name': name + } + list_snapshots.assert_called_with(payload) + expected = { + 'name': name, + 'path': path, + 'volume_name': volume['name'], + 'volume_size': volume['size'] + } + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumes.rename') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefLunMappings.list') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'iscsi.NexentaISCSIDriver._get_existing_volume') + def test_manage_existing(self, get_existing_volume, + list_mappings, rename_volume): + existing_volume = fake_volume(self.ctxt) + manage_volume_spec = {'id': fake.VOLUME2_ID} + manage_volume = fake_volume(self.ctxt, **manage_volume_spec) + existing_name = existing_volume['name'] + existing_path = self.drv._get_volume_path(existing_volume) + existing_size = existing_volume['size'] + manage_path = self.drv._get_volume_path(manage_volume) + get_existing_volume.return_value = { + 'name': existing_name, + 'path': existing_path, + 'size': existing_size + } + list_mappings.return_value = [] + payload = {'source-name': existing_name} + self.assertIsNone(self.drv.manage_existing(manage_volume, payload)) + get_existing_volume.assert_called_with(payload) + payload = {'volume': existing_path} + list_mappings.assert_called_with(payload) + payload = {'newPath': manage_path} + rename_volume.assert_called_with(existing_path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' + 'NexentaISCSIDriver._get_existing_volume') + def test_manage_existing_get_size(self, get_volume): + volume = fake_volume(self.ctxt) + name = volume['name'] + size = volume['size'] + path = self.drv._get_volume_path(volume) + get_volume.return_value = { + 'name': name, + 'path': path, + 'size': size + } + payload = {'source-name': name} + result = self.drv.manage_existing_get_size(volume, payload) + expected = size + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefVolumes.list') + def test_get_manageable_volumes(self, list_volumes): + volume = fake_volume(self.ctxt) + volumes = [volume] + name = volume['name'] + size = volume['size'] + path = self.drv._get_volume_path(volume) + guid = 12345 + parent = self.drv.root_path + list_volumes.return_value = [{ + 'name': name, + 'path': path, + 'guid': guid, + 'volumeSize': size * units.Gi + }] + result = self.drv.get_manageable_volumes(volumes, None, 1, + 0, 'size', 'asc') + payload = { + 'parent': parent, + 'fields': 'name,guid,path,volumeSize', + 'recursive': False + } + list_volumes.assert_called_with(payload) + expected = [{ + 'cinder_id': volume['id'], + 'extra_info': None, + 'reason_not_safe': 'Volume already managed', + 'reference': { + 'source-guid': guid, + 'source-name': volume['name'] + }, + 'safe_to_manage': False, + 'size': volume['size'] + }] + self.assertEqual(expected, result) + + def test_unmanage(self): + volume = fake_volume(self.ctxt) + self.assertIsNone(self.drv.unmanage(volume)) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.rename') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'iscsi.NexentaISCSIDriver._get_existing_snapshot') + def test_manage_existing_snapshot(self, get_existing_snapshot, + rename_snapshot): + volume = fake_volume(self.ctxt) + existing_snapshot = fake_snapshot(self.ctxt) + existing_snapshot.volume = volume + manage_snapshot_spec = {'id': fake.SNAPSHOT2_ID} + manage_snapshot = fake_snapshot(self.ctxt, **manage_snapshot_spec) + manage_snapshot.volume = volume + existing_name = existing_snapshot['name'] + manage_name = manage_snapshot['name'] + volume_name = volume['name'] + volume_size = volume['size'] + existing_path = self.drv._get_snapshot_path(existing_snapshot) + get_existing_snapshot.return_value = { + 'name': existing_name, + 'path': existing_path, + 'volume_name': volume_name, + 'volume_size': volume_size + } + rename_snapshot.return_value = {} + payload = {'source-name': existing_name} + self.assertIsNone( + self.drv.manage_existing_snapshot(manage_snapshot, payload) + ) + get_existing_snapshot.assert_called_with(manage_snapshot, payload) + payload = {'newName': manage_name} + rename_snapshot.assert_called_with(existing_path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'iscsi.NexentaISCSIDriver._get_existing_snapshot') + def test_manage_existing_snapshot_get_size(self, get_snapshot): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshot_name = snapshot['name'] + volume_name = volume['name'] + volume_size = volume['size'] + snapshot_path = self.drv._get_snapshot_path(snapshot) + get_snapshot.return_value = { + 'name': snapshot_name, + 'path': snapshot_path, + 'volume_name': volume_name, + 'volume_size': volume_size + } + payload = {'source-name': snapshot_name} + result = self.drv.manage_existing_snapshot_get_size(volume, payload) + expected = volume['size'] + self.assertEqual(expected, result) + + @mock.patch('cinder.objects.VolumeList.get_all_by_host') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefSnapshots.list') + def test_get_manageable_snapshots(self, list_snapshots, list_volumes): + volume = fake_volume(self.ctxt) + volumes = [volume] + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + guid = 12345 + name = snapshot['name'] + path = self.drv._get_snapshot_path(snapshot) + parent = self.drv._get_volume_path(volume) + list_snapshots.return_value = [{ + 'name': name, + 'path': path, + 'guid': guid, + 'parent': parent, + 'hprService': '', + 'snaplistId': '' + }] + list_volumes.return_value = volumes + result = self.drv.get_manageable_snapshots(snapshots, None, 1, + 0, 'size', 'asc') + payload = { + 'parent': self.drv.root_path, + 'fields': 'name,guid,path,parent,hprService,snaplistId', + 'recursive': True + } + list_snapshots.assert_called_with(payload) + expected = [{ + 'cinder_id': snapshot['id'], + 'extra_info': None, + 'reason_not_safe': 'Snapshot already managed', + 'source_reference': { + 'name': volume['name'] + }, + 'reference': { + 'source-guid': guid, + 'source-name': snapshot['name'] + }, + 'safe_to_manage': False, + 'size': volume['size'] + }] + self.assertEqual(expected, result) + + def test_unmanage_snapshot(self): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + self.assertIsNone(self.drv.unmanage_snapshot(snapshot)) diff --git a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py index 82fbfc69ca8..8203c5ec8d1 100644 --- a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py +++ b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py @@ -1,4 +1,4 @@ -# Copyright 2016 Nexenta Systems, Inc. +# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,243 +16,1195 @@ Unit tests for NexentaStor 5 REST API helper """ +import copy +import hashlib +import json +import posixpath import uuid import mock -from mock import patch -from oslo_serialization import jsonutils import requests -from requests import adapters -from six.moves import http_client +import six -from cinder import exception from cinder import test +from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import jsonrpc -HOST = '1.1.1.1' -USERNAME = 'user' -PASSWORD = 'pass' + +class FakeNefProxy(object): + + def __init__(self): + self.scheme = 'https' + self.port = 8443 + self.hosts = ['1.1.1.1', '2.2.2.2'] + self.host = self.hosts[0] + self.root = 'pool/share' + self.username = 'username' + self.password = 'password' + self.retries = 3 + self.timeout = 5 + self.session = mock.Mock() + self.session.headers = {} + + def __getattr__(self, name): + pass + + def delay(self, interval): + pass + + def delete_bearer(self): + pass + + def update_lock(self): + pass + + def update_token(self, token): + pass + + def update_host(self, host): + pass + + def url(self, path): + return '%s://%s:%s/%s' % (self.scheme, self.host, self.port, path) -def gen_response(code=http_client.OK, json=None): - r = requests.Response() - r.headers['Content-Type'] = 'application/json' - r.encoding = 'utf8' - r.status_code = code - r.reason = 'FAKE REASON' - r.raw = mock.Mock() - r._content = '' - if json: - r._content = jsonutils.dumps(json) - return r +class TestNefException(test.TestCase): + + def test_message(self): + message = 'test message 1' + result = jsonrpc.NefException(message) + self.assertIn(message, result.msg) + + def test_message_kwargs(self): + code = 'EAGAIN' + message = 'test message 2' + result = jsonrpc.NefException(message, code=code) + self.assertEqual(code, result.code) + self.assertIn(message, result.msg) + + def test_no_message_kwargs(self): + code = 'ESRCH' + message = 'test message 3' + result = jsonrpc.NefException(None, code=code, message=message) + self.assertEqual(code, result.code) + self.assertIn(message, result.msg) + + def test_message_plus_kwargs(self): + code = 'ENODEV' + message1 = 'test message 4' + message2 = 'test message 5' + result = jsonrpc.NefException(message1, code=code, message=message2) + self.assertEqual(code, result.code) + self.assertIn(message2, result.msg) + + def test_dict(self): + code = 'ENOENT' + message = 'test message 4' + result = jsonrpc.NefException({'code': code, 'message': message}) + self.assertEqual(code, result.code) + self.assertIn(message, result.msg) + + def test_kwargs(self): + code = 'EPERM' + message = 'test message 5' + result = jsonrpc.NefException(code=code, message=message) + self.assertEqual(code, result.code) + self.assertIn(message, result.msg) + + def test_dict_kwargs(self): + code = 'EINVAL' + message = 'test message 6' + result = jsonrpc.NefException({'code': code}, message=message) + self.assertEqual(code, result.code) + self.assertIn(message, result.msg) + + def test_defaults(self): + code = 'EBADMSG' + message = 'NexentaError' + result = jsonrpc.NefException() + self.assertEqual(code, result.code) + self.assertIn(message, result.msg) -class TestNexentaJSONProxyAuth(test.TestCase): - - @patch('cinder.volume.drivers.nexenta.ns5.jsonrpc.requests.post') - def test_https_auth(self, post): - use_https = True - port = 8443 - auth_uri = 'auth/login' - rnd_url = 'some/random/url' - - class PostSideEffect(object): - def 
__call__(self, *args, **kwargs): - r = gen_response() - if args[0] == '%(scheme)s://%(host)s:%(port)s/%(uri)s' % { - 'scheme': 'https', - 'host': HOST, - 'port': port, - 'uri': auth_uri}: - token = uuid.uuid4().hex - content = {'token': token} - r._content = jsonutils.dumps(content) - return r - post_side_effect = PostSideEffect() - post.side_effect = post_side_effect - - class TestAdapter(adapters.HTTPAdapter): - - def __init__(self): - super(TestAdapter, self).__init__() - self.counter = 0 - - def send(self, request, *args, **kwargs): - # an url is being requested for the second time - if self.counter == 1: - # make the fake backend respond 401 - r = gen_response(http_client.UNAUTHORIZED) - r._content = '' - r.connection = mock.Mock() - r_ = gen_response(json={'data': []}) - r.connection.send = lambda prep, **kwargs_: r_ - else: - r = gen_response(json={'data': []}) - r.request = request - self.counter += 1 - return r - - nef = jsonrpc.NexentaJSONProxy(HOST, port, USERNAME, PASSWORD, - use_https) - adapter = TestAdapter() - nef.session.mount( - '%(scheme)s://%(host)s:%(port)s/%(uri)s' % { - 'scheme': 'https', - 'host': HOST, - 'port': port, - 'uri': rnd_url}, - adapter) - - # successful authorization - self.assertEqual({'data': []}, nef.get(rnd_url)) - - # session timeout simulation. Client must authenticate newly - self.assertEqual({'data': []}, nef.get(rnd_url)) - # auth URL must be requested two times at this moment - self.assertEqual(2, post.call_count) - - # continue with the last (second) token - self.assertEqual(nef.get(rnd_url), {'data': []}) - # auth URL must be requested two times - self.assertEqual(2, post.call_count) - - -class TestNexentaJSONProxy(test.TestCase): +class TestNefRequest(test.TestCase): def setUp(self): - super(TestNexentaJSONProxy, self).setUp() - self.nef = jsonrpc.NexentaJSONProxy(HOST, 0, USERNAME, PASSWORD, False) + super(TestNefRequest, self).setUp() + self.proxy = FakeNefProxy() - def gen_adapter(self, code, json=None): - class TestAdapter(adapters.HTTPAdapter): + def fake_response(self, method, path, payload, code, content): + request = requests.PreparedRequest() + request.method = method + request.url = self.proxy.url(path) + request.headers = {'Content-Type': 'application/json'} + request.body = None + if method in ['get', 'delete']: + request.params = payload + elif method in ['put', 'post']: + request.data = json.dumps(payload) + response = requests.Response() + response.request = request + response.status_code = code + if content: + response._content = json.dumps(content) + else: + response._content = '' + return response - def __init__(self): - super(TestAdapter, self).__init__() + def test___call___invalid_method(self): + method = 'unsupported' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + self.assertRaises(jsonrpc.NefException, instance, path) - def send(self, request, *args, **kwargs): - r = gen_response(code, json) - r.request = request - return r + def test___call___none_path(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + self.assertRaises(jsonrpc.NefException, instance, None) - return TestAdapter() + def test___call___empty_path(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + self.assertRaises(jsonrpc.NefException, instance, '') - def _mount_adapter(self, url, adapter): - self.nef.session.mount( - '%(scheme)s://%(host)s:%(port)s/%(uri)s' % { - 'scheme': 'http', - 'host': HOST, - 'port': 8080, - 'uri': url}, - adapter) + 
@mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___get(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {} + content = {'name': 'snapshot'} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + request.assert_called_with(method, path) + self.assertEqual(content, result) - def test_post(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.post(rnd_url)) + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___get_payload(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'name': 'snapshot'} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + params = {'params': payload} + request.assert_called_with(method, path, **params) + self.assertEqual(content, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___get_data_payload(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + data = [ + { + 'name': 'fs1', + 'path': 'pool/fs1' + }, + { + 'name': 'fs2', + 'path': 'pool/fs2' + } + ] + content = {'data': data} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + instance.data = data + result = instance(path, payload) + params = {'params': payload} + request.assert_called_with(method, path, **params) + self.assertEqual(data, result) + + def test___call___get_invalid_payload(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = 'bad data' + self.assertRaises(jsonrpc.NefException, instance, path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___delete(self, request): + method = 'delete' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {} + content = {'name': 'snapshot'} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + request.assert_called_with(method, path) + self.assertEqual(content, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___delete_payload(self, request): + method = 'delete' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'name': 'snapshot'} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + params = {'params': payload} + request.assert_called_with(method, path, **params) + self.assertEqual(content, result) + + def test___call___delete_invalid_payload(self): + method = 'delete' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = 'bad data' + self.assertRaises(jsonrpc.NefException, instance, path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefRequest.request') + def test___call___post(self, request): + method = 'post' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {} + content = None + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + request.assert_called_with(method, path) + self.assertEqual(content, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___post_payload(self, request): + method = 'post' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = None + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + params = {'data': json.dumps(payload)} + request.assert_called_with(method, path, **params) + self.assertEqual(content, result) + + def test___call___post_invalid_payload(self): + method = 'post' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = 'bad data' + self.assertRaises(jsonrpc.NefException, instance, path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___put(self, request): + method = 'put' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {} + content = None + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + request.assert_called_with(method, path) + self.assertEqual(content, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___put_payload(self, request): + method = 'put' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = None + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance(path, payload) + params = {'data': json.dumps(payload)} + request.assert_called_with(method, path, **params) + self.assertEqual(content, result) + + def test___call___put_invalid_payload(self): + method = 'put' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = 'bad data' + self.assertRaises(jsonrpc.NefException, instance, path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___non_ok_response(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'ENOENT', 'message': 'error'} + response = self.fake_response(method, path, payload, 500, content) + request.return_value = response + self.assertRaises(jsonrpc.NefException, instance, path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.failover') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefRequest.request') + def test___call___request_after_failover(self, request, failover): + method = 'post' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = None + response = self.fake_response(method, path, payload, 200, content) + request.side_effect = [requests.exceptions.Timeout, response] + failover.return_value = True + result = instance(path, payload) + params = {'data': json.dumps(payload)} + request.assert_called_with(method, path, **params) + self.assertEqual(content, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.failover') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test___call___request_failover_error(self, request, failover): + method = 'put' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + request.side_effect = requests.exceptions.Timeout + failover.return_value = False + self.assertRaises(requests.exceptions.Timeout, instance, path, payload) + + def test_hook_default(self): + method = 'post' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'name': 'dataset'} + response = self.fake_response(method, path, payload, 303, content) + result = instance.hook(response) + self.assertEqual(response, result) + + def test_hook_200_empty(self): + method = 'delete' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'storage/filesystems' + payload = {'force': True} + content = None + response = self.fake_response(method, path, payload, 200, content) + result = instance.hook(response) + self.assertEqual(response, result) + + def test_hook_201_empty(self): + method = 'post' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'storage/snapshots' + payload = {'path': 'parent/child@name'} + content = None + response = self.fake_response(method, path, payload, 201, content) + result = instance.hook(response) + self.assertEqual(response, result) + + def test_hook_500_empty(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'storage/pools' + payload = {'poolName': 'tank'} + content = None + response = self.fake_response(method, path, payload, 500, content) + self.assertRaises(jsonrpc.NefException, instance.hook, response) + + def test_hook_200_bad_content(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'storage/volumes' + payload = {'name': 'test'} + content = None + response = self.fake_response(method, path, payload, 200, content) + response._content = 'bad_content' + self.assertRaises(jsonrpc.NefException, instance.hook, response) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefRequest.auth') + def test_hook_401(self, auth, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'EAUTH'} + response = self.fake_response(method, path, payload, 401, content) + auth.return_value = True + content2 = {'name': 'test'} + response2 = self.fake_response(method, path, payload, 200, content2) + request.return_value = response2 + self.proxy.session.send.return_value = content2 + result = instance.hook(response) + self.assertEqual(content2, result) + + def test_hook_401_max_retries(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + instance.stat[401] = self.proxy.retries + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'EAUTH'} + response = self.fake_response(method, path, payload, 401, content) + self.assertRaises(jsonrpc.NefException, instance.hook, response) + + def test_hook_404_nested(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + instance.lock = True + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'ENOENT'} + response = self.fake_response(method, path, payload, 404, content) + result = instance.hook(response) + self.assertEqual(response, result) + + def test_hook_404_max_retries(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + instance.stat[404] = self.proxy.retries + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'ENOENT'} + response = self.fake_response(method, path, payload, 404, content) + self.assertRaises(jsonrpc.NefException, instance.hook, response) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.failover') + def test_hook_404_failover_error(self, failover): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'ENOENT'} + response = self.fake_response(method, path, payload, 404, content) + failover.return_value = False + result = instance.hook(response) + self.assertEqual(response, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefRequest.failover') + def test_hook_404_failover_ok(self, failover, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'ENOENT'} + response = self.fake_response(method, path, payload, 404, content) + failover.return_value = True + content2 = {'name': 'test'} + response2 = self.fake_response(method, path, payload, 200, content2) + request.return_value = response2 + result = instance.hook(response) + self.assertEqual(response2, result) + + def test_hook_500_permanent(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'EINVAL'} + response = self.fake_response(method, path, payload, 500, content) + self.assertRaises(jsonrpc.NefException, instance.hook, response) + + def test_hook_500_busy_max_retries(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + instance.stat[500] = self.proxy.retries + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'EBUSY'} + response = self.fake_response(method, path, payload, 500, content) + self.assertRaises(jsonrpc.NefException, instance.hook, response) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test_hook_500_busy_ok(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'code': 'EBUSY'} + response = self.fake_response(method, path, payload, 500, content) + content2 = {'name': 'test'} + response2 = self.fake_response(method, path, payload, 200, content2) + request.return_value = response2 + result = instance.hook(response) + self.assertEqual(response2, result) + + def test_hook_201_no_monitor(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'monitor': 'unknown'} + response = self.fake_response(method, path, payload, 202, content) + self.assertRaises(jsonrpc.NefException, instance.hook, response) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test_hook_201_ok(self, request): + method = 'delete' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = { + 'links': [{ + 'rel': 'monitor', + 'href': '/jobStatus/jobID' + }] + } + response = self.fake_response(method, path, payload, 202, content) + content2 = None + response2 = self.fake_response(method, path, payload, 201, content2) + request.return_value = response2 + result = instance.hook(response) + self.assertEqual(response2, result) + + def test_200_no_data(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'name': 'test'} + response = self.fake_response(method, path, payload, 200, content) + result = instance.hook(response) + self.assertEqual(response, result) + + def test_200_pagination_end(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = {'data': 'value'} + response = self.fake_response(method, path, payload, 200, content) + result = instance.hook(response) + self.assertEqual(response, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefRequest.request') + def test_200_pagination_next(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + content = { + 'data': [{ + 'name': 'test' + }], + 'links': [{ + 'rel': 'next', + 'href': path + }] + } + response = self.fake_response(method, path, payload, 200, content) + response2 = self.fake_response(method, path, payload, 200, content) + request.return_value = response2 + result = instance.hook(response) + self.assertEqual(response2, result) + + def test_request(self): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = 'parent/child' + payload = {'key': 'value'} + expected = {'name': 'dataset'} + url = self.proxy.url(path) + kwargs = payload.copy() + kwargs['timeout'] = self.proxy.timeout + kwargs['hooks'] = {'response': instance.hook} + self.proxy.session.request.return_value = expected + result = instance.request(method, path, **payload) + self.proxy.session.request.assert_called_with(method, url, **kwargs) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test_auth(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + method = 'post' + path = 'auth/login' + payload = { + 'data': json.dumps({ + 'username': self.proxy.username, + 'password': self.proxy.password + }) + } + content = {'token': 'test'} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + instance.auth() + request.assert_called_with(method, path, **payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test_auth_error(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + method = 'post' + path = 'auth/login' + payload = { + 'data': json.dumps({ + 'username': self.proxy.username, + 'password': self.proxy.password + }) + } + content = {'data': 'noauth'} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + self.assertRaises(jsonrpc.NefException, instance.auth) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test_failover(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = self.proxy.root + payload = {} + content = {'path': path} + response = self.fake_response(method, path, payload, 200, content) + request.return_value = response + result = instance.failover() + request.assert_called_with(method, path) + expected = True + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test_failover_timeout(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = self.proxy.root + payload = {} + content = {'path': path} + response = self.fake_response(method, path, payload, 200, content) + request.side_effect = [requests.exceptions.Timeout, response] + result = instance.failover() + request.assert_called_with(method, path) + expected = True + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefRequest.request') + def test_failover_404(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = self.proxy.root + payload = {} + content = {} + response = self.fake_response(method, path, payload, 404, content) + request.side_effect = [response, response] + result = instance.failover() + request.assert_called_with(method, path) + expected = False + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefRequest.request') + def test_failover_error(self, request): + method = 'get' + instance = jsonrpc.NefRequest(self.proxy, method) + path = self.proxy.root + request.side_effect = [ + requests.exceptions.Timeout, + requests.exceptions.ConnectionError + ] + result = instance.failover() + request.assert_called_with(method, path) + expected = False + self.assertEqual(expected, result) + + def test_getpath(self): + method = 'get' + rel = 'monitor' + href = 'jobStatus/jobID' + content = { + 'links': [ + [1, 2], + 'bad link', + { + 'rel': 'next', + 'href': href + }, + { + 'rel': rel, + 'href': href + } + ] + } + instance = jsonrpc.NefRequest(self.proxy, method) + result = instance.getpath(content, rel) + expected = href + self.assertEqual(expected, result) + + def test_getpath_no_content(self): + method = 'get' + rel = 'next' + content = None + instance = jsonrpc.NefRequest(self.proxy, method) + result = instance.getpath(content, rel) + expected = None + self.assertEqual(expected, result) + + def test_getpath_no_links(self): + method = 'get' + rel = 'next' + content = {'a': 'b'} + instance = jsonrpc.NefRequest(self.proxy, method) + result = instance.getpath(content, rel) + expected = None + self.assertEqual(expected, result) + + def test_getpath_no_rel(self): + method = 'get' + rel = 'next' + content = { + 'links': [ + { + 'rel': 'monitor', + 'href': '/jobs/jobID' + } + ] + } + instance = jsonrpc.NefRequest(self.proxy, method) + result = instance.getpath(content, rel) + expected = None + self.assertEqual(expected, result) + + def test_getpath_no_href(self): + method = 'get' + rel = 'next' + content = { + 'links': [ + { + 'rel': rel + } + ] + } + instance = jsonrpc.NefRequest(self.proxy, method) + result = instance.getpath(content, rel) + expected = None + self.assertEqual(expected, result) + + +class TestNefCollections(test.TestCase): + + def setUp(self): + super(TestNefCollections, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefCollections(self.proxy) + + def test_path(self): + path = 'path/to/item name + - & # $ = 0' + result = self.instance.path(path) + quoted_path = six.moves.urllib.parse.quote_plus(path) + expected = posixpath.join(self.instance.root, quoted_path) + self.assertEqual(expected, result) + + def test_get(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = {'name': 'dataset'} + path = self.instance.path(name) + self.proxy.get.return_value = expected + result = self.instance.get(name, payload) + self.proxy.get.assert_called_with(path, payload) + self.assertEqual(expected, result) + + def test_set(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + self.proxy.put.return_value = expected + result = self.instance.set(name, payload) + self.proxy.put.assert_called_with(path, payload) + self.assertEqual(expected, result) + + def test_list(self): + payload = {'key': 'value'} + expected = [{'name': 'dataset'}] + self.proxy.get.return_value = expected + result = self.instance.list(payload) 
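+ # Sketch of the behaviour exercised here (an assumption, not a quote of
+ # the driver code): NefCollections.list() is expected to proxy straight
+ # to self.proxy.get(self.root, payload), which the assertion below checks.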
+ self.proxy.get.assert_called_with(self.instance.root, payload) + self.assertEqual(expected, result) + + def test_create(self): + payload = {'key': 'value'} + expected = None + self.proxy.post.return_value = expected + result = self.instance.create(payload) + self.proxy.post.assert_called_with(self.instance.root, payload) + self.assertEqual(expected, result) + + def test_create_exist(self): + payload = {'key': 'value'} + expected = None + self.proxy.post.side_effect = jsonrpc.NefException(code='EEXIST') + result = self.instance.create(payload) + self.proxy.post.assert_called_with(self.instance.root, payload) + self.assertEqual(expected, result) + + def test_create_error(self): + payload = {'key': 'value'} + self.proxy.post.side_effect = jsonrpc.NefException(code='EBUSY') + self.assertRaises(jsonrpc.NefException, self.instance.create, payload) + self.proxy.post.assert_called_with(self.instance.root, payload) def test_delete(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.delete(rnd_url)) + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + self.proxy.delete.return_value = expected + result = self.instance.delete(name, payload) + self.proxy.delete.assert_called_with(path, payload) + self.assertEqual(expected, result) - def test_put(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.put(rnd_url)) + def test_delete_not_found(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + self.proxy.delete.side_effect = jsonrpc.NefException(code='ENOENT') + result = self.instance.delete(name, payload) + self.proxy.delete.assert_called_with(path, payload) + self.assertEqual(expected, result) - def test_get_200(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.OK, - random_dict)) - self.assertEqual(random_dict, self.nef.get(rnd_url)) + def test_delete_error(self): + name = 'parent/child' + payload = {'key': 'value'} + path = self.instance.path(name) + self.proxy.delete.side_effect = jsonrpc.NefException(code='EINVAL') + self.assertRaises(jsonrpc.NefException, self.instance.delete, name, + payload) + self.proxy.delete.assert_called_with(path, payload) - def test_get_201(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.get(rnd_url)) - def test_get_500(self): - class TestAdapter(adapters.HTTPAdapter): +class TestNefSettings(test.TestCase): - def __init__(self): - super(TestAdapter, self).__init__() + def setUp(self): + super(TestNefSettings, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefSettings(self.proxy) - def send(self, request, *args, **kwargs): - json = { - 'code': 'NEF_ERROR', - 'message': 'Some error' - } - r = gen_response(http_client.INTERNAL_SERVER_ERROR, json) - r.request = request - return r + def test_create(self): + payload = {'key': 'value'} + result = self.instance.create(payload) + expected = NotImplemented + self.assertEqual(expected, result) - adapter = TestAdapter() - rnd_url = 'some/random/url' - 
self._mount_adapter(rnd_url, adapter) - self.assertRaises(exception.NexentaException, self.nef.get, rnd_url) + def test_delete(self): + name = 'parent/child' + payload = {'key': 'value'} + result = self.instance.delete(name, payload) + expected = NotImplemented + self.assertEqual(expected, result) - def test_get__not_nef_error(self): - class TestAdapter(adapters.HTTPAdapter): - def __init__(self): - super(TestAdapter, self).__init__() +class TestNefDatasets(test.TestCase): - def send(self, request, *args, **kwargs): - r = gen_response(http_client.NOT_FOUND) - r._content = 'Page Not Found' - r.request = request - return r + def setUp(self): + super(TestNefDatasets, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefDatasets(self.proxy) - adapter = TestAdapter() - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, adapter) - self.assertRaises(exception.VolumeBackendAPIException, self.nef.get, - rnd_url) + def test_rename(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + path = posixpath.join(path, 'rename') + self.proxy.post.return_value = expected + result = self.instance.rename(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) - def test_get__not_nef_error_empty_body(self): - class TestAdapter(adapters.HTTPAdapter): - def __init__(self): - super(TestAdapter, self).__init__() +class TestNefSnapshots(test.TestCase): - def send(self, request, *args, **kwargs): - r = gen_response(http_client.NOT_FOUND) - r.request = request - return r + def setUp(self): + super(TestNefSnapshots, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefSnapshots(self.proxy) - adapter = TestAdapter() - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, adapter) - self.assertRaises(exception.VolumeBackendAPIException, self.nef.get, - rnd_url) + def test_clone(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + path = posixpath.join(path, 'clone') + self.proxy.post.return_value = expected + result = self.instance.clone(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) - def test_202(self): - redirect_url = 'redirect/url' - class RedirectTestAdapter(adapters.HTTPAdapter): +class TestNefVolumeGroups(test.TestCase): - def __init__(self): - super(RedirectTestAdapter, self).__init__() + def setUp(self): + super(TestNefVolumeGroups, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefVolumeGroups(self.proxy) - def send(self, request, *args, **kwargs): - json = { - 'links': [{'href': redirect_url}] - } - r = gen_response(http_client.ACCEPTED, json) - r.request = request - return r + def test_rollback(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + path = posixpath.join(path, 'rollback') + self.proxy.post.return_value = expected + result = self.instance.rollback(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, RedirectTestAdapter()) - self._mount_adapter(redirect_url, self.gen_adapter( - http_client.CREATED)) - self.assertIsNone(self.nef.get(rnd_url)) + +class TestNefVolumes(test.TestCase): + + def setUp(self): + super(TestNefVolumes, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefVolumes(self.proxy) + + def 
test_promote(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + path = posixpath.join(path, 'promote') + self.proxy.post.return_value = expected + result = self.instance.promote(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) + + +class TestNefFilesystems(test.TestCase): + + def setUp(self): + super(TestNefFilesystems, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefFilesystems(self.proxy) + + def test_mount(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + path = posixpath.join(path, 'mount') + self.proxy.post.return_value = expected + result = self.instance.mount(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) + + def test_unmount(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + path = posixpath.join(path, 'unmount') + self.proxy.post.return_value = expected + result = self.instance.unmount(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) + + def test_acl(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = self.instance.path(name) + path = posixpath.join(path, 'acl') + self.proxy.post.return_value = expected + result = self.instance.acl(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) + + +class TestNefHpr(test.TestCase): + + def setUp(self): + super(TestNefHpr, self).setUp() + self.proxy = mock.Mock() + self.instance = jsonrpc.NefHpr(self.proxy) + + def test_activate(self): + payload = {'key': 'value'} + expected = None + path = posixpath.join(self.instance.root, 'activate') + self.proxy.post.return_value = expected + result = self.instance.activate(payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) + + def test_start(self): + name = 'parent/child' + payload = {'key': 'value'} + expected = None + path = posixpath.join(self.instance.path(name), 'start') + self.proxy.post.return_value = expected + result = self.instance.start(name, payload) + self.proxy.post.assert_called_with(path, payload) + self.assertEqual(expected, result) + + +class TestNefProxy(test.TestCase): + + def setUp(self): + super(TestNefProxy, self).setUp() + self.cfg = mock.Mock(spec=conf.Configuration) + self.cfg.nexenta_use_https = True + self.cfg.driver_ssl_cert_verify = True + self.cfg.nexenta_user = 'user' + self.cfg.nexenta_password = 'pass' + self.cfg.nexenta_rest_address = '1.1.1.1,2.2.2.2' + self.cfg.nexenta_rest_port = 8443 + self.cfg.nexenta_rest_backoff_factor = 1 + self.cfg.nexenta_rest_retry_count = 3 + self.cfg.nexenta_rest_connect_timeout = 1 + self.cfg.nexenta_rest_read_timeout = 1 + self.cfg.nas_host = '3.3.3.3' + self.cfg.nas_share_path = 'pool/path/to/share' + self.nef_mock = mock.Mock() + self.mock_object(jsonrpc, 'NefRequest', + return_value=self.nef_mock) + + self.proto = 'nfs' + self.proxy = jsonrpc.NefProxy(self.proto, + self.cfg.nas_share_path, + self.cfg) + + def test___init___http(self): + proto = 'nfs' + cfg = copy.copy(self.cfg) + cfg.nexenta_use_https = False + result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) + self.assertIsInstance(result, jsonrpc.NefProxy) + + def test___init___no_rest_port_http(self): + proto = 'nfs' + cfg = copy.copy(self.cfg) + cfg.nexenta_rest_port = 0 
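+ # rest_port = 0 is treated as "use the default"; presumably the proxy
+ # falls back to its scheme-specific default REST port (assumption based
+ # on the test name, not verified against the driver source).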
+ cfg.nexenta_use_https = False + result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) + self.assertIsInstance(result, jsonrpc.NefProxy) + + def test___init___no_rest_port_https(self): + proto = 'nfs' + cfg = copy.copy(self.cfg) + cfg.nexenta_rest_port = 0 + cfg.nexenta_use_https = True + result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) + self.assertIsInstance(result, jsonrpc.NefProxy) + + def test___init___iscsi(self): + proto = 'iscsi' + cfg = copy.copy(self.cfg) + result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) + self.assertIsInstance(result, jsonrpc.NefProxy) + + def test___init___nfs_no_rest_address(self): + proto = 'nfs' + cfg = copy.copy(self.cfg) + cfg.nexenta_rest_address = '' + result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) + self.assertIsInstance(result, jsonrpc.NefProxy) + + def test___init___iscsi_no_rest_address(self): + proto = 'iscsi' + cfg = copy.copy(self.cfg) + cfg.nexenta_rest_address = '' + cfg.nexenta_host = '4.4.4.4' + result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) + self.assertIsInstance(result, jsonrpc.NefProxy) + + def test___init___invalid_storage_protocol(self): + proto = 'invalid' + cfg = copy.copy(self.cfg) + self.assertRaises(jsonrpc.NefException, jsonrpc.NefProxy, + proto, cfg.nas_share_path, cfg) + + @mock.patch('requests.packages.urllib3.disable_warnings') + def test___init___no_ssl_cert_verify(self, disable_warnings): + proto = 'nfs' + cfg = copy.copy(self.cfg) + cfg.driver_ssl_cert_verify = False + disable_warnings.return_value = None + result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) + disable_warnings.assert_called() + self.assertIsInstance(result, jsonrpc.NefProxy) + + def test_delete_bearer(self): + self.assertIsNone(self.proxy.delete_bearer()) + self.assertNotIn('Authorization', self.proxy.session.headers) + self.proxy.session.headers['Authorization'] = 'Bearer token' + self.assertIsNone(self.proxy.delete_bearer()) + self.assertNotIn('Authorization', self.proxy.session.headers) + + def test_update_bearer(self): + token = 'token' + bearer = 'Bearer %s' % token + self.assertNotIn('Authorization', self.proxy.session.headers) + self.assertIsNone(self.proxy.update_bearer(token)) + self.assertIn('Authorization', self.proxy.session.headers) + self.assertEqual(self.proxy.session.headers['Authorization'], bearer) + + def test_update_token(self): + token = 'token' + bearer = 'Bearer %s' % token + self.assertIsNone(self.proxy.update_token(token)) + self.assertEqual(self.proxy.tokens[self.proxy.host], token) + self.assertEqual(self.proxy.session.headers['Authorization'], bearer) + + def test_update_host(self): + token = 'token' + bearer = 'Bearer %s' % token + host = self.cfg.nexenta_rest_address + self.proxy.tokens[host] = token + self.assertIsNone(self.proxy.update_host(host)) + self.assertEqual(self.proxy.session.headers['Authorization'], bearer) + + def test_skip_update_host(self): + host = 'nonexistent' + self.assertIsNone(self.proxy.update_host(host)) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefSettings.get') + def test_update_lock(self, get_settings): + guid = uuid.uuid4().hex + settings = {'value': guid} + get_settings.return_value = settings + self.assertIsNone(self.proxy.update_lock()) + path = '%s:%s' % (guid, self.proxy.path) + if isinstance(path, six.text_type): + path = path.encode('utf-8') + expected = hashlib.md5(path).hexdigest() + self.assertEqual(expected, self.proxy.lock) + + def test_url(self): + path = '/path/to/api' + result = self.proxy.url(path) + expected = '%s://%s:%s%s' % (self.proxy.scheme, + self.proxy.host, + self.proxy.port, + path) + self.assertEqual(expected, result) + + @mock.patch('eventlet.greenthread.sleep') + def test_delay(self, sleep): + sleep.return_value = None + for attempt in range(0, 10): + expected = int(self.proxy.backoff_factor * (2 ** (attempt - 1))) + self.assertIsNone(self.proxy.delay(attempt)) + sleep.assert_called_with(expected) diff --git a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py index 10d89cf70b3..319ccdcf467 100644 --- a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py +++ b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py @@ -1,4 +1,4 @@ -# Copyright 2016 Nexenta Systems, Inc. +# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,211 +15,1123 @@ """ Unit tests for OpenStack Cinder volume driver """ +import hashlib +import os import mock -from mock import patch +from oslo_utils import units from cinder import context from cinder import db from cinder import test +from cinder.tests.unit.consistencygroup.fake_cgsnapshot import ( + fake_cgsnapshot_obj as fake_cgsnapshot) +from cinder.tests.unit.consistencygroup.fake_consistencygroup import ( + fake_consistencyobject_obj as fake_cgroup) from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.fake_volume import fake_volume_obj +from cinder.tests.unit.fake_snapshot import fake_snapshot_obj as fake_snapshot +from cinder.tests.unit.fake_volume import fake_volume_obj as fake_volume +from cinder.tests.unit.image import fake as fake_image from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta.ns5 import nfs class TestNexentaNfsDriver(test.TestCase): - TEST_SHARE = 'host1:/pool/share' - TEST_SHARE2_OPTIONS = '-o intr' - TEST_FILE_NAME = 'test.txt' - TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf' - TEST_SNAPSHOT_NAME = 'snapshot1' - TEST_VOLUME_NAME = 'volume1' - TEST_VOLUME_NAME2 = 'volume2' - - TEST_VOLUME = fake_volume_obj(None, **{ - 'name': TEST_VOLUME_NAME, - 'id': fake.VOLUME_ID, - 'size': 1, - 'status': 'available', - 'provider_location': TEST_SHARE - }) - - TEST_VOLUME2 = fake_volume_obj(None, **{ - 'name': TEST_VOLUME_NAME2, - 'size': 2, - 'id': fake.VOLUME2_ID, - 'status': 'in-use' - }) - - TEST_SNAPSHOT = { - 'name': TEST_SNAPSHOT_NAME, - 'volume_name': TEST_VOLUME_NAME, - 'volume_size': 1, - 'volume_id': fake.VOLUME_ID - } - - TEST_SHARE_SVC = 'svc:/network/nfs/server:default' def setUp(self): super(TestNexentaNfsDriver, self).setUp() self.ctxt = context.get_admin_context() self.cfg = mock.Mock(spec=conf.Configuration) + self.cfg.volume_backend_name = 'nexenta_nfs' + self.cfg.nexenta_group_snapshot_template = 'group-snapshot-%s' + self.cfg.nexenta_origin_snapshot_template = 'origin-snapshot-%s' self.cfg.nexenta_dataset_description = '' 
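+ # The values below are arbitrary test fixtures describing a plausible NFS
+ # backend; fake_safe_get() (defined further down) hands them to the driver.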
self.cfg.nexenta_mount_point_base = '$state_path/mnt' self.cfg.nexenta_sparsed_volumes = True + self.cfg.nexenta_qcow2_volumes = False self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.nfs_mount_point_base = '/mnt/test' self.cfg.nfs_mount_attempts = 3 - self.cfg.nfs_mount_options = None self.cfg.nas_mount_options = 'vers=4' self.cfg.reserved_percentage = 20 self.cfg.nexenta_use_https = False - self.cfg.nexenta_rest_port = 0 + self.cfg.driver_ssl_cert_verify = False self.cfg.nexenta_user = 'user' self.cfg.nexenta_password = 'pass' self.cfg.max_over_subscription_ratio = 20.0 - self.cfg.nas_host = '1.1.1.1' + self.cfg.nas_host = '1.1.1.2' + self.cfg.nexenta_rest_address = '1.1.1.1' + self.cfg.nexenta_rest_port = 8443 + self.cfg.nexenta_rest_backoff_factor = 1 + self.cfg.nexenta_rest_retry_count = 3 + self.cfg.nexenta_rest_connect_timeout = 1 + self.cfg.nexenta_rest_read_timeout = 1 self.cfg.nas_share_path = 'pool/share' + self.cfg.nfs_mount_options = '-o vers=4' + self.cfg.safe_get = self.fake_safe_get self.nef_mock = mock.Mock() - self.mock_object(jsonrpc, 'NexentaJSONProxy', - lambda *_, **__: self.nef_mock) + self.mock_object(jsonrpc, 'NefRequest', + return_value=self.nef_mock) self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) - def _create_volume_db_entry(self): - vol = { - 'id': fake.VOLUME_ID, - 'size': 1, - 'status': 'available', - 'provider_location': self.TEST_SHARE + def fake_safe_get(self, key): + try: + value = getattr(self.cfg, key) + except AttributeError: + value = None + return value + + def test_do_setup(self): + self.assertIsNone(self.drv.do_setup(self.ctxt)) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefNfs.get') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefServices.get') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.set') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.get') + def test_check_for_setup_error(self, get_filesystem, + set_filesystem, + get_service, get_nfs): + get_filesystem.return_value = { + 'mountPoint': '/path/to/volume', + 'nonBlockingMandatoryMode': False, + 'isMounted': True } - return db.volume_create(self.ctxt, vol)['id'] - - def test_check_for_setup_error(self): - self.nef_mock.get.return_value = {'data': []} - self.assertRaises( - LookupError, lambda: self.drv.check_for_setup_error()) - - def test_initialize_connection(self): - data = { - 'export': self.TEST_VOLUME['provider_location'], 'name': 'volume'} - self.assertEqual({ - 'driver_volume_type': self.drv.driver_volume_type, - 'data': data - }, self.drv.initialize_connection(self.TEST_VOLUME, None)) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._create_regular_file') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._create_sparsed_file') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._ensure_share_mounted') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 
- 'NexentaNfsDriver._share_folder') - def test_do_create_volume(self, share, ensure, sparsed, regular): - ensure.return_value = True - share.return_value = True - self.nef_mock.get.return_value = 'on' - self.drv._do_create_volume(self.TEST_VOLUME) - - url = 'storage/pools/pool/filesystems' - data = { - 'name': 'share/volume-' + fake.VOLUME_ID, - 'compressionMode': 'on', - 'dedupMode': 'off', + get_service.return_value = { + 'state': 'online' } - self.nef_mock.post.assert_called_with(url, data) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._ensure_share_mounted') - def test_delete_volume(self, ensure): - self._create_volume_db_entry() - self.nef_mock.get.return_value = {} - self.drv.delete_volume(self.TEST_VOLUME) - self.nef_mock.delete.assert_called_with( - 'storage/pools/pool/filesystems/share%2Fvolume-' + - fake.VOLUME_ID + '?snapshots=true') - - def test_create_snapshot(self): - self._create_volume_db_entry() - self.drv.create_snapshot(self.TEST_SNAPSHOT) - url = ('storage/pools/pool/filesystems/share%2Fvolume-' + - fake.VOLUME_ID + '/snapshots') - data = {'name': self.TEST_SNAPSHOT['name']} - self.nef_mock.post.assert_called_with(url, data) - - def test_delete_snapshot(self): - self._create_volume_db_entry() - self.drv.delete_snapshot(self.TEST_SNAPSHOT) - url = ('storage/pools/pool/filesystems/share%2Fvolume-' + - fake.VOLUME_ID + '/snapshots/snapshot1') - self.drv.delete_snapshot(self.TEST_SNAPSHOT) - self.nef_mock.delete.assert_called_with(url) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver.extend_volume') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver.local_path') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._share_folder') - def test_create_volume_from_snapshot(self, share, path, extend): - self._create_volume_db_entry() - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s/snapshots/%(snap)s/clone') % { - 'pool': 'pool', - 'fs': '%2F'.join(['share', 'volume-' + fake.VOLUME_ID]), - 'snap': self.TEST_SNAPSHOT['name'] + get_nfs.return_value = { + 'shareState': 'online' } - path = '/'.join(['pool/share', self.TEST_VOLUME2['name']]) - data = {'targetPath': path} - self.drv.create_volume_from_snapshot( - self.TEST_VOLUME2, self.TEST_SNAPSHOT) - self.nef_mock.post.assert_called_with(url, data) + self.assertIsNone(self.drv.check_for_setup_error()) + get_filesystem.assert_called_with(self.drv.root_path) + get_service.assert_called_with('nfs') + get_nfs.assert_called_with(self.drv.root_path) + get_filesystem.return_value = { + 'mountPoint': '/path/to/volume', + 'nonBlockingMandatoryMode': True, + 'isMounted': True + } + set_filesystem.return_value = {} + self.assertIsNone(self.drv.check_for_setup_error()) + get_filesystem.return_value = { + 'mountPoint': 'none', + 'nonBlockingMandatoryMode': False, + 'isMounted': False + } + self.assertRaises(jsonrpc.NefException, + self.drv.check_for_setup_error) + get_filesystem.return_value = { + 'mountPoint': '/path/to/volume', + 'nonBlockingMandatoryMode': False, + 'isMounted': False + } + self.assertRaises(jsonrpc.NefException, + self.drv.check_for_setup_error) + get_service.return_value = { + 'state': 'online' + } + self.assertRaises(jsonrpc.NefException, + self.drv.check_for_setup_error) + get_nfs.return_value = { + 'shareState': 'offline' + } + self.assertRaises(jsonrpc.NefException, + self.drv.check_for_setup_error) - # make sure the volume get extended! 
- extend.assert_called_once_with(self.TEST_VOLUME2, 2) + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._unmount_volume') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.set') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._create_regular_file') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._create_sparsed_file') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.local_path') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._mount_volume') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._set_volume_acl') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.create') + def test_create_volume(self, create_volume, set_volume_acl, + mount_volume, get_volume_local_path, + create_sparsed_file, created_regular_file, + set_volume, delete_volume, umount_volume): + volume = fake_volume(self.ctxt) + local_path = '/local/volume/path' + create_volume.return_value = {} + set_volume_acl.return_value = {} + mount_volume.return_value = True + get_volume_local_path.return_value = local_path + create_sparsed_file.return_value = True + created_regular_file.return_value = True + set_volume.return_value = {} + delete_volume.return_value = {} + umount_volume.return_value = {} + with mock.patch.object(self.drv, 'sparsed_volumes', True): + self.assertIsNone(self.drv.create_volume(volume)) + create_sparsed_file.assert_called_with(local_path, volume['size']) + with mock.patch.object(self.drv, 'sparsed_volumes', False): + self.assertIsNone(self.drv.create_volume(volume)) + created_regular_file.assert_called_with(local_path, volume['size']) + volume_path = self.drv._get_volume_path(volume) + payload = { + 'path': volume_path, + 'compressionMode': 'off' + } + create_volume.assert_called_with(payload) + set_volume_acl.assert_called_with(volume) + payload = {'compressionMode': self.cfg.nexenta_dataset_compression} + set_volume.assert_called_with(volume_path, payload) + umount_volume.assert_called_with(volume) - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver.local_path') - @patch('oslo_concurrency.processutils.execute') - def test_extend_volume_sparsed(self, _execute, path): - self._create_volume_db_entry() - path.return_value = 'path' + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._unmount_volume') + @mock.patch('cinder.volume.drivers.remotefs.' + 'RemoteFSDriver.copy_image_to_volume') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._mount_volume') + def test_copy_image_to_volume(self, mount_volume, + copy_image_to_volume, + unmount_volume): + volume = fake_volume(self.ctxt) + image_service = fake_image.FakeImageService() + image = image_service.images[fake.IMAGE_ID] + mount_volume.return_value = True + copy_image_to_volume.return_value = True + unmount_volume.return_value = True + self.drv.copy_image_to_volume(self.ctxt, volume, + image_service, + image['id']) + mount_volume.assert_called_with(volume) + copy_image_to_volume.assert_called_with(self.ctxt, volume, + image_service, + image['id']) + unmount_volume.assert_called_with(volume) - self.drv.extend_volume(self.TEST_VOLUME, 2) + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._unmount_volume') + @mock.patch('cinder.volume.drivers.remotefs.' 
+ 'RemoteFSSnapDriverDistributed.copy_volume_to_image') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._mount_volume') + def test_copy_volume_to_image(self, mount_volume, + copy_volume_to_image, + unmount_volume): + volume = fake_volume(self.ctxt) + image_service = fake_image.FakeImageService() + image = image_service.images[fake.IMAGE_ID] + mount_volume.return_value = True + copy_volume_to_image.return_value = True + unmount_volume.return_value = True + self.drv.copy_volume_to_image(self.ctxt, volume, + image_service, image) + mount_volume.assert_called_with(volume) + copy_volume_to_image.assert_called_with(self.ctxt, volume, + image_service, image) + unmount_volume.assert_called_with(volume) - _execute.assert_called_with( - 'truncate', '-s', '2G', - 'path', - root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf', - run_as_root=True) + @mock.patch('os.rmdir') + @mock.patch('cinder.privsep.fs.umount') + @mock.patch('os_brick.remotefs.remotefs.' + 'RemoteFsClient._read_mounts') + @mock.patch('cinder.volume.drivers.nfs.' + 'NfsDriver._get_mount_point_for_share') + def test__ensure_share_unmounted(self, get_mount_point, + list_mount_points, + unmount_filesystem, + remove_mount_point): + mount_point = '/mount/point1' + get_mount_point.return_value = mount_point + list_mount_points.return_value = [ + mount_point, + '/mount/point2', + '/mount/point3' + ] + unmount_filesystem.return_value = True + remove_mount_point.return_value = True + share = '1.1.1.1:/path/to/volume' + self.assertIsNone(self.drv._ensure_share_unmounted(share)) + get_mount_point.assert_called_with(share) + unmount_filesystem.assert_called_with(mount_point) + remove_mount_point.assert_called_with(mount_point) - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver.local_path') - @patch('oslo_concurrency.processutils.execute') - def test_extend_volume_nonsparsed(self, _execute, path): - self._create_volume_db_entry() - path.return_value = 'path' - with mock.patch.object(self.drv, - 'sparsed_volumes', - False): + @mock.patch('cinder.volume.drivers.nfs.' + 'NfsDriver._ensure_share_mounted') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.get') + def test__mount_volume(self, get_filesystem, mount_share): + volume = fake_volume(self.ctxt) + mount_point = '/path/to/volume' + get_filesystem.return_value = { + 'mountPoint': mount_point, + 'isMounted': True + } + mount_share.return_value = True + self.assertIsNone(self.drv._mount_volume(volume)) + path = self.drv._get_volume_path(volume) + payload = {'fields': 'mountPoint,isMounted'} + get_filesystem.assert_called_with(path, payload) + share = '%s:%s' % (self.drv.nas_host, mount_point) + mount_share.assert_called_with(share) - self.drv.extend_volume(self.TEST_VOLUME, 2) + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._ensure_share_unmounted') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'nfs.NexentaNfsDriver._get_volume_share') + def test__unmount_volume(self, get_share, unmount_share): + volume = fake_volume(self.ctxt) + mount_point = '/path/to/volume' + share = '%s:%s' % (self.drv.nas_host, mount_point) + get_share.return_value = share + unmount_share.return_value = True + self.assertIsNone(self.drv._unmount_volume(volume)) + get_share.assert_called_with(volume) + unmount_share.assert_called_with(share) - _execute.assert_called_with( - 'dd', 'if=/dev/zero', 'seek=1073741824', - 'of=path', - 'bs=1M', 'count=1024', - root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf', - run_as_root=True) + @mock.patch('cinder.volume.drivers.remotefs.' + 'RemoteFSDriver._create_qcow2_file') + @mock.patch('cinder.volume.drivers.remotefs.' + 'RemoteFSDriver._create_sparsed_file') + def test__create_sparsed_file(self, create_sparsed_file, + create_qcow2_file): + create_sparsed_file.return_value = True + create_qcow2_file.return_value = True + path = '/path/to/file' + size = 1 + with mock.patch.object(self.cfg, 'nexenta_qcow2_volumes', True): + self.assertIsNone(self.drv._create_sparsed_file(path, size)) + create_qcow2_file.assert_called_with(path, size) + with mock.patch.object(self.cfg, 'nexenta_qcow2_volumes', False): + self.assertIsNone(self.drv._create_sparsed_file(path, size)) + create_sparsed_file.assert_called_with(path, size) - def test_get_capacity_info(self): - self.nef_mock.get.return_value = { - 'bytesAvailable': 1000, - 'bytesUsed': 100} - self.assertEqual( - (1000, 900, 100), self.drv._get_capacity_info('pool/share')) + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.delete_volume') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefHpr.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefHpr.get') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefHpr.start') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefHpr.create') + def test_migrate_volume(self, create_service, + start_service, get_service, + delete_service, delete_volume): + create_service.return_value = {} + start_service.return_value = {} + get_service.return_value = { + 'state': 'disabled' + } + delete_service.return_value = {} + delete_volume.return_value = {} + volume = fake_volume(self.ctxt) + dst_host = '4.4.4.4' + dst_port = 8443 + dst_path = 'tank/nfs' + location_info = 'NexentaNfsDriver:%s:/%s' % (dst_host, dst_path) + host = { + 'host': 'stack@nexenta_nfs#fake_nfs', + 'capabilities': { + 'vendor_name': 'Nexenta', + 'nef_url': dst_host, + 'nef_port': dst_port, + 'storage_protocol': 'NFS', + 'free_capacity_gb': 32, + 'location_info': location_info + } + } + result = self.drv.migrate_volume(self.ctxt, volume, host) + expected = (True, None) + svc = 'cinder-migrate-%s' % volume['name'] + src = self.drv._get_volume_path(volume) + dst = '%s/%s' % (dst_path, volume['name']) + payload = { + 'name': svc, + 'sourceDataset': src, + 'destinationDataset': dst, + 'type': 'scheduled', + 'sendShareNfs': True, + 'isSource': True, + 'remoteNode': { + 'host': dst_host, + 'port': dst_port + } + } + create_service.assert_called_with(payload) + start_service.assert_called_with(svc) + get_service.assert_called_with(svc) + payload = { + 'destroySourceSnapshots': True, + 'destroyDestinationSnapshots': True + } + delete_service.assert_called_with(svc, payload) + delete_volume.assert_called_with(volume) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'nfs.NexentaNfsDriver._unmount_volume') + def test_terminate_connection(self, unmount_volume): + unmount_volume.return_value = True + volume = fake_volume(self.ctxt) + connector = { + 'initiator': 'iqn:cinder-client', + 'multipath': True + } + self.assertIsNone(self.drv.terminate_connection(volume, connector)) + unmount_volume.assert_called_with(volume) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._get_volume_share') + def test_initialize_connection(self, get_share): + volume = fake_volume(self.ctxt) + path = self.drv._get_volume_path(volume) + share = '%s:/%s' % (self.drv.nas_host, path) + get_share.return_value = share + connector = { + 'initiator': 'iqn:cinder-client', + 'multipath': True + } + result = self.drv.initialize_connection(volume, connector) + get_share.assert_called_with(volume) + base = self.cfg.nexenta_mount_point_base + expected = { + 'driver_volume_type': 'nfs', + 'mount_point_base': base, + 'data': { + 'export': share, + 'name': 'volume' + } + } + self.assertEqual(expected, result) + + def test_ensure_export(self): + volume = fake_volume(self.ctxt) + self.assertIsNone(self.drv.ensure_export(self.ctxt, volume)) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._unmount_volume') + def test_delete_volume(self, unmount_volume, delete_filesystem): + volume = fake_volume(self.ctxt) + path = self.drv._get_volume_path(volume) + unmount_volume.return_value = {} + delete_filesystem.return_value = {} + self.assertIsNone(self.drv.delete_volume(volume)) + unmount_volume.assert_called_with(volume) + payload = {'force': True, 'snapshots': True} + delete_filesystem.assert_called_with(path, payload) + + @mock.patch('os.rmdir') + def test__delete(self, rmdir): + rmdir.return_value = True + path = '/path/to/volume/mountpoint' + self.assertIsNone(self.drv._delete(path)) + rmdir.assert_called_with(path) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._unmount_volume') + @mock.patch('oslo_concurrency.processutils.execute') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.local_path') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'nfs.NexentaNfsDriver._mount_volume') + def test_extend_volume(self, mount_volume, get_volume_local_path, + execute_command, unmount_volume): + volume = fake_volume(self.ctxt) + root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf' + local_path = '/path/to/volume/file' + new_size = volume['size'] * 2 + bs = 1 * units.Mi + seek = volume['size'] * units.Ki + count = (new_size - volume['size']) * units.Ki + mount_volume.return_value = True + get_volume_local_path.return_value = local_path + execute_command.return_value = True + unmount_volume.return_value = True + with mock.patch.object(self.drv, 'sparsed_volumes', False): + self.assertIsNone(self.drv.extend_volume(volume, new_size)) + execute_command.assert_called_with('dd', 'if=/dev/zero', + 'of=%s' % local_path, + 'bs=%d' % bs, + 'seek=%d' % seek, + 'count=%d' % count, + run_as_root=True, + root_helper=root_helper) + with mock.patch.object(self.drv, 'sparsed_volumes', True): + self.assertIsNone(self.drv.extend_volume(volume, new_size)) + execute_command.assert_called_with('truncate', '-s', + '%dG' % new_size, + local_path, + run_as_root=True, + root_helper=root_helper) + mount_volume.assert_called_with(volume) + unmount_volume.assert_called_with(volume) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.create') + def test_create_snapshot(self, create_snapshot): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + create_snapshot.return_value = {} + self.assertIsNone(self.drv.create_snapshot(snapshot)) + path = self.drv._get_snapshot_path(snapshot) + payload = {'path': path} + create_snapshot.assert_called_with(payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.delete') + def test_delete_snapshot(self, delete_snapshot): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + delete_snapshot.return_value = {} + self.assertIsNone(self.drv.delete_snapshot(snapshot)) + path = self.drv._get_snapshot_path(snapshot) + payload = {'defer': True} + delete_snapshot.assert_called_with(path, payload) + + def test_snapshot_revert_use_temp_snapshot(self): + result = self.drv.snapshot_revert_use_temp_snapshot() + expected = False + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.rollback') + def test_revert_to_snapshot(self, rollback_volume): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + rollback_volume.return_value = {} + self.assertIsNone( + self.drv.revert_to_snapshot(self.ctxt, volume, snapshot) + ) + path = self.drv._get_volume_path(volume) + payload = {'snapshot': snapshot['name']} + rollback_volume.assert_called_with(path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.extend_volume') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.mount') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.unmount') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefSnapshots.clone') + def test_create_volume_from_snapshot(self, clone_snapshot, + unmount_filesystem, + mount_filesystem, + extend_volume): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + clone_size = 10 + clone_spec = { + 'id': fake.VOLUME2_ID, + 'size': clone_size + } + clone = fake_volume(self.ctxt, **clone_spec) + snapshot_path = self.drv._get_snapshot_path(snapshot) + clone_path = self.drv._get_volume_path(clone) + clone_snapshot.return_value = {} + unmount_filesystem.return_value = {} + mount_filesystem.return_value = {} + extend_volume.return_value = None + self.assertIsNone( + self.drv.create_volume_from_snapshot(clone, snapshot) + ) + clone_payload = {'targetPath': clone_path} + clone_snapshot.assert_called_with(snapshot_path, clone_payload) + unmount_filesystem.assert_called_with(clone_path) + mount_filesystem.assert_called_with(clone_path) + extend_volume.assert_called_with(clone, clone_size) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.delete_snapshot') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.create_volume_from_snapshot') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.create_snapshot') + def test_create_cloned_volume(self, create_snapshot, create_volume, + delete_snapshot): + volume = fake_volume(self.ctxt) + clone_spec = {'id': fake.VOLUME2_ID} + clone = fake_volume(self.ctxt, **clone_spec) + create_snapshot.return_value = {} + create_volume.return_value = {} + delete_snapshot.return_value = {} + self.assertIsNone(self.drv.create_cloned_volume(clone, volume)) + snapshot = { + 'name': self.drv.origin_snapshot_template % clone['id'], + 'volume_id': volume['id'], + 'volume_name': volume['name'], + 'volume_size': volume['size'] + } + create_snapshot.assert_called_with(snapshot) + create_volume.assert_called_with(clone, snapshot) + create_volume.side_effect = jsonrpc.NefException({ + 'message': 'Failed to create volume', + 'code': 'EBUSY' + }) + self.assertRaises(jsonrpc.NefException, + self.drv.create_cloned_volume, + clone, volume) + create_snapshot.side_effect = jsonrpc.NefException({ + 'message': 'Failed to open dataset', + 'code': 'ENOENT' + }) + self.assertRaises(jsonrpc.NefException, + self.drv.create_cloned_volume, + clone, volume) + + def test_create_consistencygroup(self): + cgroup = fake_cgroup(self.ctxt) + result = self.drv.create_consistencygroup(self.ctxt, cgroup) + expected = {} + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'nfs.NexentaNfsDriver.delete_volume') + def test_delete_consistencygroup(self, delete_volume): + cgroup = fake_cgroup(self.ctxt) + volume1 = fake_volume(self.ctxt) + volume2_spec = {'id': fake.VOLUME2_ID} + volume2 = fake_volume(self.ctxt, **volume2_spec) + volumes = [volume1, volume2] + delete_volume.return_value = {} + result = self.drv.delete_consistencygroup(self.ctxt, + cgroup, + volumes) + expected = ({}, []) + self.assertEqual(expected, result) + + def test_update_consistencygroup(self): + cgroup = fake_cgroup(self.ctxt) + volume1 = fake_volume(self.ctxt) + volume2_spec = {'id': fake.VOLUME2_ID} + volume2 = fake_volume(self.ctxt, **volume2_spec) + volume3_spec = {'id': fake.VOLUME3_ID} + volume3 = fake_volume(self.ctxt, **volume3_spec) + volume4_spec = {'id': fake.VOLUME4_ID} + volume4 = fake_volume(self.ctxt, **volume4_spec) + add_volumes = [volume1, volume2] + remove_volumes = [volume3, volume4] + result = self.drv.update_consistencygroup(self.ctxt, + cgroup, + add_volumes, + remove_volumes) + expected = ({}, [], []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.rename') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.create') + def test_create_cgsnapshot(self, create_snapshot, + rename_snapshot, + delete_snapshot): + cgsnapshot = fake_cgsnapshot(self.ctxt) + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + cgsnapshot_name = ( + self.cfg.nexenta_group_snapshot_template % cgsnapshot['id']) + cgsnapshot_path = '%s@%s' % (self.drv.root_path, cgsnapshot_name) + snapshot_path = '%s/%s@%s' % (self.drv.root_path, + snapshot['volume_name'], + cgsnapshot_name) + create_snapshot.return_value = {} + rename_snapshot.return_value = {} + delete_snapshot.return_value = {} + result = self.drv.create_cgsnapshot(self.ctxt, + cgsnapshot, + snapshots) + create_payload = {'path': cgsnapshot_path, 'recursive': True} + create_snapshot.assert_called_with(create_payload) + rename_payload = {'newName': snapshot['name']} + rename_snapshot.assert_called_with(snapshot_path, rename_payload) + delete_payload = {'defer': True, 'recursive': True} + delete_snapshot.assert_called_with(cgsnapshot_path, delete_payload) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.delete_snapshot') + def test_delete_cgsnapshot(self, delete_snapshot): + cgsnapshot = fake_cgsnapshot(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + volume = fake_volume(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + delete_snapshot.return_value = {} + result = self.drv.delete_cgsnapshot(self.ctxt, + cgsnapshot, + snapshots) + delete_snapshot.assert_called_with(snapshot) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'nfs.NexentaNfsDriver.create_volume_from_snapshot') + def test_create_consistencygroup_from_src_snapshots(self, create_volume): + cgroup = fake_cgroup(self.ctxt) + cgsnapshot = fake_cgsnapshot(self.ctxt) + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + clone_spec = {'id': fake.VOLUME2_ID} + clone = fake_volume(self.ctxt, **clone_spec) + clones = [clone] + create_volume.return_value = {} + result = self.drv.create_consistencygroup_from_src(self.ctxt, cgroup, + clones, cgsnapshot, + snapshots, None, + None) + create_volume.assert_called_with(clone, snapshot) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.delete') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.create_volume_from_snapshot') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.create') + def test_create_consistencygroup_from_src_volumes(self, create_snapshot, + create_volume, + delete_snapshot): + src_cgroup = fake_cgroup(self.ctxt) + dst_cgroup_spec = {'id': fake.CONSISTENCY_GROUP2_ID} + dst_cgroup = fake_cgroup(self.ctxt, **dst_cgroup_spec) + src_volume = fake_volume(self.ctxt) + src_volumes = [src_volume] + dst_volume_spec = {'id': fake.VOLUME2_ID} + dst_volume = fake_volume(self.ctxt, **dst_volume_spec) + dst_volumes = [dst_volume] + create_snapshot.return_value = {} + create_volume.return_value = {} + delete_snapshot.return_value = {} + result = self.drv.create_consistencygroup_from_src(self.ctxt, + dst_cgroup, + dst_volumes, + None, None, + src_cgroup, + src_volumes) + snapshot_name = ( + self.cfg.nexenta_origin_snapshot_template % dst_cgroup['id']) + snapshot_path = '%s@%s' % (self.drv.root_path, snapshot_name) + create_payload = {'path': snapshot_path, 'recursive': True} + create_snapshot.assert_called_with(create_payload) + snapshot = { + 'name': snapshot_name, + 'volume_id': src_volume['id'], + 'volume_name': src_volume['name'], + 'volume_size': src_volume['size'] + } + create_volume.assert_called_with(dst_volume, snapshot) + delete_payload = {'defer': True, 'recursive': True} + delete_snapshot.assert_called_with(snapshot_path, delete_payload) + expected = ({}, []) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._get_volume_share') + def test__local_volume_dir(self, get_share): + volume = fake_volume(self.ctxt) + share = '1.1.1.1:/path/to/share' + get_share.return_value = share + result = self.drv._local_volume_dir(volume) + get_share.assert_called_with(volume) + share = share.encode('utf-8') + digest = hashlib.md5(share).hexdigest() + expected = os.path.join(self.cfg.nexenta_mount_point_base, digest) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._local_volume_dir') + def test_local_path(self, get_local): + volume = fake_volume(self.ctxt) + local_dir = '/path/to' + get_local.return_value = local_dir + result = self.drv.local_path(volume) + get_local.assert_called_with(volume) + expected = os.path.join(local_dir, 'volume') + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefFilesystems.acl') + def test__set_volume_acl(self, set_acl): + volume = fake_volume(self.ctxt) + set_acl.return_value = {} + path = self.drv._get_volume_path(volume) + payload = { + 'type': 'allow', + 'principal': 'everyone@', + 'permissions': ['full_set'], + 'flags': ['file_inherit', 'dir_inherit'] + } + self.assertIsNone(self.drv._set_volume_acl(volume)) + set_acl.assert_called_with(path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.get') + def test__get_volume_share(self, get_filesystem): + volume = fake_volume(self.ctxt) + path = self.drv._get_volume_path(volume) + mount_point = '/path/to' + get_filesystem.return_value = {'mountPoint': mount_point} + result = self.drv._get_volume_share(volume) + payload = {'fields': 'mountPoint'} + get_filesystem.assert_called_with(path, payload) + expected = '%s:%s' % (self.drv.nas_host, mount_point) + self.assertEqual(expected, result) + + def test__get_volume_path(self): + volume = fake_volume(self.ctxt) + result = self.drv._get_volume_path(volume) + expected = '%s/%s' % (self.drv.root_path, volume['name']) + self.assertEqual(expected, result) + + def test__get_snapshot_path(self): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + result = self.drv._get_snapshot_path(snapshot) + expected = '%s/%s@%s' % (self.drv.root_path, + snapshot['volume_name'], + snapshot['name']) + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.get') + def test_get_volume_stats(self, get_filesystem): + available = 100 + used = 75 + get_filesystem.return_value = { + 'mountPoint': '/path/to', + 'bytesAvailable': available * units.Gi, + 'bytesUsed': used * units.Gi + } + result = self.drv.get_volume_stats(True) + payload = {'fields': 'mountPoint,bytesAvailable,bytesUsed'} + get_filesystem.assert_called_with(self.drv.root_path, payload) + self.assertEqual(self.drv._stats, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.get') + def test_update_volume_stats(self, get_filesystem): + available = 8 + used = 2 + share = '%s:/%s' % (self.drv.nas_host, self.drv.root_path) + get_filesystem.return_value = { + 'mountPoint': '/%s' % self.drv.root_path, + 'bytesAvailable': available * units.Gi, + 'bytesUsed': used * units.Gi + } + location_info = '%(driver)s:%(share)s' % { + 'driver': self.drv.__class__.__name__, + 'share': share + } + expected = { + 'vendor_name': 'Nexenta', + 'dedup': self.cfg.nexenta_dataset_dedup, + 'compression': self.cfg.nexenta_dataset_compression, + 'description': self.cfg.nexenta_dataset_description, + 'nef_url': self.cfg.nexenta_rest_address, + 'nef_port': self.cfg.nexenta_rest_port, + 'driver_version': self.drv.VERSION, + 'storage_protocol': 'NFS', + 'sparsed_volumes': self.cfg.nexenta_sparsed_volumes, + 'total_capacity_gb': used + available, + 'free_capacity_gb': available, + 'reserved_percentage': self.cfg.reserved_percentage, + 'QoS_support': False, + 'multiattach': True, + 'consistencygroup_support': True, + 'consistent_group_snapshot_enabled': True, + 'volume_backend_name': self.cfg.volume_backend_name, + 'location_info': location_info, + 'nfs_mount_point_base': self.cfg.nexenta_mount_point_base + } + self.assertIsNone(self.drv._update_volume_stats()) + self.assertEqual(expected, self.drv._stats) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'jsonrpc.NefFilesystems.list') + def test__get_existing_volume(self, list_filesystems): + volume = fake_volume(self.ctxt) + parent = self.drv.root_path + name = volume['name'] + path = self.drv._get_volume_path(volume) + list_filesystems.return_value = [{ + 'name': name, + 'path': path + }] + result = self.drv._get_existing_volume({'source-name': name}) + payload = { + 'path': path, + 'parent': parent, + 'fields': 'path', + 'recursive': False + } + list_filesystems.assert_called_with(payload) + expected = { + 'name': name, + 'path': path + } + self.assertEqual(expected, result) + + def test__check_already_managed_snapshot(self): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + result = self.drv._check_already_managed_snapshot(snapshot) + expected = False + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.list') + def test__get_existing_snapshot(self, list_snapshots): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + name = snapshot['name'] + path = self.drv._get_snapshot_path(snapshot) + parent = self.drv._get_volume_path(volume) + list_snapshots.return_value = [{ + 'name': name, + 'path': path + }] + payload = {'source-name': name} + result = self.drv._get_existing_snapshot(snapshot, payload) + payload = { + 'parent': parent, + 'fields': 'name,path', + 'recursive': False, + 'name': name + } + list_snapshots.assert_called_with(payload) + expected = { + 'name': name, + 'path': path, + 'volume_name': volume['name'], + 'volume_size': volume['size'] + } + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.rename') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._get_existing_volume') + def test_manage_existing(self, get_existing_volume, rename_volume): + existing_volume = fake_volume(self.ctxt) + manage_volume_spec = {'id': fake.VOLUME2_ID} + manage_volume = fake_volume(self.ctxt, **manage_volume_spec) + existing_name = existing_volume['name'] + existing_path = self.drv._get_volume_path(existing_volume) + manage_path = self.drv._get_volume_path(manage_volume) + get_existing_volume.return_value = { + 'name': existing_name, + 'path': existing_path + } + rename_volume.return_value = {} + payload = {'source-name': existing_name} + self.assertIsNone(self.drv.manage_existing(manage_volume, payload)) + get_existing_volume.assert_called_with(payload) + payload = {'newPath': manage_path} + rename_volume.assert_called_with(existing_path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._unmount_volume') + @mock.patch('os.path.getsize') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver.local_path') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._mount_volume') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._set_volume_acl') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'nfs.NexentaNfsDriver._get_existing_volume') + def test_manage_existing_get_size(self, get_volume, set_acl, + mount_volume, get_local, + get_size, unmount_volume): + volume = fake_volume(self.ctxt) + name = volume['name'] + size = volume['size'] + path = self.drv._get_volume_path(volume) + get_volume.return_value = { + 'name': name, + 'path': path + } + set_acl.return_value = {} + mount_volume.return_value = True + get_local.return_value = '/path/to/volume/file' + get_size.return_value = size * units.Gi + unmount_volume.return_value = True + payload = {'source-name': name} + result = self.drv.manage_existing_get_size(volume, payload) + expected = size + self.assertEqual(expected, result) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefFilesystems.list') + def test_get_manageable_volumes(self, list_filesystems): + volume = fake_volume(self.ctxt) + volumes = [volume] + size = volume['size'] + path = self.drv._get_volume_path(volume) + guid = 12345 + parent = self.drv.root_path + list_filesystems.return_value = [{ + 'guid': guid, + 'parent': parent, + 'path': path, + 'bytesUsed': size * units.Gi + }] + result = self.drv.get_manageable_volumes(volumes, None, 1, + 0, 'size', 'asc') + payload = { + 'parent': parent, + 'fields': 'guid,parent,path,bytesUsed', + 'recursive': False + } + list_filesystems.assert_called_with(payload) + expected = [{ + 'cinder_id': volume['id'], + 'extra_info': None, + 'reason_not_safe': 'Volume already managed', + 'reference': { + 'source-guid': guid, + 'source-name': volume['name'] + }, + 'safe_to_manage': False, + 'size': volume['size'] + }] + self.assertEqual(expected, result) + + def test_unmanage(self): + volume = fake_volume(self.ctxt) + self.assertIsNone(self.drv.unmanage(volume)) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.rename') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'nfs.NexentaNfsDriver._get_existing_snapshot') + def test_manage_existing_snapshot(self, get_existing_snapshot, + rename_snapshot): + volume = fake_volume(self.ctxt) + existing_snapshot = fake_snapshot(self.ctxt) + existing_snapshot.volume = volume + manage_snapshot_spec = {'id': fake.SNAPSHOT2_ID} + manage_snapshot = fake_snapshot(self.ctxt, **manage_snapshot_spec) + manage_snapshot.volume = volume + existing_name = existing_snapshot['name'] + manage_name = manage_snapshot['name'] + volume_name = volume['name'] + volume_size = volume['size'] + existing_path = self.drv._get_snapshot_path(existing_snapshot) + get_existing_snapshot.return_value = { + 'name': existing_name, + 'path': existing_path, + 'volume_name': volume_name, + 'volume_size': volume_size + } + rename_snapshot.return_value = {} + payload = {'source-name': existing_name} + self.assertIsNone( + self.drv.manage_existing_snapshot(manage_snapshot, payload) + ) + get_existing_snapshot.assert_called_with(manage_snapshot, payload) + payload = {'newName': manage_name} + rename_snapshot.assert_called_with(existing_path, payload) + + @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
+ 'nfs.NexentaNfsDriver._get_existing_snapshot') + def test_manage_existing_snapshot_get_size(self, get_snapshot): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshot_name = snapshot['name'] + volume_name = volume['name'] + volume_size = volume['size'] + snapshot_path = self.drv._get_snapshot_path(snapshot) + get_snapshot.return_value = { + 'name': snapshot_name, + 'path': snapshot_path, + 'volume_name': volume_name, + 'volume_size': volume_size + } + payload = {'source-name': snapshot_name} + result = self.drv.manage_existing_snapshot_get_size(volume, payload) + expected = volume['size'] + self.assertEqual(expected, result) + + @mock.patch('cinder.objects.VolumeList.get_all_by_host') + @mock.patch('cinder.volume.drivers.nexenta.ns5.' + 'jsonrpc.NefSnapshots.list') + def test_get_manageable_snapshots(self, list_snapshots, list_volumes): + volume = fake_volume(self.ctxt) + volumes = [volume] + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + snapshots = [snapshot] + guid = 12345 + name = snapshot['name'] + path = self.drv._get_snapshot_path(snapshot) + parent = self.drv._get_volume_path(volume) + list_snapshots.return_value = [{ + 'name': name, + 'path': path, + 'guid': guid, + 'parent': parent, + 'hprService': '', + 'snaplistId': '' + }] + list_volumes.return_value = volumes + result = self.drv.get_manageable_snapshots(snapshots, None, 1, + 0, 'size', 'asc') + payload = { + 'parent': self.drv.root_path, + 'fields': 'name,guid,path,parent,hprService,snaplistId', + 'recursive': True + } + list_snapshots.assert_called_with(payload) + expected = [{ + 'cinder_id': snapshot['id'], + 'extra_info': None, + 'reason_not_safe': 'Snapshot already managed', + 'source_reference': { + 'name': volume['name'] + }, + 'reference': { + 'source-guid': guid, + 'source-name': snapshot['name'] + }, + 'safe_to_manage': False, + 'size': volume['size'] + }] + self.assertEqual(expected, result) + + def test_unmanage_snapshot(self): + volume = fake_volume(self.ctxt) + snapshot = fake_snapshot(self.ctxt) + snapshot.volume = volume + self.assertIsNone(self.drv.unmanage_snapshot(snapshot)) diff --git a/cinder/volume/drivers/nexenta/ns5/iscsi.py b/cinder/volume/drivers/nexenta/ns5/iscsi.py index 301603b2195..a2dbf2a4991 100644 --- a/cinder/volume/drivers/nexenta/ns5/iscsi.py +++ b/cinder/volume/drivers/nexenta/ns5/iscsi.py @@ -1,4 +1,4 @@ -# Copyright 2016 Nexenta Systems, Inc. +# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,192 +13,189 @@ # License for the specific language governing permissions and limitations # under the License. +import ipaddress +import posixpath +import random import uuid from oslo_log import log as logging from oslo_utils import units +import six from cinder import context -from cinder import db -from cinder import exception +from cinder import coordination from cinder.i18n import _ from cinder import interface +from cinder import objects from cinder.volume import driver from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta import options -from cinder.volume.drivers.nexenta import utils +from cinder.volume import utils -VERSION = '1.1.0' LOG = logging.getLogger(__name__) -TARGET_GROUP_PREFIX = 'cinder-tg-' @interface.volumedriver class NexentaISCSIDriver(driver.ISCSIDriver): """Executes volume driver commands on Nexenta Appliance. - .. 
code-block:: default + Version history: - Version history: - 1.0.0 - Initial driver version. - 1.1.0 - Added HTTPS support. - Added use of sessions for REST calls. + .. code-block:: none + 1.0.0 - Initial driver version. + 1.1.0 - Added HTTPS support. + - Added use of sessions for REST calls. + - Added abandoned volumes and snapshots cleanup. + 1.2.0 - Failover support. + 1.2.1 - Configurable luns per parget, target prefix. + 1.3.0 - Removed target/TG caching, added support for target portals + and host groups. + 1.3.1 - Refactored _do_export to query exact lunMapping. + 1.3.2 - Revert to snapshot support. + 1.3.3 - Refactored LUN creation, use host group for LUN mappings. + 1.3.4 - Adapted NexentaException for the latest Cinder. + 1.3.5 - Added deferred deletion for snapshots. + 1.3.6 - Fixed race between volume/clone deletion. + 1.3.7 - Added consistency group support. + 1.3.8 - Added volume multi-attach. + 1.4.0 - Refactored iSCSI driver. + - Added pagination support. + - Added configuration parameters for REST API connect/read + timeouts, connection retries and backoff factor. + - Fixed HA failover. + - Added retries on EBUSY errors. + - Fixed HTTP authentication. + - Added coordination for dataset operations. + 1.4.1 - Support for NexentaStor tenants. + 1.4.2 - Added manage/unmanage/manageable-list volume/snapshot support. + 1.4.3 - Added consistency group capability to generic volume group. """ - VERSION = VERSION - - # ThirdPartySystems wiki page + VERSION = '1.4.3' CI_WIKI_NAME = "Nexenta_CI" + vendor_name = 'Nexenta' + product_name = 'NexentaStor5' + storage_protocol = 'iSCSI' + driver_volume_type = 'iscsi' + def __init__(self, *args, **kwargs): super(NexentaISCSIDriver, self).__init__(*args, **kwargs) + if not self.configuration: + message = (_('%(product_name)s %(storage_protocol)s ' + 'backend configuration not found') + % {'product_name': self.product_name, + 'storage_protocol': self.storage_protocol}) + raise jsonrpc.NefException(code='ENODATA', message=message) + self.configuration.append_config_values( + options.NEXENTA_CONNECTION_OPTS) + self.configuration.append_config_values( + options.NEXENTA_ISCSI_OPTS) + self.configuration.append_config_values( + options.NEXENTA_DATASET_OPTS) self.nef = None - # mapping of targets and groups. Groups are the keys - self.targets = {} - # list of volumes in target group. 
Groups are the keys - self.volumes = {} - if self.configuration: - self.configuration.append_config_values( - options.NEXENTA_CONNECTION_OPTS) - self.configuration.append_config_values( - options.NEXENTA_ISCSI_OPTS) - self.configuration.append_config_values( - options.NEXENTA_DATASET_OPTS) - self.configuration.append_config_values( - options.NEXENTA_RRMGR_OPTS) - self.use_https = self.configuration.nexenta_use_https - self.nef_host = self.configuration.nexenta_host - self.nef_port = self.configuration.nexenta_rest_port - self.nef_user = self.configuration.nexenta_user - self.nef_password = self.configuration.nexenta_password - self.storage_pool = self.configuration.nexenta_volume + self.volume_backend_name = ( + self.configuration.safe_get('volume_backend_name') or + '%s_%s' % (self.product_name, self.storage_protocol)) + self.target_prefix = self.configuration.nexenta_target_prefix + self.target_group_prefix = ( + self.configuration.nexenta_target_group_prefix) + self.host_group_prefix = self.configuration.nexenta_host_group_prefix + self.luns_per_target = self.configuration.nexenta_luns_per_target + self.lu_writebackcache_disabled = ( + self.configuration.nexenta_lu_writebackcache_disabled) + self.iscsi_host = self.configuration.nexenta_host + self.pool = self.configuration.nexenta_volume self.volume_group = self.configuration.nexenta_volume_group - self.dataset_compression = ( + self.portal_port = self.configuration.nexenta_iscsi_target_portal_port + self.portals = self.configuration.nexenta_iscsi_target_portals + self.sparsed_volumes = self.configuration.nexenta_sparse + self.deduplicated_volumes = self.configuration.nexenta_dataset_dedup + self.compressed_volumes = ( self.configuration.nexenta_dataset_compression) - self.dataset_deduplication = self.configuration.nexenta_dataset_dedup self.dataset_description = ( self.configuration.nexenta_dataset_description) self.iscsi_target_portal_port = ( self.configuration.nexenta_iscsi_target_portal_port) + self.root_path = posixpath.join(self.pool, self.volume_group) + self.dataset_blocksize = self.configuration.nexenta_ns5_blocksize + if not self.configuration.nexenta_ns5_blocksize > 128: + self.dataset_blocksize *= units.Ki + self.group_snapshot_template = ( + self.configuration.nexenta_group_snapshot_template) + self.origin_snapshot_template = ( + self.configuration.nexenta_origin_snapshot_template) @staticmethod def get_driver_options(): return ( options.NEXENTA_CONNECTION_OPTS + options.NEXENTA_ISCSI_OPTS + - options.NEXENTA_DATASET_OPTS + - options.NEXENTA_RRMGR_OPTS + options.NEXENTA_DATASET_OPTS ) - @property - def backend_name(self): - backend_name = None - if self.configuration: - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = self.__class__.__name__ - return backend_name - def do_setup(self, context): - self.nef = jsonrpc.NexentaJSONProxy( - self.nef_host, self.nef_port, self.nef_user, - self.nef_password, self.use_https) - url = 'storage/pools/%s/volumeGroups' % self.storage_pool - data = { - 'name': self.volume_group, - 'volumeBlockSize': ( - self.configuration.nexenta_ns5_blocksize * units.Ki) - } - try: - self.nef.post(url, data) - except exception.NexentaException as e: - if 'EEXIST' in e.args[0]: - LOG.debug('volumeGroup already exists, skipping') - else: - raise - - self._fetch_volumes() - - def _fetch_volumes(self): - url = 'san/iscsi/targets?fields=alias,name&limit=50000' - for target in self.nef.get(url)['data']: - tg_name = target['alias'] - if 
tg_name.startswith(TARGET_GROUP_PREFIX): - self.targets[tg_name] = target['name'] - self._fill_volumes(tg_name) + self.nef = jsonrpc.NefProxy(self.driver_volume_type, + self.root_path, + self.configuration) def check_for_setup_error(self): - """Verify that the zfs volumes exist. - - :raise: :py:exc:`LookupError` - """ - url = 'storage/pools/%(pool)s/volumeGroups/%(group)s' % { - 'pool': self.storage_pool, - 'group': self.volume_group, - } + """Check root volume group and iSCSI target service.""" try: - self.nef.get(url) - except exception.NexentaException: - raise LookupError(_( - "Dataset group %s not found at Nexenta SA"), '/'.join( - [self.storage_pool, self.volume_group])) - services = self.nef.get('services') - for service in services['data']: - if service['name'] == 'iscsit': - if service['state'] != 'online': - raise exception.NexentaException( - 'iSCSI service is not running on NS appliance') - break - - def _get_volume_path(self, volume): - """Return zfs volume name that corresponds given volume name.""" - return '%s/%s/%s' % (self.storage_pool, self.volume_group, - volume['name']) - - @staticmethod - def _get_clone_snapshot_name(volume): - """Return name for snapshot that will be used to clone the volume.""" - return 'cinder-clone-snapshot-%(id)s' % volume + self.nef.volumegroups.get(self.root_path) + except jsonrpc.NefException as error: + if error.code != 'ENOENT': + raise + payload = {'path': self.root_path, + 'volumeBlockSize': self.dataset_blocksize} + self.nef.volumegroups.create(payload) + service = self.nef.services.get('iscsit') + if service['state'] != 'online': + message = (_('iSCSI target service is not online: %(state)s') + % {'state': service['state']}) + raise jsonrpc.NefException(code='ESRCH', message=message) def create_volume(self, volume): """Create a zfs volume on appliance. :param volume: volume reference - :return: model update dict for volume reference + :returns: model update dict for volume reference """ - url = 'storage/pools/%(pool)s/volumeGroups/%(group)s/volumes' % { - 'pool': self.storage_pool, - 'group': self.volume_group, - } - data = { - 'name': volume['name'], + payload = { + 'path': self._get_volume_path(volume), 'volumeSize': volume['size'] * units.Gi, - 'volumeBlockSize': ( - self.configuration.nexenta_ns5_blocksize * units.Ki), - 'sparseVolume': self.configuration.nexenta_sparse + 'volumeBlockSize': self.dataset_blocksize, + 'compressionMode': self.compressed_volumes, + 'sparseVolume': self.sparsed_volumes } - self.nef.post(url, data) + self.nef.volumes.create(payload) + @coordination.synchronized('{self.nef.lock}') def delete_volume(self, volume): - """Destroy a zfs volume on appliance. + """Deletes a logical volume. 
:param volume: volume reference """ - - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s' - '/volumes/%(name)s') % { - 'pool': self.storage_pool, - 'group': self.volume_group, - 'name': volume['name'] - } + volume_path = self._get_volume_path(volume) + delete_payload = {'snapshots': True} try: - self.nef.delete(url) - except exception.NexentaException as exc: - # We assume that volume is gone - LOG.warning('Got error trying to delete volume %(volume)s,' - ' assuming it is already gone: %(exc)s', - {'volume': volume, 'exc': exc}) + self.nef.volumes.delete(volume_path, delete_payload) + except jsonrpc.NefException as error: + if error.code != 'EEXIST': + raise + snapshots_tree = {} + snapshots_payload = {'parent': volume_path, 'fields': 'path'} + snapshots = self.nef.snapshots.list(snapshots_payload) + for snapshot in snapshots: + clones_payload = {'fields': 'clones,creationTxg'} + data = self.nef.snapshots.get(snapshot['path'], clones_payload) + if data['clones']: + snapshots_tree[data['creationTxg']] = data['clones'][0] + if snapshots_tree: + clone_path = snapshots_tree[max(snapshots_tree)] + self.nef.volumes.promote(clone_path) + self.nef.volumes.delete(volume_path, delete_payload) def extend_volume(self, volume, new_size): """Extend an existing volume. @@ -206,83 +203,55 @@ class NexentaISCSIDriver(driver.ISCSIDriver): :param volume: volume reference :param new_size: volume new size in GB """ - LOG.info('Extending volume: %(id)s New size: %(size)s GB', - {'id': volume['id'], 'size': new_size}) - pool, group, name = self._get_volume_path(volume).split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(name)s') % { - 'pool': pool, - 'group': group, - 'name': name - } - self.nef.put(url, {'volumeSize': new_size * units.Gi}) + volume_path = self._get_volume_path(volume) + payload = {'volumeSize': new_size * units.Gi} + self.nef.volumes.set(volume_path, payload) + @coordination.synchronized('{self.nef.lock}') def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot reference """ - snapshot_vol = self._get_snapshot_volume(snapshot) - LOG.info('Creating snapshot %(snap)s of volume %(vol)s', { - 'snap': snapshot['name'], - 'vol': snapshot_vol['name'] - }) - volume_path = self._get_volume_path(snapshot_vol) - pool, group, volume = volume_path.split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(volume)s/snapshots') % { - 'pool': pool, - 'group': group, - 'volume': snapshot_vol['name'] - } - self.nef.post(url, {'name': snapshot['name']}) + snapshot_path = self._get_snapshot_path(snapshot) + payload = {'path': snapshot_path} + self.nef.snapshots.create(payload) + @coordination.synchronized('{self.nef.lock}') def delete_snapshot(self, snapshot): - """Delete volume's snapshot on appliance. + """Deletes a snapshot. 
:param snapshot: snapshot reference """ - LOG.info('Deleting snapshot: %s', snapshot['name']) - snapshot_vol = self._get_snapshot_volume(snapshot) - volume_path = self._get_volume_path(snapshot_vol) - pool, group, volume = volume_path.split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(volume)s/snapshots/%(snapshot)s') % { - 'pool': pool, - 'group': group, - 'volume': volume, - 'snapshot': snapshot['name'] - } - try: - self.nef.delete(url) - except exception.NexentaException as exc: - if 'EBUSY' in exc.args[0]: - LOG.warning( - 'Could not delete snapshot %s - it has dependencies', - snapshot['name']) - else: - LOG.warning(exc) + snapshot_path = self._get_snapshot_path(snapshot) + payload = {'defer': True} + self.nef.snapshots.delete(snapshot_path, payload) + def snapshot_revert_use_temp_snapshot(self): + # Considering that NexentaStor based drivers use COW images + # for storing snapshots, having chains of such images, + # creating a backup snapshot when reverting one is not + # actually helpful. + return False + + def revert_to_snapshot(self, context, volume, snapshot): + """Revert volume to snapshot.""" + volume_path = self._get_volume_path(volume) + payload = {'snapshot': snapshot['name']} + self.nef.volumes.rollback(volume_path, payload) + + @coordination.synchronized('{self.nef.lock}') def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ - LOG.info('Creating volume from snapshot: %s', snapshot['name']) - snapshot_vol = self._get_snapshot_volume(snapshot) - volume_path = self._get_volume_path(snapshot_vol) - pool, group, snapshot_vol = volume_path.split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(volume)s/snapshots/%(snapshot)s/clone') % { - 'pool': pool, - 'group': group, - 'volume': snapshot_vol, - 'snapshot': snapshot['name'] - } - self.nef.post(url, {'targetPath': self._get_volume_path(volume)}) - if (('size' in volume) and ( - volume['size'] > snapshot['volume_size'])): + snapshot_path = self._get_snapshot_path(snapshot) + clone_path = self._get_volume_path(volume) + payload = {'targetPath': clone_path} + self.nef.snapshots.clone(snapshot_path, payload) + if volume['size'] > snapshot['volume_size']: self.extend_volume(volume, volume['size']) def create_cloned_volume(self, volume, src_vref): @@ -291,205 +260,1174 @@ class NexentaISCSIDriver(driver.ISCSIDriver): :param volume: new volume reference :param src_vref: source volume reference """ - snapshot = {'volume_name': src_vref['name'], - 'volume_id': src_vref['id'], - 'volume_size': src_vref['size'], - 'name': self._get_clone_snapshot_name(volume)} - LOG.debug('Creating temp snapshot of the original volume: ' - '%s@%s', snapshot['volume_name'], snapshot['name']) + snapshot = { + 'name': self.origin_snapshot_template % volume['id'], + 'volume_id': src_vref['id'], + 'volume_name': src_vref['name'], + 'volume_size': src_vref['size'] + } self.create_snapshot(snapshot) try: self.create_volume_from_snapshot(volume, snapshot) - except exception.NexentaException: - LOG.error('Volume creation failed, deleting created snapshot %s', - '@'.join([snapshot['volume_name'], snapshot['name']])) + except jsonrpc.NefException as error: + LOG.debug('Failed to create clone %(clone)s ' + 'from volume %(volume)s: %(error)s', + {'clone': volume['name'], + 'volume': src_vref['name'], + 'error': error}) + raise + finally: try: 
self.delete_snapshot(snapshot) - except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning('Failed to delete zfs snapshot %s', - '@'.join([snapshot['volume_name'], - snapshot['name']])) - raise + except jsonrpc.NefException as error: + LOG.debug('Failed to delete temporary snapshot ' + '%(volume)s@%(snapshot)s: %(error)s', + {'volume': src_vref['name'], + 'snapshot': snapshot['name'], + 'error': error}) - def _get_snapshot_volume(self, snapshot): - ctxt = context.get_admin_context() - return db.volume_get(ctxt, snapshot['volume_id']) + def create_export(self, context, volume, connector): + """Export a volume.""" + pass - def _do_export(self, _ctx, volume): - """Do all steps to get zfs volume exported at separate target. + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass - :param volume: reference of volume to be exported + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate a connection to a volume. + + :param volume: a volume object + :param connector: a connector object + :returns: dictionary of connection information """ + info = {'driver_volume_type': self.driver_volume_type, 'data': {}} + host_iqn = None + host_groups = [] volume_path = self._get_volume_path(volume) - - # Find out whether the volume is exported - vol_map_url = 'san/lunMappings?volume=%s&fields=lun' % ( - volume_path.replace('/', '%2F')) - data = self.nef.get(vol_map_url).get('data') - if data: - model_update = {} + if isinstance(connector, dict) and 'initiator' in connector: + connectors = [] + for attachment in volume['volume_attachment']: + connectors.append(attachment.get('connector')) + if connectors.count(connector) > 1: + LOG.debug('Detected multiple connections on host ' + '%(host_name)s [%(host_ip)s] for volume ' + '%(volume)s, skip terminate volume connection', + {'host_name': connector.get('host', 'unknown'), + 'host_ip': connector.get('ip', 'unknown'), + 'volume': volume['name']}) + return True + host_iqn = connector.get('initiator') + host_groups.append(options.DEFAULT_HOST_GROUP) + host_group = self._get_host_group(host_iqn) + if host_group is not None: + host_groups.append(host_group) + LOG.debug('Terminate connection for volume %(volume)s ' + 'and initiator %(initiator)s', + {'volume': volume['name'], + 'initiator': host_iqn}) else: - # Choose the best target group among existing ones - tg_name = None - for tg in self.volumes: - if len(self.volumes[tg]) < 20: - tg_name = tg - break - if tg_name: - target_name = self.targets[tg_name] + LOG.debug('Terminate all connections for volume %(volume)s', + {'volume': volume['name']}) + + payload = {'volume': volume_path} + mappings = self.nef.mappings.list(payload) + if not mappings: + LOG.debug('There are no LUN mappings found for volume %(volume)s', + {'volume': volume['name']}) + return info + for mapping in mappings: + mapping_id = mapping.get('id') + mapping_tg = mapping.get('targetGroup') + mapping_hg = mapping.get('hostGroup') + if host_iqn is None or mapping_hg in host_groups: + LOG.debug('Delete LUN mapping %(id)s for volume %(volume)s, ' + 'target group %(tg)s and host group %(hg)s', + {'id': mapping_id, 'volume': volume['name'], + 'tg': mapping_tg, 'hg': mapping_hg}) + self._delete_lun_mapping(mapping_id) else: - tg_name = TARGET_GROUP_PREFIX + uuid.uuid4().hex - - # Create new target - url = 'san/iscsi/targets' - data = { - "portals": [ - {"address": self.nef_host} 
- ], - 'alias': tg_name - } - self.nef.post(url, data) - - # Get the name of just created target - data = self.nef.get( - '%(url)s?fields=name&alias=%(tg_name)s' % { - 'url': url, - 'tg_name': tg_name - })['data'] - target_name = data[0]['name'] - - self._create_target_group(tg_name, target_name) - - self.targets[tg_name] = target_name - self.volumes[tg_name] = set() - - # Export the volume - url = 'san/lunMappings' - data = { - "hostGroup": "all", - "targetGroup": tg_name, - 'volume': volume_path - } - try: - self.nef.post(url, data) - self.volumes[tg_name].add(volume_path) - except exception.NexentaException as e: - if 'No such target group' in e.args[0]: - self._create_target_group(tg_name, target_name) - self._fill_volumes(tg_name) - self.nef.post(url, data) - else: - raise - - # Get LUN of just created volume - data = self.nef.get(vol_map_url).get('data') - lun = data[0]['lun'] - - provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { - 'host': self.nef_host, - 'port': self.configuration.nexenta_iscsi_target_portal_port, - 'name': target_name, - 'lun': lun, - } - model_update = {'provider_location': provider_location} - return model_update - - def create_export(self, _ctx, volume, connector): - """Create new export for zfs volume. - - :param volume: reference of volume to be exported - :return: iscsiadm-formatted provider location string - """ - model_update = self._do_export(_ctx, volume) - return model_update - - def ensure_export(self, _ctx, volume): - """Recreate parts of export if necessary. - - :param volume: reference of volume to be exported - """ - self._do_export(_ctx, volume) - - def remove_export(self, _ctx, volume): - """Destroy all resources created to export zfs volume. - - :param volume: reference of volume to be unexported - """ - volume_path = self._get_volume_path(volume) - - # Get ID of a LUN mapping if the volume is exported - url = 'san/lunMappings?volume=%s&fields=id' % ( - volume_path.replace('/', '%2F')) - data = self.nef.get(url)['data'] - if data: - url = 'san/lunMappings/%s' % data[0]['id'] - self.nef.delete(url) - else: - LOG.debug('LU already deleted from appliance') - - for tg in self.volumes: - if volume_path in self.volumes[tg]: - self.volumes[tg].remove(volume_path) - break + LOG.debug('Skip LUN mapping %(id)s for volume %(volume)s, ' + 'target group %(tg)s and host group %(hg)s', + {'id': mapping_id, 'volume': volume['name'], + 'tg': mapping_tg, 'hg': mapping_hg}) + return info def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. 
""" - if refresh: + if refresh or not self._stats: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" - LOG.debug('Updating volume stats') - - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s' - '?fields=bytesAvailable,bytesUsed') % { - 'pool': self.storage_pool, - 'group': self.volume_group, - } - stats = self.nef.get(url) - total_amount = utils.str2gib_size(stats['bytesAvailable']) - free_amount = utils.str2gib_size( - stats['bytesAvailable'] - stats['bytesUsed']) - + LOG.debug('Updating volume backend %(volume_backend_name)s stats', + {'volume_backend_name': self.volume_backend_name}) + payload = {'fields': 'bytesAvailable,bytesUsed'} + dataset = self.nef.volumegroups.get(self.root_path, payload) + free = dataset['bytesAvailable'] // units.Gi + used = dataset['bytesUsed'] // units.Gi + total = free + used location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % { 'driver': self.__class__.__name__, - 'host': self.nef_host, - 'pool': self.storage_pool, + 'host': self.iscsi_host, + 'pool': self.pool, 'group': self.volume_group, } self._stats = { - 'vendor_name': 'Nexenta', - 'dedup': self.dataset_deduplication, - 'compression': self.dataset_compression, + 'vendor_name': self.vendor_name, + 'dedup': self.deduplicated_volumes, + 'compression': self.compressed_volumes, 'description': self.dataset_description, + 'nef_url': self.nef.host, + 'nef_port': self.nef.port, 'driver_version': self.VERSION, - 'storage_protocol': 'iSCSI', - 'total_capacity_gb': total_amount, - 'free_capacity_gb': free_amount, + 'storage_protocol': self.storage_protocol, + 'sparsed_volumes': self.sparsed_volumes, + 'total_capacity_gb': total, + 'free_capacity_gb': free, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, - 'volume_backend_name': self.backend_name, + 'multiattach': True, + 'consistencygroup_support': True, + 'consistent_group_snapshot_enabled': True, 'location_info': location_info, - 'iscsi_target_portal_port': self.iscsi_target_portal_port, - 'nef_url': self.nef.url + 'volume_backend_name': self.volume_backend_name, + 'iscsi_target_portal_port': self.iscsi_target_portal_port } - def _fill_volumes(self, tg_name): - url = ('san/lunMappings?targetGroup=%s&fields=volume' - '&limit=50000' % tg_name) - self.volumes[tg_name] = { - mapping['volume'] for mapping in self.nef.get(url)['data']} + def _get_volume_path(self, volume): + """Return ZFS datset path for the volume.""" + return posixpath.join(self.root_path, volume['name']) - def _create_target_group(self, tg_name, target_name): - # Create new target group - url = 'san/targetgroups' - data = { - 'name': tg_name, - 'members': [target_name] + def _get_snapshot_path(self, snapshot): + """Return ZFS snapshot path for the snapshot.""" + volume_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + volume_path = posixpath.join(self.root_path, volume_name) + return '%s@%s' % (volume_path, snapshot_name) + + def _get_target_group_name(self, target_name): + """Return Nexenta iSCSI target group name for volume.""" + return target_name.replace( + self.configuration.nexenta_target_prefix, + self.configuration.nexenta_target_group_prefix + ) + + def _get_target_name(self, target_group_name): + """Return Nexenta iSCSI target name for volume.""" + return target_group_name.replace( + self.configuration.nexenta_target_group_prefix, + self.configuration.nexenta_target_prefix + ) + + def _get_host_addresses(self): + """Return Nexenta IP 
addresses list.""" + host_addresses = [] + items = self.nef.netaddrs.list() + for item in items: + ip_cidr = six.text_type(item['address']) + ip_addr, ip_mask = ip_cidr.split('/') + ip_obj = ipaddress.ip_address(ip_addr) + if not ip_obj.is_loopback: + host_addresses.append(ip_obj.exploded) + LOG.debug('Configured IP addresses: %(addresses)s', + {'addresses': host_addresses}) + return host_addresses + + def _get_host_portals(self): + """Return configured iSCSI portals list.""" + host_portals = [] + host_addresses = self._get_host_addresses() + portal_host = self.iscsi_host + if portal_host: + if portal_host in host_addresses: + if self.portal_port: + portal_port = int(self.portal_port) + else: + portal_port = options.DEFAULT_ISCSI_PORT + host_portal = '%s:%s' % (portal_host, portal_port) + host_portals.append(host_portal) + else: + LOG.debug('Skip not a local portal IP address %(portal)s', + {'portal': portal_host}) + else: + LOG.debug('Configuration parameter nexenta_host is not defined') + for portal in self.portals.split(','): + if not portal: + continue + host_port = portal.split(':') + portal_host = host_port[0] + if portal_host in host_addresses: + if len(host_port) == 2: + portal_port = int(host_port[1]) + else: + portal_port = options.DEFAULT_ISCSI_PORT + host_portal = '%s:%s' % (portal_host, portal_port) + if host_portal not in host_portals: + host_portals.append(host_portal) + else: + LOG.debug('Skip not a local portal IP address %(portal)s', + {'portal': portal_host}) + LOG.debug('Configured iSCSI portals: %(portals)s', + {'portals': host_portals}) + return host_portals + + def _target_group_props(self, group_name, host_portals): + """Check and update an existing targets/portals for given target group. + + :param group_name: target group name + :param host_portals: configured host portals list + :returns: dictionary of portals per target + """ + if not group_name.startswith(self.target_group_prefix): + LOG.debug('Skip not a cinder target group %(group)s', + {'group': group_name}) + return {} + group_props = {} + payload = {'name': group_name} + data = self.nef.targetgroups.list(payload) + if not data: + LOG.debug('Skip target group %(group)s: group not found', + {'group': group_name}) + return {} + target_names = data[0]['members'] + if not target_names: + target_name = self._get_target_name(group_name) + self._create_target(target_name, host_portals) + self._update_target_group(group_name, [target_name]) + group_props[target_name] = host_portals + return group_props + for target_name in target_names: + group_props[target_name] = [] + payload = {'name': target_name} + data = self.nef.targets.list(payload) + if not data: + LOG.debug('Skip target group %(group)s: ' + 'group member %(target)s not found', + {'group': group_name, 'target': target_name}) + return {} + target_portals = data[0]['portals'] + if not target_portals: + LOG.debug('Skip target group %(group)s: ' + 'group member %(target)s has no portals', + {'group': group_name, 'target': target_name}) + return {} + for item in target_portals: + target_portal = '%s:%s' % (item['address'], item['port']) + if target_portal not in host_portals: + LOG.debug('Skip target group %(group)s: ' + 'group member %(target)s bind to a ' + 'non local portal address %(portal)s', + {'group': group_name, + 'target': target_name, + 'portal': target_portal}) + return {} + group_props[target_name].append(target_portal) + return group_props + + def initialize_connection(self, volume, connector): + """Do all steps to get zfs volume exported at 
separate target. + + :param volume: volume reference + :param connector: connector reference + :returns: dictionary of connection information + """ + volume_path = self._get_volume_path(volume) + host_iqn = connector.get('initiator') + LOG.debug('Initialize connection for volume: %(volume)s ' + 'and initiator: %(initiator)s', + {'volume': volume_path, 'initiator': host_iqn}) + + host_groups = [options.DEFAULT_HOST_GROUP] + host_group = self._get_host_group(host_iqn) + if host_group: + host_groups.append(host_group) + + host_portals = self._get_host_portals() + props_portals = [] + props_iqns = [] + props_luns = [] + payload = {'volume': volume_path} + mappings = self.nef.mappings.list(payload) + for mapping in mappings: + mapping_id = mapping['id'] + mapping_lu = mapping['lun'] + mapping_hg = mapping['hostGroup'] + mapping_tg = mapping['targetGroup'] + if mapping_tg == options.DEFAULT_TARGET_GROUP: + LOG.debug('Delete LUN mapping %(id)s for target group %(tg)s', + {'id': mapping_id, 'tg': mapping_tg}) + self._delete_lun_mapping(mapping_id) + continue + if mapping_hg not in host_groups: + LOG.debug('Skip LUN mapping %(id)s for host group %(hg)s', + {'id': mapping_id, 'hg': mapping_hg}) + continue + group_props = self._target_group_props(mapping_tg, host_portals) + if not group_props: + LOG.debug('Skip LUN mapping %(id)s for target group %(tg)s', + {'id': mapping_id, 'tg': mapping_tg}) + continue + for target_iqn in group_props: + target_portals = group_props[target_iqn] + props_portals += target_portals + props_iqns += [target_iqn] * len(target_portals) + props_luns += [mapping_lu] * len(target_portals) + + props = {} + props['target_discovered'] = False + props['encrypted'] = False + props['qos_specs'] = None + props['volume_id'] = volume['id'] + props['access_mode'] = 'rw' + multipath = connector.get('multipath', False) + + if props_luns: + if multipath: + props['target_portals'] = props_portals + props['target_iqns'] = props_iqns + props['target_luns'] = props_luns + else: + index = random.randrange(0, len(props_luns)) + props['target_portal'] = props_portals[index] + props['target_iqn'] = props_iqns[index] + props['target_lun'] = props_luns[index] + LOG.debug('Use existing LUN mapping(s) %(props)s', + {'props': props}) + return {'driver_volume_type': self.driver_volume_type, + 'data': props} + + if host_group is None: + host_group = '%s-%s' % (self.host_group_prefix, uuid.uuid4().hex) + self._create_host_group(host_group, [host_iqn]) + + mappings_spread = {} + targets_spread = {} + data = self.nef.targetgroups.list() + for item in data: + target_group = item['name'] + group_props = self._target_group_props(target_group, host_portals) + members = len(group_props) + if members == 0: + LOG.debug('Skip unsuitable target group %(tg)s', + {'tg': target_group}) + continue + payload = {'targetGroup': target_group} + data = self.nef.mappings.list(payload) + mappings = len(data) + if not mappings < self.luns_per_target: + LOG.debug('Skip target group %(tg)s: ' + 'group members limit reached: %(limit)s', + {'tg': target_group, 'limit': mappings}) + continue + targets_spread[target_group] = group_props + mappings_spread[target_group] = mappings + LOG.debug('Found target group %(tg)s with %(members)s ' + 'members and %(mappings)s LUNs', + {'tg': target_group, 'members': members, + 'mappings': mappings}) + + if not mappings_spread: + target = '%s-%s' % (self.target_prefix, uuid.uuid4().hex) + target_group = self._get_target_group_name(target) + self._create_target(target, host_portals) + 
self._create_target_group(target_group, [target]) + props_portals += host_portals + props_iqns += [target] * len(host_portals) + else: + target_group = min(mappings_spread, key=mappings_spread.get) + targets = targets_spread[target_group] + members = targets.keys() + mappings = mappings_spread[target_group] + LOG.debug('Using existing target group %(tg)s ' + 'with members %(members)s and %(mappings)s LUNs', + {'tg': target_group, 'members': members, + 'mappings': mappings}) + for target in targets: + portals = targets[target] + props_portals += portals + props_iqns += [target] * len(portals) + + payload = {'volume': volume_path, + 'targetGroup': target_group, + 'hostGroup': host_group} + self.nef.mappings.create(payload) + mapping = {} + for attempt in range(0, self.nef.retries): + mapping = self.nef.mappings.list(payload) + if mapping: + break + self.nef.delay(attempt) + if not mapping: + message = (_('Failed to get LUN number for %(volume)s') + % {'volume': volume_path}) + raise jsonrpc.NefException(code='ENOTBLK', message=message) + lun = mapping[0]['lun'] + props_luns = [lun] * len(props_iqns) + + if multipath: + props['target_portals'] = props_portals + props['target_iqns'] = props_iqns + props['target_luns'] = props_luns + else: + index = random.randrange(0, len(props_luns)) + props['target_portal'] = props_portals[index] + props['target_iqn'] = props_iqns[index] + props['target_lun'] = props_luns[index] + + if not self.lu_writebackcache_disabled: + LOG.debug('Get LUN guid for volume %(volume)s', + {'volume': volume_path}) + payload = {'fields': 'guid', 'volume': volume_path} + data = self.nef.logicalunits.list(payload) + guid = data[0]['guid'] + payload = {'writebackCacheDisabled': False} + self.nef.logicalunits.set(guid, payload) + + LOG.debug('Created new LUN mapping(s): %(props)s', + {'props': props}) + return {'driver_volume_type': self.driver_volume_type, + 'data': props} + + def _create_target_group(self, name, members): + """Create a new target group with members. + + :param name: group name + :param members: group members list + """ + payload = {'name': name, 'members': members} + self.nef.targetgroups.create(payload) + + def _update_target_group(self, name, members): + """Update a existing target group with new members. + + :param name: group name + :param members: group members list + """ + payload = {'members': members} + self.nef.targetgroups.set(name, payload) + + def _delete_lun_mapping(self, name): + """Delete an existing LUN mapping. + + :param name: LUN mapping ID + """ + self.nef.mappings.delete(name) + + def _create_target(self, name, portals): + """Create a new target with portals. + + :param name: target name + :param portals: target portals list + """ + payload = {'name': name, + 'portals': self._s2d(portals)} + self.nef.targets.create(payload) + + def _get_host_group(self, member): + """Find existing host group by group member. + + :param member: host group member + :returns: host group name + """ + host_groups = self.nef.hostgroups.list() + for host_group in host_groups: + members = host_group['members'] + if member in members: + name = host_group['name'] + LOG.debug('Found host group %(name)s for member %(member)s', + {'name': name, 'member': member}) + return name + return None + + def _create_host_group(self, name, members): + """Create a new host group. 
+ + :param name: host group name + :param members: host group members list + """ + payload = {'name': name, 'members': members} + self.nef.hostgroups.create(payload) + + @staticmethod + def _s2d(css): + """Parse list of colon-separated address and port to dictionary. + + :param css: list of colon-separated address and port + :returns: dictionary + """ + result = [] + for key_val in css: + key, val = key_val.split(':') + result.append({'address': key, 'port': int(val)}) + return result + + @staticmethod + def _d2s(kvp): + """Parse dictionary to list of colon-separated address and port. + + :param kvp: dictionary + :returns: list of colon-separated address and port + """ + result = [] + for key_val in kvp: + result.append('%s:%s' % (key_val['address'], key_val['port'])) + return result + + def create_consistencygroup(self, context, group): + """Creates a consistency group. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be created. + :returns: group_model_update + """ + group_model_update = {} + return group_model_update + + def create_group(self, context, group): + """Creates a group. + + :param context: the context of the caller. + :param group: the group object. + :returns: model_update + """ + return self.create_consistencygroup(context, group) + + def delete_consistencygroup(self, context, group, volumes): + """Deletes a consistency group. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be deleted. + :param volumes: a list of volume dictionaries in the group. + :returns: group_model_update, volumes_model_update + """ + group_model_update = {} + volumes_model_update = [] + for volume in volumes: + self.delete_volume(volume) + return group_model_update, volumes_model_update + + def delete_group(self, context, group, volumes): + """Deletes a group. + + :param context: the context of the caller. + :param group: the group object. + :param volumes: a list of volume objects in the group. + :returns: model_update, volumes_model_update + """ + return self.delete_consistencygroup(context, group, volumes) + + def update_consistencygroup(self, context, group, add_volumes=None, + remove_volumes=None): + """Updates a consistency group. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be updated. + :param add_volumes: a list of volume dictionaries to be added. + :param remove_volumes: a list of volume dictionaries to be removed. + :returns: group_model_update, add_volumes_update, remove_volumes_update + """ + group_model_update = {} + add_volumes_update = [] + remove_volumes_update = [] + return group_model_update, add_volumes_update, remove_volumes_update + + def update_group(self, context, group, + add_volumes=None, remove_volumes=None): + """Updates a group. + + :param context: the context of the caller. + :param group: the group object. + :param add_volumes: a list of volume objects to be added. + :param remove_volumes: a list of volume objects to be removed. + :returns: model_update, add_volumes_update, remove_volumes_update + """ + return self.update_consistencygroup(context, group, add_volumes, + remove_volumes) + + def create_cgsnapshot(self, context, cgsnapshot, snapshots): + """Creates a consistency group snapshot. + + :param context: the context of the caller. + :param cgsnapshot: the dictionary of the cgsnapshot to be created. + :param snapshots: a list of snapshot dictionaries in the cgsnapshot. 
+ :returns: group_model_update, snapshots_model_update + """ + group_model_update = {} + snapshots_model_update = [] + cgsnapshot_name = self.group_snapshot_template % cgsnapshot['id'] + cgsnapshot_path = '%s@%s' % (self.root_path, cgsnapshot_name) + create_payload = {'path': cgsnapshot_path, 'recursive': True} + self.nef.snapshots.create(create_payload) + for snapshot in snapshots: + volume_name = snapshot['volume_name'] + volume_path = posixpath.join(self.root_path, volume_name) + snapshot_name = snapshot['name'] + snapshot_path = '%s@%s' % (volume_path, cgsnapshot_name) + rename_payload = {'newName': snapshot_name} + self.nef.snapshots.rename(snapshot_path, rename_payload) + delete_payload = {'defer': True, 'recursive': True} + self.nef.snapshots.delete(cgsnapshot_path, delete_payload) + return group_model_update, snapshots_model_update + + def create_group_snapshot(self, context, group_snapshot, snapshots): + """Creates a group_snapshot. + + :param context: the context of the caller. + :param group_snapshot: the GroupSnapshot object to be created. + :param snapshots: a list of Snapshot objects in the group_snapshot. + :returns: model_update, snapshots_model_update + """ + return self.create_cgsnapshot(context, group_snapshot, snapshots) + + def delete_cgsnapshot(self, context, cgsnapshot, snapshots): + """Deletes a consistency group snapshot. + + :param context: the context of the caller. + :param cgsnapshot: the dictionary of the cgsnapshot to be created. + :param snapshots: a list of snapshot dictionaries in the cgsnapshot. + :returns: group_model_update, snapshots_model_update + """ + group_model_update = {} + snapshots_model_update = [] + for snapshot in snapshots: + self.delete_snapshot(snapshot) + return group_model_update, snapshots_model_update + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + """Deletes a group_snapshot. + + :param context: the context of the caller. + :param group_snapshot: the GroupSnapshot object to be deleted. + :param snapshots: a list of snapshot objects in the group_snapshot. + :returns: model_update, snapshots_model_update + """ + return self.delete_cgsnapshot(context, group_snapshot, snapshots) + + def create_consistencygroup_from_src(self, context, group, volumes, + cgsnapshot=None, snapshots=None, + source_cg=None, source_vols=None): + """Creates a consistency group from source. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be created. + :param volumes: a list of volume dictionaries in the group. + :param cgsnapshot: the dictionary of the cgsnapshot as source. + :param snapshots: a list of snapshot dictionaries in the cgsnapshot. + :param source_cg: the dictionary of a consistency group as source. + :param source_vols: a list of volume dictionaries in the source_cg. 
+ :returns: group_model_update, volumes_model_update + """ + group_model_update = {} + volumes_model_update = [] + if cgsnapshot and snapshots: + for volume, snapshot in zip(volumes, snapshots): + self.create_volume_from_snapshot(volume, snapshot) + elif source_cg and source_vols: + snapshot_name = self.origin_snapshot_template % group['id'] + snapshot_path = '%s@%s' % (self.root_path, snapshot_name) + create_payload = {'path': snapshot_path, 'recursive': True} + self.nef.snapshots.create(create_payload) + for volume, source_vol in zip(volumes, source_vols): + snapshot = { + 'name': snapshot_name, + 'volume_id': source_vol['id'], + 'volume_name': source_vol['name'], + 'volume_size': source_vol['size'] + } + self.create_volume_from_snapshot(volume, snapshot) + delete_payload = {'defer': True, 'recursive': True} + self.nef.snapshots.delete(snapshot_path, delete_payload) + return group_model_update, volumes_model_update + + def create_group_from_src(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + """Creates a group from source. + + :param context: the context of the caller. + :param group: the Group object to be created. + :param volumes: a list of Volume objects in the group. + :param group_snapshot: the GroupSnapshot object as source. + :param snapshots: a list of snapshot objects in group_snapshot. + :param source_group: the Group object as source. + :param source_vols: a list of volume objects in the source_group. + :returns: model_update, volumes_model_update + """ + return self.create_consistencygroup_from_src(context, group, volumes, + group_snapshot, snapshots, + source_group, source_vols) + + def _get_existing_volume(self, existing_ref): + types = { + 'source-name': 'name', + 'source-guid': 'guid' } - self.nef.post(url, data) + if not any(key in types for key in existing_ref): + keys = ', '.join(types.keys()) + message = (_('Manage existing volume failed ' + 'due to invalid backend reference. ' + 'Volume reference must contain ' + 'at least one valid key: %(keys)s') + % {'keys': keys}) + raise jsonrpc.NefException(code='EINVAL', message=message) + payload = { + 'parent': self.root_path, + 'fields': 'name,path,volumeSize' + } + for key, value in types.items(): + if key in existing_ref: + payload[value] = existing_ref[key] + existing_volumes = self.nef.volumes.list(payload) + if len(existing_volumes) == 1: + volume_path = existing_volumes[0]['path'] + volume_name = existing_volumes[0]['name'] + volume_size = existing_volumes[0]['volumeSize'] // units.Gi + existing_volume = { + 'name': volume_name, + 'path': volume_path, + 'size': volume_size + } + vid = utils.extract_id_from_volume_name(volume_name) + if utils.check_already_managed_volume(vid): + message = (_('Volume %(name)s already managed') + % {'name': volume_name}) + raise jsonrpc.NefException(code='EBUSY', message=message) + return existing_volume + elif not existing_volumes: + code = 'ENOENT' + reason = _('no matching volumes were found') + else: + code = 'EINVAL' + reason = _('too many volumes were found') + message = (_('Unable to manage existing volume by ' + 'reference %(reference)s: %(reason)s') + % {'reference': existing_ref, 'reason': reason}) + raise jsonrpc.NefException(code=code, message=message) + + def _check_already_managed_snapshot(self, snapshot_id): + """Check cinder database for already managed snapshot. 
+ + :param snapshot_id: snapshot id parameter + :returns: return True, if database entry with specified + snapshot id exists, otherwise return False + """ + if not isinstance(snapshot_id, six.string_types): + return False + try: + uuid.UUID(snapshot_id, version=4) + except ValueError: + return False + ctxt = context.get_admin_context() + return objects.Snapshot.exists(ctxt, snapshot_id) + + def _get_existing_snapshot(self, snapshot, existing_ref): + types = { + 'source-name': 'name', + 'source-guid': 'guid' + } + if not any(key in types for key in existing_ref): + keys = ', '.join(types.keys()) + message = (_('Manage existing snapshot failed ' + 'due to invalid backend reference. ' + 'Snapshot reference must contain ' + 'at least one valid key: %(keys)s') + % {'keys': keys}) + raise jsonrpc.NefException(code='EINVAL', message=message) + volume_name = snapshot['volume_name'] + volume_size = snapshot['volume_size'] + volume = {'name': volume_name} + volume_path = self._get_volume_path(volume) + payload = { + 'parent': volume_path, + 'fields': 'name,path', + 'recursive': False + } + for key, value in types.items(): + if key in existing_ref: + payload[value] = existing_ref[key] + existing_snapshots = self.nef.snapshots.list(payload) + if len(existing_snapshots) == 1: + name = existing_snapshots[0]['name'] + path = existing_snapshots[0]['path'] + existing_snapshot = { + 'name': name, + 'path': path, + 'volume_name': volume_name, + 'volume_size': volume_size + } + sid = utils.extract_id_from_snapshot_name(name) + if self._check_already_managed_snapshot(sid): + message = (_('Snapshot %(name)s already managed') + % {'name': name}) + raise jsonrpc.NefException(code='EBUSY', message=message) + return existing_snapshot + elif not existing_snapshots: + code = 'ENOENT' + reason = _('no matching snapshots were found') + else: + code = 'EINVAL' + reason = _('too many snapshots were found') + message = (_('Unable to manage existing snapshot by ' + 'reference %(reference)s: %(reason)s') + % {'reference': existing_ref, 'reason': reason}) + raise jsonrpc.NefException(code=code, message=message) + + @coordination.synchronized('{self.nef.lock}') + def manage_existing(self, volume, existing_ref): + """Brings an existing backend storage object under Cinder management. + + existing_ref is passed straight through from the API request's + manage_existing_ref value, and it is up to the driver how this should + be interpreted. It should be sufficient to identify a storage object + that the driver should somehow associate with the newly-created cinder + volume structure. + + There are two ways to do this: + + 1. Rename the backend storage object so that it matches the, + volume['name'] which is how drivers traditionally map between a + cinder volume and the associated backend storage object. + + 2. Place some metadata on the volume, or somewhere in the backend, that + allows other driver requests (e.g. delete, clone, attach, detach...) + to locate the backend storage object when required. + + If the existing_ref doesn't make sense, or doesn't refer to an existing + backend storage object, raise a ManageExistingInvalidReference + exception. + + The volume may have a volume_type, and the driver can inspect that and + compare against the properties of the referenced backend storage + object. If they are incompatible, raise a + ManageExistingVolumeTypeMismatch, specifying a reason for the failure. 
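+
+ For this driver the reference is expected to contain at least one of
+ the keys resolved by _get_existing_volume; the values below are
+ illustrative only:
+
+ {'source-name': 'name-of-existing-backend-volume'}
+ {'source-guid': 'guid-of-existing-backend-volume'}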
+ + :param volume: Cinder volume to manage + :param existing_ref: Driver-specific information used to identify a + volume + """ + existing_volume = self._get_existing_volume(existing_ref) + existing_volume_path = existing_volume['path'] + payload = {'volume': existing_volume_path} + mappings = self.nef.mappings.list(payload) + if mappings: + message = (_('Failed to manage existing volume %(path)s ' + 'due to existing LUN mappings: %(mappings)s') + % {'path': existing_volume_path, + 'mappings': mappings}) + raise jsonrpc.NefException(code='EEXIST', message=message) + if existing_volume['name'] != volume['name']: + volume_path = self._get_volume_path(volume) + payload = {'newPath': volume_path} + self.nef.volumes.rename(existing_volume_path, payload) + + def manage_existing_get_size(self, volume, existing_ref): + """Return size of volume to be managed by manage_existing. + + When calculating the size, round up to the next GB. + + :param volume: Cinder volume to manage + :param existing_ref: Driver-specific information used to identify a + volume + :returns size: Volume size in GiB (integer) + """ + existing_volume = self._get_existing_volume(existing_ref) + return existing_volume['size'] + + def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, + sort_keys, sort_dirs): + """List volumes on the backend available for management by Cinder. + + Returns a list of dictionaries, each specifying a volume in the host, + with the following keys: + - reference (dictionary): The reference for a volume, which can be + passed to "manage_existing". + - size (int): The size of the volume according to the storage + backend, rounded up to the nearest GB. + - safe_to_manage (boolean): Whether or not this volume is safe to + manage according to the storage backend. For example, is the volume + in use or invalid for any reason. + - reason_not_safe (string): If safe_to_manage is False, the reason why. + - cinder_id (string): If already managed, provide the Cinder ID. + - extra_info (string): Any extra information to return to the user + + :param cinder_volumes: A list of volumes in this host that Cinder + currently manages, used to determine if + a volume is manageable or not. 
+ :param marker: The last item of the previous page; we return the + next results after this value (after sorting) + :param limit: Maximum number of items to return + :param offset: Number of items to skip after marker + :param sort_keys: List of keys to sort results by (valid keys are + 'identifier' and 'size') + :param sort_dirs: List of directions to sort by, corresponding to + sort_keys (valid directions are 'asc' and 'desc') + """ + manageable_volumes = [] + cinder_volume_names = {} + for cinder_volume in cinder_volumes: + key = cinder_volume['name'] + value = cinder_volume['id'] + cinder_volume_names[key] = value + payload = { + 'parent': self.root_path, + 'fields': 'name,guid,path,volumeSize', + 'recursive': False + } + volumes = self.nef.volumes.list(payload) + for volume in volumes: + safe_to_manage = True + reason_not_safe = None + cinder_id = None + extra_info = None + path = volume['path'] + guid = volume['guid'] + size = volume['volumeSize'] // units.Gi + name = volume['name'] + if name in cinder_volume_names: + cinder_id = cinder_volume_names[name] + safe_to_manage = False + reason_not_safe = _('Volume already managed') + else: + payload = { + 'volume': path, + 'fields': 'hostGroup' + } + mappings = self.nef.mappings.list(payload) + members = [] + for mapping in mappings: + hostgroup = mapping['hostGroup'] + if hostgroup == options.DEFAULT_HOST_GROUP: + members.append(hostgroup) + else: + group = self.nef.hostgroups.get(hostgroup) + members += group['members'] + if members: + safe_to_manage = False + hosts = ', '.join(members) + reason_not_safe = (_('Volume is connected ' + 'to host(s) %(hosts)s') + % {'hosts': hosts}) + reference = { + 'source-name': name, + 'source-guid': guid + } + manageable_volumes.append({ + 'reference': reference, + 'size': size, + 'safe_to_manage': safe_to_manage, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': extra_info + }) + return utils.paginate_entries_list(manageable_volumes, + marker, limit, offset, + sort_keys, sort_dirs) + + def unmanage(self, volume): + """Removes the specified volume from Cinder management. + + Does not delete the underlying backend storage object. + + For most drivers, this will not need to do anything. However, some + drivers might use this call as an opportunity to clean up any + Cinder-specific configuration that they have associated with the + backend storage object. + + :param volume: Cinder volume to unmanage + """ + pass + + @coordination.synchronized('{self.nef.lock}') + def manage_existing_snapshot(self, snapshot, existing_ref): + """Brings an existing backend storage object under Cinder management. + + existing_ref is passed straight through from the API request's + manage_existing_ref value, and it is up to the driver how this should + be interpreted. It should be sufficient to identify a storage object + that the driver should somehow associate with the newly-created cinder + snapshot structure. + + There are two ways to do this: + + 1. Rename the backend storage object so that it matches the + snapshot['name'] which is how drivers traditionally map between a + cinder snapshot and the associated backend storage object. + + 2. Place some metadata on the snapshot, or somewhere in the backend, + that allows other driver requests (e.g. delete) to locate the + backend storage object when required. + + If the existing_ref doesn't make sense, or doesn't refer to an existing + backend storage object, raise a ManageExistingInvalidReference + exception. 
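+
+ As with volumes, the reference passed to this driver is resolved by
+ _get_existing_snapshot and is expected to contain a 'source-name' or
+ 'source-guid' key, e.g. {'source-name': 'name-of-existing-snapshot'}
+ (the value shown is illustrative only).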
+ + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + """ + existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) + existing_snapshot_path = existing_snapshot['path'] + if existing_snapshot['name'] != snapshot['name']: + payload = {'newName': snapshot['name']} + self.nef.snapshots.rename(existing_snapshot_path, payload) + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Return size of snapshot to be managed by manage_existing. + + When calculating the size, round up to the next GB. + + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + :returns size: Volume snapshot size in GiB (integer) + """ + existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) + return existing_snapshot['volume_size'] + + def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, + sort_keys, sort_dirs): + """List snapshots on the backend available for management by Cinder. + + Returns a list of dictionaries, each specifying a snapshot in the host, + with the following keys: + - reference (dictionary): The reference for a snapshot, which can be + passed to "manage_existing_snapshot". + - size (int): The size of the snapshot according to the storage + backend, rounded up to the nearest GB. + - safe_to_manage (boolean): Whether or not this snapshot is safe to + manage according to the storage backend. For example, is the snapshot + in use or invalid for any reason. + - reason_not_safe (string): If safe_to_manage is False, the reason why. + - cinder_id (string): If already managed, provide the Cinder ID. + - extra_info (string): Any extra information to return to the user + - source_reference (string): Similar to "reference", but for the + snapshot's source volume. + + :param cinder_snapshots: A list of snapshots in this host that Cinder + currently manages, used to determine if + a snapshot is manageable or not. 
+ :param marker: The last item of the previous page; we return the
+ next results after this value (after sorting)
+ :param limit: Maximum number of items to return
+ :param offset: Number of items to skip after marker
+ :param sort_keys: List of keys to sort results by (valid keys are
+ 'identifier' and 'size')
+ :param sort_dirs: List of directions to sort by, corresponding to
+ sort_keys (valid directions are 'asc' and 'desc')
+
+ """
+ manageable_snapshots = []
+ cinder_volume_names = {}
+ cinder_snapshot_names = {}
+ ctxt = context.get_admin_context()
+ cinder_volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
+ for cinder_volume in cinder_volumes:
+ key = self._get_volume_path(cinder_volume)
+ value = {
+ 'name': cinder_volume['name'],
+ 'size': cinder_volume['size']
+ }
+ cinder_volume_names[key] = value
+ for cinder_snapshot in cinder_snapshots:
+ key = cinder_snapshot['name']
+ value = {
+ 'id': cinder_snapshot['id'],
+ 'size': cinder_snapshot['volume_size'],
+ 'parent': cinder_snapshot['volume_name']
+ }
+ cinder_snapshot_names[key] = value
+ payload = {
+ 'parent': self.root_path,
+ 'fields': 'name,guid,path,parent,hprService,snaplistId',
+ 'recursive': True
+ }
+ snapshots = self.nef.snapshots.list(payload)
+ for snapshot in snapshots:
+ safe_to_manage = True
+ reason_not_safe = None
+ cinder_id = None
+ extra_info = None
+ name = snapshot['name']
+ guid = snapshot['guid']
+ path = snapshot['path']
+ parent = snapshot['parent']
+ if parent not in cinder_volume_names:
+ LOG.debug('Skip snapshot %(path)s: parent '
+ 'volume %(parent)s is unmanaged',
+ {'path': path, 'parent': parent})
+ continue
+ if name.startswith(self.origin_snapshot_template):
+ LOG.debug('Skip temporary origin snapshot %(path)s',
+ {'path': path})
+ continue
+ if name.startswith(self.group_snapshot_template):
+ LOG.debug('Skip temporary group snapshot %(path)s',
+ {'path': path})
+ continue
+ if snapshot['hprService'] or snapshot['snaplistId']:
+ LOG.debug('Skip HPR/snapping snapshot %(path)s',
+ {'path': path})
+ continue
+ if name in cinder_snapshot_names:
+ size = cinder_snapshot_names[name]['size']
+ cinder_id = cinder_snapshot_names[name]['id']
+ safe_to_manage = False
+ reason_not_safe = _('Snapshot already managed')
+ else:
+ size = cinder_volume_names[parent]['size']
+ payload = {'fields': 'clones'}
+ props = self.nef.snapshots.get(path, payload)
+ clones = props['clones']
+ unmanaged_clones = []
+ for clone in clones:
+ if clone not in cinder_volume_names:
+ unmanaged_clones.append(clone)
+ if unmanaged_clones:
+ safe_to_manage = False
+ dependent_clones = ', '.join(unmanaged_clones)
+ reason_not_safe = (_('Snapshot has unmanaged '
+ 'dependent clone(s) %(clones)s')
+ % {'clones': dependent_clones})
+ reference = {
+ 'source-name': name,
+ 'source-guid': guid
+ }
+ source_reference = {
+ 'name': cinder_volume_names[parent]['name']
+ }
+ manageable_snapshots.append({
+ 'reference': reference,
+ 'size': size,
+ 'safe_to_manage': safe_to_manage,
+ 'reason_not_safe': reason_not_safe,
+ 'cinder_id': cinder_id,
+ 'extra_info': extra_info,
+ 'source_reference': source_reference
+ })
+ return utils.paginate_entries_list(manageable_snapshots,
+ marker, limit, offset,
+ sort_keys, sort_dirs)
+
+ def unmanage_snapshot(self, snapshot):
+ """Removes the specified snapshot from Cinder management.
+
+ Does not delete the underlying backend storage object.
+
+ For most drivers, this will not need to do anything.
However, some + drivers might use this call as an opportunity to clean up any + Cinder-specific configuration that they have associated with the + backend storage object. + + :param snapshot: Cinder volume snapshot to unmanage + """ + pass diff --git a/cinder/volume/drivers/nexenta/ns5/jsonrpc.py b/cinder/volume/drivers/nexenta/ns5/jsonrpc.py index 8fba11fed4d..6b632288d53 100644 --- a/cinder/volume/drivers/nexenta/ns5/jsonrpc.py +++ b/cinder/volume/drivers/nexenta/ns5/jsonrpc.py @@ -1,4 +1,4 @@ -# Copyright 2016 Nexenta Systems, Inc. +# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,188 +13,604 @@ # License for the specific language governing permissions and limitations # under the License. -import requests -import time +import hashlib +import json +import posixpath +from eventlet import greenthread from oslo_log import log as logging -from oslo_serialization import jsonutils +import requests +import six from cinder import exception from cinder.i18n import _ -from cinder.utils import retry LOG = logging.getLogger(__name__) -TIMEOUT = 60 -def check_error(response): - code = response.status_code - if code not in (200, 201, 202): - reason = response.reason - body = response.content - try: - content = jsonutils.loads(body) if body else None - except ValueError: - raise exception.VolumeBackendAPIException( - data=_( - 'Could not parse response: %(code)s %(reason)s ' - '%(content)s') % { - 'code': code, 'reason': reason, 'content': body}) - if content and 'code' in content: - raise exception.NexentaException(content) - raise exception.VolumeBackendAPIException( - data=_( - 'Got bad response: %(code)s %(reason)s %(content)s') % { - 'code': code, 'reason': reason, 'content': content}) +class NefException(exception.VolumeDriverException): + def __init__(self, data=None, **kwargs): + defaults = { + 'name': 'NexentaError', + 'code': 'EBADMSG', + 'source': 'CinderDriver', + 'message': 'Unknown error' + } + if isinstance(data, dict): + for key in defaults: + if key in kwargs: + continue + if key in data: + kwargs[key] = data[key] + else: + kwargs[key] = defaults[key] + elif isinstance(data, six.string_types): + if 'message' not in kwargs: + kwargs['message'] = data + for key in defaults: + if key not in kwargs: + kwargs[key] = defaults[key] + message = (_('%(message)s (source: %(source)s, ' + 'name: %(name)s, code: %(code)s)') + % kwargs) + self.code = kwargs['code'] + del kwargs['message'] + super(NefException, self).__init__(message, **kwargs) -class RESTCaller(object): - - retry_exc_tuple = ( - requests.exceptions.ConnectionError, - requests.exceptions.ConnectTimeout - ) - +class NefRequest(object): def __init__(self, proxy, method): - self.__proxy = proxy - self.__method = method + self.proxy = proxy + self.method = method + self.path = None + self.lock = False + self.time = 0 + self.data = [] + self.payload = {} + self.stat = {} + self.hooks = { + 'response': self.hook + } + self.kwargs = { + 'hooks': self.hooks, + 'timeout': self.proxy.timeout + } - def get_full_url(self, path): - return '/'.join((self.__proxy.url, path)) - - @retry(retry_exc_tuple, interval=1, retries=6) - def __call__(self, *args): - url = self.get_full_url(args[0]) - kwargs = {'timeout': TIMEOUT, 'verify': False} - data = None - if len(args) > 1: - data = args[1] - kwargs['json'] = data - - LOG.debug('Sending JSON data: %s, method: %s, data: %s', - url, self.__method, data) - - response = getattr(self.__proxy.session, 
self.__method)(url, **kwargs) - check_error(response) - content = (jsonutils.loads(response.content) - if response.content else None) - LOG.debug("Got response: %(code)s %(reason)s %(content)s", { - 'code': response.status_code, - 'reason': response.reason, - 'content': content}) - - if response.status_code == 202 and content: - url = self.get_full_url(content['links'][0]['href']) - keep_going = True - while keep_going: - time.sleep(1) - response = self.__proxy.session.get(url, verify=False) - check_error(response) - LOG.debug("Got response: %(code)s %(reason)s", { - 'code': response.status_code, - 'reason': response.reason}) - content = response.json() if response.content else None - keep_going = response.status_code == 202 + def __call__(self, path, payload=None): + LOG.debug('NEF request start: %(method)s %(path)s %(payload)s', + {'method': self.method, 'path': path, 'payload': payload}) + if self.method not in ['get', 'delete', 'put', 'post']: + message = (_('NEF API does not support %(method)s method') + % {'method': self.method}) + raise NefException(code='EINVAL', message=message) + if not path: + message = _('NEF API call requires collection path') + raise NefException(code='EINVAL', message=message) + self.path = path + if payload: + if not isinstance(payload, dict): + message = _('NEF API call payload must be a dictionary') + raise NefException(code='EINVAL', message=message) + if self.method in ['get', 'delete']: + self.payload = {'params': payload} + elif self.method in ['put', 'post']: + self.payload = {'data': json.dumps(payload)} + try: + response = self.request(self.method, self.path, **self.payload) + except (requests.exceptions.ConnectionError, + requests.exceptions.Timeout) as error: + LOG.debug('Failed to %(method)s %(path)s %(payload)s: %(error)s', + {'method': self.method, 'path': self.path, + 'payload': self.payload, 'error': error}) + if not self.failover(): + raise + LOG.debug('Retry initial request after failover: ' + '%(method)s %(path)s %(payload)s', + {'method': self.method, + 'path': self.path, + 'payload': self.payload}) + response = self.request(self.method, self.path, **self.payload) + LOG.debug('NEF request done: %(method)s %(path)s %(payload)s, ' + 'total response time: %(time)s seconds, ' + 'total requests count: %(count)s, ' + 'requests statistics: %(stat)s', + {'method': self.method, + 'path': self.path, + 'payload': self.payload, + 'time': self.time, + 'count': sum(self.stat.values()), + 'stat': self.stat}) + if response.ok and not response.content: + return None + content = json.loads(response.content) + if not response.ok: + raise NefException(content) + if isinstance(content, dict) and 'data' in content: + return self.data return content + def request(self, method, path, **kwargs): + url = self.proxy.url(path) + LOG.debug('Perform session request: %(method)s %(url)s %(body)s', + {'method': method, 'url': url, 'body': kwargs}) + kwargs.update(self.kwargs) + return self.proxy.session.request(method, url, **kwargs) -class HTTPSAuth(requests.auth.AuthBase): + def hook(self, response, **kwargs): + initial_text = (_('initial request %(method)s %(path)s %(body)s') + % {'method': self.method, + 'path': self.path, + 'body': self.payload}) + request_text = (_('session request %(method)s %(url)s %(body)s') + % {'method': response.request.method, + 'url': response.request.url, + 'body': response.request.body}) + response_text = (_('session response %(code)s %(content)s') + % {'code': response.status_code, + 'content': response.content}) + text = 
(_('%(request_text)s and %(response_text)s') + % {'request_text': request_text, + 'response_text': response_text}) + LOG.debug('Hook start on %(text)s', {'text': text}) - def __init__(self, url, username, password): - self.url = url - self.username = username - self.password = password - self.token = None + if response.status_code not in self.stat: + self.stat[response.status_code] = 0 + self.stat[response.status_code] += 1 + self.time += response.elapsed.total_seconds() - def __eq__(self, other): - return all([ - self.url == getattr(other, 'url', None), - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None), - self.token == getattr(other, 'token', None) - ]) + if response.ok and not response.content: + LOG.debug('Hook done on %(text)s: ' + 'empty response content', + {'text': text}) + return response - def __ne__(self, other): - return not self == other + if not response.content: + message = (_('There is no response content ' + 'is available for %(text)s') + % {'text': text}) + raise NefException(code='ENODATA', message=message) - def handle_401(self, r, **kwargs): - if r.status_code == 401: - LOG.debug('Got 401. Trying to reauth...') - self.token = self.https_auth() - # Consume content and release the original connection - # to allow our new request to reuse the same one. - r.content - r.close() - prep = r.request.copy() - requests.cookies.extract_cookies_to_jar( - prep._cookies, r.request, r.raw) - prep.prepare_cookies(prep._cookies) + try: + content = json.loads(response.content) + except (TypeError, ValueError) as error: + message = (_('Failed to decode JSON for %(text)s: %(error)s') + % {'text': text, 'error': error}) + raise NefException(code='ENOMSG', message=message) - prep.headers['Authorization'] = 'Bearer %s' % self.token - _r = r.connection.send(prep, **kwargs) - _r.history.append(r) - _r.request = prep + method = 'get' + if response.status_code == requests.codes.unauthorized: + if self.stat[response.status_code] > self.proxy.retries: + raise NefException(content) + self.auth() + LOG.debug('Retry %(text)s after authentication', + {'text': request_text}) + request = response.request.copy() + request.headers.update(self.proxy.session.headers) + return self.proxy.session.send(request, **kwargs) + elif response.status_code == requests.codes.not_found: + if self.lock: + LOG.debug('Hook done on %(text)s: ' + 'nested failover is detected', + {'text': text}) + return response + if self.stat[response.status_code] > self.proxy.retries: + raise NefException(content) + if not self.failover(): + LOG.debug('Hook done on %(text)s: ' + 'no valid hosts found', + {'text': text}) + return response + LOG.debug('Retry %(text)s after failover', + {'text': initial_text}) + self.data = [] + return self.request(self.method, self.path, **self.payload) + elif response.status_code == requests.codes.server_error: + if not (isinstance(content, dict) and + 'code' in content and + content['code'] == 'EBUSY'): + raise NefException(content) + if self.stat[response.status_code] > self.proxy.retries: + raise NefException(content) + self.proxy.delay(self.stat[response.status_code]) + LOG.debug('Retry %(text)s after delay', + {'text': initial_text}) + self.data = [] + return self.request(self.method, self.path, **self.payload) + elif response.status_code == requests.codes.accepted: + path = self.getpath(content, 'monitor') + if not path: + message = (_('There is no monitor path ' + 'available for %(text)s') + % {'text': text}) + raise NefException(code='ENOMSG', 
message=message) + self.proxy.delay(self.stat[response.status_code]) + return self.request(method, path) + elif response.status_code == requests.codes.ok: + if not (isinstance(content, dict) and 'data' in content): + LOG.debug('Hook done on %(text)s: there ' + 'is no JSON data available', + {'text': text}) + return response + LOG.debug('Append %(count)s data items to response', + {'count': len(content['data'])}) + self.data += content['data'] + path = self.getpath(content, 'next') + if not path: + LOG.debug('Hook done on %(text)s: there ' + 'is no next path available', + {'text': text}) + return response + LOG.debug('Perform next session request %(method)s %(path)s', + {'method': method, 'path': path}) + return self.request(method, path) + LOG.debug('Hook done on %(text)s and ' + 'returned original response', + {'text': text}) + return response - return _r - return r + def auth(self): + method = 'post' + path = 'auth/login' + payload = { + 'username': self.proxy.username, + 'password': self.proxy.password + } + data = json.dumps(payload) + kwargs = {'data': data} + self.proxy.delete_bearer() + response = self.request(method, path, **kwargs) + content = json.loads(response.content) + if not (isinstance(content, dict) and 'token' in content): + message = (_('There is no authentication token available ' + 'for authentication request %(method)s %(url)s ' + '%(body)s and response %(code)s %(content)s') + % {'method': response.request.method, + 'url': response.request.url, + 'body': response.request.body, + 'code': response.status_code, + 'content': response.content}) + raise NefException(code='ENODATA', message=message) + token = content['token'] + self.proxy.update_token(token) - def __call__(self, r): - if not self.token: - self.token = self.https_auth() - r.headers['Authorization'] = 'Bearer %s' % self.token - r.register_hook('response', self.handle_401) - return r + def failover(self): + result = False + self.lock = True + method = 'get' + root = self.proxy.root + for host in self.proxy.hosts: + self.proxy.update_host(host) + LOG.debug('Try to failover path ' + '%(root)s to host %(host)s', + {'root': root, 'host': host}) + try: + response = self.request(method, root) + except (requests.exceptions.ConnectionError, + requests.exceptions.Timeout) as error: + LOG.debug('Skip unavailable host %(host)s ' + 'due to error: %(error)s', + {'host': host, 'error': error}) + continue + LOG.debug('Failover result: %(code)s %(content)s', + {'code': response.status_code, + 'content': response.content}) + if response.status_code == requests.codes.ok: + LOG.debug('Successful failover path ' + '%(root)s to host %(host)s', + {'root': root, 'host': host}) + self.proxy.update_lock() + result = True + break + else: + LOG.debug('Skip unsuitable host %(host)s: ' + 'there is no %(root)s path found', + {'host': host, 'root': root}) + self.lock = False + return result - def https_auth(self): - LOG.debug('Sending auth request...') - url = '/'.join((self.url, 'auth/login')) - headers = {'Content-Type': 'application/json'} - data = {'username': self.username, 'password': self.password} - response = requests.post(url, json=data, verify=False, - headers=headers, timeout=TIMEOUT) - check_error(response) - response.close() - if response.content: - content = jsonutils.loads(response.content) - token = content['token'] - del content['token'] - LOG.debug("Got response: %(code)s %(reason)s %(content)s", { - 'code': response.status_code, - 'reason': response.reason, - 'content': content}) - return token - raise 
exception.VolumeBackendAPIException( - data=_( - 'Got bad response: %(code)s %(reason)s') % { - 'code': response.status_code, 'reason': response.reason}) + @staticmethod + def getpath(content, name): + if isinstance(content, dict) and 'links' in content: + for link in content['links']: + if not isinstance(link, dict): + continue + if 'rel' in link and 'href' in link: + if link['rel'] == name: + return link['href'] + return None -class NexentaJSONProxy(object): +class NefCollections(object): + subj = 'collection' + root = '/collections' - def __init__(self, host, port, user, password, use_https): + def __init__(self, proxy): + self.proxy = proxy + + def path(self, name): + quoted_name = six.moves.urllib.parse.quote_plus(name) + return posixpath.join(self.root, quoted_name) + + def get(self, name, payload=None): + LOG.debug('Get properties of %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = self.path(name) + return self.proxy.get(path, payload) + + def set(self, name, payload=None): + LOG.debug('Modify properties of %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = self.path(name) + return self.proxy.put(path, payload) + + def list(self, payload=None): + LOG.debug('List of %(subj)ss: %(payload)s', + {'subj': self.subj, 'payload': payload}) + return self.proxy.get(self.root, payload) + + def create(self, payload=None): + LOG.debug('Create %(subj)s: %(payload)s', + {'subj': self.subj, 'payload': payload}) + try: + return self.proxy.post(self.root, payload) + except NefException as error: + if error.code != 'EEXIST': + raise + + def delete(self, name, payload=None): + LOG.debug('Delete %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = self.path(name) + try: + return self.proxy.delete(path, payload) + except NefException as error: + if error.code != 'ENOENT': + raise + + +class NefSettings(NefCollections): + subj = 'setting' + root = '/settings/properties' + + def create(self, payload=None): + return NotImplemented + + def delete(self, name, payload=None): + return NotImplemented + + +class NefDatasets(NefCollections): + subj = 'dataset' + root = '/storage/datasets' + + def rename(self, name, payload=None): + LOG.debug('Rename %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'rename') + return self.proxy.post(path, payload) + + +class NefSnapshots(NefDatasets, NefCollections): + subj = 'snapshot' + root = '/storage/snapshots' + + def clone(self, name, payload=None): + LOG.debug('Clone %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'clone') + return self.proxy.post(path, payload) + + +class NefVolumeGroups(NefDatasets, NefCollections): + subj = 'volume group' + root = 'storage/volumeGroups' + + def rollback(self, name, payload=None): + LOG.debug('Rollback %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'rollback') + return self.proxy.post(path, payload) + + +class NefVolumes(NefVolumeGroups, NefDatasets, NefCollections): + subj = 'volume' + root = '/storage/volumes' + + def promote(self, name, payload=None): + LOG.debug('Promote %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'promote') + return self.proxy.post(path, payload) 
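+
+# Illustrative mapping of collection calls to NEF REST requests (the dataset
+# paths used here are example values only): a driver call such as
+# nef.volumes.get('pool/vg/volume-1') issues
+# GET /storage/volumes/pool%2Fvg%2Fvolume-1, while
+# nef.snapshots.create({'path': 'pool/vg/volume-1@snapshot-1'}) issues
+# POST /storage/snapshots with the payload serialized as JSON.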
+ + +class NefFilesystems(NefVolumes, NefVolumeGroups, NefDatasets, NefCollections): + subj = 'filesystem' + root = '/storage/filesystems' + + def mount(self, name, payload=None): + LOG.debug('Mount %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'mount') + return self.proxy.post(path, payload) + + def unmount(self, name, payload=None): + LOG.debug('Unmount %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'unmount') + return self.proxy.post(path, payload) + + def acl(self, name, payload=None): + LOG.debug('Set %(subj)s %(name)s ACL: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'acl') + return self.proxy.post(path, payload) + + +class NefHpr(NefCollections): + subj = 'HPR service' + root = '/hpr' + + def activate(self, payload=None): + LOG.debug('Activate %(payload)s', + {'payload': payload}) + path = posixpath.join(self.root, 'activate') + return self.proxy.post(path, payload) + + def start(self, name, payload=None): + LOG.debug('Start %(subj)s %(name)s: %(payload)s', + {'subj': self.subj, 'name': name, 'payload': payload}) + path = posixpath.join(self.path(name), 'start') + return self.proxy.post(path, payload) + + +class NefServices(NefCollections): + subj = 'service' + root = '/services' + + +class NefNfs(NefCollections): + subj = 'NFS' + root = '/nas/nfs' + + +class NefTargets(NefCollections): + subj = 'iSCSI target' + root = '/san/iscsi/targets' + + +class NefHostGroups(NefCollections): + subj = 'host group' + root = '/san/hostgroups' + + +class NefTargetsGroups(NefCollections): + subj = 'target group' + root = '/san/targetgroups' + + +class NefLunMappings(NefCollections): + subj = 'LUN mapping' + root = '/san/lunMappings' + + +class NefLogicalUnits(NefCollections): + subj = 'LU' + root = 'san/logicalUnits' + + +class NefNetAddresses(NefCollections): + subj = 'network address' + root = '/network/addresses' + + +class NefProxy(object): + def __init__(self, proto, path, conf): self.session = requests.Session() - self.session.headers.update({'Content-Type': 'application/json'}) - self.host = host - if use_https: + self.settings = NefSettings(self) + self.filesystems = NefFilesystems(self) + self.volumegroups = NefVolumeGroups(self) + self.volumes = NefVolumes(self) + self.snapshots = NefSnapshots(self) + self.services = NefServices(self) + self.hpr = NefHpr(self) + self.nfs = NefNfs(self) + self.targets = NefTargets(self) + self.hostgroups = NefHostGroups(self) + self.targetgroups = NefTargetsGroups(self) + self.mappings = NefLunMappings(self) + self.logicalunits = NefLogicalUnits(self) + self.netaddrs = NefNetAddresses(self) + self.lock = None + self.tokens = {} + self.headers = { + 'Content-Type': 'application/json', + 'X-XSS-Protection': '1' + } + if conf.nexenta_use_https: self.scheme = 'https' - self.port = port if port else 8443 - self.session.auth = HTTPSAuth(self.url, user, password) else: self.scheme = 'http' - self.port = port if port else 8080 - self.session.auth = (user, password) - - @property - def url(self): - return '%(scheme)s://%(host)s:%(port)s' % { - 'scheme': self.scheme, - 'host': self.host, - 'port': self.port} + self.username = conf.nexenta_user + self.password = conf.nexenta_password + self.hosts = [] + if conf.nexenta_rest_address: + for host in conf.nexenta_rest_address.split(','): + self.hosts.append(host.strip()) + if proto == 
'nfs': + self.root = self.filesystems.path(path) + if not self.hosts: + self.hosts.append(conf.nas_host) + elif proto == 'iscsi': + self.root = self.volumegroups.path(path) + if not self.hosts: + self.hosts.append(conf.nexenta_host) + else: + message = (_('Storage protocol %(proto)s not supported') + % {'proto': proto}) + raise NefException(code='EPROTO', message=message) + self.host = self.hosts[0] + if conf.nexenta_rest_port: + self.port = conf.nexenta_rest_port + else: + if conf.nexenta_use_https: + self.port = 8443 + else: + self.port = 8080 + self.proto = proto + self.path = path + self.backoff_factor = conf.nexenta_rest_backoff_factor + self.retries = len(self.hosts) * conf.nexenta_rest_retry_count + self.timeout = requests.packages.urllib3.util.timeout.Timeout( + connect=conf.nexenta_rest_connect_timeout, + read=conf.nexenta_rest_read_timeout) + max_retries = requests.packages.urllib3.util.retry.Retry( + total=conf.nexenta_rest_retry_count, + backoff_factor=conf.nexenta_rest_backoff_factor) + adapter = requests.adapters.HTTPAdapter(max_retries=max_retries) + self.session.verify = conf.driver_ssl_cert_verify + self.session.headers.update(self.headers) + self.session.mount('%s://' % self.scheme, adapter) + if not conf.driver_ssl_cert_verify: + requests.packages.urllib3.disable_warnings() + self.update_lock() def __getattr__(self, name): - if name in ('get', 'post', 'put', 'delete'): - return RESTCaller(self, name) - return super(NexentaJSONProxy, self).__getattribute__(name) + return NefRequest(self, name) - def __repr__(self): - return 'HTTP JSON proxy: %s' % self.url + def delete_bearer(self): + if 'Authorization' in self.session.headers: + del self.session.headers['Authorization'] + + def update_bearer(self, token): + bearer = 'Bearer %s' % token + self.session.headers['Authorization'] = bearer + + def update_token(self, token): + self.tokens[self.host] = token + self.update_bearer(token) + + def update_host(self, host): + self.host = host + if host in self.tokens: + token = self.tokens[host] + self.update_bearer(token) + + def update_lock(self): + prop = self.settings.get('system.guid') + guid = prop.get('value') + path = '%s:%s' % (guid, self.path) + if isinstance(path, six.text_type): + path = path.encode('utf-8') + self.lock = hashlib.md5(path).hexdigest() + + def url(self, path): + netloc = '%s:%d' % (self.host, int(self.port)) + components = (self.scheme, netloc, str(path), None, None) + url = six.moves.urllib.parse.urlunsplit(components) + return url + + def delay(self, attempt): + interval = int(self.backoff_factor * (2 ** (attempt - 1))) + LOG.debug('Waiting for %(interval)s seconds', + {'interval': interval}) + greenthread.sleep(interval) diff --git a/cinder/volume/drivers/nexenta/ns5/nfs.py b/cinder/volume/drivers/nexenta/ns5/nfs.py index c02a2c5c2b7..85de95aa9db 100644 --- a/cinder/volume/drivers/nexenta/ns5/nfs.py +++ b/cinder/volume/drivers/nexenta/ns5/nfs.py @@ -1,4 +1,4 @@ -# Copyright 2016 Nexenta Systems, Inc. +# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,23 +13,27 @@ # License for the specific language governing permissions and limitations # under the License. 
+import errno import hashlib import os +import posixpath +import uuid from oslo_log import log as logging from oslo_utils import units +import six from cinder import context -from cinder import db -from cinder import exception +from cinder import coordination from cinder.i18n import _ from cinder import interface +from cinder import objects +from cinder.privsep import fs from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta import options -from cinder.volume.drivers.nexenta import utils from cinder.volume.drivers import nfs +from cinder.volume import utils -VERSION = '1.2.0' LOG = logging.getLogger(__name__) @@ -37,50 +41,86 @@ LOG = logging.getLogger(__name__) class NexentaNfsDriver(nfs.NfsDriver): """Executes volume driver commands on Nexenta Appliance. - .. code-block:: default + Version history: - Version history: - 1.0.0 - Initial driver version. - 1.1.0 - Added HTTPS support. - Added use of sessions for REST calls. - 1.2.0 - Support for extend volume. - Support for extending the volume in - create_volume_from_snapshot if the size of new volume - is larger than original volume size. + .. code-block:: none + 1.0.0 - Initial driver version. + 1.1.0 - Support for extend volume. + 1.2.0 - Added HTTPS support. + - Added use of sessions for REST calls. + - Added abandoned volumes and snapshots cleanup. + 1.3.0 - Failover support. + 1.4.0 - Migrate volume support and new NEF API calls. + 1.5.0 - Revert to snapshot support. + 1.6.0 - Get mountPoint from API to support old style mount points. + - Mount and umount shares on each operation to avoid mass + mounts on controller. Clean up mount folders on delete. + 1.6.1 - Fixed volume from image creation. + 1.6.2 - Removed redundant share mount from initialize_connection. + 1.6.3 - Adapted NexentaException for the latest Cinder. + 1.6.4 - Fixed volume mount/unmount. + 1.6.5 - Added driver_ssl_cert_verify for HA failover. + 1.6.6 - Destroy unused snapshots after deletion of it's last clone. + 1.6.7 - Fixed volume migration for HA environment. + 1.6.8 - Added deferred deletion for snapshots. + 1.6.9 - Fixed race between volume/clone deletion. + 1.7.0 - Added consistency group support. + 1.7.1 - Removed redundant hpr/activate call from initialize_connection. + 1.7.2 - Merged upstream changes for umount. + 1.8.0 - Refactored NFS driver. + - Added pagination support. + - Added configuration parameters for REST API connect/read + timeouts, connection retries and backoff factor. + - Fixed HA failover. + - Added retries on EBUSY errors. + - Fixed HTTP authentication. + - Disabled non-blocking mandatory locks. + - Added coordination for dataset operations. + 1.8.1 - Support for NexentaStor tenants. + 1.8.2 - Added manage/unmanage/manageable-list volume/snapshot support. + 1.8.3 - Added consistency group capability to generic volume group. 
""" - driver_prefix = 'nexenta' - volume_backend_name = 'NexentaNfsDriver' - VERSION = VERSION - - # ThirdPartySystems wiki page + VERSION = '1.8.3' CI_WIKI_NAME = "Nexenta_CI" + vendor_name = 'Nexenta' + product_name = 'NexentaStor5' + storage_protocol = 'NFS' + driver_volume_type = 'nfs' + def __init__(self, *args, **kwargs): super(NexentaNfsDriver, self).__init__(*args, **kwargs) - if self.configuration: - self.configuration.append_config_values( - options.NEXENTA_CONNECTION_OPTS) - self.configuration.append_config_values( - options.NEXENTA_NFS_OPTS) - self.configuration.append_config_values( - options.NEXENTA_DATASET_OPTS) - - self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base - self.dataset_compression = ( + if not self.configuration: + message = (_('%(product_name)s %(storage_protocol)s ' + 'backend configuration not found') + % {'product_name': self.product_name, + 'storage_protocol': self.storage_protocol}) + raise jsonrpc.NefException(code='ENODATA', message=message) + self.configuration.append_config_values( + options.NEXENTA_CONNECTION_OPTS) + self.configuration.append_config_values( + options.NEXENTA_NFS_OPTS) + self.configuration.append_config_values( + options.NEXENTA_DATASET_OPTS) + self.nef = None + self.volume_backend_name = ( + self.configuration.safe_get('volume_backend_name') or + '%s_%s' % (self.product_name, self.storage_protocol)) + self.nas_host = self.configuration.nas_host + self.root_path = self.configuration.nas_share_path + self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes + self.deduplicated_volumes = self.configuration.nexenta_dataset_dedup + self.compressed_volumes = ( self.configuration.nexenta_dataset_compression) - self.dataset_deduplication = self.configuration.nexenta_dataset_dedup self.dataset_description = ( self.configuration.nexenta_dataset_description) - self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes - self.nef = None - self.use_https = self.configuration.nexenta_use_https - self.nef_host = self.configuration.nas_host - self.share = self.configuration.nas_share_path - self.nef_port = self.configuration.nexenta_rest_port - self.nef_user = self.configuration.nexenta_user - self.nef_password = self.configuration.nexenta_password + self.mount_point_base = self.configuration.nexenta_mount_point_base + self.group_snapshot_template = ( + self.configuration.nexenta_group_snapshot_template) + self.origin_snapshot_template = ( + self.configuration.nexenta_origin_snapshot_template) @staticmethod def get_driver_options(): @@ -90,155 +130,335 @@ class NexentaNfsDriver(nfs.NfsDriver): options.NEXENTA_DATASET_OPTS ) - @property - def backend_name(self): - backend_name = None - if self.configuration: - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = self.__class__.__name__ - return backend_name - def do_setup(self, context): - self.nef = jsonrpc.NexentaJSONProxy( - self.nef_host, self.nef_port, self.nef_user, - self.nef_password, self.use_https) + self.nef = jsonrpc.NefProxy(self.driver_volume_type, + self.root_path, + self.configuration) def check_for_setup_error(self): - """Verify that the volume for our folder exists. 
- - :raise: :py:exc:`LookupError` - """ - pool_name, fs = self._get_share_datasets(self.share) - url = 'storage/pools/%s' % pool_name - self.nef.get(url) - url = 'storage/pools/%s/filesystems/%s' % ( - pool_name, self._escape_path(fs)) - self.nef.get(url) - - shared = False - response = self.nef.get('nas/nfs') - for share in response['data']: - if share.get('filesystem') == self.share: - shared = True - break - if not shared: - raise LookupError(_("Dataset %s is not shared in Nexenta " - "Store appliance") % self.share) - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. - - :param volume: volume reference - :param connector: connector reference - """ - data = {'export': volume['provider_location'], 'name': 'volume'} - if volume['provider_location'] in self.shares: - data['options'] = self.shares[volume['provider_location']] - return { - 'driver_volume_type': self.driver_volume_type, - 'data': data - } + """Check root filesystem, NFS service and NFS share.""" + filesystem = self.nef.filesystems.get(self.root_path) + if filesystem['mountPoint'] == 'none': + message = (_('NFS root filesystem %(path)s is not writable') + % {'path': filesystem['mountPoint']}) + raise jsonrpc.NefException(code='ENOENT', message=message) + if not filesystem['isMounted']: + message = (_('NFS root filesystem %(path)s is not mounted') + % {'path': filesystem['mountPoint']}) + raise jsonrpc.NefException(code='ENOTDIR', message=message) + if filesystem['nonBlockingMandatoryMode']: + payload = {'nonBlockingMandatoryMode': False} + self.nef.filesystems.set(self.root_path, payload) + service = self.nef.services.get('nfs') + if service['state'] != 'online': + message = (_('NFS server service is not online: %(state)s') + % {'state': service['state']}) + raise jsonrpc.NefException(code='ESRCH', message=message) + share = self.nef.nfs.get(self.root_path) + if share['shareState'] != 'online': + message = (_('NFS share %(share)s is not online: %(state)s') + % {'share': self.root_path, + 'state': share['shareState']}) + raise jsonrpc.NefException(code='ESRCH', message=message) def create_volume(self, volume): """Creates a volume. 
:param volume: volume reference - :returns: provider_location update dict for database """ - self._do_create_volume(volume) - return {'provider_location': volume['provider_location']} - - def _do_create_volume(self, volume): - pool, fs = self._get_share_datasets(self.share) - filesystem = '%s/%s/%s' % (pool, fs, volume['name']) - LOG.debug('Creating filesystem on NexentaStor %s', filesystem) - url = 'storage/pools/%s/filesystems' % pool - data = { - 'name': '/'.join([fs, volume['name']]), - 'compressionMode': self.dataset_compression, - 'dedupMode': self.dataset_deduplication, - } - self.nef.post(url, data) - volume['provider_location'] = '%s:/%s/%s' % ( - self.nef_host, self.share, volume['name']) + volume_path = self._get_volume_path(volume) + payload = {'path': volume_path, 'compressionMode': 'off'} + self.nef.filesystems.create(payload) try: - self._share_folder(fs, volume['name']) - self._ensure_share_mounted('%s:/%s/%s' % ( - self.nef_host, self.share, volume['name'])) - - volume_size = volume['size'] - if getattr(self.configuration, - self.driver_prefix + '_sparsed_volumes'): - self._create_sparsed_file(self.local_path(volume), volume_size) + self._set_volume_acl(volume) + self._mount_volume(volume) + volume_file = self.local_path(volume) + if self.sparsed_volumes: + self._create_sparsed_file(volume_file, volume['size']) else: - url = 'storage/pools/%s/filesystems/%s' % ( - pool, '%2F'.join([self._escape_path(fs), volume['name']])) - compression = self.nef.get(url).get('compressionMode') - if compression != 'off': - # Disable compression, because otherwise will not use space - # on disk. - self.nef.put(url, {'compressionMode': 'off'}) - try: - self._create_regular_file( - self.local_path(volume), volume_size) - finally: - if compression != 'off': - # Backup default compression value if it was changed. 
- self.nef.put(url, {'compressionMode': compression}) - - except exception.NexentaException: + self._create_regular_file(volume_file, volume['size']) + if self.compressed_volumes != 'off': + payload = {'compressionMode': self.compressed_volumes} + self.nef.filesystems.set(volume_path, payload) + except jsonrpc.NefException as create_error: try: - url = 'storage/pools/%s/filesystems/%s' % ( - pool, '%2F'.join([self._escape_path(fs), volume['name']])) - self.nef.delete(url) - except exception.NexentaException: - LOG.warning("Cannot destroy created folder: " - "%(vol)s/%(folder)s", - {'vol': pool, 'folder': '/'.join( - [fs, volume['name']])}) - raise + payload = {'force': True} + self.nef.filesystems.delete(volume_path, payload) + except jsonrpc.NefException as delete_error: + LOG.debug('Failed to delete volume %(path)s: %(error)s', + {'path': volume_path, 'error': delete_error}) + raise create_error + finally: + self._unmount_volume(volume) + def copy_image_to_volume(self, context, volume, image_service, image_id): + LOG.debug('Copy image %(image)s to volume %(volume)s', + {'image': image_id, 'volume': volume['name']}) + self._mount_volume(volume) + super(NexentaNfsDriver, self).copy_image_to_volume( + context, volume, image_service, image_id) + self._unmount_volume(volume) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + LOG.debug('Copy volume %(volume)s to image %(image)s', + {'volume': volume['name'], 'image': image_meta['id']}) + self._mount_volume(volume) + super(NexentaNfsDriver, self).copy_volume_to_image( + context, volume, image_service, image_meta) + self._unmount_volume(volume) + + def _ensure_share_unmounted(self, share): + """Ensure that NFS share is unmounted on the host. + + :param share: share path + """ + attempts = max(1, self.configuration.nfs_mount_attempts) + path = self._get_mount_point_for_share(share) + if path not in self._remotefsclient._read_mounts(): + LOG.debug('NFS share %(share)s is not mounted at %(path)s', + {'share': share, 'path': path}) + return + for attempt in range(0, attempts): + try: + fs.umount(path) + LOG.debug('NFS share %(share)s has been unmounted at %(path)s', + {'share': share, 'path': path}) + break + except Exception as error: + if attempt == (attempts - 1): + LOG.error('Failed to unmount NFS share %(share)s ' + 'after %(attempts)s attempts', + {'share': share, 'attempts': attempts}) + raise + LOG.debug('Unmount attempt %(attempt)s failed: %(error)s, ' + 'retrying unmount %(share)s from %(path)s', + {'attempt': attempt, 'error': error, + 'share': share, 'path': path}) + self.nef.delay(attempt) + self._delete(path) + + def _mount_volume(self, volume): + """Ensure that volume is activated and mounted on the host.""" + volume_path = self._get_volume_path(volume) + payload = {'fields': 'mountPoint,isMounted'} + filesystem = self.nef.filesystems.get(volume_path, payload) + if filesystem['mountPoint'] == 'none': + payload = {'datasetName': volume_path} + self.nef.hpr.activate(payload) + filesystem = self.nef.filesystems.get(volume_path) + elif not filesystem['isMounted']: + self.nef.filesystems.mount(volume_path) + share = '%s:%s' % (self.nas_host, filesystem['mountPoint']) + self._ensure_share_mounted(share) + + def _remount_volume(self, volume): + """Workaround for NEX-16457.""" + volume_path = self._get_volume_path(volume) + self.nef.filesystems.unmount(volume_path) + self.nef.filesystems.mount(volume_path) + + def _unmount_volume(self, volume): + """Ensure that volume is unmounted on the host.""" + share = 
self._get_volume_share(volume)
+ self._ensure_share_unmounted(share)
+
+ def _create_sparsed_file(self, path, size):
+ """Creates file with 0 disk usage."""
+ if self.configuration.nexenta_qcow2_volumes:
+ self._create_qcow2_file(path, size)
+ else:
+ super(NexentaNfsDriver, self)._create_sparsed_file(path, size)
+
+ def migrate_volume(self, context, volume, host):
+ """Migrate if volume and host are managed by Nexenta appliance.
+
+ :param context: context
+ :param volume: a dictionary describing the volume to migrate
+ :param host: a dictionary describing the host to migrate to
+ """
+ LOG.debug('Migrate volume %(volume)s to host %(host)s',
+ {'volume': volume['name'], 'host': host})
+
+ false_ret = (False, None)
+
+ if volume['status'] not in ('available', 'retyping'):
+ LOG.error('Volume %(volume)s status must be available or '
+ 'retyping, current volume status is %(status)s',
+ {'volume': volume['name'], 'status': volume['status']})
+ return false_ret
+
+ if 'capabilities' not in host:
+ LOG.error('Unsupported host %(host)s: no capabilities found',
+ {'host': host})
+ return false_ret
+
+ capabilities = host['capabilities']
+
+ if not ('location_info' in capabilities and
+ 'vendor_name' in capabilities and
+ 'free_capacity_gb' in capabilities):
+ LOG.error('Unsupported host %(host)s: required NFS '
+ 'and vendor capabilities are not found',
+ {'host': host})
+ return false_ret
+
+ driver_name = capabilities['location_info'].split(':')[0]
+ dst_root = capabilities['location_info'].split(':/')[1]
+
+ if not (capabilities['vendor_name'] == 'Nexenta' and
+ driver_name == self.__class__.__name__):
+ LOG.error('Unsupported host %(host)s: incompatible '
+ 'vendor %(vendor)s or driver %(driver)s',
+ {'host': host,
+ 'vendor': capabilities['vendor_name'],
+ 'driver': driver_name})
+ return false_ret
+
+ if capabilities['free_capacity_gb'] < volume['size']:
+ LOG.error('There is not enough space available on the '
+ 'host %(host)s to migrate volume %(volume)s, '
+ 'free space: %(free)d, required: %(size)d',
+ {'host': host, 'volume': volume['name'],
+ 'free': capabilities['free_capacity_gb'],
+ 'size': volume['size']})
+ return false_ret
+
+ src_path = self._get_volume_path(volume)
+ dst_path = posixpath.join(dst_root, volume['name'])
+ nef_ips = capabilities['nef_url'].split(',')
+ nef_ips.append(None)
+ svc = 'cinder-migrate-%s' % volume['name']
+ for nef_ip in nef_ips:
+ payload = {'name': svc,
+ 'sourceDataset': src_path,
+ 'destinationDataset': dst_path,
+ 'type': 'scheduled',
+ 'sendShareNfs': True}
+ if nef_ip is not None:
+ payload['isSource'] = True
+ payload['remoteNode'] = {
+ 'host': nef_ip,
+ 'port': capabilities['nef_port']
+ }
+ try:
+ self.nef.hpr.create(payload)
+ break
+ except jsonrpc.NefException as error:
+ if nef_ip is None or error.code not in ('EINVAL', 'ENOENT'):
+ LOG.error('Failed to create replication '
+ 'service %(payload)s: %(error)s',
+ {'payload': payload, 'error': error})
+ return false_ret
+
+ try:
+ self.nef.hpr.start(svc)
+ except jsonrpc.NefException as error:
+ LOG.error('Failed to start replication '
+ 'service %(svc)s: %(error)s',
+ {'svc': svc, 'error': error})
+ try:
+ payload = {'force': True}
+ self.nef.hpr.delete(svc, payload)
+ except jsonrpc.NefException as error:
+ LOG.error('Failed to delete replication '
+ 'service %(svc)s: %(error)s',
+ {'svc': svc, 'error': error})
+ return false_ret
+
+ payload = {'destroySourceSnapshots': True,
+ 'destroyDestinationSnapshots': True}
+ progress = True
+ retry = 0
+ while progress:
+ retry += 1
+ hpr = self.nef.hpr.get(svc)
+ state = hpr['state']
+ if state == 'disabled':
+ progress = False
+ elif state == 'enabled':
+ self.nef.delay(retry)
+ else:
+ self.nef.hpr.delete(svc, payload)
+ return false_ret
+ self.nef.hpr.delete(svc, payload)
+
+ try:
+ self.delete_volume(volume)
+ except jsonrpc.NefException as error:
+ LOG.debug('Failed to delete source volume %(volume)s: %(error)s',
+ {'volume': volume['name'], 'error': error})
+ return True, None
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Terminate a connection to a volume.
+
+ :param volume: a volume object
+ :param connector: a connector object
+ """
+ LOG.debug('Terminate volume connection for %(volume)s',
+ {'volume': volume['name']})
+ self._unmount_volume(volume)
+
+ def initialize_connection(self, volume, connector):
+ """Initialize a connection to a volume.
+
+ :param volume: a volume object
+ :param connector: a connector object
+ :returns: dictionary of connection information
+ """
+ LOG.debug('Initialize volume connection for %(volume)s',
+ {'volume': volume['name']})
+ share = self._get_volume_share(volume)
+ return {
+ 'driver_volume_type': self.driver_volume_type,
+ 'mount_point_base': self.mount_point_base,
+ 'data': {
+ 'export': share,
+ 'name': 'volume'
+ }
+ }
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreate an export for a volume."""
+ pass
+
+ @coordination.synchronized('{self.nef.lock}')
 def delete_volume(self, volume):
- """Deletes a logical volume.
+ """Deletes a volume.
 
 :param volume: volume reference
 """
- pool, fs_ = self._get_share_datasets(self.share)
- fs = self._escape_path(fs_)
- url = ('storage/pools/%(pool)s/filesystems/%(fs)s') % {
- 'pool': pool,
- 'fs': '%2F'.join([fs, volume['name']])
- }
- origin = self.nef.get(url).get('originalSnapshot')
- url = ('storage/pools/%(pool)s/filesystems/'
- '%(fs)s?snapshots=true') % {
- 'pool': pool,
- 'fs': '%2F'.join([fs, volume['name']])
- }
+ volume_path = self._get_volume_path(volume)
+ self._unmount_volume(volume)
+ delete_payload = {'force': True, 'snapshots': True}
 try:
- self.nef.delete(url)
- except exception.NexentaException as exc:
- if 'Failed to destroy snapshot' in exc.args[0]:
- LOG.debug('Snapshot has dependent clones, skipping')
- else:
+ self.nef.filesystems.delete(volume_path, delete_payload)
+ except jsonrpc.NefException as error:
+ if error.code != 'EEXIST':
 raise
+ snapshots_tree = {}
+ snapshots_payload = {'parent': volume_path, 'fields': 'path'}
+ snapshots = self.nef.snapshots.list(snapshots_payload)
+ for snapshot in snapshots:
+ clones_payload = {'fields': 'clones,creationTxg'}
+ data = self.nef.snapshots.get(snapshot['path'], clones_payload)
+ if data['clones']:
+ snapshots_tree[data['creationTxg']] = data['clones'][0]
+ if snapshots_tree:
+ clone_path = snapshots_tree[max(snapshots_tree)]
+ self.nef.filesystems.promote(clone_path)
+ self.nef.filesystems.delete(volume_path, delete_payload)
+
+ def _delete(self, path):
+ """Override parent method for safe remove mountpoint."""
 try:
- if origin and self._is_clone_snapshot_name(origin):
- path, snap = origin.split('@')
- pool, fs = path.split('/', 1)
- snap_url = ('storage/pools/%(pool)s/'
- 'filesystems/%(fs)s/snapshots/%(snap)s') % {
- 'pool': pool,
- 'fs': fs,
- 'snap': snap
- }
- self.nef.delete(snap_url)
- except exception.NexentaException as exc:
- if 'does not exist' in exc.args[0]:
- LOG.debug(
- 'Volume %s does not exist on appliance', '/'.join(
- [pool, fs_]))
+ os.rmdir(path)
+ 
LOG.debug('The mountpoint %(path)s has been successfully removed', + {'path': path}) + except OSError as error: + LOG.debug('Failed to remove mountpoint %(path)s: %(error)s', + {'path': path, 'error': error.strerror}) def extend_volume(self, volume, new_size): """Extend an existing volume. @@ -246,102 +466,80 @@ class NexentaNfsDriver(nfs.NfsDriver): :param volume: volume reference :param new_size: volume new size in GB """ - LOG.info('Extending volume: %(id)s New size: %(size)s GB', - {'id': volume['id'], 'size': new_size}) + LOG.info('Extend volume %(volume)s, new size: %(size)sGB', + {'volume': volume['name'], 'size': new_size}) + self._mount_volume(volume) + volume_file = self.local_path(volume) if self.sparsed_volumes: - self._execute('truncate', '-s', '%sG' % new_size, - self.local_path(volume), - run_as_root=self._execute_as_root) + self._execute('truncate', '-s', + '%dG' % new_size, + volume_file, + run_as_root=True) else: - block_size_mb = 1 - block_count = ((new_size - volume['size']) * units.Gi // - (block_size_mb * units.Mi)) - self._execute( - 'dd', 'if=/dev/zero', - 'seek=%d' % (volume['size'] * units.Gi / block_size_mb), - 'of=%s' % self.local_path(volume), - 'bs=%dM' % block_size_mb, - 'count=%d' % block_count, - run_as_root=True) + seek = volume['size'] * units.Ki + count = (new_size - volume['size']) * units.Ki + self._execute('dd', + 'if=/dev/zero', + 'of=%s' % volume_file, + 'bs=%d' % units.Mi, + 'seek=%d' % seek, + 'count=%d' % count, + run_as_root=True) + self._unmount_volume(volume) + @coordination.synchronized('{self.nef.lock}') def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot reference """ - volume = self._get_snapshot_volume(snapshot) - pool, fs = self._get_share_datasets(self.share) - url = 'storage/pools/%(pool)s/filesystems/%(fs)s/snapshots' % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, volume['name']])), - } - data = {'name': snapshot['name']} - self.nef.post(url, data) + snapshot_path = self._get_snapshot_path(snapshot) + payload = {'path': snapshot_path} + self.nef.snapshots.create(payload) + @coordination.synchronized('{self.nef.lock}') def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: snapshot reference """ - volume = self._get_snapshot_volume(snapshot) - pool, fs = self._get_share_datasets(self.share) - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s/snapshots/%(snap)s') % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, volume['name']])), - 'snap': snapshot['name'] - } - try: - self.nef.delete(url) - except exception.NexentaException as exc: - if 'EBUSY' is exc: - LOG.warning( - 'Could not delete snapshot %s - it has dependencies', - snapshot['name']) + snapshot_path = self._get_snapshot_path(snapshot) + payload = {'defer': True} + self.nef.snapshots.delete(snapshot_path, payload) + def snapshot_revert_use_temp_snapshot(self): + # Considering that NexentaStor based drivers use COW images + # for storing snapshots, having chains of such images, + # creating a backup snapshot when reverting one is not + # actually helpful. + return False + + def revert_to_snapshot(self, context, volume, snapshot): + """Revert volume to snapshot.""" + volume_path = self._get_volume_path(volume) + payload = {'snapshot': snapshot['name']} + self.nef.filesystems.rollback(volume_path, payload) + + @coordination.synchronized('{self.nef.lock}') def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. 
:param volume: reference of volume to be created :param snapshot: reference of source snapshot """ - snapshot_vol = self._get_snapshot_volume(snapshot) - volume['provider_location'] = snapshot_vol['provider_location'] - - pool, fs = self._get_share_datasets(self.share) - dataset_path = '%s/%s' % (pool, fs) - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s/snapshots/%(snap)s/clone') % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, snapshot_vol['name']])), - 'snap': snapshot['name'] - } - path = '/'.join([pool, fs, volume['name']]) - data = {'targetPath': path} - self.nef.post(url, data) - - try: - self._share_folder(fs, volume['name']) - except exception.NexentaException: - try: - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s') % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, volume['name']])) - } - self.nef.delete(url) - except exception.NexentaException: - LOG.warning("Cannot destroy cloned filesystem: " - "%(vol)s/%(filesystem)s", - {'vol': dataset_path, - 'filesystem': volume['name']}) - raise + LOG.debug('Create volume %(volume)s from snapshot %(snapshot)s', + {'volume': volume['name'], 'snapshot': snapshot['name']}) + snapshot_path = self._get_snapshot_path(snapshot) + clone_path = self._get_volume_path(volume) + payload = {'targetPath': clone_path} + self.nef.snapshots.clone(snapshot_path, payload) + self._remount_volume(volume) + self._set_volume_acl(volume) if volume['size'] > snapshot['volume_size']: new_size = volume['size'] volume['size'] = snapshot['volume_size'] self.extend_volume(volume, new_size) volume['size'] = new_size - return {'provider_location': volume['provider_location']} def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. @@ -349,146 +547,776 @@ class NexentaNfsDriver(nfs.NfsDriver): :param volume: new volume reference :param src_vref: source volume reference """ - LOG.info('Creating clone of volume: %s', src_vref['id']) - snapshot = {'volume_name': src_vref['name'], - 'volume_id': src_vref['id'], - 'volume_size': src_vref['size'], - 'name': self._get_clone_snapshot_name(volume)} + snapshot = { + 'name': self.origin_snapshot_template % volume['id'], + 'volume_id': src_vref['id'], + 'volume_name': src_vref['name'], + 'volume_size': src_vref['size'] + } self.create_snapshot(snapshot) try: - return self.create_volume_from_snapshot(volume, snapshot) - except exception.NexentaException: - LOG.error('Volume creation failed, deleting created snapshot ' - '%(volume_name)s@%(name)s', snapshot) + self.create_volume_from_snapshot(volume, snapshot) + except jsonrpc.NefException as error: + LOG.debug('Failed to create clone %(clone)s ' + 'from volume %(volume)s: %(error)s', + {'clone': volume['name'], + 'volume': src_vref['name'], + 'error': error}) + raise + finally: try: self.delete_snapshot(snapshot) - except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning('Failed to delete zfs snapshot ' - '%(volume_name)s@%(name)s', snapshot) - raise + except jsonrpc.NefException as error: + LOG.debug('Failed to delete temporary snapshot ' + '%(volume)s@%(snapshot)s: %(error)s', + {'volume': src_vref['name'], + 'snapshot': snapshot['name'], + 'error': error}) + + def create_consistencygroup(self, context, group): + """Creates a consistency group. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be created. 
+ :returns: group_model_update + """ + group_model_update = {} + return group_model_update + + def create_group(self, context, group): + """Creates a group. + + :param context: the context of the caller. + :param group: the group object. + :returns: model_update + """ + return self.create_consistencygroup(context, group) + + def delete_consistencygroup(self, context, group, volumes): + """Deletes a consistency group. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be deleted. + :param volumes: a list of volume dictionaries in the group. + :returns: group_model_update, volumes_model_update + """ + group_model_update = {} + volumes_model_update = [] + for volume in volumes: + self.delete_volume(volume) + return group_model_update, volumes_model_update + + def delete_group(self, context, group, volumes): + """Deletes a group. + + :param context: the context of the caller. + :param group: the group object. + :param volumes: a list of volume objects in the group. + :returns: model_update, volumes_model_update + """ + return self.delete_consistencygroup(context, group, volumes) + + def update_consistencygroup(self, context, group, add_volumes=None, + remove_volumes=None): + """Updates a consistency group. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be updated. + :param add_volumes: a list of volume dictionaries to be added. + :param remove_volumes: a list of volume dictionaries to be removed. + :returns: group_model_update, add_volumes_update, remove_volumes_update + """ + group_model_update = {} + add_volumes_update = [] + remove_volumes_update = [] + return group_model_update, add_volumes_update, remove_volumes_update + + def update_group(self, context, group, add_volumes=None, + remove_volumes=None): + """Updates a group. + + :param context: the context of the caller. + :param group: the group object. + :param add_volumes: a list of volume objects to be added. + :param remove_volumes: a list of volume objects to be removed. + :returns: model_update, add_volumes_update, remove_volumes_update + """ + return self.update_consistencygroup(context, group, add_volumes, + remove_volumes) + + def create_cgsnapshot(self, context, cgsnapshot, snapshots): + """Creates a consistency group snapshot. + + :param context: the context of the caller. + :param cgsnapshot: the dictionary of the cgsnapshot to be created. + :param snapshots: a list of snapshot dictionaries in the cgsnapshot. + :returns: group_model_update, snapshots_model_update + """ + group_model_update = {} + snapshots_model_update = [] + cgsnapshot_name = self.group_snapshot_template % cgsnapshot['id'] + cgsnapshot_path = '%s@%s' % (self.root_path, cgsnapshot_name) + create_payload = {'path': cgsnapshot_path, 'recursive': True} + self.nef.snapshots.create(create_payload) + for snapshot in snapshots: + volume_name = snapshot['volume_name'] + volume_path = posixpath.join(self.root_path, volume_name) + snapshot_name = snapshot['name'] + snapshot_path = '%s@%s' % (volume_path, cgsnapshot_name) + rename_payload = {'newName': snapshot_name} + self.nef.snapshots.rename(snapshot_path, rename_payload) + delete_payload = {'defer': True, 'recursive': True} + self.nef.snapshots.delete(cgsnapshot_path, delete_payload) + return group_model_update, snapshots_model_update + + def create_group_snapshot(self, context, group_snapshot, snapshots): + """Creates a group_snapshot. + + :param context: the context of the caller. 
+ :param group_snapshot: the GroupSnapshot object to be created. + :param snapshots: a list of Snapshot objects in the group_snapshot. + :returns: model_update, snapshots_model_update + """ + return self.create_cgsnapshot(context, group_snapshot, snapshots) + + def delete_cgsnapshot(self, context, cgsnapshot, snapshots): + """Deletes a consistency group snapshot. + + :param context: the context of the caller. + :param cgsnapshot: the dictionary of the cgsnapshot to be created. + :param snapshots: a list of snapshot dictionaries in the cgsnapshot. + :returns: group_model_update, snapshots_model_update + """ + group_model_update = {} + snapshots_model_update = [] + for snapshot in snapshots: + self.delete_snapshot(snapshot) + return group_model_update, snapshots_model_update + + def delete_group_snapshot(self, context, group_snapshot, snapshots): + """Deletes a group_snapshot. + + :param context: the context of the caller. + :param group_snapshot: the GroupSnapshot object to be deleted. + :param snapshots: a list of snapshot objects in the group_snapshot. + :returns: model_update, snapshots_model_update + """ + return self.delete_cgsnapshot(context, group_snapshot, snapshots) + + def create_consistencygroup_from_src(self, context, group, volumes, + cgsnapshot=None, snapshots=None, + source_cg=None, source_vols=None): + """Creates a consistency group from source. + + :param context: the context of the caller. + :param group: the dictionary of the consistency group to be created. + :param volumes: a list of volume dictionaries in the group. + :param cgsnapshot: the dictionary of the cgsnapshot as source. + :param snapshots: a list of snapshot dictionaries in the cgsnapshot. + :param source_cg: the dictionary of a consistency group as source. + :param source_vols: a list of volume dictionaries in the source_cg. + :returns: group_model_update, volumes_model_update + """ + group_model_update = {} + volumes_model_update = [] + if cgsnapshot and snapshots: + for volume, snapshot in zip(volumes, snapshots): + self.create_volume_from_snapshot(volume, snapshot) + elif source_cg and source_vols: + snapshot_name = self.origin_snapshot_template % group['id'] + snapshot_path = '%s@%s' % (self.root_path, snapshot_name) + create_payload = {'path': snapshot_path, 'recursive': True} + self.nef.snapshots.create(create_payload) + for volume, source_vol in zip(volumes, source_vols): + snapshot = { + 'name': snapshot_name, + 'volume_id': source_vol['id'], + 'volume_name': source_vol['name'], + 'volume_size': source_vol['size'] + } + self.create_volume_from_snapshot(volume, snapshot) + delete_payload = {'defer': True, 'recursive': True} + self.nef.snapshots.delete(snapshot_path, delete_payload) + return group_model_update, volumes_model_update + + def create_group_from_src(self, context, group, volumes, + group_snapshot=None, snapshots=None, + source_group=None, source_vols=None): + """Creates a group from source. + + :param context: the context of the caller. + :param group: the Group object to be created. + :param volumes: a list of Volume objects in the group. + :param group_snapshot: the GroupSnapshot object as source. + :param snapshots: a list of snapshot objects in group_snapshot. + :param source_group: the Group object as source. + :param source_vols: a list of volume objects in the source_group. 
+ :returns: model_update, volumes_model_update + """ + return self.create_consistencygroup_from_src(context, group, volumes, + group_snapshot, snapshots, + source_group, source_vols) + + def _local_volume_dir(self, volume): + """Get volume dir (mounted locally fs path) for given volume. + + :param volume: volume reference + """ + share = self._get_volume_share(volume) + if isinstance(share, six.text_type): + share = share.encode('utf-8') + path = hashlib.md5(share).hexdigest() + return os.path.join(self.mount_point_base, path) def local_path(self, volume): """Get volume path (mounted locally fs path) for given volume. :param volume: volume reference """ - nfs_share = volume['provider_location'] - return os.path.join(self._get_mount_point_for_share(nfs_share), - 'volume') + volume_dir = self._local_volume_dir(volume) + return os.path.join(volume_dir, 'volume') - def _get_mount_point_for_share(self, nfs_share): - """Returns path to mount point NFS share. + def _set_volume_acl(self, volume): + """Sets access permissions for given volume. - :param nfs_share: example 172.18.194.100:/var/nfs + :param volume: volume reference """ - nfs_share = nfs_share.encode('utf-8') - return os.path.join(self.configuration.nexenta_mount_point_base, - hashlib.md5(nfs_share).hexdigest()) - - def _share_folder(self, path, filesystem): - """Share NFS filesystem on NexentaStor Appliance. - - :param nef: nef object - :param path: path to parent filesystem - :param filesystem: filesystem that needs to be shared - """ - pool = self.share.split('/')[0] - LOG.debug( - 'Creating ACL for filesystem %s on Nexenta Store', filesystem) - url = 'storage/pools/%s/filesystems/%s/acl' % ( - pool, self._escape_path('/'.join([path, filesystem]))) - data = { - "type": "allow", - "principal": "everyone@", - "permissions": [ - "list_directory", - "read_data", - "add_file", - "write_data", - "add_subdirectory", - "append_data", - "read_xattr", - "write_xattr", - "execute", - "delete_child", - "read_attributes", - "write_attributes", - "delete", - "read_acl", - "write_acl", - "write_owner", - "synchronize" + volume_path = self._get_volume_path(volume) + payload = { + 'type': 'allow', + 'principal': 'everyone@', + 'permissions': [ + 'full_set' ], - "flags": [ - "file_inherit", - "dir_inherit" + 'flags': [ + 'file_inherit', + 'dir_inherit' ] } - self.nef.post(url, data) + self.nef.filesystems.acl(volume_path, payload) - LOG.debug( - 'Successfully shared filesystem %s', '/'.join( - [path, filesystem])) + def _get_volume_share(self, volume): + """Return NFS share path for the volume.""" + volume_path = self._get_volume_path(volume) + payload = {'fields': 'mountPoint'} + filesystem = self.nef.filesystems.get(volume_path, payload) + return '%s:%s' % (self.nas_host, filesystem['mountPoint']) - def _get_capacity_info(self, path): - """Calculate available space on the NFS share. + def _get_volume_path(self, volume): + """Return ZFS dataset path for the volume.""" + return posixpath.join(self.root_path, volume['name']) - :param path: example pool/nfs + def _get_snapshot_path(self, snapshot): + """Return ZFS snapshot path for the snapshot.""" + volume_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + volume_path = posixpath.join(self.root_path, volume_name) + return '%s@%s' % (volume_path, snapshot_name) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, update the stats first. 
""" - pool, fs = self._get_share_datasets(path) - url = 'storage/pools/%s/filesystems/%s' % ( - pool, self._escape_path(fs)) - data = self.nef.get(url) - total = utils.str2size(data['bytesAvailable']) - allocated = utils.str2size(data['bytesUsed']) - free = total - allocated - return total, free, allocated + if refresh or not self._stats: + self._update_volume_stats() - def _get_snapshot_volume(self, snapshot): - ctxt = context.get_admin_context() - return db.volume_get(ctxt, snapshot['volume_id']) - - def _get_share_datasets(self, nfs_share): - pool_name, fs = nfs_share.split('/', 1) - return pool_name, fs - - def _get_clone_snapshot_name(self, volume): - """Return name for snapshot that will be used to clone the volume.""" - return 'cinder-clone-snapshot-%(id)s' % volume - - def _is_clone_snapshot_name(self, snapshot): - """Check if snapshot is created for cloning.""" - name = snapshot.split('@')[-1] - return name.startswith('cinder-clone-snapshot-') + return self._stats def _update_volume_stats(self): - """Retrieve stats info for NexentaStor appliance.""" - LOG.debug('Updating volume stats') - share = ':/'.join([self.nef_host, self.share]) - total, free, allocated = self._get_capacity_info(self.share) - total_space = utils.str2gib_size(total) - free_space = utils.str2gib_size(free) - + """Retrieve stats info for NexentaStor Appliance.""" + LOG.debug('Updating volume backend %(volume_backend_name)s stats', + {'volume_backend_name': self.volume_backend_name}) + payload = {'fields': 'mountPoint,bytesAvailable,bytesUsed'} + dataset = self.nef.filesystems.get(self.root_path, payload) + free = dataset['bytesAvailable'] // units.Gi + used = dataset['bytesUsed'] // units.Gi + total = free + used + share = '%s:%s' % (self.nas_host, dataset['mountPoint']) location_info = '%(driver)s:%(share)s' % { 'driver': self.__class__.__name__, 'share': share } self._stats = { - 'vendor_name': 'Nexenta', - 'dedup': self.dataset_deduplication, - 'compression': self.dataset_compression, + 'vendor_name': self.vendor_name, + 'dedup': self.deduplicated_volumes, + 'compression': self.compressed_volumes, 'description': self.dataset_description, - 'nef_url': self.nef_host, + 'nef_url': self.nef.host, + 'nef_port': self.nef.port, 'driver_version': self.VERSION, - 'storage_protocol': 'NFS', - 'total_capacity_gb': total_space, - 'free_capacity_gb': free_space, + 'storage_protocol': self.storage_protocol, + 'sparsed_volumes': self.sparsed_volumes, + 'total_capacity_gb': total, + 'free_capacity_gb': free, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, + 'multiattach': True, + 'consistencygroup_support': True, + 'consistent_group_snapshot_enabled': True, 'location_info': location_info, - 'volume_backend_name': self.backend_name, - 'nfs_mount_point_base': self.nfs_mount_point_base + 'volume_backend_name': self.volume_backend_name, + 'nfs_mount_point_base': self.mount_point_base } - def _escape_path(self, path): - return path.replace('/', '%2F') + def _get_existing_volume(self, existing_ref): + types = { + 'source-name': 'path', + 'source-guid': 'guid' + } + if not any(key in types for key in existing_ref): + keys = ', '.join(types.keys()) + message = (_('Manage existing volume failed ' + 'due to invalid backend reference. 
' + 'Volume reference must contain ' + 'at least one valid key: %(keys)s') + % {'keys': keys}) + raise jsonrpc.NefException(code='EINVAL', message=message) + payload = { + 'parent': self.root_path, + 'fields': 'path', + 'recursive': False + } + for key, value in types.items(): + if key in existing_ref: + if value == 'path': + path = posixpath.join(self.root_path, + existing_ref[key]) + else: + path = existing_ref[key] + payload[value] = path + existing_volumes = self.nef.filesystems.list(payload) + if len(existing_volumes) == 1: + volume_path = existing_volumes[0]['path'] + volume_name = posixpath.basename(volume_path) + existing_volume = { + 'name': volume_name, + 'path': volume_path + } + vid = utils.extract_id_from_volume_name(volume_name) + if utils.check_already_managed_volume(vid): + message = (_('Volume %(name)s already managed') + % {'name': volume_name}) + raise jsonrpc.NefException(code='EBUSY', message=message) + return existing_volume + elif not existing_volumes: + code = 'ENOENT' + reason = _('no matching volumes were found') + else: + code = 'EINVAL' + reason = _('too many volumes were found') + message = (_('Unable to manage existing volume by ' + 'reference %(reference)s: %(reason)s') + % {'reference': existing_ref, 'reason': reason}) + raise jsonrpc.NefException(code=code, message=message) + + def _check_already_managed_snapshot(self, snapshot_id): + """Check cinder database for already managed snapshot. + + :param snapshot_id: snapshot id parameter + :returns: return True, if database entry with specified + snapshot id exists, otherwise return False + """ + if not isinstance(snapshot_id, six.string_types): + return False + try: + uuid.UUID(snapshot_id, version=4) + except ValueError: + return False + ctxt = context.get_admin_context() + return objects.Snapshot.exists(ctxt, snapshot_id) + + def _get_existing_snapshot(self, snapshot, existing_ref): + types = { + 'source-name': 'name', + 'source-guid': 'guid' + } + if not any(key in types for key in existing_ref): + keys = ', '.join(types.keys()) + message = (_('Manage existing snapshot failed ' + 'due to invalid backend reference. 
' + 'Snapshot reference must contain ' + 'at least one valid key: %(keys)s') + % {'keys': keys}) + raise jsonrpc.NefException(code='EINVAL', message=message) + volume_name = snapshot['volume_name'] + volume_size = snapshot['volume_size'] + volume = {'name': volume_name} + volume_path = self._get_volume_path(volume) + payload = { + 'parent': volume_path, + 'fields': 'name,path', + 'recursive': False + } + for key, value in types.items(): + if key in existing_ref: + payload[value] = existing_ref[key] + existing_snapshots = self.nef.snapshots.list(payload) + if len(existing_snapshots) == 1: + name = existing_snapshots[0]['name'] + path = existing_snapshots[0]['path'] + existing_snapshot = { + 'name': name, + 'path': path, + 'volume_name': volume_name, + 'volume_size': volume_size + } + sid = utils.extract_id_from_snapshot_name(name) + if self._check_already_managed_snapshot(sid): + message = (_('Snapshot %(name)s already managed') + % {'name': name}) + raise jsonrpc.NefException(code='EBUSY', message=message) + return existing_snapshot + elif not existing_snapshots: + code = 'ENOENT' + reason = _('no matching snapshots were found') + else: + code = 'EINVAL' + reason = _('too many snapshots were found') + message = (_('Unable to manage existing snapshot by ' + 'reference %(reference)s: %(reason)s') + % {'reference': existing_ref, 'reason': reason}) + raise jsonrpc.NefException(code=code, message=message) + + @coordination.synchronized('{self.nef.lock}') + def manage_existing(self, volume, existing_ref): + """Brings an existing backend storage object under Cinder management. + + existing_ref is passed straight through from the API request's + manage_existing_ref value, and it is up to the driver how this should + be interpreted. It should be sufficient to identify a storage object + that the driver should somehow associate with the newly-created cinder + volume structure. + + There are two ways to do this: + + 1. Rename the backend storage object so that it matches the, + volume['name'] which is how drivers traditionally map between a + cinder volume and the associated backend storage object. + + 2. Place some metadata on the volume, or somewhere in the backend, that + allows other driver requests (e.g. delete, clone, attach, detach...) + to locate the backend storage object when required. + + If the existing_ref doesn't make sense, or doesn't refer to an existing + backend storage object, raise a ManageExistingInvalidReference + exception. + + The volume may have a volume_type, and the driver can inspect that and + compare against the properties of the referenced backend storage + object. If they are incompatible, raise a + ManageExistingVolumeTypeMismatch, specifying a reason for the failure. + + :param volume: Cinder volume to manage + :param existing_ref: Driver-specific information used to identify a + volume + """ + existing_volume = self._get_existing_volume(existing_ref) + existing_volume_path = existing_volume['path'] + if existing_volume['name'] != volume['name']: + volume_path = self._get_volume_path(volume) + payload = {'newPath': volume_path} + self.nef.filesystems.rename(existing_volume_path, payload) + + def manage_existing_get_size(self, volume, existing_ref): + """Return size of volume to be managed by manage_existing. + + When calculating the size, round up to the next GB. 
+ + :param volume: Cinder volume to manage + :param existing_ref: Driver-specific information used to identify a + volume + :returns size: Volume size in GiB (integer) + """ + existing_volume = self._get_existing_volume(existing_ref) + self._set_volume_acl(existing_volume) + self._mount_volume(existing_volume) + local_path = self.local_path(existing_volume) + try: + volume_size = os.path.getsize(local_path) + except OSError as error: + code = errno.errorcode[error.errno] + message = (_('Manage existing volume %(name)s failed: ' + 'unable to get size of volume data file ' + '%(file)s: %(error)s') + % {'name': existing_volume['name'], + 'file': local_path, + 'error': error.strerror}) + raise jsonrpc.NefException(code=code, message=message) + finally: + self._unmount_volume(existing_volume) + return volume_size // units.Gi + + def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, + sort_keys, sort_dirs): + """List volumes on the backend available for management by Cinder. + + Returns a list of dictionaries, each specifying a volume in the host, + with the following keys: + - reference (dictionary): The reference for a volume, which can be + passed to "manage_existing". + - size (int): The size of the volume according to the storage + backend, rounded up to the nearest GB. + - safe_to_manage (boolean): Whether or not this volume is safe to + manage according to the storage backend. For example, is the volume + in use or invalid for any reason. + - reason_not_safe (string): If safe_to_manage is False, the reason why. + - cinder_id (string): If already managed, provide the Cinder ID. + - extra_info (string): Any extra information to return to the user + + :param cinder_volumes: A list of volumes in this host that Cinder + currently manages, used to determine if + a volume is manageable or not. 
+ :param marker: The last item of the previous page; we return the + next results after this value (after sorting) + :param limit: Maximum number of items to return + :param offset: Number of items to skip after marker + :param sort_keys: List of keys to sort results by (valid keys are + 'identifier' and 'size') + :param sort_dirs: List of directions to sort by, corresponding to + sort_keys (valid directions are 'asc' and 'desc') + """ + manageable_volumes = [] + cinder_volume_names = {} + for cinder_volume in cinder_volumes: + key = cinder_volume['name'] + value = cinder_volume['id'] + cinder_volume_names[key] = value + payload = { + 'parent': self.root_path, + 'fields': 'guid,parent,path,bytesUsed', + 'recursive': False + } + volumes = self.nef.filesystems.list(payload) + for volume in volumes: + safe_to_manage = True + reason_not_safe = None + cinder_id = None + extra_info = None + path = volume['path'] + guid = volume['guid'] + parent = volume['parent'] + size = volume['bytesUsed'] // units.Gi + name = posixpath.basename(path) + if path == self.root_path: + continue + if parent != self.root_path: + continue + if name in cinder_volume_names: + cinder_id = cinder_volume_names[name] + safe_to_manage = False + reason_not_safe = _('Volume already managed') + reference = { + 'source-name': name, + 'source-guid': guid + } + manageable_volumes.append({ + 'reference': reference, + 'size': size, + 'safe_to_manage': safe_to_manage, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': extra_info + }) + return utils.paginate_entries_list(manageable_volumes, + marker, limit, offset, + sort_keys, sort_dirs) + + def unmanage(self, volume): + """Removes the specified volume from Cinder management. + + Does not delete the underlying backend storage object. + + For most drivers, this will not need to do anything. However, some + drivers might use this call as an opportunity to clean up any + Cinder-specific configuration that they have associated with the + backend storage object. + + :param volume: Cinder volume to unmanage + """ + pass + + @coordination.synchronized('{self.nef.lock}') + def manage_existing_snapshot(self, snapshot, existing_ref): + """Brings an existing backend storage object under Cinder management. + + existing_ref is passed straight through from the API request's + manage_existing_ref value, and it is up to the driver how this should + be interpreted. It should be sufficient to identify a storage object + that the driver should somehow associate with the newly-created cinder + snapshot structure. + + There are two ways to do this: + + 1. Rename the backend storage object so that it matches the + snapshot['name'] which is how drivers traditionally map between a + cinder snapshot and the associated backend storage object. + + 2. Place some metadata on the snapshot, or somewhere in the backend, + that allows other driver requests (e.g. delete) to locate the + backend storage object when required. + + If the existing_ref doesn't make sense, or doesn't refer to an existing + backend storage object, raise a ManageExistingInvalidReference + exception. 
+ + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + """ + existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) + existing_snapshot_path = existing_snapshot['path'] + if existing_snapshot['name'] != snapshot['name']: + payload = {'newName': snapshot['name']} + self.nef.snapshots.rename(existing_snapshot_path, payload) + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Return size of snapshot to be managed by manage_existing. + + When calculating the size, round up to the next GB. + + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + :returns size: Volume snapshot size in GiB (integer) + """ + existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) + return existing_snapshot['volume_size'] + + def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, + sort_keys, sort_dirs): + """List snapshots on the backend available for management by Cinder. + + Returns a list of dictionaries, each specifying a snapshot in the host, + with the following keys: + - reference (dictionary): The reference for a snapshot, which can be + passed to "manage_existing_snapshot". + - size (int): The size of the snapshot according to the storage + backend, rounded up to the nearest GB. + - safe_to_manage (boolean): Whether or not this snapshot is safe to + manage according to the storage backend. For example, is the snapshot + in use or invalid for any reason. + - reason_not_safe (string): If safe_to_manage is False, the reason why. + - cinder_id (string): If already managed, provide the Cinder ID. + - extra_info (string): Any extra information to return to the user + - source_reference (string): Similar to "reference", but for the + snapshot's source volume. + + :param cinder_snapshots: A list of snapshots in this host that Cinder + currently manages, used to determine if + a snapshot is manageable or not. 
+ :param marker: The last item of the previous page; we return the + next results after this value (after sorting) + :param limit: Maximum number of items to return + :param offset: Number of items to skip after marker + :param sort_keys: List of keys to sort results by (valid keys are + 'identifier' and 'size') + :param sort_dirs: List of directions to sort by, corresponding to + sort_keys (valid directions are 'asc' and 'desc') + + """ + manageable_snapshots = [] + cinder_volume_names = {} + cinder_snapshot_names = {} + ctxt = context.get_admin_context() + cinder_volumes = objects.VolumeList.get_all_by_host(ctxt, self.host) + for cinder_volume in cinder_volumes: + key = self._get_volume_path(cinder_volume) + value = { + 'name': cinder_volume['name'], + 'size': cinder_volume['size'] + } + cinder_volume_names[key] = value + for cinder_snapshot in cinder_snapshots: + key = cinder_snapshot['name'] + value = { + 'id': cinder_snapshot['id'], + 'size': cinder_snapshot['volume_size'], + 'parent': cinder_snapshot['volume_name'] + } + cinder_snapshot_names[key] = value + payload = { + 'parent': self.root_path, + 'fields': 'name,guid,path,parent,hprService,snaplistId', + 'recursive': True + } + snapshots = self.nef.snapshots.list(payload) + for snapshot in snapshots: + safe_to_manage = True + reason_not_safe = None + cinder_id = None + extra_info = None + name = snapshot['name'] + guid = snapshot['guid'] + path = snapshot['path'] + parent = snapshot['parent'] + if parent not in cinder_volume_names: + LOG.debug('Skip snapshot %(path)s: parent ' + 'volume %(parent)s is unmanaged', + {'path': path, 'parent': parent}) + continue + if name.startswith(self.origin_snapshot_template): + LOG.debug('Skip temporary origin snapshot %(path)s', + {'path': path}) + continue + if name.startswith(self.group_snapshot_template): + LOG.debug('Skip temporary group snapshot %(path)s', + {'path': path}) + continue + if snapshot['hprService'] or snapshot['snaplistId']: + LOG.debug('Skip HPR/snapping snapshot %(path)s', + {'path': path}) + continue + if name in cinder_snapshot_names: + size = cinder_snapshot_names[name]['size'] + cinder_id = cinder_snapshot_names[name]['id'] + safe_to_manage = False + reason_not_safe = _('Snapshot already managed') + else: + size = cinder_volume_names[parent]['size'] + payload = {'fields': 'clones'} + props = self.nef.snapshots.get(path) + clones = props['clones'] + unmanaged_clones = [] + for clone in clones: + if clone not in cinder_volume_names: + unmanaged_clones.append(clone) + if unmanaged_clones: + safe_to_manage = False + dependent_clones = ', '.join(unmanaged_clones) + reason_not_safe = (_('Snapshot has unmanaged ' + 'dependent clone(s) %(clones)s') + % {'clones': dependent_clones}) + reference = { + 'source-name': name, + 'source-guid': guid + } + source_reference = { + 'name': cinder_volume_names[parent]['name'] + } + manageable_snapshots.append({ + 'reference': reference, + 'size': size, + 'safe_to_manage': safe_to_manage, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': extra_info, + 'source_reference': source_reference + }) + return utils.paginate_entries_list(manageable_snapshots, + marker, limit, offset, + sort_keys, sort_dirs) + + def unmanage_snapshot(self, snapshot): + """Removes the specified snapshot from Cinder management. + + Does not delete the underlying backend storage object. + + For most drivers, this will not need to do anything. 
However, some + drivers might use this call as an opportunity to clean up any + Cinder-specific configuration that they have associated with the + backend storage object. + + :param snapshot: Cinder volume snapshot to unmanage + """ + pass diff --git a/cinder/volume/drivers/nexenta/options.py b/cinder/volume/drivers/nexenta/options.py index 9255d2209ae..912478b8d76 100644 --- a/cinder/volume/drivers/nexenta/options.py +++ b/cinder/volume/drivers/nexenta/options.py @@ -1,4 +1,4 @@ -# Copyright 2016 Nexenta Systems, Inc. +# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,7 +17,6 @@ from oslo_config import cfg from cinder.volume import configuration as conf -POLL_RETRIES = 5 DEFAULT_ISCSI_PORT = 3260 DEFAULT_HOST_GROUP = 'all' DEFAULT_TARGET_GROUP = 'all' @@ -63,31 +62,53 @@ NEXENTA_EDGE_OPTS = [ ] NEXENTA_CONNECTION_OPTS = [ + cfg.StrOpt('nexenta_host', + default='', + help='IP address of NexentaStor Appliance'), cfg.StrOpt('nexenta_rest_address', deprecated_for_removal=True, deprecated_reason='Rest address should now be set using ' 'the common param depending on driver type, ' 'san_ip or nas_host', default='', - help='IP address of NexentaEdge management REST API endpoint'), - cfg.StrOpt('nexenta_host', - default='', - help='IP address of Nexenta SA'), + help='IP address of NexentaStor management REST API endpoint'), cfg.IntOpt('nexenta_rest_port', deprecated_for_removal=True, deprecated_reason='Rest address should now be set using ' 'the common param san_api_port.', default=0, - help='HTTP(S) port to connect to Nexenta REST API server. ' - 'If it is equal zero, 8443 for HTTPS and 8080 for HTTP ' - 'is used'), + help='HTTP(S) port to connect to NexentaStor management ' + 'REST API server. 
If it is equal zero, 8443 for ' + 'HTTPS and 8080 for HTTP is used'), cfg.StrOpt('nexenta_rest_protocol', default='auto', choices=['http', 'https', 'auto'], - help='Use http or https for REST connection (default auto)'), + help='Use http or https for NexentaStor management ' + 'REST API connection (default auto)'), + cfg.FloatOpt('nexenta_rest_connect_timeout', + default=30, + help='Specifies the time limit (in seconds), within ' + 'which the connection to NexentaStor management ' + 'REST API server must be established'), + cfg.FloatOpt('nexenta_rest_read_timeout', + default=300, + help='Specifies the time limit (in seconds), ' + 'within which NexentaStor management ' + 'REST API server must send a response'), + cfg.FloatOpt('nexenta_rest_backoff_factor', + default=0.5, + help='Specifies the backoff factor to apply ' + 'between connection attempts to NexentaStor ' + 'management REST API server'), + cfg.IntOpt('nexenta_rest_retry_count', + default=3, + help='Specifies the number of times to repeat NexentaStor ' + 'management REST API call in case of connection errors ' + 'and NexentaStor appliance EBUSY or ENOENT errors'), cfg.BoolOpt('nexenta_use_https', default=True, - help='Use secure HTTP for REST connection (default True)'), + help='Use HTTP secure protocol for NexentaStor ' + 'management REST API connections'), cfg.BoolOpt('nexenta_lu_writebackcache_disabled', default=False, help='Postponed write to backing store or not'), @@ -97,24 +118,26 @@ NEXENTA_CONNECTION_OPTS = [ 'depending on the driver type: ' 'san_login or nas_login', default='admin', - help='User name to connect to Nexenta SA'), + help='User name to connect to NexentaStor ' + 'management REST API server'), cfg.StrOpt('nexenta_password', deprecated_for_removal=True, deprecated_reason='Common password parameters should be used ' 'depending on the driver type: ' 'san_password or nas_password', default='nexenta', - help='Password to connect to Nexenta SA', - secret=True), + help='Password to connect to NexentaStor ' + 'management REST API server', + secret=True) ] NEXENTA_ISCSI_OPTS = [ cfg.StrOpt('nexenta_iscsi_target_portal_groups', default='', - help='Nexenta target portal groups'), + help='NexentaStor target portal groups'), cfg.StrOpt('nexenta_iscsi_target_portals', default='', - help='Comma separated list of portals for NexentaStor5, in ' + help='Comma separated list of portals for NexentaStor5, in' 'format of IP1:port1,IP2:port2. Port is optional, ' 'default=3260. 
Example: 10.10.10.1:3267,10.10.1.2'), cfg.StrOpt('nexenta_iscsi_target_host_group', @@ -122,22 +145,22 @@ NEXENTA_ISCSI_OPTS = [ help='Group of hosts which are allowed to access volumes'), cfg.IntOpt('nexenta_iscsi_target_portal_port', default=3260, - help='Nexenta target portal port'), + help='Nexenta appliance iSCSI target portal port'), cfg.IntOpt('nexenta_luns_per_target', default=100, - help='Amount of iSCSI LUNs per each target'), + help='Amount of LUNs per iSCSI target'), cfg.StrOpt('nexenta_volume', default='cinder', - help='SA Pool that holds all volumes'), + help='NexentaStor pool name that holds all volumes'), cfg.StrOpt('nexenta_target_prefix', default='iqn.1986-03.com.sun:02:cinder', - help='IQN prefix for iSCSI targets'), + help='iqn prefix for NexentaStor iSCSI targets'), cfg.StrOpt('nexenta_target_group_prefix', default='cinder', - help='Prefix for iSCSI target groups on SA'), + help='Prefix for iSCSI target groups on NexentaStor'), cfg.StrOpt('nexenta_host_group_prefix', default='cinder', - help='Prefix for iSCSI host groups on SA'), + help='Prefix for iSCSI host groups on NexentaStor'), cfg.StrOpt('nexenta_volume_group', default='iscsi', help='Volume group for NexentaStor5 iSCSI'), @@ -156,6 +179,9 @@ NEXENTA_NFS_OPTS = [ 'sparsed files that take no space. If disabled ' '(False), volume is created as a regular file, ' 'which takes a long time.'), + cfg.BoolOpt('nexenta_qcow2_volumes', + default=False, + help='Create volumes as QCOW2 files rather than raw files'), cfg.BoolOpt('nexenta_nms_cache_volroot', default=True, help=('If set True cache NexentaStor appliance volroot option ' @@ -188,6 +214,12 @@ NEXENTA_DATASET_OPTS = [ cfg.BoolOpt('nexenta_sparse', default=False, help='Enables or disables the creation of sparse datasets'), + cfg.StrOpt('nexenta_origin_snapshot_template', + default='origin-snapshot-%s', + help='Template string to generate origin name of clone'), + cfg.StrOpt('nexenta_group_snapshot_template', + default='group-snapshot-%s', + help='Template string to generate group snapshot name') ] NEXENTA_RRMGR_OPTS = [ diff --git a/cinder/volume/drivers/nexenta/utils.py b/cinder/volume/drivers/nexenta/utils.py index 975383b659b..517911ede66 100644 --- a/cinder/volume/drivers/nexenta/utils.py +++ b/cinder/volume/drivers/nexenta/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013 Nexenta Systems, Inc. +# Copyright 2018 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -38,11 +38,12 @@ def str2size(s, scale=1024): match = re.match(r'^([\.\d]+)\s*([BbKkMmGgTtPpEeZzYy]?)', s) if match is None: - raise ValueError(_('Invalid value: "%s"') % s) + raise ValueError(_('Invalid value: %(value)s') + % {'value': s}) groups = match.groups() value = float(groups[0]) - suffix = groups[1].upper() or 'B' + suffix = groups[1].upper() if groups[1] else 'B' types = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') for i, t in enumerate(types): @@ -61,7 +62,7 @@ def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None, """Returns rrmgr command for source and destination.""" cmd = ['rrmgr', '-s', 'zfs'] if compression: - cmd.extend(['-c', '%s' % compression]) + cmd.extend(['-c', six.text_type(compression)]) cmd.append('-q') cmd.append('-e') if tcp_buf_size: @@ -116,50 +117,11 @@ def parse_nms_url(url): return auto, scheme, user, password, host, port, '/rest/nms/' -def parse_nef_url(url): - """Parse NMS url into normalized parts like scheme, user, host and others. 
- - Example NMS URL: - auto://admin:nexenta@192.168.1.1:8080/ - - NMS URL parts: - - .. code-block:: none - - auto True if url starts with auto://, protocol - will be automatically switched to https - if http not supported; - scheme (auto) connection protocol (http or https); - user (admin) NMS user; - password (nexenta) NMS password; - host (192.168.1.1) NMS host; - port (8080) NMS port. - - :param url: url string - :return: tuple (auto, scheme, user, password, host, port) - """ - pr = urlparse.urlparse(url) - scheme = pr.scheme - auto = scheme == 'auto' - if auto: - scheme = 'http' - user = 'admin' - password = 'nexenta' - if '@' not in pr.netloc: - host_and_port = pr.netloc - else: - user_and_password, host_and_port = pr.netloc.split('@', 1) - if ':' in user_and_password: - user, password = user_and_password.split(':') - else: - user = user_and_password - if ':' in host_and_port: - host, port = host_and_port.split(':', 1) - else: - host, port = host_and_port, '8080' - return auto, scheme, user, password, host, port - - def get_migrate_snapshot_name(volume): """Return name for snapshot that will be used to migrate the volume.""" return 'cinder-migrate-snapshot-%(id)s' % volume + + +def ex2err(ex): + """Convert a Cinder Exception to a Nexenta Error.""" + return ex.msg diff --git a/releasenotes/notes/nexentastor5-driver-update-937d2a1ba76a504a.yaml b/releasenotes/notes/nexentastor5-driver-update-937d2a1ba76a504a.yaml new file mode 100644 index 00000000000..4fcd8c30e04 --- /dev/null +++ b/releasenotes/notes/nexentastor5-driver-update-937d2a1ba76a504a.yaml @@ -0,0 +1,34 @@ +--- +features: + - Added revert to snapshot support for NexentaStor5 iSCSI and NFS drivers. + - NexentaStor5 iSCSI and NFS drivers multiattach capability enabled. + - Added support for creating, deleting, and updating consistency groups + for NexentaStor5 iSCSI and NFS drivers. + - Added support for taking, deleting, and restoring consistency group + snapshots for NexentaStor5 iSCSI and NFS drivers. + - Added consistency group capability to generic volume groups for + NexentaStor5 iSCSI and NFS drivers. + - Added volume manage/unmanage support for NexentaStor5 iSCSI and NFS + drivers. + - Added snapshot manage/unmanage support for NexentaStor5 iSCSI and NFS + drivers. + - Added the ability to list manageable volumes and snapshots for + NexentaStor5 iSCSI and NFS drivers. +upgrade: + - Added a new config option ``nexenta_rest_connect_timeout``. This option + specifies the time limit (in seconds), within which the connection to + NexentaStor management REST API server must be established. + - Added a new config option ``nexenta_rest_read_timeout``. This option + specifies the time limit (in seconds), within which NexentaStor + management REST API server must send a response. + - Added a new config option ``nexenta_rest_backoff_factor``. This option + specifies the backoff factor to apply between connection attempts to + NexentaStor management REST API server. + - Added a new config option ``nexenta_rest_retry_count``. This option + specifies the number of times to repeat NexentaStor management REST + API call in case of connection errors and NexentaStor appliance EBUSY + or ENOENT errors. + - Added a new config option ``nexenta_origin_snapshot_template``. + This option specifies template string to generate origin name of clone. + - Added a new config option ``nexenta_group_snapshot_template``. + This option specifies template string to generate group snapshot name.
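
As a usage illustration (not part of the patch itself), a minimal cinder.conf backend section for the NS5 NFS driver exercising the options introduced above might look like the sketch below. The backend name, appliance address and share are placeholders, the driver module path is assumed to be the usual ns5 NFS location, and the numeric values are simply the defaults declared in options.py.

    [nexenta_ns5_nfs]
    volume_backend_name = nexenta_ns5_nfs
    volume_driver = cinder.volume.drivers.nexenta.ns5.nfs.NexentaNfsDriver
    nas_host = 10.0.0.10
    nas_share_path = tank/cinder
    # REST client tuning options added by this change (defaults shown)
    nexenta_rest_connect_timeout = 30
    nexenta_rest_read_timeout = 300
    nexenta_rest_backoff_factor = 0.5
    nexenta_rest_retry_count = 3
    # Snapshot naming templates added by this change (defaults shown)
    nexenta_origin_snapshot_template = origin-snapshot-%s
    nexenta_group_snapshot_template = group-snapshot-%s
    # Optional: create volumes as QCOW2 files instead of raw sparse files
    nexenta_qcow2_volumes = False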
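
The manage_existing/manage_existing_snapshot calls added above accept a driver-specific reference containing either a 'source-name' or a 'source-guid' key, which _get_existing_volume() and _get_existing_snapshot() resolve against the configured root path. A small Python sketch of such references follows; the root path, names and GUID are made up for illustration.

    import posixpath

    # Illustrative values only; a real reference must point at a filesystem or
    # snapshot that actually exists under the configured root path.
    root_path = 'tank/cinder'

    # A volume can be referenced by its filesystem name relative to root_path ...
    volume_ref = {'source-name': 'legacy-volume'}
    # ... or by the dataset GUID reported by the appliance.
    volume_ref_by_guid = {'source-guid': '12765945411946000000'}

    # _get_existing_volume() resolves 'source-name' against the root path:
    print(posixpath.join(root_path, volume_ref['source-name']))
    # -> tank/cinder/legacy-volume

    # Snapshots are referenced the same way, by name or GUID, and are looked up
    # under their parent volume's path by _get_existing_snapshot().
    snapshot_ref = {'source-name': 'legacy-snapshot'}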
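
Finally, a short sketch of how the two new template options translate into snapshot names on the appliance. This mirrors the string formatting used by create_cloned_volume() and create_cgsnapshot(); get_manageable_snapshots() uses the same prefixes to skip these temporary snapshots. The ids below are illustrative.

    origin_snapshot_template = 'origin-snapshot-%s'  # nexenta_origin_snapshot_template default
    group_snapshot_template = 'group-snapshot-%s'    # nexenta_group_snapshot_template default

    volume_id = '0f3b0e48-1a2b-4c3d-9e8f-000000000001'  # illustrative Cinder volume id
    group_id = '7d1c2b3a-4e5f-6a7b-8c9d-000000000002'   # illustrative group snapshot id

    # Temporary origin snapshot taken on the source volume by create_cloned_volume()
    print(origin_snapshot_template % volume_id)
    # -> origin-snapshot-0f3b0e48-1a2b-4c3d-9e8f-000000000001

    # Recursive group snapshot taken on the backend root filesystem by create_cgsnapshot()
    print(group_snapshot_template % group_id)
    # -> group-snapshot-7d1c2b3a-4e5f-6a7b-8c9d-000000000002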