Remove the Tintri Driver

The Tintri driver was marked as unsupported in
the Stein release. Its third-party CI has not reported on a patch in 312
days, which is outside of Cinder's 3rd Party CI requirements.
As a result, the driver should be removed.

This patch proposes that removal.

Change-Id: I8e69dc2199ad28b99144ba2618d114392e6fa5e5
Jay S. Bryant 2019-03-26 16:29:54 -05:00
parent 6cbc53b718
commit 0423642951
8 changed files with 18 additions and 1363 deletions


@@ -147,7 +147,6 @@ from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire
from cinder.volume.drivers import storpool as cinder_volume_drivers_storpool
from cinder.volume.drivers.synology import synology_common as \
cinder_volume_drivers_synology_synologycommon
from cinder.volume.drivers import tintri as cinder_volume_drivers_tintri
from cinder.volume.drivers.veritas_access import veritas_iscsi as \
cinder_volume_drivers_veritas_access_veritasiscsi
from cinder.volume.drivers.vmware import vmdk as \
@@ -352,7 +351,6 @@ def list_opts():
cinder_volume_drivers_sheepdog.sheepdog_opts,
cinder_volume_drivers_solidfire.sf_opts,
cinder_volume_drivers_synology_synologycommon.cinder_opts,
cinder_volume_drivers_tintri.tintri_opts,
cinder_volume_drivers_vmware_vmdk.vmdk_opts,
cinder_volume_drivers_vzstorage.vzstorage_opts,
cinder_volume_drivers_windows_iscsi.windows_opts,


@@ -1,285 +0,0 @@
# Copyright (c) 2015 Tintri. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver test for Tintri storage.
"""
import ddt
import mock
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as cinder_utils
from cinder.tests.unit.volume import test_driver
from cinder.volume.drivers.tintri import TClient
from cinder.volume.drivers.tintri import TintriDriver
class FakeImage(object):
def __init__(self):
self.id = 'image-id'
self.name = 'image-name'
self.properties = {'provider_location': 'nfs://share'}
def __getitem__(self, key):
return self.__dict__[key]
@ddt.ddt
class TintriDriverTestCase(test_driver.BaseDriverTestCase):
driver_name = 'cinder.volume.drivers.tintri.TintriDriver'
def setUp(self):
super(TintriDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.configuration.nfs_mount_point_base = '/mnt/test'
self.configuration.nfs_mount_options = None
self.configuration.nas_mount_options = None
self._driver = TintriDriver(configuration=self.configuration)
self._driver._hostname = 'host'
self._driver._username = 'user'
self._driver._password = 'password'
self._driver._api_version = 'v310'
self._driver._image_cache_expiry = 30
self._provider_location = 'localhost:/share'
self._driver._mounted_shares = [self._provider_location]
self.fake_stubs()
def fake_stubs(self):
self.mock_object(TClient, 'login', self.fake_login)
self.mock_object(TClient, 'logout', self.fake_logout)
self.mock_object(TClient, 'get_snapshot', self.fake_get_snapshot)
self.mock_object(TClient, 'get_image_snapshots_to_date',
self.fake_get_image_snapshots_to_date)
self.mock_object(TintriDriver, '_move_cloned_volume',
self.fake_move_cloned_volume)
self.mock_object(TintriDriver, '_get_provider_location',
self.fake_get_provider_location)
self.mock_object(TintriDriver, '_set_rw_permissions',
self.fake_set_rw_permissions)
self.mock_object(TintriDriver, '_is_volume_present',
self.fake_is_volume_present)
self.mock_object(TintriDriver, '_is_share_vol_compatible',
self.fake_is_share_vol_compatible)
self.mock_object(TintriDriver, '_is_file_size_equal',
self.fake_is_file_size_equal)
def fake_login(self, user_name, password):
return 'session-id'
def fake_logout(self):
pass
def fake_get_snapshot(self, volume_id):
return fake.SNAPSHOT_ID
def fake_get_image_snapshots_to_date(self, date):
return [{'uuid': {'uuid': 'image_snapshot-id'}}]
def fake_move_cloned_volume(self, clone_name, volume_id, share=None):
pass
def fake_get_provider_location(self, volume_path):
return self._provider_location
def fake_set_rw_permissions(self, path):
pass
def fake_is_volume_present(self, volume_path):
return True
def fake_is_share_vol_compatible(self, volume, share):
return True
def fake_is_file_size_equal(self, path, size):
return True
@mock.patch.object(TClient, 'create_snapshot',
mock.Mock(return_value=fake.PROVIDER_ID))
def test_create_snapshot(self):
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
volume = fake_volume.fake_volume_obj(self.context)
provider_id = fake.PROVIDER_ID
snapshot.volume = volume
with mock.patch('cinder.objects.snapshot.Snapshot.save'):
self.assertEqual({'provider_id': fake.PROVIDER_ID},
self._driver.create_snapshot(snapshot))
self.assertEqual(provider_id, snapshot.provider_id)
@mock.patch.object(TClient, 'create_snapshot', mock.Mock(
side_effect=exception.VolumeDriverException))
def test_create_snapshot_failure(self):
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
volume = fake_volume.fake_volume_obj(self.context)
snapshot.volume = volume
self.assertRaises(exception.VolumeDriverException,
self._driver.create_snapshot, snapshot)
@mock.patch.object(TClient, 'delete_snapshot', mock.Mock())
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
cinder_utils.ZeroIntervalLoopingCall)
def test_cleanup_cache(self):
self.assertFalse(self._driver.cache_cleanup)
timer = self._driver._initiate_image_cache_cleanup()
# wait for cache cleanup to complete
timer.wait()
self.assertFalse(self._driver.cache_cleanup)
@mock.patch.object(TClient, 'delete_snapshot', mock.Mock(
side_effect=exception.VolumeDriverException))
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
cinder_utils.ZeroIntervalLoopingCall)
def test_cleanup_cache_delete_fail(self):
self.assertFalse(self._driver.cache_cleanup)
timer = self._driver._initiate_image_cache_cleanup()
# wait for cache cleanup to complete
timer.wait()
self.assertFalse(self._driver.cache_cleanup)
@mock.patch.object(TClient, 'delete_snapshot', mock.Mock())
def test_delete_snapshot(self):
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
snapshot.provider_id = fake.PROVIDER_ID
self.assertIsNone(self._driver.delete_snapshot(snapshot))
@mock.patch.object(TClient, 'delete_snapshot', mock.Mock(
side_effect=exception.VolumeDriverException))
def test_delete_snapshot_failure(self):
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
snapshot.provider_id = fake.PROVIDER_ID
self.assertRaises(exception.VolumeDriverException,
self._driver.delete_snapshot, snapshot)
@mock.patch.object(TClient, 'clone_volume', mock.Mock())
def test_create_volume_from_snapshot(self):
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
volume = fake_volume.fake_volume_obj(self.context)
self.assertEqual({'provider_location': self._provider_location},
self._driver.create_volume_from_snapshot(
volume, snapshot))
@mock.patch.object(TClient, 'clone_volume', mock.Mock(
side_effect=exception.VolumeDriverException))
def test_create_volume_from_snapshot_failure(self):
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
volume = fake_volume.fake_volume_obj(self.context)
self.assertRaises(exception.VolumeDriverException,
self._driver.create_volume_from_snapshot,
volume, snapshot)
@mock.patch.object(TClient, 'clone_volume', mock.Mock())
@mock.patch.object(TClient, 'create_snapshot', mock.Mock())
def test_create_cloned_volume(self):
volume = fake_volume.fake_volume_obj(self.context)
self.assertEqual({'provider_location': self._provider_location},
self._driver.create_cloned_volume(volume, volume))
@mock.patch.object(TClient, 'clone_volume', mock.Mock(
side_effect=exception.VolumeDriverException))
@mock.patch.object(TClient, 'create_snapshot', mock.Mock())
def test_create_cloned_volume_failure(self):
volume = fake_volume.fake_volume_obj(self.context)
self.assertRaises(exception.VolumeDriverException,
self._driver.create_cloned_volume, volume, volume)
@mock.patch.object(TClient, 'clone_volume', mock.Mock())
def test_clone_image(self):
volume = fake_volume.fake_volume_obj(self.context)
self.assertEqual(({'provider_location': self._provider_location,
'bootable': True}, True),
self._driver.clone_image(
None, volume, 'image-name', FakeImage().__dict__,
None))
@mock.patch.object(TClient, 'clone_volume', mock.Mock(
side_effect=exception.VolumeDriverException))
def test_clone_image_failure(self):
volume = fake_volume.fake_volume_obj(self.context)
self.assertEqual(({'provider_location': None,
'bootable': False}, False),
self._driver.clone_image(
None, volume, 'image-name', FakeImage().__dict__,
None))
def test_manage_existing(self):
volume = fake_volume.fake_volume_obj(self.context)
existing = {'source-name': self._provider_location + '/' +
volume.name}
with mock.patch('os.path.isfile', return_value=True):
self.assertEqual({'provider_location': self._provider_location},
self._driver.manage_existing(volume, existing))
def test_manage_existing_invalid_ref(self):
existing = fake_volume.fake_volume_obj(self.context)
volume = fake_volume.fake_volume_obj(self.context)
self.assertRaises(exception.ManageExistingInvalidReference,
self._driver.manage_existing, volume, existing)
def test_manage_existing_not_found(self):
volume = fake_volume.fake_volume_obj(self.context)
existing = {'source-name': self._provider_location + '/' +
volume.name}
with mock.patch('os.path.isfile', return_value=False):
self.assertRaises(exception.ManageExistingInvalidReference,
self._driver.manage_existing, volume, existing)
@mock.patch.object(TintriDriver, '_move_file', mock.Mock(
return_value=False))
def test_manage_existing_move_failure(self):
volume = fake_volume.fake_volume_obj(self.context)
existing = {'source-name': self._provider_location + '/source-volume'}
with mock.patch('os.path.isfile', return_value=True):
self.assertRaises(exception.VolumeDriverException,
self._driver.manage_existing,
volume, existing)
@ddt.data((123, 123), (123.5, 124))
@ddt.unpack
def test_manage_existing_get_size(self, st_size, exp_size):
volume = fake_volume.fake_volume_obj(self.context)
existing = {'source-name': self._provider_location + '/' +
volume.name}
file = mock.Mock(st_size=int(st_size * units.Gi))
with mock.patch('os.path.isfile', return_value=True):
with mock.patch('os.stat', return_value=file):
self.assertEqual(exp_size,
self._driver.manage_existing_get_size(
volume, existing))
def test_manage_existing_get_size_failure(self):
volume = fake_volume.fake_volume_obj(self.context)
existing = {'source-name': self._provider_location + '/' +
volume.name}
with mock.patch('os.path.isfile', return_value=True):
with mock.patch('os.stat', side_effect=OSError):
self.assertRaises(exception.VolumeDriverException,
self._driver.manage_existing_get_size,
volume, existing)
def test_unmanage(self):
volume = fake_volume.fake_volume_obj(self.context)
volume.provider_location = self._provider_location
self._driver.unmanage(volume)
def test_retype(self):
volume = fake_volume.fake_volume_obj(self.context)
retype, update = self._driver.retype(None, volume, None, None, None)
self.assertTrue(retype)
self.assertIsNone(update)


@@ -1,978 +0,0 @@
# Copyright (c) 2015 Tintri. All rights reserved.
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Tintri storage.
"""
import datetime
import json
import math
import os
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import requests
from six.moves import urllib
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers import nfs
LOG = logging.getLogger(__name__)
default_api_version = 'v310'
img_prefix = 'image-'
tintri_path = '/tintri/'
tintri_opts = [
cfg.StrOpt('tintri_server_hostname',
help='The hostname (or IP address) for the storage system'),
cfg.StrOpt('tintri_server_username',
help='User name for the storage system'),
cfg.StrOpt('tintri_server_password',
help='Password for the storage system',
secret=True),
cfg.StrOpt('tintri_api_version',
default=default_api_version,
help='API version for the storage system'),
cfg.IntOpt('tintri_image_cache_expiry_days',
default=30,
help='Delete unused image snapshots older than mentioned days'),
cfg.StrOpt('tintri_image_shares_config',
help='Path to image nfs shares file'),
]
CONF = cfg.CONF
CONF.register_opts(tintri_opts, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class TintriDriver(driver.ManageableVD,
driver.CloneableImageVD,
nfs.NfsDriver):
"""Base class for Tintri driver.
Version History
.. code-block:: none
2.1.0.1 - Liberty driver
2.2.0.1 - Mitaka driver
-- Retype
-- Image cache clean up
-- Direct image clone fix
"""
VENDOR = 'Tintri'
VERSION = '2.2.0.1'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Tintri_CI"
# TODO(jsbryant) Remove driver in the 'T' release if CI is not fixed
SUPPORTED = False
REQUIRED_OPTIONS = ['tintri_server_hostname', 'tintri_server_username',
'tintri_server_password']
def __init__(self, *args, **kwargs):
self._execute = None
self._context = None
super(TintriDriver, self).__init__(*args, **kwargs)
self._execute_as_root = True
self.configuration.append_config_values(tintri_opts)
self.cache_cleanup = False
self._mounted_image_shares = []
def do_setup(self, context):
self._image_shares_config = getattr(self.configuration,
'tintri_image_shares_config')
super(TintriDriver, self).do_setup(context)
self._context = context
self._check_ops(self.REQUIRED_OPTIONS, self.configuration)
self._hostname = getattr(self.configuration, 'tintri_server_hostname')
self._username = getattr(self.configuration, 'tintri_server_username')
self._password = getattr(self.configuration, 'tintri_server_password')
self._api_version = getattr(self.configuration, 'tintri_api_version')
self._image_cache_expiry = getattr(self.configuration,
'tintri_image_cache_expiry_days')
self.verify_ssl = getattr(self.configuration, 'driver_ssl_cert_verify')
self.ssl_cert_path = getattr(self.configuration,
'driver_ssl_cert_path')
def get_pool(self, volume):
"""Returns pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
return volume['provider_location']
def _get_client(self):
"""Returns a Tintri REST client connection."""
return TClient(self._hostname, self._username, self._password,
self._api_version)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
(__, path) = self._get_export_ip_path(snapshot.volume_id)
volume_path = '%s/%s' % (path, snapshot.volume_name)
volume_path = '%(path)s/%(volume_name)s' % {
'path': path,
'volume_name': snapshot.volume_name,
}
model_update = {}
with self._get_client() as c:
provider_id = c.create_snapshot(volume_path,
snapshot.volume.display_name or
snapshot.volume_name,
snapshot.volume_id,
snapshot.display_name or
snapshot.name)
snapshot.provider_id = provider_id
# Store Tintri snapshot ID as snapshot provider_id
model_update['provider_id'] = provider_id
return model_update
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if snapshot.provider_id:
with self._get_client() as c:
c.delete_snapshot(snapshot.provider_id)
else:
LOG.info('Snapshot %s not found', snapshot.name)
def _check_ops(self, required_ops, configuration):
"""Ensures that the options we care about are set."""
for op in required_ops:
if not getattr(configuration, op):
LOG.error('Configuration value %s is not set.', op)
raise exception.InvalidConfigurationValue(option=op,
value=None)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from snapshot."""
vol_size = volume.size
snap_size = snapshot.volume_size
self._clone_snapshot(snapshot.provider_id, volume.name,
snapshot.volume_id)
share = self._get_provider_location(snapshot.volume_id)
volume['provider_location'] = share
path = self.local_path(volume)
self._set_rw_permissions(path)
if vol_size != snap_size:
try:
self.extend_volume(volume, vol_size)
except Exception:
LOG.error('Resizing %s failed. Cleaning volume.',
volume.name)
self._delete_file(path)
raise
return {'provider_location': volume['provider_location']}
def _clone_snapshot(self, snapshot_id, clone_name, volume_id, share=None):
"""Clones volume from snapshot."""
(host, path) = self._get_export_ip_path(volume_id, share)
clone_path = '%s/%s-d' % (path, clone_name)
with self._get_client() as c:
c.clone_volume(snapshot_id, clone_path)
self._move_cloned_volume(clone_name, volume_id, share)
def _move_cloned_volume(self, clone_name, volume_id, share=None):
local_path = self._get_local_path(volume_id, share)
source_path = os.path.join(local_path, clone_name + '-d')
if self._is_volume_present(source_path):
source_file = os.listdir(source_path)[0]
source = os.path.join(source_path, source_file)
target = os.path.join(local_path, clone_name)
moved = self._move_file(source, target)
self._execute('rm', '-rf', source_path,
run_as_root=self._execute_as_root)
if not moved:
msg = (_('Failed to move volume %s.') % source)
raise exception.VolumeDriverException(msg)
else:
raise exception.VolumeDriverException(
_('Volume %s not found.') % source_path)
def _clone_volume_to_volume(self, volume_name, clone_name,
volume_display_name, volume_id,
share=None, dst=None, image_id=None):
"""Creates volume snapshot then clones volume."""
(__, path) = self._get_export_ip_path(volume_id, share)
volume_path = '%s/%s' % (path, volume_name)
if dst:
(___, dst_path) = self._get_export_ip_path(None, dst)
clone_path = '%s/%s-d' % (dst_path, clone_name)
else:
clone_path = '%s/%s-d' % (path, clone_name)
with self._get_client() as c:
if share and image_id:
snapshot_id = self._create_image_snapshot(volume_name,
share,
image_id,
volume_display_name)
else:
snapshot_id = c.create_snapshot(
volume_path, volume_display_name, volume_id, volume_name,
deletion_policy='DELETE_ON_ZERO_CLONE_REFERENCES')
c.clone_volume(snapshot_id, clone_path)
self._move_cloned_volume(clone_name, volume_id, dst or share)
@utils.synchronized('cache_cleanup')
def _initiate_image_cache_cleanup(self):
if self.cache_cleanup:
LOG.debug('Image cache cleanup in progress.')
return
else:
self.cache_cleanup = True
timer = loopingcall.FixedIntervalLoopingCall(
self._cleanup_cache)
timer.start(interval=None)
return timer
def _cleanup_cache(self):
LOG.debug('Cache cleanup: starting.')
try:
# Cleanup used cached image snapshots 30 days and older
t = datetime.datetime.utcnow() - datetime.timedelta(
days=self._image_cache_expiry)
date = t.strftime("%Y-%m-%dT%H:%M:%S")
with self._get_client() as c:
# Get eligible snapshots to clean
image_snaps = c.get_image_snapshots_to_date(date)
if image_snaps:
for snap in image_snaps:
uuid = snap['uuid']['uuid']
LOG.debug(
'Cache cleanup: deleting image snapshot %s', uuid)
try:
c.delete_snapshot(uuid)
except Exception:
LOG.exception('Unexpected exception during '
'cache cleanup of snapshot %s',
uuid)
else:
LOG.debug('Cache cleanup: nothing to clean')
finally:
self.cache_cleanup = False
LOG.debug('Cache cleanup: finished')
raise loopingcall.LoopingCallDone()
def _update_volume_stats(self):
"""Retrieves stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.VENDOR
data['vendor_name'] = self.VENDOR
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
self._initiate_image_cache_cleanup()
pools = []
for share in self._mounted_shares:
pool = dict()
capacity, free, used = self._get_capacity_info(share)
pool['pool_name'] = share
pool['total_capacity_gb'] = capacity / float(units.Gi)
pool['free_capacity_gb'] = free / float(units.Gi)
pool['reserved_percentage'] = 0
pool['QoS_support'] = True
pools.append(pool)
data['pools'] = pools
self._stats = data
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
volume = self.db.volume_get(self._context, volume_id)
return volume.provider_location
def _get_host_ip(self, volume_id):
"""Returns IP address for the given volume."""
return self._get_provider_location(volume_id).split(':')[0]
def _get_export_path(self, volume_id):
"""Returns NFS export path for the given volume."""
return self._get_provider_location(volume_id).split(':')[1]
def _is_volume_present(self, volume_path):
"""Checks if volume exists."""
try:
self._execute('ls', volume_path,
run_as_root=self._execute_as_root)
except Exception:
return False
return True
def _get_volume_path(self, nfs_share, volume_name):
"""Gets local volume path for given volume name on given nfs share."""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_size = volume.size
src_vol_size = src_vref.size
self._clone_volume_to_volume(src_vref.name, volume.name,
src_vref.display_name,
src_vref.id)
share = self._get_provider_location(src_vref.id)
volume['provider_location'] = share
path = self.local_path(volume)
self._set_rw_permissions(path)
if vol_size != src_vol_size:
try:
self.extend_volume(volume, vol_size)
except Exception:
LOG.error('Resizing %s failed. Cleaning volume.',
volume.name)
self._delete_file(path)
raise
return {'provider_location': volume['provider_location']}
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetches the image from image_service and write it to the volume."""
super(TintriDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info('Copied image to volume %s using regular download.',
volume['name'])
self._create_image_snapshot(volume['name'],
volume['provider_location'], image_id,
img_prefix + image_id)
def _create_image_snapshot(self, volume_name, share, image_id, image_name):
"""Creates an image snapshot."""
snapshot_name = img_prefix + image_id
LOG.info('Creating image snapshot %s', snapshot_name)
(host, path) = self._get_export_ip_path(None, share)
volume_path = '%s/%s' % (path, volume_name)
@utils.synchronized(snapshot_name, external=True)
def _do_snapshot():
with self._get_client() as c:
snapshot_id = c.get_snapshot(image_id)
if not snapshot_id:
snapshot_id = c.create_snapshot(volume_path, image_name,
image_id, snapshot_name)
return snapshot_id
try:
return _do_snapshot()
except Exception as e:
LOG.warning('Exception while creating image %(image_id)s '
'snapshot. Exception: %(exc)s',
{'image_id': image_id, 'exc': e})
def _find_image_snapshot(self, image_id):
"""Finds image snapshot."""
with self._get_client() as c:
return c.get_snapshot(image_id)
def _clone_image_snapshot(self, snapshot_id, dst, share):
"""Clones volume from image snapshot."""
file_path = self._get_volume_path(share, dst)
if not os.path.exists(file_path):
LOG.info('Cloning from snapshot to destination %s', dst)
self._clone_snapshot(snapshot_id, dst, volume_id=None,
share=share)
def _delete_file(self, path):
"""Deletes file from disk and return result as boolean."""
try:
LOG.debug('Deleting file at path %s', path)
cmd = ['rm', '-f', path]
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as e:
LOG.warning('Exception during deleting %s', e)
return False
def _move_file(self, source_path, dest_path):
"""Moves source to destination."""
@utils.synchronized(dest_path, external=True)
def _do_move(src, dst):
if os.path.exists(dst):
LOG.warning('Destination %s already exists.', dst)
return False
self._execute('mv', src, dst, run_as_root=self._execute_as_root)
return True
try:
return _do_move(source_path, dest_path)
except Exception as e:
LOG.warning('Exception moving file %(src)s. Message: %(e)s',
{'src': source_path, 'e': e})
return False
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Creates a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
Returns a dict of volume properties eg. provider_location,
boolean indicating whether cloning occurred.
"""
image_name = image_meta['name']
image_id = image_meta['id']
if 'properties' in image_meta:
provider_location = image_meta['properties'].get(
'provider_location')
if provider_location:
image_location = (provider_location, None)
cloned = False
post_clone = False
try:
snapshot_id = self._find_image_snapshot(image_id)
if snapshot_id:
cloned = self._clone_from_snapshot(volume, image_id,
snapshot_id)
else:
cloned = self._direct_clone(volume, image_location,
image_id, image_name)
if cloned:
post_clone = self._post_clone_image(volume)
except Exception as e:
LOG.info('Image cloning unsuccessful for image '
'%(image_id)s. Message: %(msg)s',
{'image_id': image_id, 'msg': e})
vol_path = self.local_path(volume)
volume['provider_location'] = None
if os.path.exists(vol_path):
self._delete_file(vol_path)
finally:
cloned = cloned and post_clone
share = volume['provider_location'] if cloned else None
bootable = True if cloned else False
return {'provider_location': share, 'bootable': bootable}, cloned
def _clone_from_snapshot(self, volume, image_id, snapshot_id):
"""Clones a copy from image snapshot."""
cloned = False
LOG.info('Cloning image %s from snapshot.', image_id)
for share in self._mounted_shares:
# Repeat tries in other shares if failed in some
LOG.debug('Image share: %s', share)
if (share and
self._is_share_vol_compatible(volume, share)):
try:
self._clone_image_snapshot(snapshot_id, volume['name'],
share)
cloned = True
volume['provider_location'] = share
break
except Exception:
LOG.warning('Unexpected exception during '
'image cloning in share %s', share)
return cloned
def _direct_clone(self, volume, image_location, image_id, image_name):
"""Clones directly in nfs share."""
LOG.info('Checking image clone %s from glance share.', image_id)
cloned = False
image_location = self._get_image_nfs_url(image_location)
share = self._is_cloneable_share(image_location)
run_as_root = self._execute_as_root
dst_share = None
for dst in self._mounted_shares:
if dst and self._is_share_vol_compatible(volume, dst):
dst_share = dst
LOG.debug('Image dst share: %s', dst)
break
if not dst_share:
return cloned
LOG.debug('Share is cloneable %s', dst_share)
volume['provider_location'] = dst_share
(__, ___, img_file) = image_location.rpartition('/')
dir_path = self._get_mount_point_for_share(share)
dst_path = self._get_mount_point_for_share(dst_share)
img_path = '%s/%s' % (dir_path, img_file)
img_info = image_utils.qemu_img_info(img_path,
run_as_root=run_as_root)
if img_info.file_format == 'raw':
LOG.debug('Image is raw %s', image_id)
self._clone_volume_to_volume(
img_file, volume['name'], image_name,
volume_id=None, share=share, dst=dst_share, image_id=image_id)
cloned = True
else:
LOG.info('Image will locally be converted to raw %s',
image_id)
dst = '%s/%s' % (dst_path, volume['name'])
image_utils.convert_image(img_path, dst, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst, run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_('Converted to raw, but '
'format is now %s') % data.file_format)
else:
cloned = True
self._create_image_snapshot(
volume['name'], volume['provider_location'],
image_id, image_name)
return cloned
def _post_clone_image(self, volume):
"""Performs operations post image cloning."""
LOG.info('Performing post clone for %s', volume['name'])
vol_path = self.local_path(volume)
self._set_rw_permissions(vol_path)
self._resize_image_file(vol_path, volume['size'])
return True
def _resize_image_file(self, path, new_size):
"""Resizes the image file on share to new size."""
LOG.debug('Checking file for resize.')
if self._is_file_size_equal(path, new_size):
return
else:
LOG.info('Resizing file to %sG', new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if self._is_file_size_equal(path, new_size):
return
else:
raise exception.InvalidResults(
_('Resizing image file failed.'))
def _is_cloneable_share(self, image_location):
"""Finds if the image at location is cloneable."""
conn, dr = self._check_nfs_path(image_location)
return self._is_share_in_use(conn, dr)
def _check_nfs_path(self, image_location):
"""Checks if the nfs path format is matched.
WebNFS url format with relative-path is supported.
Accepting all characters in path-names and checking against
the mounted shares which will contain only allowed path segments.
Returns connection and dir details.
"""
conn, dr = None, None
if image_location:
nfs_loc_pattern = \
r'^nfs://(([\w\-\.]+:[\d]+|[\w\-\.]+)(/[^/].*)*(/[^/\\\\]+))$'
matched = re.match(nfs_loc_pattern, image_location)
if not matched:
LOG.debug('Image location not in the expected format %s',
image_location)
else:
conn = matched.group(2)
dr = matched.group(3) or '/'
return conn, dr
def _is_share_in_use(self, conn, dr):
"""Checks if share is cinder mounted and returns it."""
try:
if conn:
host = conn.split(':')[0]
ip = utils.resolve_hostname(host)
for sh in self._mounted_shares + self._mounted_image_shares:
sh_ip = utils.resolve_hostname(sh.split(':')[0])
sh_exp = sh.split(':')[1]
if sh_ip == ip and sh_exp == dr:
LOG.debug('Found share match %s', sh)
return sh
except Exception:
LOG.warning('Unexpected exception while listing used share.')
def _get_image_nfs_url(self, image_location):
"""Gets direct url for nfs backend.
It creates direct url from image_location
which is a tuple with direct_url and locations.
Returns url with nfs scheme if nfs store else returns url.
It needs to be verified by backend before use.
"""
direct_url, locations = image_location
if not direct_url and not locations:
raise exception.NotFound(_('Image location not present.'))
# Locations will be always a list of one until
# bp multiple-image-locations is introduced
if not locations:
return direct_url
location = locations[0]
url = location['url']
if not location['metadata']:
return url
location_type = location['metadata'].get('type')
if not location_type or location_type.lower() != "nfs":
return url
share_location = location['metadata'].get('share_location')
mount_point = location['metadata'].get('mount_point')
if not share_location or not mount_point:
return url
url_parse = urllib.parse.urlparse(url)
abs_path = os.path.join(url_parse.netloc, url_parse.path)
rel_path = os.path.relpath(abs_path, mount_point)
direct_url = "%s/%s" % (share_location, rel_path)
return direct_url
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
return self._is_share_eligible(share, volume['size'])
def _can_share_hold_size(self, share, size):
"""Checks if volume can hold image with size."""
_tot_size, tot_available, _tot_allocated = self._get_capacity_info(
share)
if tot_available < size:
msg = _('Container size smaller than required file size.')
raise exception.VolumeDriverException(msg)
def _get_export_ip_path(self, volume_id=None, share=None):
"""Returns export ip and path.
One of volume id or share is used to return the values.
"""
if volume_id:
host_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
elif share:
host_ip = share.split(':')[0]
export_path = share.split(':')[1]
else:
raise exception.InvalidInput(
reason=_('A volume ID or share was not specified.'))
return host_ip, export_path
def _get_local_path(self, volume_id=None, share=None):
"""Returns local path.
One of volume id or share is used to return the values.
"""
if volume_id:
local_path = self._get_mount_point_for_share(
self._get_provider_location(volume_id))
elif share:
local_path = self._get_mount_point_for_share(share)
else:
raise exception.InvalidInput(
reason=_('A volume ID or share was not specified.'))
return local_path
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref)
LOG.debug('Managing volume %(vol)s with ref %(ref)s',
{'vol': volume['id'], 'ref': existing_ref})
if volume_name != volume['name']:
src = os.path.join(nfs_mount, volume_name)
dst = os.path.join(nfs_mount, volume['name'])
if not self._move_file(src, dst):
msg = (_('Failed to manage volume %s.') %
existing_ref['source-name'])
raise exception.VolumeDriverException(msg)
self._set_rw_permissions(dst)
LOG.info('Manage volume %s', volume['name'])
return {'provider_location': nfs_share}
def manage_existing_get_size(self, volume, existing_ref):
"""Returns size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref)
try:
volume_path = os.path.join(nfs_mount, volume_name)
vol_size = int(math.ceil(float(utils.get_file_size(volume_path)) /
units.Gi))
except OSError:
msg = (_('Failed to get size of volume %s') %
existing_ref['source-name'])
raise exception.VolumeDriverException(msg)
return vol_size
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
:param volume: Cinder volume to unmanage
"""
volume_path = self.local_path(volume)
LOG.info('Unmanage volume %s', volume_path)
def _convert_volume_share(self, volume_share):
"""Converts the share name to IP address."""
share_split = volume_share.rsplit(':', 1)
return utils.resolve_hostname(share_split[0]) + ':' + share_split[1]
def _get_share_mount(self, vol_ref):
"""Get the NFS share, NFS mount, and volume path from reference.
:param vol_ref: Driver-specific information used to identify a volume
:return: NFS Share, NFS mount, volume path
"""
if 'source-name' not in vol_ref or not vol_ref['source-name']:
msg = _('Volume reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=msg)
volume_share = self._convert_volume_share(vol_ref['source-name'])
for nfs_share in self._mounted_shares:
share = self._convert_volume_share(nfs_share)
(__, match, volume_name) = volume_share.partition(share)
if match == share:
volume_name = volume_name.lstrip('/')
nfs_mount = self._get_mount_point_for_share(nfs_share)
volume_path = os.path.join(nfs_mount, volume_name)
if os.path.isfile(volume_path):
LOG.debug('Found volume %(path)s on share %(share)s',
{'path': volume_path, 'share': nfs_share})
return nfs_share, nfs_mount, volume_name
else:
LOG.debug('Volume ref %(ref)s not on share %(share)s',
{'ref': vol_ref, 'share': nfs_share})
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=_('Volume not found.'))
def retype(self, context, volume, new_type, diff, host):
"""Retype from one volume type to another.
At this point Tintri VMstore does not differentiate between
volume types on the same array. This is a no-op for us.
"""
return True, None
def _ensure_shares_mounted(self):
# Mount image shares, we do not need to store these mounts
# in _mounted_shares
mounted_image_shares = []
if self._image_shares_config:
self._load_shares_config(self._image_shares_config)
for share in self.shares:
try:
self._ensure_share_mounted(share)
mounted_image_shares.append(share)
except Exception:
LOG.exception('Exception during mounting.')
self._mounted_image_shares = mounted_image_shares
# Mount Cinder shares
super(TintriDriver, self)._ensure_shares_mounted()
class TClient(object):
"""REST client for Tintri storage."""
def __init__(self, hostname, username, password,
api_version=default_api_version):
"""Initializes a connection to Tintri server."""
self.api_url = 'https://' + hostname + '/api'
self.api_version = api_version
self.session_id = self.login(username, password)
self.headers = {'content-type': 'application/json',
'cookie': 'JSESSIONID=' + self.session_id}
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.logout()
def get(self, api):
return self.get_query(api, None)
def get_query(self, api, query):
url = self.api_url + api
return requests.get(url, headers=self.headers,
params=query, verify=self.verify_ssl,
cert=self.ssl_cert_path)
def delete(self, api):
url = self.api_url + api
return requests.delete(url, headers=self.headers,
verify=self.verify_ssl,
cert=self.ssl_cert_path)
def put(self, api, payload):
url = self.api_url + api
return requests.put(url, data=json.dumps(payload),
headers=self.headers,
verify=self.verify_ssl,
cert=self.ssl_cert_path)
def post(self, api, payload):
url = self.api_url + api
return requests.post(url, data=json.dumps(payload),
headers=self.headers,
verify=self.verify_ssl,
cert=self.ssl_cert_path)
def login(self, username, password):
# Payload, header and URL for login
headers = {'content-type': 'application/json',
'Tintri-Api-Client':
'Tintri-Cinder-Driver-%s' % TintriDriver.VERSION}
payload = {'username': username,
'password': password,
'typeId': 'com.tintri.api.rest.vcommon.dto.rbac.'
'RestApiCredentials'}
url = self.api_url + '/' + self.api_version + '/session/login'
r = requests.post(url, data=json.dumps(payload),
headers=headers,
verify=self.verify_ssl,
cert=self.ssl_cert_path)
if r.status_code != 200:
msg = _('Failed to login for user %s.') % username
raise exception.VolumeDriverException(msg)
return r.cookies['JSESSIONID']
def logout(self):
url = self.api_url + '/' + self.api_version + '/session/logout'
requests.get(url, headers=self.headers,
verify=self.verify_ssl,
cert=self.ssl_cert_path)
@staticmethod
def _remove_prefix(volume_path, prefix):
if volume_path.startswith(prefix):
return volume_path[len(prefix):]
else:
return volume_path
def create_snapshot(self, volume_path, volume_name, volume_id,
snapshot_name, deletion_policy=None):
"""Creates a volume snapshot."""
request = {'typeId': 'com.tintri.api.rest.' + self.api_version +
'.dto.domain.beans.cinder.CinderSnapshotSpec',
'file': TClient._remove_prefix(volume_path, tintri_path),
'vmName': volume_name or snapshot_name,
'description': snapshot_name + ' (' + volume_id + ')',
'vmTintriUuid': volume_id,
'instanceId': volume_id,
'snapshotCreator': 'Cinder',
'deletionPolicy': deletion_policy,
}
payload = '/' + self.api_version + '/cinder/snapshot'
r = self.post(payload, request)
if r.status_code != 200:
msg = _('Failed to create snapshot for volume %s.') % volume_path
raise exception.VolumeDriverException(msg)
return r.json()[0]
def get_snapshot(self, volume_id):
"""Gets a volume snapshot."""
filter = {'vmUuid': volume_id}
payload = '/' + self.api_version + '/snapshot'
r = self.get_query(payload, filter)
if r.status_code != 200:
msg = _('Failed to get snapshot for volume %s.') % volume_id
raise exception.VolumeDriverException(msg)
if int(r.json()['filteredTotal']) > 0:
return r.json()['items'][0]['uuid']['uuid']
def get_image_snapshots_to_date(self, date):
filter = {'sortedBy': 'createTime',
'target': 'SNAPSHOT',
'consistency': 'CRASH_CONSISTENT',
'hasClone': 'No',
'type': 'CINDER_GENERATED_SNAPSHOT',
'contain': 'image-',
'limit': '100',
'page': '1',
'sortOrder': 'DESC',
'since': '1970-01-01T00:00:00',
'until': date,
}
payload = '/' + self.api_version + '/snapshot'
r = self.get_query(payload, filter)
if r.status_code != 200:
msg = _('Failed to get image snapshots.')
raise exception.VolumeDriverException(msg)
return r.json()['items']
def delete_snapshot(self, snapshot_uuid):
"""Deletes a snapshot."""
url = '/' + self.api_version + '/snapshot/'
self.delete(url + snapshot_uuid)
def clone_volume(self, snapshot_uuid, volume_path):
"""Clones a volume from snapshot."""
request = {'typeId': 'com.tintri.api.rest.' + self.api_version +
'.dto.domain.beans.cinder.CinderCloneSpec',
'destinationPaths':
[TClient._remove_prefix(volume_path, tintri_path)],
'tintriSnapshotUuid': snapshot_uuid,
}
url = '/' + self.api_version + '/cinder/clone'
r = self.post(url, request)
if r.status_code != 200 and r.status_code != 204:
msg = _('Failed to clone volume from snapshot %s.') % snapshot_uuid
raise exception.VolumeDriverException(msg)


@@ -1,84 +0,0 @@
======
Tintri
======

Tintri VMstore is a smart storage that sees, learns, and adapts for cloud and
virtualization. The Tintri Block Storage driver interacts with configured
VMstore running Tintri OS 4.0 and above. It supports various operations using
Tintri REST APIs and NFS protocol.

To configure the use of a Tintri VMstore with Block Storage, perform the
following actions:

#. Edit the ``etc/cinder/cinder.conf`` file and set the
   ``cinder.volume.drivers.tintri`` options:

   .. code-block:: ini

      volume_driver=cinder.volume.drivers.tintri.TintriDriver

      # Mount options passed to the nfs client. See section of the
      # nfs man page for details. (string value)
      nfs_mount_options = vers=3,lookupcache=pos,nolock

      #
      # Options defined in cinder.volume.drivers.tintri
      #

      # The hostname (or IP address) for the storage system (string
      # value)
      tintri_server_hostname = {Tintri VMstore Management IP}

      # User name for the storage system (string value)
      tintri_server_username = {username}

      # Password for the storage system (string value)
      tintri_server_password = {password}

      # API version for the storage system (string value)
      # tintri_api_version = v310

      # Following options needed for NFS configuration
      # File with the list of available nfs shares (string value)
      # nfs_shares_config = /etc/cinder/nfs_shares

      # Tintri driver will clean up unused image snapshots. With the following
      # option, users can configure how long unused image snapshots are
      # retained. Default retention policy is 30 days
      # tintri_image_cache_expiry_days = 30

      # Path to NFS shares file storing images.
      # Users can store Glance images in the NFS share of the same VMstore
      # mentioned in the following file. These images need to have additional
      # metadata ``provider_location`` configured in Glance, which should point
      # to the NFS share path of the image.
      # This option will enable Tintri driver to directly clone from Glance
      # image stored on same VMstore (rather than downloading image
      # from Glance)
      # tintri_image_shares_config = <Path to image NFS share>
      #
      # For example:
      # Glance image metadata
      # provider_location =>
      # nfs://<data_ip>/tintri/glance/84829294-c48b-4e16-a878-8b2581efd505

#. Edit the ``/etc/nova/nova.conf`` file and set the ``nfs_mount_options``:

   .. code-block:: ini

      [libvirt]
      nfs_mount_options = vers=3,nolock

#. Edit the ``/etc/cinder/nfs_shares`` file and add the Tintri VMstore mount
   points associated with the configured VMstore management IP in the
   ``cinder.conf`` file:

   .. code-block:: bash

      {vmstore_data_ip}:/tintri/{submount1}
      {vmstore_data_ip}:/tintri/{submount2}

.. config-table::
   :config-target: Tintri

   cinder.volume.drivers.tintri
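
Editor's note: the removed guide above mentions that images stored on the VMstore NFS share must carry a ``provider_location`` property in Glance before the driver can clone them directly. As a hedged sketch (not part of this patch), one way to set that property with the Glance v2 Python client might look like the following; the credentials, data IP, and image UUID are placeholders.

# Hypothetical illustration, not part of this patch: attach the NFS path of a
# pre-staged image to its Glance record so a driver that supports direct NFS
# cloning can find it.  All endpoints, credentials, and IDs are placeholders.
from keystoneauth1 import loading
from keystoneauth1 import session
import glanceclient

loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
    auth_url='http://controller:5000/v3',
    username='admin', password='secret', project_name='admin',
    user_domain_id='default', project_domain_id='default')
sess = session.Session(auth=auth)

glance = glanceclient.Client('2', session=sess)

data_ip = '203.0.113.10'                            # placeholder VMstore data IP
image_id = '84829294-c48b-4e16-a878-8b2581efd505'   # example UUID from the doc above
glance.images.update(
    image_id,
    provider_location='nfs://%s/tintri/glance/%s' % (data_ip, image_id))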


@@ -165,9 +165,6 @@ title=StorPool Storage Driver (storpool)
[driver.synology]
title=Synology Storage Driver (iSCSI)
[driver.tintri]
title=Tintri Storage Driver (NFS)
[driver.vrtshyperscale]
title=Veritas HyperScale Driver (veritas)
@@ -252,7 +249,6 @@ driver.rbd=complete
driver.sheepdog=complete
driver.storpool=complete
driver.synology=complete
driver.tintri=missing
driver.vrtshyperscale=complete
driver.vrtsaccess=missing
driver.vrtscnfs=complete
@@ -317,7 +313,6 @@ driver.rbd=complete
driver.sheepdog=complete
driver.storpool=complete
driver.synology=complete
driver.tintri=complete
driver.vrtsaccess=complete
driver.vrtscnfs=complete
driver.vrtshyperscale=missing
@@ -382,7 +377,6 @@ driver.rbd=missing
driver.sheepdog=missing
driver.storpool=missing
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
@@ -450,7 +444,6 @@ driver.rbd=missing
driver.sheepdog=missing
driver.storpool=missing
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
@@ -517,7 +510,6 @@ driver.rbd=complete
driver.sheepdog=missing
driver.storpool=complete
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
@@ -585,7 +577,6 @@ driver.rbd=missing
driver.sheepdog=missing
driver.storpool=missing
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
@@ -652,7 +643,6 @@ driver.rbd=complete
driver.sheepdog=missing
driver.storpool=missing
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
@@ -720,7 +710,6 @@ driver.rbd=missing
driver.sheepdog=missing
driver.storpool=complete
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
@@ -788,7 +777,6 @@ driver.rbd=missing
driver.sheepdog=missing
driver.storpool=missing
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing
@@ -853,7 +841,6 @@ driver.rbd=missing
driver.sheepdog=missing
driver.storpool=missing
driver.synology=missing
driver.tintri=missing
driver.vrtshyperscale=missing
driver.vrtsaccess=missing
driver.vrtscnfs=missing


@@ -78,3 +78,6 @@ release.
* HGST Flash Storage Suite Driver (vgc)
* ITRI DISCO Driver
* NetApp E-Series Driver
* Train
* Tintri Storage Driver


@@ -102,7 +102,6 @@ multipath: CommandFilter, multipath, root
multipathd: CommandFilter, multipathd, root
# cinder/volume/drivers/ibm/gpfs.py
# cinder/volume/drivers/tintri.py
# cinder/volume/drivers/netapp/dataontap/nfs_base.py
mv: CommandFilter, mv, root


@@ -0,0 +1,15 @@
---
upgrade:
  - |
    The Tintri storage driver has been removed after completion of its
    deprecation period without a reliable 3rd Party CI system being
    supported. Customers using the Tintri driver should not upgrade
    Cinder without first migrating all volumes from their Tintri backend
    to a supported storage backend. Failure to migrate volumes will
    result in no longer being able to access volumes backed by the Tintri
    storage backend.
other:
  - |
    The Tintri storage driver was marked unsupported in Stein due to
    3rd Party CI not meeting Cinder's requirements. As a result the
    driver is removed starting from the Train release.
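
Editor's note: the upgrade note above asks operators to migrate volumes off Tintri backends before upgrading. A minimal, hedged sketch of how an administrator might drive that with python-cinderclient is shown below; the backend host names, credentials, and endpoint are assumptions and should be validated against the actual deployment.

# Hypothetical illustration, not part of this patch: migrate every volume
# hosted on a Tintri backend to another backend before upgrading.
from keystoneauth1 import loading
from keystoneauth1 import session
from cinderclient import client as cinder_client

loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
    auth_url='http://controller:5000/v3',
    username='admin', password='secret', project_name='admin',
    user_domain_id='default', project_domain_id='default')
sess = session.Session(auth=auth)

cinder = cinder_client.Client('3', session=sess)

src_backend = 'cinder-vol1@tintri'          # placeholder Tintri backend host
dst_backend = 'cinder-vol1@lvm#lvm-pool'    # placeholder destination host

for vol in cinder.volumes.list(search_opts={'all_tenants': 1}):
    host = getattr(vol, 'os-vol-host-attr:host', '') or ''
    if host.startswith(src_backend):
        print('Migrating volume %s from %s to %s' % (vol.id, host, dst_backend))
        cinder.volumes.migrate_volume(vol, dst_backend,
                                      force_host_copy=False,
                                      lock_volume=True)

Migration is asynchronous; each volume's migration_status would need to be confirmed as successful before proceeding with the upgrade.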