Hedvig Cinder driver implementation

This patch introduces the Hedvig Cinder volume driver along with unit tests.

implements: blueprint hedvig-cinder

Change-Id: Ib701edb733567831ee80a317d0e3b3945a312760
Signed-off-by: Dhinesh Balasubramaniam <dhinesh@hedviginc.com>
dhinesh 2017-02-13 15:39:47 -08:00 committed by swathi
parent ea844fbee8
commit e08707b06f
8 changed files with 1626 additions and 0 deletions

@@ -0,0 +1,226 @@
# Copyright (c) 2017 Hedvig Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers.hedvig import hedvig_cinder as hdvg
from cinder.volume import qos_specs
from cinder.volume import volume_types
def _fake_volume_type(*args, **kwargs):
ctxt = context.get_admin_context()
type_ref = volume_types.create(ctxt, "qos_extra_specs", {})
qos_ref = qos_specs.create(ctxt, 'qos-specs', {})
qos_specs.associate_qos_with_type(ctxt, qos_ref['id'],
type_ref['id'])
qos_type = volume_types.get_volume_type(ctxt, type_ref['id'])
return qos_type
def _fake_volume(*args, **kwargs):
qos_type = _fake_volume_type()
    return fake_volume.fake_volume_obj(context.get_admin_context(),
name='hedvig',
volume_type_id=qos_type['id'],
volume_type=qos_type,
volume_name='hedvig',
display_name='hedvig',
display_description='test volume',
size=2)
class HedvigDriverTest(test.TestCase):
def setUp(self):
super(HedvigDriverTest, self).setUp()
self.context = context.get_admin_context()
self._create_fake_config()
self.assertIsNone(self.driver.do_setup(self.ctxt))
def _create_fake_config(self):
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.san_ip = '1.0.0.1'
self.configuration.san_login = 'dummy_user'
self.configuration.san_password = 'dummy_password'
self.configuration.san_clustername = 'dummy_cluster'
self.configuration.san_is_local = False
self.ctxt = context.get_admin_context()
self.vol = fake_volume.fake_volume_obj(self.context)
self.vol.volume_type = fake_volume.fake_volume_type_obj(self.context)
self.snap = fake_snapshot.fake_snapshot_obj(self.context)
self.snap.volume = self.vol
self.driver = hdvg.HedvigISCSIDriver(configuration=self.configuration)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.create_vdisk')
def test_create_volume(self, *args, **keywargs):
result = self.driver.create_volume(self.vol)
self.assertIsNone(result)
def test_create_volume_negative(self, *args, **keywargs):
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume, self.vol)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.delete_vdisk')
def test_create_delete_volume(self, *args, **keywargs):
result = self.driver.delete_volume(self.vol)
self.assertIsNone(result)
def test_create_delete_volume_negative(self, *args, **keywargs):
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.delete_volume, self.vol)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.resize_vdisk')
def test_extend_volume(self, *args, **keywargs):
self.assertIsNone(self.driver.extend_volume(self.vol, 10))
def test_extend_volume_negative(self, *args, **keywargs):
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.extend_volume, self.vol, 10)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.resize_vdisk')
def test_extend_volume_shrinking(self, *args, **keywargs):
volume = _fake_volume()
self.assertRaises(exception.VolumeDriverException,
self.driver.extend_volume, volume, 1)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.clone_vdisk')
def test_create_cloned_volume(self, *args, **keywargs):
result = self.driver.create_cloned_volume(self.vol, self.vol)
self.assertIsNone(result)
def test_create_cloned_volume_negative(self, *args, **keywargs):
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.create_cloned_volume, self.vol, self.vol)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.create_snapshot')
def test_create_snapshot(self, *args, **keywargs):
result = self.driver.create_snapshot(self.snap)
self.assertIsNone(result)
def test_create_snapshot_negative(self, *args, **keywargs):
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.create_snapshot, self.snap)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.delete_snapshot')
def test_delete_snapshot(self, *args, **keywargs):
result = self.driver.delete_snapshot(self.snap)
self.assertIsNone(result)
def test_delete_snapshot_negative(self, *args, **keywargs):
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.delete_snapshot, self.snap)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.clone_hedvig_snapshot')
def test_create_volume_from_snapshot(self, *args, **keywargs):
result = self.driver.create_volume_from_snapshot(self.vol, self.snap)
self.assertIsNone(result)
def test_create_volume_from_snapshot_negative(self, *args, **keywargs):
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume_from_snapshot,
self.vol, self.snap)
def test_do_setup(self):
self.driver.do_setup(self.context)
def test_do_setup_san_ip_negative(self):
self.configuration.san_ip = None
# check the driver for setup errors
self.assertRaises(exception.VolumeDriverException,
self.driver.do_setup, self.context)
self.configuration.san_ip = "1.0.0.1"
def test_do_setup_san_cluster_negative(self):
self.configuration.san_clustername = None
# check the driver for setup errors
self.assertRaises(exception.VolumeDriverException,
self.driver.do_setup, self.context)
self.configuration.san_clustername = "dummy_cluster"
def test_do_setup_san_login_negative(self):
self.configuration.san_login = None
# check the driver for setup errors
self.assertRaises(exception.VolumeDriverException,
self.driver.do_setup, self.context)
self.configuration.san_login = "dummy_user"
def test_do_setup_san_password_negative(self):
self.configuration.san_password = None
# check the driver for setup errors
self.assertRaises(exception.VolumeDriverException,
self.driver.do_setup, self.context)
self.configuration.san_password = "dummy_password"
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.list_targets')
def test_hedvig_lookup_tgt(self, *args, **keywargs):
host = "hostname"
result = self.driver.hedvig_lookup_tgt(host)
self.assertIsNone(result)
def test_hedvig_lookup_tgt_negative(self, *args, **keywargs):
host = "hostname"
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.hedvig_lookup_tgt, host)
def test_hedvig_get_lun_negative(self, *args, **keywargs):
host = "hostname"
volname = "volume"
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.hedvig_get_lun, host, volname)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.get_iqn')
def test_hedvig_get_iqn(self, *args, **keywargs):
host = "hostname"
result = self.driver.hedvig_get_iqn(host)
self.assertIsNotNone(result)
def test_hedvig_get_iqn_negative(self, *args, **keywargs):
host = "hostname"
self.driver.hrs = exception.VolumeDriverException()
self.assertRaises(exception.VolumeDriverException,
self.driver.hedvig_get_iqn, host)
@mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
'.list_targets')
def test_terminate_connection_no_connector(self, *args, **keywargs):
self.assertIsNone(self.driver.
terminate_connection(_fake_volume(), None))
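# Note: these unit tests are typically exercised through the project's test
# runner; for example, something along the lines of
#     tox -e py27 -- hedvig
# would select them by name. The tox environment and test selector shown here
# are illustrative and depend on the local checkout.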

@@ -0,0 +1,39 @@
# Copyright (c) 2018 Hedvig, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Config(object):
ReplicationPolicy = {
0: "Agnostic",
1: "RackAware",
2: "DataCenterAware",
}
DiskResidence = {
0: "Flash",
1: "HDD",
}
# Default Port Configuration
defaultHControllerPort_ = 50000
# Default Cinder Configuration
defaultCinderReplicationFactor = 3
defaultCinderDedupEnable = False
defaultCinderCompressEnable = False
defaultCinderCacheEnable = False
defaultCinderDiskResidence = DiskResidence[1]
defaultCinderReplicationPolicy = ReplicationPolicy[0]
retryCount = 5
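    # Note: the cache, dedup, and compression defaults above are consumed by
    # HedvigISCSIDriver.hedvig_create_virtualdisk() when a volume type's QoS
    # spec does not override them, and retryCount bounds the retry loop in
    # rest_client.RestClient.make_rest_call().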

@@ -0,0 +1,606 @@
# Copyright (c) 2018 Hedvig, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Hedvig Block Storage.
"""
import socket
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.hedvig import config
from cinder.volume.drivers.hedvig import rest_client
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
@interface.volumedriver
class HedvigISCSIDriver(driver.ISCSIDriver, san.SanDriver):
"""OpenStack Cinder driver to enable Hedvig storage.
Version history:
1.0 - Initial driver
"""
DEFAULT_VOL_BLOCK_SIZE = 4 * units.Ki
DEFAULT_CREATEDBY = "OpenStack"
DEFAULT_EXPORT_BLK_SIZE = 4096
DEFAULT_CAPACITY = units.Gi
DEFAULT_ISCSI_PORT = 3260
DEFAULT_TARGET_NAME = "iqn.2012-05.com.hedvig:storage."
VERSION = "1.0.0"
CI_WIKI_NAME = "Hedvig_CI"
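    # Note: target IQNs handed back from initialize_connection() are composed
    # as DEFAULT_TARGET_NAME + <target hostname> + '-' + <lun number>, for
    # example "iqn.2012-05.com.hedvig:storage.tgt1-0" (hostname and lun shown
    # here are illustrative).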
def __init__(self, *args, **kwargs):
super(HedvigISCSIDriver, self).__init__(*args, **kwargs)
self.group_stats = {}
self.hrs = None
def check_for_setup_error(self):
self.hrs.connect()
LOG.info("Initialization complete")
def do_setup(self, context):
# Ensure that the data required by hedvig are provided
required_config = ['san_login', 'san_password', 'san_ip',
'san_clustername']
for attr in required_config:
if not getattr(self.configuration, attr, None):
msg = _('Hedvig param %s is not set.') % attr
LOG.error(msg)
raise exception.VolumeDriverException(msg)
self.san_ip = self.configuration.san_ip
self.san_login = self.configuration.san_login
self.san_password = self.configuration.san_password
self.san_clustername = self.configuration.san_clustername
LOG.info('Initializing hedvig cinder driver with '
'server: %s', self.san_ip)
self.hrs = rest_client.RestClient(self.san_ip,
self.san_login,
self.san_password,
self.san_clustername)
def get_volume_stats(self, refresh=False):
        # We need to fetch the stats from the server.
if refresh is True:
total_capacity, free_capacity = self.update_volume_stats()
stats = dict()
stats["volume_backend_name"] = "hedvig"
stats["vendor_name"] = "Hedvig Inc"
stats["driver_version"] = self.VERSION
stats["storage_protocol"] = "iSCSI"
stats["total_capacity_gb"] = total_capacity
stats["free_capacity_gb"] = free_capacity
stats["QoS_support"] = True
self.group_stats = stats
return self.group_stats
def create_volume(self, volume):
"""Driver entry point for creating a new volume."""
try:
qos_specs = None
name, description, size = self.get_hedvig_volume_details(volume)
vol_type_id = volume.volume_type_id
if vol_type_id is not None:
qos = volume_types.get_volume_type_qos_specs(vol_type_id)
qos_specs = qos['qos_specs']
self.hedvig_create_virtualdisk(name, description, size, qos_specs)
except exception.VolumeDriverException:
msg = _('Failed to create volume %s. Rest API failed'
) % volume.name
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to create volume: %s') % volume.name
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def delete_volume(self, volume):
"""Driver entry point for deleting volume."""
LOG.debug("Deleting volume: %s", volume.name)
name = volume.name
try:
self.hedvig_delete_virtualdisk(name)
except exception.VolumeDriverException:
msg = _('Failed to delete volume %s. Rest API failed'
) % volume.name
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to delete volume: %s') % volume.name
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the volume."""
try:
LOG.debug('Create cloned volume called '
'volume_id = %(volume)s and src_vol_id = %(src_vol_id)s',
{'volume': volume.id, 'src_vol_id': src_vref.id})
name, desc, size = self.get_hedvig_volume_details(volume)
self.hrs.clone_vdisk(srcVolName=src_vref.name, dstVolName=name,
size=size)
except exception.VolumeDriverException:
msg = _('Failed to create cloned volume. Rest API failed')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to create cloned volume')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance.
Assign any created volume to a compute node/controllerVM so
        that it can be attached to an instance.
This driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined as follows -- similar
to _get_iscsi_properties.
"""
LOG.debug('Initializing connection. volume: %s, '
'connector: %s', volume, connector)
try:
computeHost = self.get_compute_host(connector)
volName = volume.name
tgtHost = self.hedvig_lookup_tgt(computeHost)
if tgtHost is None:
LOG.warning("No target registered for compute host %s",
computeHost)
tgtHost = self.hedvig_lookup_tgt()
lunnum = self.hedvig_get_lun(tgtHost, volName)
if lunnum == -1:
LOG.error('Failed to get lun for volume: %s, '
'hedvig controller: %s', volume, tgtHost)
raise exception.VolumeDriverException()
# Add access to the mgmt interface addr and iqn of compute host
LOG.debug("Calling add access %(host)s : %(vol)s : %(iqn)s ",
{'host': tgtHost, 'vol': volName,
'iqn': connector['initiator']})
self.hedvig_add_access(tgtHost, volName, connector['initiator'])
# Add access to both storage and mgmt interface addrs for
# iscsi discovery to succeed
LOG.debug("Calling hedvig_get_iqn %s", socket.getfqdn())
controller_host_iqn = self.hedvig_get_iqn(socket.getfqdn())
LOG.debug("Calling add access with %s : %s : %s ", tgtHost,
volName, controller_host_iqn)
self.hedvig_add_access(tgtHost, volName, controller_host_iqn)
targetName = ("%s%s-%s" % (self.DEFAULT_TARGET_NAME, tgtHost,
lunnum))
portal = ("%s:%s" % (socket.gethostbyname(tgtHost),
self.DEFAULT_ISCSI_PORT))
iscsi_properties = ({'target_discovered': True,
'target_iqn': targetName,
'target_portal': portal,
'target_lun': lunnum})
LOG.debug("iscsi_properties: %s", iscsi_properties)
return {'driver_volume_type': 'iscsi', 'data': iscsi_properties}
except exception.VolumeDriverException:
msg = _('Volume assignment to connect failed. volume: %s '
'Rest API failed') % volume
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Volume assignment to connect failed. volume: %s') % volume
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to detach volume from instance."""
LOG.debug("Terminating connection. volume: %s, connector: %s",
volume, connector)
try:
volName = volume.name
if connector is None:
LOG.debug("Removing ALL host connections for volume %s",
volume)
targetList = self.hrs.list_targets(computeHost=None)
for target in targetList:
self.hedvig_delete_lun(target, volName)
return
computeHost = self.get_compute_host(connector)
tgtHost = self.hedvig_lookup_tgt(computeHost)
if tgtHost is None:
LOG.debug("No target registered for compute host %s",
computeHost)
tgtHost = self.hedvig_lookup_tgt()
if tgtHost is None:
msg = _('Failed to get hedvig controller')
LOG.error(msg)
raise exception.VolumeDriverException(msg)
self.hedvig_delete_lun(tgtHost, volName)
except exception.VolumeDriverException:
msg = _('Failed to terminate connection. volume: %s '
'Rest API failed') % volume
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to terminate connection. volume: %s') % volume
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def create_snapshot(self, snapshot):
"""Driver entry point for creating a snapshot."""
try:
volName = snapshot.volume_name
snapshotName = snapshot.name
project = snapshot.project_id
snapshotId = snapshot.id
LOG.info("Creating snapshot. volName: %s, snapshotName: %s, "
"project: %s, snapshotId: %s", volName,
snapshotName, project, snapshotId)
self.hedvig_create_snapshot(volName, snapshotId)
except exception.VolumeDriverException:
            msg = (_('Failed to create snapshot. snapshotName: %s. '
                     'Rest API failed') % snapshotName)
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = (_('Failed to create snapshot. snapshotName: %s')
% snapshotName)
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
try:
volName = snapshot.volume_name
snapshotName = snapshot.display_name
project = snapshot.project_id
snapshotId = snapshot.id
LOG.info("Deleting snapshot. volName: %s, snapshotName: %s, "
"project: %s", volName, snapshotName, project)
self.hrs.delete_snapshot(snapshotName, volName, snapshotId)
except exception.VolumeDriverException:
            msg = _('Failed to delete snapshot: %s. '
                    'Rest API failed') % snapshotName
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to delete snapshot: %s') % snapshotName
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Driver entry point for creating a new volume from a snapshot.
This is the same as cloning.
"""
name, description, size = self.get_hedvig_volume_details(volume)
snapshotName = snapshot.display_name
snapshotId = snapshot.id
srcVolName = snapshot.volume_name
try:
LOG.info('Creating volume from snapshot. Name: %(volname)s,'
' SrcVolName: %(src)s, Snap_id: %(sid)s',
{'volname': name, 'src': srcVolName, 'sid': snapshotId})
self.hedvig_clone_snapshot(name, snapshotId, srcVolName, size)
except exception.VolumeDriverException:
msg = _('Failed to create volume from snapshot %s'
' Rest API failed') % snapshotName
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to create volume from snapshot %s') % snapshotName
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def extend_volume(self, volume, newSize):
"""Resizes virtual disk.
newSize should be greater than current size.
"""
try:
name, description, size = self.get_hedvig_volume_details(volume)
LOG.info('Resizing virtual disk. name: %s, '
'newSize: %s', name, newSize)
if (size / units.Gi) >= newSize:
err = _("Shrinking of volumes are not allowed")
LOG.error(err)
raise exception.VolumeDriverException(err)
self.hrs.resize_vdisk(
name,
newSize)
except exception.VolumeDriverException:
msg = _('Failed to extend volume. Rest API failed')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to extend volume')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def check_for_export(self, context, volume_id):
"""Not relevant to Hedvig"""
pass
def get_export(self, volume):
"""Get the iSCSI export details for a volume."""
pass
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume.
Irrelevant for Hedvig. Export is created during attachment to instance.
"""
pass
def create_export(self, context, volume, properties):
"""Driver entry point to get the export info for a new volume.
Irrelevant for Hedvig. Export is created during attachment to instance.
"""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume.
Irrelevant for Hedvig. Export should be deleted on detachment.
"""
pass
def detach_volume(self, context, volume, attachment):
pass
def hedvig_create_snapshot(self, vDiskName, snapshotId=None):
"""Hedvig call to create snapshot of vdisk."""
LOG.debug("Creating snapshot..%s , %s.", vDiskName, snapshotId)
try:
snapshotName = self.hrs.create_snapshot(vDiskName, snapshotId)
LOG.debug("Received snapshotName %s from rest call",
snapshotName)
return snapshotName
except exception.VolumeDriverException:
msg = _('Failed to create snapshot for vdisk %s '
'Rest API failed') % vDiskName
LOG.exception(msg)
            raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to create snapshot for vdisk %s') % vDiskName
LOG.exception(msg)
            raise exception.VolumeDriverException(msg)
def update_volume_stats(self):
LOG.debug('Update volume stats called')
try:
total_capacity, free_capacity = self.hrs.update_volume_stats()
except exception.VolumeDriverException:
msg = _('Unable to fetch volume stats. Rest API failed')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Unable to fetch volume stats')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
return (total_capacity, free_capacity)
def get_hedvig_volume_details(self, volume):
volName = volume.name
project = volume.project_id
displayName = volume.display_name
displayDescription = volume.display_description
description = ("%s\n%s\n%s" % (project, displayName,
displayDescription))
size = volume.size * units.Gi
return volName, description, size
def get_compute_host(self, connector):
connectorHost = socket.getfqdn(connector['host'])
localHost = socket.gethostname()
computeHost = localHost
if connectorHost != localHost:
computeHost = connectorHost
return computeHost
def hedvig_lookup_tgt(self, host=None):
"""Get the tgt instance associated with the compute host"""
LOG.debug("Looking up hedvig controller for compute host: %s",
host)
try:
targetList = self.hrs.list_targets(computeHost=host)
tgt = None
if len(targetList) > 0:
tgt = targetList[0]
LOG.debug("Found hedvig controller: %s, for host: %s", tgt, host)
return tgt
except exception.VolumeDriverException:
            msg = _('Failed to get hedvig controller for compute %s. '
                    'Rest API failed') % host
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
except Exception:
msg = _('Failed to get hedvig controller for compute %s ') % host
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def hedvig_delete_lun(self, tgtHost, vDiskName):
try:
LOG.debug("Deleting lun. hedvig controller: %s, vDiskName: %s,",
tgtHost, vDiskName)
self.hrs.unmap_lun(tgtHost, vDiskName)
except Exception:
msg = _('Failed to delete lun')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def hedvig_get_lun(self, tgtHost, vDiskName):
"""Looks up lun based on tgthost and vDiskName.
If lun does not exist then call add_lun and return the lun number.
If lun exists, just return the lun number.
"""
LOG.debug("Getting lun. hedvig controller: %s, vDiskName: %s",
tgtHost, vDiskName)
try:
lunNo = self.hrs.get_lun(tgtHost, vDiskName)
if lunNo > -1:
return lunNo
# If the lun is not found, add lun for the vdisk
LOG.debug("Calling add lun on target : %s vdisk %s", tgtHost,
vDiskName)
self.hrs.add_lun(tgtHost, vDiskName, False)
lunNo = self.hrs.get_lun(tgtHost, vDiskName)
return lunNo
except Exception:
msg = _('Failed to get lun for vdisk: %s') % vDiskName
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def hedvig_get_iqn(self, hostname):
"""Looks up the iqn for the given host."""
try:
iqn = self.hrs.get_iqn(hostname)
LOG.debug("Got IQN: %s, for hostname: %s", iqn, hostname)
return iqn
except Exception:
msg = _('Failed to get iqn for hostname: %s') % hostname
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def hedvig_add_access(self, tgtHost, volName, initiator):
"""Adds access to LUN for initiator's ip/iqn."""
try:
LOG.info("Adding access. hedvig controller: %s, vol name %s, "
"initiator: %s", tgtHost, volName, initiator)
self.hrs.add_access(tgtHost, volName, "iqn", initiator)
except Exception:
msg = _('Failed to add access. hedvig controller: %s') % tgtHost
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def hedvig_create_virtualdisk(self, name, description, size, qos_specs):
try:
            LOG.info('Creating virtual disk. name: %s, description: %s, '
                     'size: %s', name, description, size)
vDiskInfo = {
'name': name.encode('utf-8'),
'blockSize': HedvigISCSIDriver.DEFAULT_VOL_BLOCK_SIZE,
'size': size,
'createdBy':
HedvigISCSIDriver.DEFAULT_CREATEDBY,
'description': description.encode('utf-8'),
'residence': config.Config.DiskResidence[1],
'replicationFactor': 3,
'replicationPolicy': 'Agnostic',
'clusteredFileSystem': False,
'exportedBlockSize': HedvigISCSIDriver.DEFAULT_EXPORT_BLK_SIZE,
'cacheEnabled': config.Config.defaultCinderCacheEnable,
'diskType': 'BLOCK',
'immutable': False,
'deduplication': config.Config.defaultCinderDedupEnable,
'compressed': config.Config.defaultCinderCompressEnable,
'cloudEnabled': False,
'cloudProvider': 0,
'isClone': False,
'consistency': 'STRONG',
'scsi3pr': False
}
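            # When the volume type carries a QoS spec, qos_specs is the
            # 'qos_specs' entry of the dict returned by
            # volume_types.get_volume_type_qos_specs(); its 'specs' sub-dict
            # holds the key/value pairs, e.g. (hypothetical values):
            #     {'specs': {'dedup_enable': 'true',
            #                'compressed_enable': 'true',
            #                'replication_factor': '3'}}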
if qos_specs:
kvs = qos_specs['specs']
for key, value in kvs.items():
if "dedup_enable" == key:
val = self.parse_and_get_boolean_entry(
value)
if val:
vDiskInfo['deduplication'] = val
elif "compressed_enable" == key:
val = self.parse_and_get_boolean_entry(
value)
if val:
vDiskInfo['compressed'] = True
elif "cache_enable" == key:
val = self.parse_and_get_boolean_entry(
value.encode('utf-8'))
if val:
vDiskInfo['cacheEnabled'] = val
elif "encryption" == key:
val = self.parse_and_get_boolean_entry(
value.encode('utf-8'))
if val:
vDiskInfo['encryption'] = val
elif "replication_factor" == key:
val = int(value)
if val > 0:
vDiskInfo['replicationFactor'] = val
elif "replication_policy" == key:
val = value.strip(" \n\t").lower()
if val:
vDiskInfo['replicationPolicy'] = val
elif "disk_residence" == key:
val = value.strip(" \n\t").lower()
if val:
vDiskInfo['residence'] = val
elif "replication_policy_info" == key:
val = value.split(',')
if len(val) != 0:
dcList = []
for dataCenter in val:
dcList.append(dataCenter.encode('utf-8'))
vDiskInfo['dataCenters'] = dcList
if vDiskInfo['deduplication'] and (
vDiskInfo['compressed'] is False):
LOG.error('Cannot create dedup enabled disk without'
' compression enabled')
raise exception.VolumeDriverException()
self.hrs.create_vdisk(vDiskInfo)
except Exception:
msg = _('Failed to create volume')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def hedvig_delete_virtualdisk(self, name):
LOG.info('Deleting virtual disk. name - %s', name)
try:
self.hrs.delete_vdisk(name)
except Exception:
msg = _('Failed to delete Vdisk')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def hedvig_clone_snapshot(self, dstVolName,
openstackSID, srcVolName, size):
LOG.info("Cloning a snapshot.dstVolName: %s,openstackSID:%s,"
"srcVolName: %s", dstVolName, openstackSID, srcVolName)
try:
self.hrs.clone_hedvig_snapshot(
dstVolName=dstVolName,
snapshotID=openstackSID,
srcVolName=srcVolName,
size=size)
except Exception:
msg = _('Failed to clone snapshot')
LOG.exception(msg)
raise exception.VolumeDriverException(msg)
def parse_and_get_boolean_entry(self, entry):
entry = entry.strip(" \t\n")
return strutils.bool_from_string(entry)

@@ -0,0 +1,625 @@
# Copyright (c) 2018 Hedvig, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Rest Client for Hedvig Openstack implementation.
"""
import json
import random
from oslo_log import log as logging
from oslo_utils import units
from six.moves import http_client
from six.moves import urllib
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.hedvig import config
LOG = logging.getLogger(__name__)
class RestClient(object):
def __init__(self, nodes, username, password, cluster):
"""Hedvig Rest Client
        :param nodes: comma-separated hostnames of the nodes in the cluster
:param username: username of the cluster
:param password: password of the cluster
:param cluster: clustername of the cluster
"""
LOG.debug('init called with %s , %s', nodes, cluster)
self.username = username
self.password = password
self.cluster = cluster
self.nodes = nodes
self.nodeMap = {}
def connect(self):
self.store_node_map(self.nodes)
        if len(self.nodeMap) == 0:
msg = _('Unable to connect to the nodes')
raise exception.VolumeDriverException(msg)
def get_session_id(self, node):
"""Retrieves the session Id
:param node: hostname of the node
:return: session ID which is valid for 15 minutes
"""
LOG.debug("get_session_id called with node %s", node)
data = {
'request': {
'type': 'Login',
'category': 'UserManagement',
'params': {
'userName': self.username,
'password': self.password,
'cluster': self.cluster
}
}
}
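        # The endpoint is expected to answer with JSON along the lines of
        # (illustrative only):
        #     {'status': 'ok', 'result': {'sessionId': '<opaque token>'}}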
obj = self.query(data=data, node=node)
if obj['status'] != 'ok':
msg = _('GetSessionId failure')
raise exception.VolumeDriverException(msg)
return (obj['result']['sessionId']).encode('utf-8')
def get_all_cluster_nodes(self, node):
"""Retrieves all the nodes present in the cluster
:param node: hostname of the node
:return: nodes present in the cluster
"""
LOG.debug("get_all_cluster_nodes called with node %s", node)
data = {
'request': {
'type': 'ListClusterNodes',
'category': 'VirtualDiskManagement',
'sessionId': self.get_session_id(node),
}
}
obj = self.make_rest_call(data=data, node=node)
return obj['result']
def store_node_map(self, nodes):
"""Stores all the node information along with their sessionID in dict
:param nodes: hostname of the nodes in the cluster
"""
LOG.debug("store_node_map called with node %s", nodes)
exitFlag = False
node_list = []
for n in nodes.split(','):
node_list.append(n.strip())
for node in node_list:
try:
LOG.debug("Attempting store_node_map with node %s", node)
nodeList = self.get_all_cluster_nodes(node)
exitFlag = True
for node_ in nodeList:
self.nodeMap[node_] = self.get_session_id(node_)
except urllib.error.HTTPError as e:
if e.code == http_client.NOT_FOUND:
LOG.debug("Client not found")
else:
LOG.debug("Client not available")
except Exception:
LOG.exception('Retrying store_node_map with next node')
if exitFlag:
return
def refresh_session_ids(self):
"""In case of session failure , it refreshes all the
session ID stored in nodeMap
"""
LOG.debug("refresh_session_ids called")
if len(self.nodeMap.keys()) == 0:
msg = _('NodeMap is empty')
raise exception.VolumeDriverException(msg)
for node, val in self.nodeMap.items():
self.nodeMap[node] = self.get_session_id(node)
def query(self, data, node):
"""Makes a rest query with given params
:param data: json given as param to Rest call
:param node: hostname of the node
:return: REST response
"""
data = urllib.parse.urlencode(data)
req = urllib.request.Request("http://%s/rest/" % node, data)
response = urllib.request.urlopen(req)
json_str = response.read()
obj = json.loads(json_str)
LOG.debug("Rest call output %s ", json_str)
return obj
def make_rest_call(self, data, node):
"""Makes a rest Call and retries it 5 times in case of rest failure
:param data: json given as param to Rest call
:param node: hostname of the node
:return:
"""
retryCount = 0
while retryCount < config.Config.retryCount:
retryCount = retryCount + 1
try:
LOG.debug("Rest call started with node %s "
"and data: %s", node, data)
obj = self.query(data, node)
if obj['status'] == 'ok' or obj['status'] == 'warning':
return obj
# We need to refresh sessionIDs if earlier ones are expired
elif 'session-failure' in obj['status']:
self.refresh_session_ids()
session_id = self.retrieve_session_id(node)
data['request']['sessionId'] = session_id
except Exception as e:
LOG.debug("Exception details: data - %s, node - %s "
"exception - %s", data, node, e.args)
node = self.get_pages_host()
else:
msg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(msg)
def create_vdisk(self, vDiskInfo):
"""Rest call to create a vdisk
        :param vDiskInfo: json passed to the rest call
"""
LOG.debug("create_vdisk called")
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
        sizeInGB = vDiskInfo['size'] / units.Gi
        sizeInJson = {'unit': "GB",
                      'value': float(sizeInGB)}
vDiskInfo['size'] = sizeInJson
data = {
'request': {
'type': 'AddVirtualDisk',
'category': 'VirtualDiskManagement',
'params': vDiskInfo,
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['result'][0]['status'] != 'ok':
errmsg = _('create_vdisk REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def resize_vdisk(self, vDiskName, value):
"""Rest Call to resize Vdisk
:param vDiskName: name of the vdisk
:param value: size of the resized vdisk in GB
"""
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
LOG.debug("resize_vdisk called")
data = {
'request': {
'type': 'ResizeDisks',
'category': 'VirtualDiskManagement',
'params': {
'virtualDisks': [vDiskName.encode('utf-8')],
'size': {
'unit': "GB",
'value': value
},
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['result'][0]['status'] != 'ok':
errmsg = _('resize_vdisk REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def delete_vdisk(self, vDiskName):
"""Rest call to delete Vdisk
:param vDiskName: name of the vdisk
:return: Status of the rest call
"""
LOG.debug("delete_vdisk called %s", vDiskName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'DeleteVDisk',
'category': 'VirtualDiskManagement',
'params': {
'virtualDisks': [vDiskName.encode('utf-8')],
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
if "couldn't be found" not in obj['message']:
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def get_lun(self, target, vDiskName):
"""Retrieve lun number
:param target: hostname of the target
:param vDiskName: name of the Vdisk
:return: lun number
"""
try:
LOG.debug("get_lun called for vdisk %s", vDiskName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'GetLun',
'category': 'VirtualDiskManagement',
'params': {
'virtualDisk': vDiskName.encode('utf-8'),
'target': target.encode('utf-8'),
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
return -1
return obj['result']['lun']
except Exception:
return -1
def get_iqn(self, host):
"""Retrieve IQN of the host.
:param host: hostname
:return: iqn of the host
"""
LOG.debug("get_iqn called for host %s", host)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'GetIqn',
'category': 'VirtualDiskManagement',
'params': {
'host': host.encode('utf-8'),
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
if "IQN not found" in obj['message']:
return "ALL"
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
return obj['result']['iqn']
def add_lun(self, tgtHost, vDiskName, readonly):
"""Rest Call to Add Lun
:param tgtHost: hostname of target
:param vDiskName: name of vdisk
:param readonly: boolean readonly value
"""
LOG.debug(
"add_lun called with target %s, vdisk %s", tgtHost, vDiskName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'AddLun',
'category': 'VirtualDiskManagement',
'params': {
'virtualDisks': [vDiskName.encode('utf-8')],
'targets': [tgtHost.encode('utf-8')],
'readonly': readonly,
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
restCallStatus = obj['result'][0]['status']
tgts = obj['result'][0]['targets']
addLunStatus = tgts[0]['status']
if restCallStatus != 'ok' or addLunStatus != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def unmap_lun(self, target, vDiskName):
"""Rest call to unmap Lun
:param target: hostname of the target
:param vDiskName: name of the vdisk
:return: true if successful
"""
LOG.debug("unmap_lun called with target %s, vdisk %s", target,
vDiskName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'UnmapLun',
'category': 'VirtualDiskManagement',
'params': {
'virtualDisk': vDiskName.encode('utf-8'),
'target': target.encode('utf-8'),
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
msg = "is not mapped to the specified controller"
            if msg not in obj['message']:
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
return True
def add_access(self, host, vDiskName, type, address):
"""Rest Call to Add access
:param host: hostname
:param vDiskName: name of vdisk
:param type: type is iqn for openstack
:param address: iqn address
"""
LOG.debug(
"add_access called with param host %s, vdisk %s",
host, vDiskName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'PersistACLAccess',
'category': 'VirtualDiskManagement',
'params': {
'virtualDisks': [vDiskName.encode('utf-8')],
'host': host.encode('utf-8'),
'type': type.encode('utf-8'),
'address': address.encode('utf-8')
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok' or obj['result'][0]['status'] != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def create_snapshot(self, vDiskName, snapshotId):
"""Rest Call to create snapshot
:param vDiskName: name of the vdisk
:param snapshotId: snapshotId of the snapshot
:return: status of the rest call
"""
LOG.debug("create_snapshot called with vdisk %s", vDiskName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'MakeSnapshot',
'category': 'SnapshotManagement',
'params': {
'virtualDisks': [vDiskName.encode('utf-8')],
},
'sessionId': sessionId,
}
}
if snapshotId:
param = data['request']['params']
param['openstackSID'] = snapshotId.encode('utf-8')
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok' or obj['result'][0]['status'] != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
return obj['result'][0]['snapshotName'].encode('utf-8')
def clone_vdisk(self, srcVolName, dstVolName, size):
"""Rest Call to clone vdisk
"""
LOG.debug("clonevdisk called vdisk %s, %s", srcVolName, dstVolName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'CloneVdisk',
'category': 'SnapshotManagement',
'params': {
'srcVolName': srcVolName.encode('utf-8'),
'cloneVolName': dstVolName.encode('utf-8'),
'size': size
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def get_val_in_gb(self, value, unit):
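        # Normalize a capacity reported as (value, unit) to GB; for example,
        # get_val_in_gb(2, 'TB') yields 2048 and get_val_in_gb(units.Gi, 'B')
        # yields 1.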
unitRef = {
'B': 1,
'KB': units.Ki,
'MB': units.Mi,
'GB': units.Gi,
'TB': units.Ti,
'PB': units.Pi
}
return value * unitRef[unit] / units.Gi
def update_volume_stats(self):
"""Fetch cluster level details"""
LOG.debug("Update volume stats called")
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'ClusterInformation',
'category': 'ClusterWatch',
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
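        # obj['result'] is expected to carry capacity information roughly of
        # the form (illustrative values):
        #     {'capacity': {'total': {'value': 100, 'units': 'TB'},
        #                   'used': {'value': 25, 'units': 'TB'}}}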
total = obj['result']['capacity']['total']['value']
used = obj['result']['capacity']['used']['value']
capacity = obj['result']['capacity']
total_unit = capacity['total']['units'].encode('utf-8')
used_unit = capacity['used']['units'].encode('utf-8')
total_capacity = self.get_val_in_gb(total, total_unit)
used_capacity = self.get_val_in_gb(used, used_unit)
free_capacity = total_capacity - used_capacity
LOG.debug("total_capacity %s free_capactity %s", total_capacity,
free_capacity)
return (total_capacity, free_capacity)
def clone_hedvig_snapshot(self, dstVolName, snapshotID, srcVolName, size):
"""Rest Call to clone hedvig snapshot
"""
LOG.debug("clone_hedvig_snapshot %s, %s", dstVolName, srcVolName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'CloneVdisk',
'category': 'SnapshotManagement',
'params': {
'cloneVolName': dstVolName.encode('utf-8'),
'openstackSID': snapshotID.encode('utf-8'),
'srcVolName': srcVolName.encode('utf-8'),
'size': size
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def delete_snapshot(self, snapshotName, vDiskName, snapshotId):
"""Rest call to delete snapshot
:param snapshotName: name of the snapshot to be deleted
"""
LOG.debug(
"delete_snapshot called with snapshot %s", snapshotName)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
data = {
'request': {
'type': 'DeleteSnapshot',
'category': 'SnapshotManagement',
'params': {
'snapshotName': snapshotName.encode('utf-8'),
'openstackSID': snapshotId.encode('utf-8'),
'openstackVolName': vDiskName.encode('utf-8')
},
'sessionId': sessionId,
}
}
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
def list_targets(self, computeHost):
"""Rest Call to ListTargets for a given hostname
:param computeHost: hostname of the computeHost
:return: list of targets
"""
LOG.debug("list_targets called with computehost %s", computeHost)
node = self.get_pages_host()
sessionId = self.retrieve_session_id(node)
targets = []
data = {
'request': {
'type': 'ListTargets',
'category': 'VirtualDiskManagement',
'sessionId': sessionId,
}
}
if computeHost:
data['request']['params'] = {}
data['request']['params']['computeHost'] = computeHost
obj = self.make_rest_call(data=data, node=node)
if obj['status'] != 'ok':
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)
for ch in obj['result']:
if ch['protocol'] == 'block':
targets.append(ch['target'])
return targets
def get_pages_host(self):
"""Returns a random host from nodemap
:return: hostname
"""
LOG.debug("get_pages_host called")
if not self.nodeMap:
msg = _('NodeMap is empty')
raise exception.VolumeDriverException(msg)
        return random.choice(list(self.nodeMap.keys()))
def retrieve_session_id(self, node):
"""returns sessionID of the given node
:param node: hostname of the node
:return: session ID of the given host
"""
LOG.debug("retrieve_session_id called with node %s", node)
if len(self.nodeMap.keys()) == 0:
msg = _('NodeMap is empty')
raise exception.VolumeDriverException(msg)
return self.nodeMap[str(node)]

@@ -0,0 +1,126 @@
====================
Hedvig Volume Driver
====================
Hedvig provides software-defined storage for enterprises building private,
hybrid, or multi-cloud environments. Hedvig's patented Universal Data Plane
technology forms a distributed, scale-out cluster that transforms commodity
servers or cloud computing into a unified data fabric.
The Hedvig Cinder Driver interacts with a configured backend Hedvig Cluster
using REST APIs.
Using the Hedvig Volume Driver
------------------------------
With the Hedvig Volume Driver for OpenStack, you can:
- Integrate public and private clouds:
Build a unified hybrid environment to easily migrate to or from your
data center and public clouds.
- Set granular virtual disk policies:
Assign enterprise-class features on a per volume basis to best fit your
application requirements.
- Connect to any compute environment:
Use with any hypervisor, application, or bare-metal system.
- Grow seamlessly with an elastic cluster:
Scale storage performance and capacity on-the-fly with off-the-shelf x86
servers.
- Deliver predictable performance:
Receive consistent high-IOPS performance for demanding applications
through massive parallelism, dedicated flash, and edge cache configurations.
Requirement
-----------
Hedvig Volume Driver, version 1.0.0 and later, supports Hedvig release 3.0 and
later.
Supported operations
--------------------
Hedvig supports the core features of OpenStack Cinder:
- Create and delete volumes
- Attach and detach volumes
- Create and delete snapshots
- Create volume from snapshot
- Get volume stats
- Copy image to volume
- Copy volume to image
- Clone volume
- Extend volume
- Enable deduplication, encryption, cache, compression, custom replication
policy on a volume level using volume-type extra-specs
Hedvig Volume Driver configuration
-----------------------------------
The Hedvig Volume Driver can be configured by editing the cinder.conf file
located in the /etc/cinder/ directory.
.. code-block:: ini

   [DEFAULT]
   enabled_backends=hedvig

   [HEDVIG_BACKEND_NAME]
   volume_driver=cinder.volume.drivers.hedvig.hedvig_cinder.HedvigISCSIDriver
   san_ip=<Comma-separated list of HEDVIG_IP/HOSTNAME of the cluster nodes>
   san_login=HEDVIG_USER
   san_password=HEDVIG_PASSWORD
   san_clustername=HEDVIG_CLUSTER
Run the following commands on the OpenStack Cinder Node to create a Volume
Type for Hedvig:
.. code-block:: console

   cinder type-create HEDVIG_VOLUME_TYPE
   cinder type-key HEDVIG_VOLUME_TYPE set volume_backend_name=HEDVIG_BACKEND_NAME
This section contains definitions of the terms used above.
HEDVIG_IP/HOSTNAME
    The IP address or hostnames of the Hedvig Storage Cluster Nodes

HEDVIG_USER
    Username to login to the Hedvig Cluster with minimum ``super user``
    (admin) privilege

HEDVIG_PASSWORD
    Password to login to the Hedvig Cluster

HEDVIG_CLUSTER
    Name of the Hedvig Cluster
.. note::

   Restart the ``cinder-volume`` service after updating the ``cinder.conf``
   file to apply the changes and to initialize the Hedvig Volume Driver.
Hedvig QoS Spec parameters and values
-------------------------------------
- dedup_enable true/false
- compressed_enable true/false
- cache_enable true/false
- replication_factor 1-6
- replication_policy Agnostic/RackAware/DataCenterAware
- replication_policy_info comma-separated list of data center names
(applies only to a replication_policy of DataCenterAware)
- disk_residence Flash/HDD
- encryption true/false
Creating a Hedvig Cinder Volume with custom attributes (QoS Specs)
------------------------------------------------------------------
1. Create a QoS Spec with the list of attributes that you want to
associate with a volume. For example, to create a Cinder Volume with
deduplication enabled, create a QoS Spec called dedup_enable with
dedup_enable=true
#. Create a new volume type and associate this QoS Spec with it,
OR associate the QoS Spec with an existing volume type.
#. Every Cinder Volume that you create of the above volume type
will have deduplication enabled.
#. If you do create a new volume type, make sure to add the key
volume_backend_name so OpenStack knows that the Hedvig Volume
Driver handles all requests for this volume.
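
For example, the steps above might look like the following. The names
``dedup_enable`` (QoS Spec), ``HEDVIG_DEDUP_VOLUME_TYPE``, and the volume name
are placeholders; substitute the QoS Spec and volume type IDs reported by the
``cinder qos-create`` and ``cinder type-create`` commands.

.. code-block:: console

   cinder qos-create dedup_enable dedup_enable=true
   cinder type-create HEDVIG_DEDUP_VOLUME_TYPE
   cinder type-key HEDVIG_DEDUP_VOLUME_TYPE set volume_backend_name=HEDVIG_BACKEND_NAME
   cinder qos-associate QOS_SPEC_ID VOLUME_TYPE_ID
   cinder create --volume-type HEDVIG_DEDUP_VOLUME_TYPE --name hedvig-dedup-vol 1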

@@ -75,3 +75,4 @@ Driver Configuration Reference
drivers/zadara-volume-driver
drivers/zfssa-iscsi-driver
drivers/zfssa-nfs-driver
drivers/hedvig-volume-driver

@@ -0,0 +1,3 @@
---
features:
- Added backend driver for Hedvig iSCSI storage.