Update fuel plugin repo for Kaminario

Update fuel plugin repo for Kaminario with the latest
Kaminario driver files and configuration files

Change-Id: I060393bc66f20a9dbc497524423088cbf9ccf8f8
Co-Authored-By: Chaithanya Kopparthi <chaithanyak@biarca.com>
Pradip Rawat 2016-09-30 16:15:42 +05:30
parent 602a88bcdf
commit be18f78d2d
13 changed files with 724 additions and 443 deletions

View File

@ -1,4 +1,52 @@
fuel-plugin-cinder-kaminario
============
Cinder Kaminario plugin for Fuel
=============================
Plugin description
Overview
--------
The plugin configures a Kaminario backend for Cinder using the multibackend feature.
The plugin supports Kaminario K2 All-Flash arrays.
This repo contains all necessary files to build the Cinder Kaminario Fuel plugin.
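For illustration, below is a minimal sketch of the kind of backend entry the plugin renders into /etc/cinder/cinder.conf; the section name, backend name, IP and credentials are placeholders, while the keys match the settings written by the plugin's Puppet manifests.
```ini
# Hypothetical single-backend example; all values are placeholders.
[DEFAULT]
enabled_backends = kaminario_iscsi_1

[kaminario_iscsi_1]
volume_driver = cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver
volume_backend_name = kaminario_iscsi_1
san_ip = 192.0.2.10
san_login = admin
san_password = <storage password>
```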
Requirements
------------
| Requirement | Version/Comment |
|------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
| Mirantis OpenStack compatibility | 9.0 |
| Kaminario K2 All-Flash array is reachable via one of the Mirantis OpenStack networks | |
How to build the plugin:
- Install the Fuel plugin builder (fpb)
- Clone the plugin repo
```bash
# git clone https://github.com/openstack/fuel-plugin-cinder-kaminario.git
```
- Build the Fuel plugin
```bash
# cd fuel-plugin-cinder-kaminario/
# fpb --build .
```
- Install the plugin
```bash
# fuel plugins --install cinder_kaminario-1.0-1.0.0-1.noarch.rpm
```
- Verify that the plugin is installed correctly
```bash
# fuel plugins
```
| id | name | version | package_version |
| ---|------------------------------------------------|---------|---------------- |
| 1 | cinder_kaminario-1.0-1.0.0-1.noarch.rpm | 1.0.0 | 4.0.0 |

View File

@ -0,0 +1 @@
include kaminario::controller_config

View File

@ -1,8 +0,0 @@
ini_setting { 'parser':
ensure => present,
path => '/etc/puppet/puppet.conf',
section => 'main',
setting => 'parser',
value => 'future',
}

View File

@ -149,10 +149,6 @@ class GlanceConnectionFailed(CinderException):
message = _("Connection to glance failed: %(reason)s")
class ProgrammingError(CinderException):
message = _('Programming error in Cinder: %(reason)s')
class NotAuthorized(CinderException):
message = _("Not authorized.")
code = 403
@ -204,10 +200,6 @@ class InvalidVolumeType(Invalid):
message = _("Invalid volume type: %(reason)s")
class InvalidGroupType(Invalid):
message = _("Invalid group type: %(reason)s")
class InvalidVolume(Invalid):
message = _("Invalid volume: %(reason)s")
@ -247,10 +239,6 @@ class DeviceUnavailable(Invalid):
message = _("The device in the path %(path)s is unavailable: %(reason)s")
class SnapshotUnavailable(VolumeBackendAPIException):
message = _("The snapshot is unavailable: %(data)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
@ -269,10 +257,6 @@ class InvalidGlobalAPIVersion(Invalid):
"is %(min_ver)s and maximum is %(max_ver)s.")
class MissingRequired(Invalid):
message = _("Missing required element '%(element)s' in request body.")
class APIException(CinderException):
message = _("Error while requesting %(service)s API.")
@ -292,10 +276,6 @@ class RPCTimeout(CinderException):
code = 502
class Duplicate(CinderException):
pass
class NotFound(CinderException):
message = _("Resource could not be found.")
code = 404
@ -306,10 +286,6 @@ class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class MessageNotFound(NotFound):
message = _("Message %(message_id)s could not be found.")
class VolumeAttachmentNotFound(NotFound):
message = _("Volume attachment could not be found with "
"filter: %(filter)s .")
@ -362,30 +338,6 @@ class VolumeTypeInUse(CinderException):
"volumes present with the type.")
class GroupTypeNotFound(NotFound):
message = _("Group type %(group_type_id)s could not be found.")
class GroupTypeNotFoundByName(GroupTypeNotFound):
message = _("Group type with name %(group_type_name)s "
"could not be found.")
class GroupTypeAccessNotFound(NotFound):
message = _("Group type access not found for %(group_type_id)s / "
"%(project_id)s combination.")
class GroupTypeSpecsNotFound(NotFound):
message = _("Group Type %(group_type_id)s has no specs with "
"key %(group_specs_key)s.")
class GroupTypeInUse(CinderException):
message = _("Group Type %(group_type_id)s deletion is not allowed with "
"groups present with the type.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
@ -430,32 +382,6 @@ class ServiceTooOld(Invalid):
message = _("Service is too old to fulfil this request.")
class WorkerNotFound(NotFound):
message = _("Worker with %s could not be found.")
def __init__(self, message=None, **kwargs):
keys_list = ('{0}=%({0})s'.format(key) for key in kwargs)
placeholder = ', '.join(keys_list)
self.message = self.message % placeholder
super(WorkerNotFound, self).__init__(message, **kwargs)
class WorkerExists(Duplicate):
message = _("Worker for %(type)s %(id)s already exists.")
class ClusterNotFound(NotFound):
message = _('Cluster %(id)s could not be found.')
class ClusterHasHosts(Invalid):
message = _("Cluster %(id)s still has hosts.")
class ClusterExists(Duplicate):
message = _("Cluster %(name)s already exists.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
@ -514,6 +440,10 @@ class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class Duplicate(CinderException):
pass
class VolumeTypeExists(Duplicate):
message = _("Volume Type %(id)s already exists.")
@ -531,23 +461,6 @@ class VolumeTypeEncryptionNotFound(NotFound):
message = _("Volume type encryption for type %(type_id)s does not exist.")
class GroupTypeExists(Duplicate):
message = _("Group Type %(id)s already exists.")
class GroupTypeAccessExists(Duplicate):
message = _("Group type access for %(group_type_id)s / "
"%(project_id)s combination already exists.")
class GroupTypeEncryptionExists(Invalid):
message = _("Group type encryption for type %(type_id)s already exists.")
class GroupTypeEncryptionNotFound(NotFound):
message = _("Group type encryption for type %(type_id)s does not exist.")
class MalformedRequestBody(CinderException):
message = _("Malformed message body: %(reason)s")
@ -615,18 +528,10 @@ class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
class UnexpectedOverQuota(QuotaError):
message = _("Unexpected over quota on %(name)s.")
class BackupLimitExceeded(QuotaError):
message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
class ImageLimitExceeded(QuotaError):
message = _("Image quota exceeded")
class DuplicateSfVolumeNames(Duplicate):
message = _("Detected more than one volume with name %(vol_name)s")
@ -640,15 +545,6 @@ class VolumeTypeUpdateFailed(CinderException):
message = _("Cannot update volume_type %(id)s")
class GroupTypeCreateFailed(CinderException):
message = _("Cannot create group_type with "
"name %(name)s and specs %(group_specs)s")
class GroupTypeUpdateFailed(CinderException):
message = _("Cannot update group_type %(id)s")
class UnknownCmd(VolumeDriverException):
message = _("Unknown or unsupported command %(cmd)s")
@ -875,29 +771,16 @@ class VolumeGroupCreationFailed(CinderException):
message = _('Failed to create Volume Group: %(vg_name)s')
class VolumeNotDeactivated(CinderException):
message = _('Volume %(name)s was not deactivated in time.')
class VolumeDeviceNotFound(CinderException):
message = _('Volume device not found at %(device)s.')
# Driver specific exceptions
# Dell
class DellDriverRetryableException(VolumeBackendAPIException):
message = _("Retryable Dell Exception encountered")
# Pure Storage
class PureDriverException(VolumeDriverException):
message = _("Pure Storage Cinder driver failure: %(reason)s")
class PureRetryableException(VolumeBackendAPIException):
message = _("Retryable Pure Storage Exception encountered")
# SolidFire
class SolidFireAPIException(VolumeBackendAPIException):
message = _("Bad response from SolidFire API")
@ -1111,59 +994,26 @@ class XIODriverException(VolumeDriverException):
# Violin Memory drivers
class ViolinInvalidBackendConfig(VolumeDriverException):
class ViolinInvalidBackendConfig(CinderException):
message = _("Volume backend config is invalid: %(reason)s")
class ViolinRequestRetryTimeout(VolumeDriverException):
class ViolinRequestRetryTimeout(CinderException):
message = _("Backend service retry timeout hit: %(timeout)s sec")
class ViolinBackendErr(VolumeBackendAPIException):
class ViolinBackendErr(CinderException):
message = _("Backend reports: %(message)s")
class ViolinBackendErrExists(VolumeBackendAPIException):
class ViolinBackendErrExists(CinderException):
message = _("Backend reports: item already exists")
class ViolinBackendErrNotFound(NotFound):
class ViolinBackendErrNotFound(CinderException):
message = _("Backend reports: item not found")
class ViolinResourceNotFound(NotFound):
message = _("Backend reports: %(message)s")
class BadHTTPResponseStatus(VolumeDriverException):
message = _("Bad HTTP response status %(status)s")
# ZADARA STORAGE VPSA driver exception
class ZadaraServerCreateFailure(VolumeDriverException):
message = _("Unable to create server object for initiator %(name)s")
class ZadaraServerNotFound(NotFound):
message = _("Unable to find server object for initiator %(name)s")
class ZadaraVPSANoActiveController(VolumeDriverException):
message = _("Unable to find any active VPSA controller")
class ZadaraAttachmentsNotFound(NotFound):
message = _("Failed to retrieve attachments for volume %(name)s")
class ZadaraInvalidAttachmentInfo(Invalid):
message = _("Invalid attachment info for volume %(name)s: %(reason)s")
class ZadaraVolumeNotFound(VolumeDriverException):
message = _("%(reason)s")
# ZFSSA NFS driver exception.
class WebDAVClientError(CinderException):
message = _("The WebDAV request failed. Reason: %(msg)s, "
@ -1217,8 +1067,7 @@ class DotHillNotTargetPortal(CinderException):
# Sheepdog
class SheepdogError(VolumeBackendAPIException):
message = _("An error has occurred in SheepdogDriver. "
"(Reason: %(reason)s)")
message = _("An error has occured in SheepdogDriver. (Reason: %(reason)s)")
class SheepdogCmdError(SheepdogError):
@ -1277,16 +1126,3 @@ class KaminarioCinderDriverException(VolumeDriverException):
class KaminarioRetryableException(VolumeDriverException):
message = _("Kaminario retryable exception: %(reason)s")
# Synology driver
class SynoAPIHTTPError(CinderException):
message = _("HTTP exit code: [%(code)s]")
class SynoAuthError(CinderException):
message = _("Synology driver authentication failed: %(reason)s.")
class SynoLUNNotExist(CinderException):
message = _("LUN not found by UUID: %(uuid)s.")

View File

@ -17,6 +17,7 @@
import math
import re
import threading
import time
import eventlet
from oslo_config import cfg
@ -30,6 +31,7 @@ import six
import cinder
from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume.drivers.san import san
@ -38,8 +40,9 @@ from cinder.volume import utils as vol_utils
krest = importutils.try_import("krest")
K2_MIN_VERSION = '2.2.0'
K2_LOCK_PREFIX = 'Kaminario'
K2_LOCK_NAME = 'Kaminario'
MAX_K2_RETRY = 5
K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
LOG = logging.getLogger(__name__)
kaminario1_opts = [
@ -139,8 +142,6 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
self.configuration.append_config_values(kaminario2_opts)
self.replica = None
self._protocol = None
k2_lock_sfx = self.configuration.safe_get('volume_backend_name') or ''
self.k2_lock_name = "%s-%s" % (K2_LOCK_PREFIX, k2_lock_sfx)
def check_for_setup_error(self):
if krest is None:
@ -290,6 +291,62 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
raise exception.KaminarioCinderDriverException(
reason=six.text_type(ex.message))
@kaminario_logger
def _create_failover_volume_replica(self, volume, vg_name, vol_name):
"""Volume replica creation in K2 needs session and remote volume.
- create a session
- create a volume in the volume group
"""
session_name = self.get_session_name(volume.id)
rsession_name = self.get_rep_name(session_name)
rvg_name = self.get_rep_name(vg_name)
rvol_name = self.get_rep_name(vol_name)
rvg = self.target.search("volume_groups", name=rvg_name).hits[0]
rvol = self.target.search("volumes", name=rvol_name).hits[0]
k2peer_rs = self.target.search("replication/peer_k2arrays",
mgmt_host=self.configuration.san_ip)
if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0:
k2peer = k2peer_rs.hits[0]
else:
msg = _("Unable to find K2peer in source K2:")
LOG.error(msg)
raise exception.KaminarioCinderDriverException(reason=msg)
try:
LOG.debug("Creating source session with name: %(sname)s and "
" target session name: %(tname)s",
{'sname': rsession_name, 'tname': session_name})
tgt_ssn = self.target.new("replication/sessions")
tgt_ssn.replication_peer_k2array = k2peer
tgt_ssn.auto_configure_peer_volumes = "False"
tgt_ssn.local_volume_group = rvg
tgt_ssn.replication_peer_volume_group_name = vg_name
tgt_ssn.remote_replication_session_name = session_name
tgt_ssn.name = rsession_name
tgt_ssn.rpo = self.replica.rpo
tgt_ssn.save()
LOG.debug("Creating remote volume with name: %s",
rvol_name)
self.target.new("replication/peer_volumes",
local_volume=rvol,
name=vol_name,
replication_session=tgt_ssn).save()
tgt_ssn.state = "in_sync"
tgt_ssn.save()
except Exception as ex:
LOG.exception(_LE("Replication for the volume %s has "
"failed."), rvol_name)
self._delete_by_ref(self.target, "replication/sessions",
rsession_name, 'session')
self._delete_by_ref(self.client, "replication/sessions",
session_name, 'remote session')
self._delete_by_ref(self.client, "volumes", vol_name, "volume")
self._delete_by_ref(self.client, "volume_groups", vg_name, "vg")
raise exception.KaminarioCinderDriverException(
reason=six.text_type(ex.message))
def _delete_by_ref(self, device, url, name, msg):
rs = device.search(url, name=name)
for result in rs.hits:
@ -313,27 +370,183 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
def failover_host(self, context, volumes, secondary_id=None):
"""Failover to replication target."""
volume_updates = []
back_end_ip = None
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
'cinder-volume')
if secondary_id and secondary_id != self.replica.backend_id:
LOG.error(_LE("Kaminario driver received failover_host "
"request, But backend is non replicated device"))
raise exception.UnableToFailOver(reason=_("Failover requested "
"on non replicated "
"backend."))
for v in volumes:
vol_name = self.get_volume_name(v['id'])
rv = self.get_rep_name(vol_name)
if self.target.search("volumes", name=rv).total:
self._failover_volume(v)
volume_updates.append(
{'volume_id': v['id'],
'updates':
{'replication_status':
fields.ReplicationStatus.FAILED_OVER}})
else:
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
return self.replica.backend_id, volume_updates
if (service.active_backend_id and
service.active_backend_id != self.configuration.san_ip):
self.snap_updates = []
rep_volumes = []
# update status for non-replicated primary volumes
for v in volumes:
vol_name = self.get_volume_name(v['id'])
vol = self.client.search("volumes", name=vol_name)
if v.replication_status != K2_REP_FAILED_OVER and vol.total:
status = 'available'
if v.volume_attachment:
map_rs = self.client.search("mappings",
volume=vol.hits[0])
status = 'in-use'
if map_rs.total:
map_rs.hits[0].delete()
volume_updates.append({'volume_id': v['id'],
'updates':
{'status': status}})
else:
rep_volumes.append(v)
# In-sync from secondary array to primary array
for v in rep_volumes:
vol_name = self.get_volume_name(v['id'])
vol = self.client.search("volumes", name=vol_name)
rvol_name = self.get_rep_name(vol_name)
rvol = self.target.search("volumes", name=rvol_name)
session_name = self.get_session_name(v['id'])
rsession_name = self.get_rep_name(session_name)
ssn = self.target.search("replication/sessions",
name=rsession_name)
if ssn.total:
tgt_ssn = ssn.hits[0]
ssn = self.client.search("replication/sessions",
name=session_name)
if ssn.total:
src_ssn = ssn.hits[0]
if (tgt_ssn.state == 'failed_over' and
tgt_ssn.current_role == 'target' and vol.total and src_ssn):
map_rs = self.client.search("mappings", volume=vol.hits[0])
if map_rs.total:
map_rs.hits[0].delete()
tgt_ssn.state = 'in_sync'
tgt_ssn.save()
self._check_for_status(src_ssn, 'in_sync')
if (rvol.total and src_ssn.state == 'in_sync' and
src_ssn.current_role == 'target'):
gen_no = self._create_volume_replica_user_snap(self.target,
tgt_ssn)
self.snap_updates.append({'tgt_ssn': tgt_ssn,
'gno': gen_no,
'stime': time.time()})
LOG.debug("The target session: %s state is "
"changed to in sync", rsession_name)
self._is_user_snap_sync_finished()
# Delete secondary volume mappings and create snapshot
for v in rep_volumes:
vol_name = self.get_volume_name(v['id'])
vol = self.client.search("volumes", name=vol_name)
rvol_name = self.get_rep_name(vol_name)
rvol = self.target.search("volumes", name=rvol_name)
session_name = self.get_session_name(v['id'])
rsession_name = self.get_rep_name(session_name)
ssn = self.target.search("replication/sessions",
name=rsession_name)
if ssn.total:
tgt_ssn = ssn.hits[0]
ssn = self.client.search("replication/sessions",
name=session_name)
if ssn.total:
src_ssn = ssn.hits[0]
if (rvol.total and src_ssn.state == 'in_sync' and
src_ssn.current_role == 'target'):
map_rs = self.target.search("mappings",
volume=rvol.hits[0])
if map_rs.total:
map_rs.hits[0].delete()
gen_no = self._create_volume_replica_user_snap(self.target,
tgt_ssn)
self.snap_updates.append({'tgt_ssn': tgt_ssn,
'gno': gen_no,
'stime': time.time()})
self._is_user_snap_sync_finished()
# changing source sessions to failed-over
for v in rep_volumes:
vol_name = self.get_volume_name(v['id'])
vol = self.client.search("volumes", name=vol_name)
rvol_name = self.get_rep_name(vol_name)
rvol = self.target.search("volumes", name=rvol_name)
session_name = self.get_session_name(v['id'])
rsession_name = self.get_rep_name(session_name)
ssn = self.target.search("replication/sessions",
name=rsession_name)
if ssn.total:
tgt_ssn = ssn.hits[0]
ssn = self.client.search("replication/sessions",
name=session_name)
if ssn.total:
src_ssn = ssn.hits[0]
if (rvol.total and src_ssn.state == 'in_sync' and
src_ssn.current_role == 'target'):
src_ssn.state = 'failed_over'
src_ssn.save()
self._check_for_status(tgt_ssn, 'suspended')
LOG.debug("The target session: %s state is "
"changed to failed over", session_name)
src_ssn.state = 'in_sync'
src_ssn.save()
LOG.debug("The target session: %s state is "
"changed to in sync", session_name)
rep_status = fields.ReplicationStatus.DISABLED
volume_updates.append({'volume_id': v['id'],
'updates':
{'replication_status': rep_status}})
back_end_ip = self.configuration.san_ip
else:
"""Failover to replication target."""
for v in volumes:
vol_name = self.get_volume_name(v['id'])
rv = self.get_rep_name(vol_name)
if self.target.search("volumes", name=rv).total:
self._failover_volume(v)
volume_updates.append(
{'volume_id': v['id'],
'updates':
{'replication_status': K2_REP_FAILED_OVER}})
else:
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
back_end_ip = self.replica.backend_id
return back_end_ip, volume_updates
def _create_volume_replica_user_snap(self, k2, sess):
snap = k2.new("snapshots")
snap.is_application_consistent = "False"
snap.replication_session = sess
snap.save()
return snap.generation_number
def _is_user_snap_sync_finished(self):
# waiting for user snapshot to be synced
while len(self.snap_updates) > 0:
for l in self.snap_updates:
sess = l.get('tgt_ssn')
gno = l.get('gno')
stime = l.get('stime')
sess.refresh()
if (sess.generation_number == gno and
sess.current_snapshot_progress == 100
and sess.current_snapshot_id is None):
if time.time() - stime > 300:
gen_no = self._create_volume_replica_user_snap(
self.target,
sess)
self.snap_updates.append({'tgt_ssn': sess,
'gno': gen_no,
'stime': time.time()})
self.snap_updates.remove(l)
eventlet.sleep(1)
@kaminario_logger
def create_volume_from_snapshot(self, volume, snapshot):
@ -385,9 +598,14 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
snapshot.volume.size * units.Ki,
self.configuration.volume_dd_blocksize,
sparse=True)
self._kaminario_disconnect_volume(src_attach_info,
dest_attach_info)
self.terminate_connection(volume, properties)
self.terminate_connection(cview, properties)
cview.delete()
except Exception as ex:
self._kaminario_disconnect_volume(src_attach_info,
dest_attach_info)
self.terminate_connection(cview, properties)
self.terminate_connection(volume, properties)
cview.delete()
@ -410,6 +628,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
src_name = self.get_volume_name(src_vref.id)
src_vol = self.client.search("volumes", name=src_name)
src_map = self.client.search("mappings", volume=src_vol)
src_attach_info = dest_attach_info = None
if src_map.total != 0:
msg = _("K2 driver does not support clone of a attached volume. "
"To get this done, create a snapshot from the attached "
@ -428,10 +647,13 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
src_vref.size * units.Ki,
self.configuration.volume_dd_blocksize,
sparse=True)
self._kaminario_disconnect_volume(src_attach_info,
dest_attach_info)
self.terminate_connection(volume, properties)
self.terminate_connection(src_vref, properties)
except Exception as ex:
self._kaminario_disconnect_volume(src_attach_info,
dest_attach_info)
self.terminate_connection(src_vref, properties)
self.terminate_connection(volume, properties)
self.delete_volume(volume)
@ -499,6 +721,26 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
self._delete_by_ref(self.target, "volume_groups",
rvg_name, "remote vg")
@kaminario_logger
def _delete_failover_volume_replica(self, volume, vg_name, vol_name):
rvg_name = self.get_rep_name(vg_name)
rvol_name = self.get_rep_name(vol_name)
session_name = self.get_session_name(volume.id)
rsession_name = self.get_rep_name(session_name)
tgt_ssn = self.target.search('replication/sessions',
name=rsession_name).hits[0]
tgt_ssn.state = 'idle'
tgt_ssn.save()
tgt_ssn.delete()
LOG.debug("Searching and deleting snapshots for target volume group "
"and target volume: %(vol)s, %(vg)s in K2.",
{'vol': rvol_name, 'vg': rvg_name})
rvg = self.target.search('volume_groups', name=rvg_name).hits
rsnaps = self.target.search('snapshots', volume_group=rvg).hits
for s in rsnaps:
s.delete()
@kaminario_logger
def _check_for_status(self, obj, status):
while obj.state != status:
@ -664,9 +906,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
@kaminario_logger
def _get_volume_object(self, volume):
vol_name = self.get_volume_name(volume.id)
if volume.replication_status == 'failed-over':
if volume.replication_status == K2_REP_FAILED_OVER:
vol_name = self.get_rep_name(vol_name)
self.client = self.target
LOG.debug("Searching volume : %s in K2.", vol_name)
vol_rs = self.client.search("volumes", name=vol_name)
if not hasattr(vol_rs, 'hits') or vol_rs.total == 0:
@ -696,9 +937,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
# Get volume object
if type(volume).__name__ != 'RestObject':
vol_name = self.get_volume_name(volume.id)
if volume.replication_status == 'failed-over':
if volume.replication_status == K2_REP_FAILED_OVER:
vol_name = self.get_rep_name(vol_name)
self.client = self.target
LOG.debug("Searching volume: %s in K2.", vol_name)
volume_rs = self.client.search("volumes", name=vol_name)
if hasattr(volume_rs, "hits") and volume_rs.total != 0:
@ -779,12 +1019,13 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
return replica
def _get_replica_status(self, vg_name):
vg = self.client.search("volume_groups", name=vg_name).hits[0]
if self.client.search("replication/sessions",
local_volume_group=vg).total != 0:
return True
else:
return False
vg_rs = self.client.search("volume_groups", name=vg_name)
if vg_rs.total:
vg = vg_rs.hits[0]
if self.client.search("replication/sessions",
local_volume_group=vg).total:
return True
return False
def manage_existing(self, volume, existing_ref):
vol_name = existing_ref['source-name']
@ -853,6 +1094,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
def retype(self, ctxt, volume, new_type, diff, host):
old_type = volume.get('volume_type')
vg_name = self.get_volume_group_name(volume.id)
vol_name = self.get_volume_name(volume.id)
vol_rs = self.client.search("volumes", name=vol_name)
if vol_rs.total:
vol = vol_rs.hits[0]
vmap = self.client.search("mappings", volume=vol).total
old_rep_type = self._get_replica_status(vg_name)
new_rep_type = self._get_is_replica(new_type)
new_prov_type = self._get_is_dedup(new_type)
@ -867,8 +1113,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
self._delete_replication(volume)
return True
elif not new_rep_type and not old_rep_type:
LOG.debug("Use '--migration-policy on-demand' to change 'dedup "
"without replication'<->'nodedup without replication'.")
msg = ("Use '--migration-policy on-demand' to change 'dedup "
"without replication'<->'nodedup without replication'.")
if vol_rs.total and vmap:
msg = "Unattach volume and {0}".format(msg)
LOG.debug(msg)
return False
else:
LOG.error(_LE('Change from type1: %(type1)s to type2: %(type2)s '
@ -879,15 +1128,28 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
def _add_replication(self, volume):
vg_name = self.get_volume_group_name(volume.id)
vol_name = self.get_volume_name(volume.id)
LOG.debug("Searching volume group with name: %(name)s",
{'name': vg_name})
vg = self.client.search("volume_groups", name=vg_name).hits[0]
LOG.debug("Searching volume with name: %(name)s",
{'name': vol_name})
vol = self.client.search("volumes", name=vol_name).hits[0]
self._create_volume_replica(volume, vg, vol, self.replica.rpo)
if volume.replication_status == K2_REP_FAILED_OVER:
self._create_failover_volume_replica(volume, vg_name, vol_name)
else:
LOG.debug("Searching volume group with name: %(name)s",
{'name': vg_name})
vg = self.client.search("volume_groups", name=vg_name).hits[0]
LOG.debug("Searching volume with name: %(name)s",
{'name': vol_name})
vol = self.client.search("volumes", name=vol_name).hits[0]
self._create_volume_replica(volume, vg, vol, self.replica.rpo)
def _delete_replication(self, volume):
vg_name = self.get_volume_group_name(volume.id)
vol_name = self.get_volume_name(volume.id)
self._delete_volume_replica(volume, vg_name, vol_name)
if volume.replication_status == K2_REP_FAILED_OVER:
self._delete_failover_volume_replica(volume, vg_name, vol_name)
else:
self._delete_volume_replica(volume, vg_name, vol_name)
def _kaminario_disconnect_volume(self, *attach_info):
for info in attach_info:
if (info and info.get('connector') and
info.get('conn', {}).get('data') and info.get('device')):
info['connector'].disconnect_volume(info['conn']['data'],
info['device'])

View File

@ -17,13 +17,14 @@ import six
from oslo_log import log as logging
from cinder import coordination
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE
from cinder.objects import fields
from cinder.volume.drivers.kaminario import kaminario_common as common
from cinder.zonemanager import utils as fczm_utils
K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
LOG = logging.getLogger(__name__)
kaminario_logger = common.kaminario_logger
@ -32,13 +33,10 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
"""Kaminario K2 FC Volume Driver.
Version history:
1.0 - Initial driver
1.1 - Added manage/unmanage and extra-specs support for nodedup
1.2 - Added replication support
1.3 - Added retype support
1.0.2.0 - Initial driver
"""
VERSION = '1.3'
VERSION = '1.0.2.0'
# ThirdPartySystems wiki page name
CI_WIKI_NAME = "Kaminario_K2_CI"
@ -51,7 +49,7 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
@fczm_utils.AddFCZone
@kaminario_logger
@coordination.synchronized('{self.k2_lock_name}')
@utils.synchronized(common.K2_LOCK_NAME, external=True)
def initialize_connection(self, volume, connector):
"""Attach K2 volume to host."""
# Check wwpns in host connector.
@ -59,6 +57,12 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
msg = _("No wwpns found in host connector.")
LOG.error(msg)
raise exception.KaminarioCinderDriverException(reason=msg)
# To support replication failback
temp_client = None
if (hasattr(volume, 'replication_status') and
volume.replication_status == K2_REP_FAILED_OVER):
temp_client = self.client
self.client = self.target
# Get target wwpns.
target_wwpns = self.get_target_info(volume)
# Map volume.
@ -66,6 +70,9 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
# Create initiator-target mapping.
target_wwpns, init_target_map = self._build_initiator_target_map(
connector, target_wwpns)
# To support replication failback
if temp_client:
self.client = temp_client
# Return target volume information.
return {'driver_volume_type': 'fibre_channel',
'data': {"target_discovered": True,
@ -75,8 +82,14 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
@fczm_utils.RemoveFCZone
@kaminario_logger
@coordination.synchronized('{self.k2_lock_name}')
@utils.synchronized(common.K2_LOCK_NAME, external=True)
def terminate_connection(self, volume, connector, **kwargs):
# To support replication failback
temp_client = None
if (hasattr(volume, 'replication_status') and
volume.replication_status == K2_REP_FAILED_OVER):
temp_client = self.client
self.client = self.target
super(KaminarioFCDriver, self).terminate_connection(volume, connector)
properties = {"driver_volume_type": "fibre_channel", "data": {}}
host_name = self.get_initiator_host_name(connector)
@ -90,14 +103,13 @@ class KaminarioFCDriver(common.KaminarioCinderDriver):
connector, target_wwpns)
properties["data"] = {"target_wwn": target_wwpns,
"initiator_target_map": init_target_map}
# To support replication failback
if temp_client:
self.client = temp_client
return properties
@kaminario_logger
def get_target_info(self, volume):
rep_status = fields.ReplicationStatus.FAILED_OVER
if (hasattr(volume, 'replication_status') and
volume.replication_status == rep_status):
self.client = self.target
LOG.debug("Searching target wwpns in K2.")
fc_ports_rs = self.client.search("system/fc_ports")
target_wwpns = []

View File

@ -17,30 +17,26 @@ import six
from oslo_log import log as logging
from cinder import coordination
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE
from cinder import interface
from cinder.objects import fields
from cinder.volume.drivers.kaminario import kaminario_common as common
ISCSI_TCP_PORT = "3260"
K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
LOG = logging.getLogger(__name__)
kaminario_logger = common.kaminario_logger
@interface.volumedriver
class KaminarioISCSIDriver(common.KaminarioCinderDriver):
"""Kaminario K2 iSCSI Volume Driver.
Version history:
1.0 - Initial driver
1.1 - Added manage/unmanage and extra-specs support for nodedup
1.2 - Added replication support
1.3 - Added retype support
1.0.2.0 - Initial driver
"""
VERSION = '1.3'
VERSION = '1.0.2.0'
# ThirdPartySystems wiki page name
CI_WIKI_NAME = "Kaminario_K2_CI"
@ -51,13 +47,22 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver):
self._protocol = 'iSCSI'
@kaminario_logger
@coordination.synchronized('{self.k2_lock_name}')
@utils.synchronized(common.K2_LOCK_NAME, external=True)
def initialize_connection(self, volume, connector):
"""Attach K2 volume to host."""
# To support replication failback
temp_client = None
if (hasattr(volume, 'replication_status') and
volume.replication_status == K2_REP_FAILED_OVER):
temp_client = self.client
self.client = self.target
# Get target_portal and target iqn.
iscsi_portal, target_iqn = self.get_target_info(volume)
# Map volume.
lun = self.k2_initialize_connection(volume, connector)
# To support replication failback
if temp_client:
self.client = temp_client
# Return target volume information.
return {"driver_volume_type": "iscsi",
"data": {"target_iqn": target_iqn,
@ -66,17 +71,22 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver):
"target_discovered": True}}
@kaminario_logger
@coordination.synchronized('{self.k2_lock_name}')
@utils.synchronized(common.K2_LOCK_NAME, external=True)
def terminate_connection(self, volume, connector, **kwargs):
# To support replication failback
temp_client = None
if (hasattr(volume, 'replication_status') and
volume.replication_status == K2_REP_FAILED_OVER):
temp_client = self.client
self.client = self.target
super(KaminarioISCSIDriver, self).terminate_connection(volume,
connector)
# To support replication failback
if temp_client:
self.client = temp_client
@kaminario_logger
def get_target_info(self, volume):
rep_status = fields.ReplicationStatus.FAILED_OVER
if (hasattr(volume, 'replication_status') and
volume.replication_status == rep_status):
self.client = self.target
LOG.debug("Searching first iscsi port ip without wan in K2.")
iscsi_ip_rs = self.client.search("system/net_ips", wan_port="")
iscsi_ip = target_iqn = None

View File

@ -1,18 +1,18 @@
require 'hiera'
require 'facter'
hiera = Hiera.new(:config => '/etc/hiera.yaml')
cinder_kaminario=hiera.lookup('cinder_kaminario', {}, {},'nil')
5.times do |i|
add_backend = "add_backend_" + i.to_s
type_name = "type_name_" + i.to_s
if cinder_kaminario[add_backend] == true
default_type = cinder_kaminario[type_name]
end
end
Facter.add("default_volume_type") do
setcode do
default_type
hiera = Hiera.new(:config => '/etc/hiera.yaml')
cinder_kaminario=hiera.lookup('cinder_kaminario', {}, {},'nil')
5.times do |i|
default_type = "default_type_" + i.to_s
type_name = "type_name_" + i.to_s
if cinder_kaminario[default_type] == true
default_type = cinder_kaminario[type_name]
setcode do
default_type
end
break
end
end
end

View File

@ -0,0 +1,38 @@
class kaminario::controller_config{
$config_file='/etc/cinder/cinder.conf'
$plugin_settings = hiera('cinder_kaminario')
if $plugin_settings['scheduler_default_filters'] != ''
{
ini_subsetting {"scheduler_default_filters":
ensure => present,
section => 'DEFAULT',
key_val_separator => '=',
path => $config_file,
setting => 'scheduler_default_filters',
subsetting => $plugin_settings['scheduler_default_filters'],
subsetting_separator => ',',
}
}
if $plugin_settings['scheduler_default_weighers'] != ''
{
cinder_config {
"DEFAULT/scheduler_default_weighers" : value => $plugin_settings['scheduler_default_weighers'];
}
}
if $plugin_settings['rpc_response_timeout'] != ''
{
cinder_config {
"DEFAULT/rpc_response_timeout" : value => $plugin_settings['rpc_response_timeout'];
}
}
cinder_config {
"DEFAULT/default_volume_type" : value => $default_volume_type
}~> Exec[cinder_api]
exec {'cinder_api':
command => '/usr/sbin/service cinder-api restart',}
}

View File

@ -1,36 +1,52 @@
class kaminario::config {
$num = [ '0', '1', '2', '3', '4', '5' ]
recursion { 'start':
value => 5,
}
define recursion(
$value
) {
$plugin_settings = hiera('cinder_kaminario')
each($num) |$value| {
config {"plugin_${value}":
add_backend => $plugin_settings["add_backend_${value}"],
cinder_node => $plugin_settings["cinder_node_${value}"],
storage_protocol => $plugin_settings["storage_protocol_${value}"],
backend_name => $plugin_settings["backend_name_${value}"],
storage_user => $plugin_settings["storage_user_${value}"],
storage_password => $plugin_settings["storage_password_${value}"],
storage_ip => $plugin_settings["storage_ip_${value}"],
enable_replication => $plugin_settings["enable_replication_${value}"],
replication_ip => $plugin_settings["replication_ip_${value}"],
replication_login => $plugin_settings["replication_login_${value}"],
replication_rpo => $plugin_settings["replication_rpo_${value}"],
replication_password => $plugin_settings["replication_password_${value}"],
enable_multipath => $plugin_settings["enable_multipath_${value}"],
suppress_logs => $plugin_settings["suppress_logs_${value}"],
filter_function => $plugin_settings["filter_function_${value}"],
oversubscription_ratio => $plugin_settings["oversubscription_ratio_${value}"],
num => $value
}
config {"plugin_${value}":
add_backend => $plugin_settings["add_backend_${value}"],
cinder_node => $plugin_settings["cinder_node_${value}"],
storage_protocol => $plugin_settings["storage_protocol_${value}"],
backend_name => $plugin_settings["backend_name_${value}"],
storage_user => $plugin_settings["storage_user_${value}"],
storage_password => $plugin_settings["storage_password_${value}"],
storage_ip => $plugin_settings["storage_ip_${value}"],
enable_replication => $plugin_settings["enable_replication_${value}"],
replication_ip => $plugin_settings["replication_ip_${value}"],
replication_login => $plugin_settings["replication_login_${value}"],
replication_rpo => $plugin_settings["replication_rpo_${value}"],
replication_password => $plugin_settings["replication_password_${value}"],
enable_multipath => $plugin_settings["enable_multipath_${value}"],
suppress_logs => $plugin_settings["suppress_logs_${value}"],
filter_function => $plugin_settings["filter_function_${value}"],
goodness_function => $plugin_settings["goodness_function_${value}"],
oversubscription_ratio => $plugin_settings["oversubscription_ratio_${value}"],
num => $value
}
$minus1 = inline_template('<%= @value.to_i - 1 %>')
if "${minus1}" < '0' {
} else {
recursion { "value-${minus1}":
value => $minus1,
}
}
}
}
define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio) {
define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio,$goodness_function) {
$sec_name = section_name( $storage_ip , $backend_name )
$config_file = "/etc/cinder/cinder.conf"
if $cinder_node == hiera(user_node_name) {
if $add_backend == true {
if $storage_protocol == 'FC'{
ini_subsetting {"enable_backend_${num}":
ensure => present,
@ -38,18 +54,26 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag
key_val_separator => '=',
path => $config_file,
setting => 'enabled_backends',
subsetting => $backend_name,
subsetting => $sec_name,
subsetting_separator => ',',
}->
cinder_config {
"$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver";
"$sec_name/volume_backend_name" : value => $backend_name;
"$sec_name/san_ip" : value => $storage_ip;
"$sec_name/san_login" : value => $storage_user;
"$sec_name/san_password" : value => $storage_password;
"$sec_name/filter_function" : value => $filter_function;
}
if $storage_protocol == 'FC'{
cinder_config {
"$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver";
}
}
elsif $storage_protocol == 'ISCSI'{
cinder_config {
"$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver";
}
}
if $enable_replication == true {
$replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo)
cinder_config {
@ -69,56 +93,23 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag
}
}
if $filter_function != '' {
cinder_config {
"$sec_name/filter_function" : value => $filter_function;
}
}
if $goodness_function != '' {
cinder_config {
"$sec_name/goodness_function" : value => $goodness_function;
}
}
if $oversubscription_ratio == true {
cinder_config {
"$sec_name/auto_calc_max_oversubscription_ratio" : value => "True";
}
}
}
if $storage_protocol == 'ISCSI'{
ini_subsetting {"enable_backend_${num}":
ensure => present,
section => 'DEFAULT',
key_val_separator => '=',
path => $config_file,
setting => 'enabled_backends',
subsetting => $backend_name,
subsetting_separator => ',',
}->
cinder_config {
"$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver";
"$sec_name/volume_backend_name" : value => $backend_name;
"$sec_name/san_ip" : value => $storage_ip;
"$sec_name/san_login" : value => $storage_user;
"$sec_name/san_password" : value => $storage_password;
"$sec_name/filter_function" : value => $filter_function;
}
if $enable_replication == true {
$replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo)
cinder_config {
"$sec_name/replication_device" : value => $replication_device;
}
}
if $enable_multipath == true {
cinder_config {
"$sec_name/use_multipath_for_image_xfer" : value => "True";
"$sec_name/enforce_multipath_for_image_xfer" : value => "True";
}
}
if $suppress_logs == true {
cinder_config {
"$sec_name/suppress_requests_ssl_warnings" : value => "True";
}
}
if $oversubscription_ratio == true {
cinder_config {
"$sec_name/auto_calc_max_oversubscription_ratio" : value => "True";
}
}
}
}
}
}

View File

@ -1,38 +1,53 @@
class kaminario::type {
$num = [ '0', '1', '2', '3', '4', '5' ]
$plugin_settings = hiera('cinder_kaminario')
each($num) |$value| {
kaminario_type {"plugin_${value}":
create_type => $plugin_settings["create_type_${value}"],
options => $plugin_settings["options_${value}"],
backend_name => $plugin_settings["backend_name_${value}"]
recursion { 'start':
value => 5,
}
define recursion(
$value
) {
$plugin_settings = hiera('cinder_kaminario')
kaminario_type {"plugin_${value}":
create_type => $plugin_settings["create_type_${value}"],
options => $plugin_settings["options_${value}"],
backend_name => $plugin_settings["backend_name_${value}"],
type_name => $plugin_settings["type_name_${value}"]
}
$minus1 = inline_template('<%= @value.to_i - 1 %>')
if "${minus1}" < '0' {
} else {
recursion { "value-${minus1}":
value => $minus1,
}
}
}
}
define kaminario_type ($create_type,$options,$backend_name) {
define kaminario_type ($create_type,$options,$backend_name,$type_name) {
if $create_type == true {
case $options {
"enable_replication_type": {
cinder_type {$backend_name:
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}",'kaminario:replication=enabled'],
}
}
"enable_dedup": {
cinder_type {$backend_name:
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup'],
}
}
"replication_dedup": {
cinder_type {$backend_name:
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:thin_prov_type=nodedup'],
properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:replication=enabled'],
}
}
"default": {
cinder_type {$backend_name:
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}"],
}

View File

@ -1,21 +1,8 @@
- id: kaminario_parser
type: puppet
version: 2.1.0
groups: [cinder,primary-controller,controller]
requires: [openstack-cinder,top-role-cinder,netconfig]
required_for: [kaminario_cinder,kaminario_types]
condition:
yaql_exp: "changedAny($.storage, $.cinder_kaminario)"
parameters:
puppet_manifest: puppet/manifests/cinder_parser.pp
puppet_modules: puppet/modules:/etc/puppet/modules
timeout: 360
- id: kaminario_cinder
type: puppet
version: 2.1.0
groups: [cinder]
requires: [kaminario_parser,top-role-cinder]
requires: [top-role-cinder]
required_for: [deploy_end]
condition:
yaql_exp: "changedAny($.storage, $.cinder_kaminario)"
@ -24,11 +11,24 @@
puppet_modules: puppet/modules:/etc/puppet/modules
timeout: 360
- id: kaminario_config
type: puppet
version: 2.1.0
groups: [primary-controller,controller]
requires: [openstack-cinder]
required_for: [deploy_end]
condition:
yaql_exp: "changedAny($.storage, $.cinder_kaminario)"
parameters:
puppet_manifest: puppet/manifests/cinder_controller_config.pp
puppet_modules: puppet/modules:/etc/puppet/modules
timeout: 360
- id: kaminario_types
type: puppet
version: 2.1.0
groups: [primary-controller]
requires: [kaminario_parser]
requires: [openstack-cinder]
required_for: [deploy_end]
condition:
yaql_exp: "changedAny($.storage, $.cinder_kaminario)"
@ -47,3 +47,4 @@
puppet_manifest: puppet/manifests/cinder_multipath.pp
puppet_modules: puppet/modules:/etc/puppet/modules
timeout: 360

View File

@ -13,7 +13,7 @@ attributes:
type: "radio"
weight: 10
value: "FC"
label: "Kaminario Storage Protocol"
label: "Storage protocol to be used on the data path with storage system"
values:
- data: "ISCSI"
label: "ISCSI"
@ -37,7 +37,7 @@ attributes:
storage_ip_0:
value: ""
label: 'Kaminario Storage Hostname/IP'
description: 'IP address of Kaminario Storage Array'
description: 'Provide management IP of kaminario K2 All-Flash array'
weight: 20
type: "text"
regex:
@ -47,7 +47,7 @@ attributes:
storage_user_0:
value: ""
label: 'Username'
description: 'user name of Kaminario Storage Array'
description: 'Provide username of kaminario K2 All-Flash array'
weight: 25
type: "text"
regex:
@ -57,14 +57,14 @@ attributes:
storage_password_0:
value: ""
label: 'Password'
description: 'password of Kaminario Storage Array'
description: 'Provide password of kaminario K2 All-Flash array'
weight: 30
type: "password"
add_backend_0:
value: true
label: 'Add a new kaminario backend or new kaminario Array'
description: 'Add a new kaminario backend or scale an existing backend'
label: 'Add a new kaminario backend or new kaminario Array'
description: 'Add a new kaminario backend or scale an existing backend'
weight: 35
type: 'checkbox'
restrictions:
@ -74,13 +74,13 @@ attributes:
enable_replication_0:
value: false
label: 'Enable Replication'
description: Enable replication for Kaminario Array
description: 'Enable replication for Kaminario Array'
weight: 40
type: 'checkbox'
replication_ip_0:
value: ""
label: 'Ipaddress'
label: 'IP address'
description: 'Ipaddress of Kaminario replication array'
weight: 45
type: "text"
@ -112,9 +112,9 @@ attributes:
action: 'hide'
replication_rpo_0:
value: ""
value: "60"
label: 'RPO'
description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
description: 'Value (in seconds) should be either 60 or a multiple of 300'
weight: 60
type: "text"
restrictions:
@ -150,9 +150,9 @@ attributes:
- data: "enable_replication_type"
label: "Enable Replication"
- data: "enable_dedup"
label: "Enable Non Deduplication"
label: "Enable nodedup"
- data: "replication_dedup"
label: "Enable both Replication and Non Deduplication"
label: "Enable both Replication and nodedup"
restrictions:
- condition: "settings:cinder_kaminario.create_type_0.value == false"
action: 'hide'
@ -160,7 +160,7 @@ attributes:
default_type_0:
value: false
label: 'Default Type'
description: 'Make this type as default'
description: 'Make this volume type the default volume type'
weight: 80
type: 'checkbox'
restrictions:
@ -182,32 +182,60 @@ attributes:
type: 'checkbox'
filter_function_0:
value: ""
value: "capabilities.total_volumes < 250"
label: 'Filter Function'
description: 'Filter function for backend'
weight: 87
type: "text"
goodness_function_0:
value: ""
label: 'Goodness Function'
description: 'Goodness function for backend'
weight: 88
type: "text"
oversubscription_ratio_0:
value: false
label: 'Oversubscription Ratio'
description: 'Enable Oversubscription Ratio for backend'
weight: 88
description: 'Enable oversubscription ratio for the backend. If "auto_calc_max_oversubscription_ratio = True", the Kaminario driver dynamically calculates "max_over_subscription_ratio"'
weight: 89
type: 'checkbox'
scheduler_default_weighers:
value: "CapacityWeigher"
label: 'Scheduler weighers'
description: 'Default weighers for the scheduler. To enable multiple weighers, provide them separated by ","'
weight: 90
type: "text"
scheduler_default_filters:
value: "DriverFilters"
label: 'Scheduler filters'
description: 'Default filters for the scheduler. To enable multiple filters, provide them separated by ","'
weight: 91
type: "text"
rpc_response_timeout:
value: "60"
label: 'RPC timeout'
description: 'Timeout in seconds for RPC responses. The default is 60'
weight: 92
type: "text"
add_backend_1:
value: false
label: 'Add a new kaminario backend or scale an existing backend'
description: 'Add a new kaminario backend or scale an existing backend'
weight: 90
weight: 93
type: 'checkbox'
storage_protocol_1:
type: "radio"
weight: 95
value: "FC"
label: "Kaminario Storage Protocol"
label: "Storage protocol to be used on the data path with storage system"
values:
- data: "ISCSI"
label: "ISCSI"
@ -240,7 +268,7 @@ attributes:
storage_ip_1:
value: ""
label: 'Kaminario Storage Hostname/IP'
description: 'IP address of Kaminario Storage Array'
description: 'Provide management IP of kaminario K2 All-Flash array'
weight: 110
type: "text"
restrictions:
@ -253,7 +281,7 @@ attributes:
storage_user_1:
value: ""
label: 'Username'
description: 'user name of Kaminario Storage Array'
description: 'Provide username of kaminario K2 All-Flash array'
weight: 115
type: "text"
regex:
@ -266,7 +294,7 @@ attributes:
storage_password_1:
value: ""
label: 'Password'
description: 'password of Kaminario Storage Array'
description: 'Provide password of kaminario K2 All-Flash array'
weight: 120
type: "password"
restrictions:
@ -285,7 +313,7 @@ attributes:
replication_ip_1:
value: ""
label: 'Ipaddress'
label: 'IP address'
description: 'Ipaddress of Kaminario replication array'
weight: 130
type: "text"
@ -317,9 +345,9 @@ attributes:
action: 'hide'
replication_rpo_1:
value: ""
value: "60"
label: 'RPO'
description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
description: 'Value (in seconds) should be either 60 or a multiple of 300'
weight: 145
type: "text"
restrictions:
@ -359,9 +387,9 @@ attributes:
- data: "enable_replication_type"
label: "Enable Replication"
- data: "enable_dedup"
label: "Enable Non Deduplication"
label: "Enable nodedup"
- data: "replication_dedup"
label: "Enable both Replication and Non Deduplication"
label: "Enable both Replication and nodedup"
restrictions:
- condition: "settings:cinder_kaminario.create_type_1.value == false"
action: 'hide'
@ -369,7 +397,7 @@ attributes:
default_type_1:
value: false
label: 'Default Type'
description: 'Make this type as default'
description: 'Make this volume type the default volume type'
weight: 165
type: 'checkbox'
restrictions:
@ -397,7 +425,7 @@ attributes:
action: 'hide'
filter_function_1:
value: ""
value: "capabilities.total_volumes < 250"
label: 'Filter Function'
description: 'Filter function for backend'
weight: 172
@ -406,12 +434,21 @@ attributes:
- condition: "settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
goodness_function_1:
value: ""
label: 'Goodness Function'
description: 'Goodness function for backend'
weight: 173
type: "text"
restrictions:
- condition: "settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
oversubscription_ratio_1:
value: false
label: 'Oversubscription Ratio'
description: 'Enable Oversubscription Ratio for backend'
weight: 173
description: 'Enable oversubscription ratio for the backend. If "auto_calc_max_oversubscription_ratio = True", the Kaminario driver dynamically calculates "max_over_subscription_ratio"'
weight: 174
type: 'checkbox'
restrictions:
- condition: "settings:cinder_kaminario.add_backend_1.value != true"
@ -433,7 +470,7 @@ attributes:
type: "radio"
weight: 180
value: "FC"
label: "Kaminario Storage Protocol"
label: "Storage protocol to be used on the data path with storage system"
values:
- data: "ISCSI"
label: "ISCSI"
@ -466,7 +503,7 @@ attributes:
storage_ip_2:
value: ""
label: 'Kaminario Storage Hostname/IP'
description: 'IP address of Kaminario Storage Array'
description: 'Provide management IP of kaminario K2 All-Flash array'
weight: 195
type: "text"
restrictions:
@ -479,7 +516,7 @@ attributes:
storage_user_2:
value: ""
label: 'Username'
description: 'user name of Kaminario Storage Array'
description: 'Provide username of kaminario K2 All-Flash array'
weight: 200
type: "text"
regex:
@ -492,7 +529,7 @@ attributes:
storage_password_2:
value: ""
label: 'Password'
description: 'password of Kaminario Storage Array'
description: 'Provide password of kaminario K2 All-Flash array'
weight: 205
type: "password"
restrictions:
@ -511,7 +548,7 @@ attributes:
replication_ip_2:
value: ""
label: 'Ipaddress'
label: 'IP address'
description: 'Ipaddress of Kaminario replication array'
weight: 215
type: "text"
@ -543,9 +580,9 @@ attributes:
action: 'hide'
replication_rpo_2:
value: ""
value: "60"
label: 'RPO'
description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
description: 'Value (in seconds) should be either 60 or a multiple of 300'
weight: 230
type: "text"
restrictions:
@ -584,9 +621,9 @@ attributes:
- data: "enable_replication_type"
label: "Enable Replication"
- data: "enable_dedup"
label: "Enable Non Deduplication"
label: "Enable nodedup"
- data: "replication_dedup"
label: "Enable both Replication and Non Deduplication"
label: "Enable both Replication and nodedup"
restrictions:
- condition: "settings:cinder_kaminario.create_type_2.value == false"
action: 'hide'
@ -594,7 +631,7 @@ attributes:
default_type_2:
value: false
label: 'Default_type'
description: 'Make this type as default'
description: 'Make this volume type the default volume type'
weight: 250
type: 'checkbox'
restrictions:
@ -622,7 +659,7 @@ attributes:
action: 'hide'
filter_function_2:
value: ""
value: "capabilities.total_volumes < 250"
label: 'Filter Function'
description: 'Filter function for backend'
weight: 262
@ -631,12 +668,21 @@ attributes:
- condition: "settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
goodness_function_2:
value: ""
label: 'Goodness Function'
description: 'Goodness function for backend'
weight: 263
type: "text"
restrictions:
- condition: "settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
oversubscription_ratio_2:
value: false
label: 'Oversubscription Ratio'
description: 'Enable Oversubscription Ratio for backend'
weight: 263
description: 'Enable oversubscription ratio for the backend. If "auto_calc_max_oversubscription_ratio = True", the Kaminario driver dynamically calculates "max_over_subscription_ratio"'
weight: 264
type: 'checkbox'
restrictions:
- condition: "settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
@ -658,7 +704,7 @@ attributes:
type: "radio"
weight: 270
value: "FC"
label: "Kaminario Storage Protocol"
label: "Storage protocol to be used on the data path with storage system"
values:
- data: "ISCSI"
label: "ISCSI"
@ -690,7 +736,7 @@ attributes:
storage_ip_3:
value: ""
label: 'Kaminario Storage Hostname/IP'
description: 'IP address of Kaminario Storage Array'
description: 'Provide management IP of kaminario K2 All-Flash array'
weight: 285
type: "text"
restrictions:
@ -703,7 +749,7 @@ attributes:
storage_user_3:
value: ""
label: 'Username'
description: 'user name of Kaminario Storage Array'
description: 'Provide username of kaminario K2 All-Flash array'
weight: 290
type: "text"
regex:
@ -716,7 +762,7 @@ attributes:
storage_password_3:
value: ""
label: 'Password'
description: 'password of Kaminario Storage Array'
description: 'Provide management password of kaminario K2 All-Flash array'
weight: 295
type: "password"
restrictions:
@ -735,7 +781,7 @@ attributes:
replication_ip_3:
value: ""
label: 'Ipaddress'
label: 'IP address'
description: 'Ipaddress of Kaminario replication array'
weight: 305
type: "text"
@ -767,9 +813,9 @@ attributes:
action: 'hide'
replication_rpo_3:
value: ""
value: "60"
label: 'RPO'
description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
description: 'Value (in seconds) should be either 60 or a multiple of 300'
weight: 320
type: "text"
restrictions:
@ -808,9 +854,9 @@ attributes:
- data: "enable_replication_type"
label: "Enable Replication"
- data: "enable_dedup"
label: "Enable Non Deduplication"
label: "Enable nodedup"
- data: "replication_dedup"
label: "Enable both Replication and Non Deduplication"
label: "Enable both Replication and nodedup"
restrictions:
- condition: "settings:cinder_kaminario.create_type_3.value == false"
action: 'hide'
@ -818,7 +864,7 @@ attributes:
default_type_3:
value: false
label: 'Default_type'
description: 'Make this type as default'
description: 'Make this volume type the default volume type'
weight: 335
type: 'checkbox'
restrictions:
@ -846,7 +892,7 @@ attributes:
action: 'hide'
filter_function_3:
value: ""
value: "capabilities.total_volumes < 250"
label: 'Filter Function'
description: 'Filter function for backend'
weight: 342
@ -855,12 +901,21 @@ attributes:
- condition: "settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
goodness_function_3:
value: ""
label: 'Goodness Function'
description: 'Goodness function for backend'
weight: 343
type: "text"
restrictions:
- condition: "settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
oversubscription_ratio_3:
value: false
label: 'Oversubscription Ratio'
description: 'Enable Oversubscription Ratio for backend'
weight: 343
description: 'Enable oversubscription ratio for the backend. If "auto_calc_max_oversubscription_ratio = True", the Kaminario driver calculates "max_over_subscription_ratio" dynamically'
weight: 344
type: 'checkbox'
restrictions:
- condition: "settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
@ -881,7 +936,7 @@ attributes:
type: "radio"
weight: 350
value: "FC"
label: "Kaminario Storage Protocol"
label: "Storage protocol to be used on the data path with storage system"
values:
- data: "ISCSI"
label: "ISCSI"
@ -914,7 +969,7 @@ attributes:
storage_ip_4:
value: ""
label: 'Kaminario Storage Hostname/IP'
description: 'IP address of Kaminario Storage Array'
description: 'Provide the management IP address of the Kaminario K2 All-Flash array'
weight: 365
type: "text"
restrictions:
@ -927,7 +982,7 @@ attributes:
storage_user_4:
value: ""
label: 'Username'
description: 'user name of Kaminario Storage Array'
description: 'Provide the username for the Kaminario K2 All-Flash array'
weight: 370
type: "text"
regex:
@ -940,7 +995,7 @@ attributes:
storage_password_4:
value: ""
label: 'Password'
description: 'password of Kaminario Storage Array'
description: 'Provide the password for the Kaminario K2 All-Flash array'
weight: 375
type: "password"
restrictions:
@ -959,7 +1014,7 @@ attributes:
replication_ip_4:
value: ""
label: 'Ipaddress'
label: 'IP Address'
description: 'Ipaddress of Kaminario replication array'
weight: 385
type: "text"
@ -991,9 +1046,9 @@ attributes:
action: 'hide'
replication_rpo_4:
value: ""
value: "60"
label: 'RPO'
description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
description: 'Value (in seconds) must be either 60 or a multiple of 300'
weight: 400
type: "text"
restrictions:
@ -1033,9 +1088,9 @@ attributes:
- data: "enable_replication_type"
label: "Enable Replication"
- data: "enable_dedup"
label: "Enable Non Deduplication"
label: "Enable nodedup"
- data: "replication_dedup"
label: "Enable both Replication and Non Deduplication"
label: "Enable both Replication and nodedup"
restrictions:
- condition: "settings:cinder_kaminario.create_type_4.value == false"
action: 'hide'
@ -1043,7 +1098,7 @@ attributes:
default_type_4:
value: false
label: 'Default type'
description: 'Make this type as default'
description: 'Make this volume type the default'
weight: 420
type: 'checkbox'
restrictions:
@ -1071,7 +1126,7 @@ attributes:
action: 'hide'
filter_function_4:
value: ""
value: "capabilities.total_volumes < 250"
label: 'Filter Function'
description: 'Filter function for backend'
weight: 427
@ -1080,12 +1135,21 @@ attributes:
- condition: "settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
goodness_function_4:
value: ""
label: 'Goodness Function'
description: 'Goodness function for backend'
weight: 428
type: "text"
restrictions:
- condition: "settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
oversubscription_ratio_4:
value: false
label: 'Oversubscription Ratio'
description: 'Enable Oversubscription Ratio for backend'
weight: 428
description: 'Enable oversubscription ratio for the backend. If "auto_calc_max_oversubscription_ratio = True", the Kaminario driver calculates "max_over_subscription_ratio" dynamically'
weight: 429
type: 'checkbox'
restrictions:
- condition: "settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
@ -1107,7 +1171,7 @@ attributes:
type: "radio"
weight: 435
value: "FC"
label: "Kaminario Storage Protocol"
label: "Storage protocol to be used on the data path with storage system"
values:
- data: "ISCSI"
label: "ISCSI"
@ -1140,7 +1204,7 @@ attributes:
storage_ip_5:
value: ""
label: 'Kaminario Storage Hostname/IP'
description: 'IP address of Kaminario Storage Array'
description: 'Provide the management IP address of the Kaminario K2 All-Flash array'
weight: 450
type: "text"
restrictions:
@ -1153,7 +1217,7 @@ attributes:
storage_user_5:
value: ""
label: 'Username'
description: 'user name of Kaminario Storage Array'
description: 'Provide the username for the Kaminario K2 All-Flash array'
weight: 455
type: "text"
regex:
@ -1166,7 +1230,7 @@ attributes:
storage_password_5:
value: ""
label: 'Password'
description: 'password of Kaminario Storage Array'
description: 'Provide the password for the Kaminario K2 All-Flash array'
weight: 460
type: "password"
restrictions:
@ -1185,7 +1249,7 @@ attributes:
replication_ip_5:
value: ""
label: 'Ipaddress'
label: 'IP Address'
description: 'Ipaddress of Kaminario replication array'
weight: 470
type: "text"
@ -1217,9 +1281,9 @@ attributes:
action: 'hide'
replication_rpo_5:
value: ""
value: "60"
label: 'RPO'
description: 'Value (in seconds) should be either 1 minute or multiple of 5 minutes'
description: 'Value (in seconds) must be either 60 or a multiple of 300'
weight: 485
type: "text"
restrictions:
@ -1259,9 +1323,9 @@ attributes:
- data: "enable_replication_type"
label: "Enable Replication"
- data: "enable_dedup"
label: "Enable Non Deduplication"
label: "Enable nodedup"
- data: "replication_dedup"
label: "Enable both Replication and Non Deduplication"
label: "Enable both Replication and nodedup"
restrictions:
- condition: "settings:cinder_kaminario.create_type_5.value == false"
action: 'hide'
@ -1269,7 +1333,7 @@ attributes:
default_type_5:
value: false
label: 'Default Type'
description: 'Make this type as default'
description: 'Make this volume type the default'
weight: 505
type: 'checkbox'
restrictions:
@ -1297,7 +1361,7 @@ attributes:
action: 'hide'
filter_function_5:
value: ""
value: "capabilities.total_volumes < 250"
label: 'Filter Function'
description: 'Filter function for backend'
weight: 512
@ -1306,11 +1370,22 @@ attributes:
- condition: "settings:cinder_kaminario.add_backend_5.value != true or settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
goodness_function_5:
value: ""
label: 'Goodness Function'
description: 'Goodness function for backend'
weight: 513
type: "text"
restrictions:
- condition: "settings:cinder_kaminario.add_backend_5.value != true or settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"
action: 'hide'
oversubscription_ratio_5:
value: false
label: 'Oversubscription Ratio'
description: 'Enable Oversubscription Ratio for backend'
weight: 513
description: 'Enable oversubscription ratio for the backend. If "auto_calc_max_oversubscription_ratio = True", the Kaminario driver calculates "max_over_subscription_ratio" dynamically'
weight: 514
type: 'checkbox'
restrictions:
- condition: "settings:cinder_kaminario.add_backend_5.value != true or settings:cinder_kaminario.add_backend_4.value != true or settings:cinder_kaminario.add_backend_3.value != true or settings:cinder_kaminario.add_backend_2.value != true or settings:cinder_kaminario.add_backend_1.value != true"