From 9550a4d0e62a676740b7ddede3df90aa7f0e5048 Mon Sep 17 00:00:00 2001
From: Pradip Rawat
Date: Sun, 16 Oct 2016 11:36:29 +0530
Subject: [PATCH] Update Fuel plugin repo for Kaminario with latest Kaminario
 driver files from GitHub

Change-Id: Ib16114525eed6066c19dfc4b2b28f5e2128eb56f
Co-Authored-By: Chaithanya Kopparthi
---
 .gitreview                                       |    4 +
 .../puppet/manifests/cinder_kaminario.pp         |    2 +-
 .../modules/kaminario/files/__init__.py          |    0
 .../modules/kaminario/files/exception.py         | 1128 ----------------
 .../modules/kaminario/files/exception.sh         |    2 +
 .../kaminario/files/kaminario_common.py          | 1155 -----------------
 .../modules/kaminario/files/kaminario_fc.py      |  196 ---
 .../kaminario/files/kaminario_iscsi.py           |  137 --
 .../kaminario/manifests/controller_config.pp     |    8 +-
 .../modules/kaminario/manifests/driver.pp        |   62 +-
 .../modules/kaminario/manifests/init.pp          |   46 +-
 .../modules/kaminario/manifests/krest.pp         |    2 +
 .../modules/kaminario/manifests/type.pp          |   18 +-
 .../modules/multipath/manifests/init.pp          |   21 +-
 deployment_tasks.yaml                            |    2 +-
 15 files changed, 90 insertions(+), 2693 deletions(-)
 create mode 100644 .gitreview
 delete mode 100644 deployment_scripts/puppet/modules/kaminario/files/__init__.py
 delete mode 100644 deployment_scripts/puppet/modules/kaminario/files/exception.py
 create mode 100644 deployment_scripts/puppet/modules/kaminario/files/exception.sh
 delete mode 100644 deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py
 delete mode 100644 deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py
 delete mode 100644 deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py

diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..d0d3710
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/fuel-plugin-cinder-kaminario.git
diff --git a/deployment_scripts/puppet/manifests/cinder_kaminario.pp b/deployment_scripts/puppet/manifests/cinder_kaminario.pp
index 78727fa..481f928 100644
--- a/deployment_scripts/puppet/manifests/cinder_kaminario.pp
+++ b/deployment_scripts/puppet/manifests/cinder_kaminario.pp
@@ -1,8 +1,8 @@
 notice('MODULAR: cinder_kaminario')

-class { 'kaminario::driver': }->
 class { 'kaminario::krest': }->
+class { 'kaminario::driver': }->
 class { 'kaminario::config': }~> Exec[cinder_volume]

 exec {'cinder_volume':
diff --git a/deployment_scripts/puppet/modules/kaminario/files/__init__.py b/deployment_scripts/puppet/modules/kaminario/files/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deployment_scripts/puppet/modules/kaminario/files/exception.py b/deployment_scripts/puppet/modules/kaminario/files/exception.py
deleted file mode 100644
index 1927e8d..0000000
--- a/deployment_scripts/puppet/modules/kaminario/files/exception.py
+++ /dev/null
@@ -1,1128 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -"""Cinder base exception handling. - -Includes decorator for re-raising Cinder-type exceptions. - -SHOULD include dedicated exception logging. - -""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_versionedobjects import exception as obj_exc -import six -import webob.exc -from webob.util import status_generic_reasons -from webob.util import status_reasons - -from cinder.i18n import _, _LE - - -LOG = logging.getLogger(__name__) - -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal.'), -] - -CONF = cfg.CONF -CONF.register_opts(exc_log_opts) - - -class ConvertedException(webob.exc.WSGIHTTPException): - def __init__(self, code=500, title="", explanation=""): - self.code = code - # There is a strict rule about constructing status line for HTTP: - # '...Status-Line, consisting of the protocol version followed by a - # numeric status code and its associated textual phrase, with each - # element separated by SP characters' - # (http://www.faqs.org/rfcs/rfc2616.html) - # 'code' and 'title' can not be empty because they correspond - # to numeric status code and its associated text - if title: - self.title = title - else: - try: - self.title = status_reasons[self.code] - except KeyError: - generic_code = self.code // 100 - self.title = status_generic_reasons[generic_code] - self.explanation = explanation - super(ConvertedException, self).__init__() - - -class Error(Exception): - pass - - -class CinderException(Exception): - """Base Cinder Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. - - """ - message = _("An unknown exception occurred.") - code = 500 - headers = {} - safe = False - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - self.kwargs['message'] = message - - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - for k, v in self.kwargs.items(): - if isinstance(v, Exception): - self.kwargs[k] = six.text_type(v) - - if self._should_format(): - try: - message = self.message % kwargs - - except Exception: - exc_info = sys.exc_info() - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception(_LE('Exception in string format operation')) - for name, value in kwargs.items(): - LOG.error(_LE("%(name)s: %(value)s"), - {'name': name, 'value': value}) - if CONF.fatal_exception_format_errors: - six.reraise(*exc_info) - # at least get the core message out if something happened - message = self.message - elif isinstance(message, Exception): - message = six.text_type(message) - - # NOTE(luisg): We put the actual message in 'msg' so that we can access - # it, because if we try to access the message via 'message' it will be - # overshadowed by the class' message attribute - self.msg = message - super(CinderException, self).__init__(message) - - def _should_format(self): - return self.kwargs['message'] is None or '%(message)' in self.message - - def __unicode__(self): - return six.text_type(self.msg) - - -class VolumeBackendAPIException(CinderException): - message = _("Bad or unexpected response from the storage volume " - "backend API: %(data)s") - - -class VolumeDriverException(CinderException): - message = _("Volume driver reported an error: %(message)s") - - -class BackupDriverException(CinderException): - message = _("Backup 
driver reported an error: %(message)s") - - -class GlanceConnectionFailed(CinderException): - message = _("Connection to glance failed: %(reason)s") - - -class NotAuthorized(CinderException): - message = _("Not authorized.") - code = 403 - - -class AdminRequired(NotAuthorized): - message = _("User does not have admin privileges") - - -class PolicyNotAuthorized(NotAuthorized): - message = _("Policy doesn't allow %(action)s to be performed.") - - -class ImageNotAuthorized(CinderException): - message = _("Not authorized for image %(image_id)s.") - - -class DriverNotInitialized(CinderException): - message = _("Volume driver not ready.") - - -class Invalid(CinderException): - message = _("Unacceptable parameters.") - code = 400 - - -class InvalidSnapshot(Invalid): - message = _("Invalid snapshot: %(reason)s") - - -class InvalidVolumeAttachMode(Invalid): - message = _("Invalid attaching mode '%(mode)s' for " - "volume %(volume_id)s.") - - -class VolumeAttached(Invalid): - message = _("Volume %(volume_id)s is still attached, detach volume first.") - - -class InvalidResults(Invalid): - message = _("The results are invalid.") - - -class InvalidInput(Invalid): - message = _("Invalid input received: %(reason)s") - - -class InvalidVolumeType(Invalid): - message = _("Invalid volume type: %(reason)s") - - -class InvalidVolume(Invalid): - message = _("Invalid volume: %(reason)s") - - -class InvalidContentType(Invalid): - message = _("Invalid content type %(content_type)s.") - - -class InvalidHost(Invalid): - message = _("Invalid host: %(reason)s") - - -# Cannot be templated as the error syntax varies. -# msg needs to be constructed when raised. -class InvalidParameterValue(Invalid): - message = _("%(err)s") - - -class InvalidAuthKey(Invalid): - message = _("Invalid auth key: %(reason)s") - - -class InvalidConfigurationValue(Invalid): - message = _('Value "%(value)s" is not valid for ' - 'configuration option "%(option)s"') - - -class ServiceUnavailable(Invalid): - message = _("Service is unavailable at this time.") - - -class ImageUnacceptable(Invalid): - message = _("Image %(image_id)s is unacceptable: %(reason)s") - - -class DeviceUnavailable(Invalid): - message = _("The device in the path %(path)s is unavailable: %(reason)s") - - -class InvalidUUID(Invalid): - message = _("Expected a uuid but received %(uuid)s.") - - -class InvalidAPIVersionString(Invalid): - message = _("API Version String %(version)s is of invalid format. Must " - "be of format MajorNum.MinorNum.") - - -class VersionNotFoundForAPIMethod(Invalid): - message = _("API version %(version)s is not supported on this method.") - - -class InvalidGlobalAPIVersion(Invalid): - message = _("Version %(req_ver)s is not supported by the API. 
Minimum " - "is %(min_ver)s and maximum is %(max_ver)s.") - - -class APIException(CinderException): - message = _("Error while requesting %(service)s API.") - - def __init__(self, message=None, **kwargs): - if 'service' not in kwargs: - kwargs['service'] = 'unknown' - super(APIException, self).__init__(message, **kwargs) - - -class APITimeout(APIException): - message = _("Timeout while requesting %(service)s API.") - - -class RPCTimeout(CinderException): - message = _("Timeout while requesting capabilities from backend " - "%(service)s.") - code = 502 - - -class NotFound(CinderException): - message = _("Resource could not be found.") - code = 404 - safe = True - - -class VolumeNotFound(NotFound): - message = _("Volume %(volume_id)s could not be found.") - - -class VolumeAttachmentNotFound(NotFound): - message = _("Volume attachment could not be found with " - "filter: %(filter)s .") - - -class VolumeMetadataNotFound(NotFound): - message = _("Volume %(volume_id)s has no metadata with " - "key %(metadata_key)s.") - - -class VolumeAdminMetadataNotFound(NotFound): - message = _("Volume %(volume_id)s has no administration metadata with " - "key %(metadata_key)s.") - - -class InvalidVolumeMetadata(Invalid): - message = _("Invalid metadata: %(reason)s") - - -class InvalidVolumeMetadataSize(Invalid): - message = _("Invalid metadata size: %(reason)s") - - -class SnapshotMetadataNotFound(NotFound): - message = _("Snapshot %(snapshot_id)s has no metadata with " - "key %(metadata_key)s.") - - -class VolumeTypeNotFound(NotFound): - message = _("Volume type %(volume_type_id)s could not be found.") - - -class VolumeTypeNotFoundByName(VolumeTypeNotFound): - message = _("Volume type with name %(volume_type_name)s " - "could not be found.") - - -class VolumeTypeAccessNotFound(NotFound): - message = _("Volume type access not found for %(volume_type_id)s / " - "%(project_id)s combination.") - - -class VolumeTypeExtraSpecsNotFound(NotFound): - message = _("Volume Type %(volume_type_id)s has no extra specs with " - "key %(extra_specs_key)s.") - - -class VolumeTypeInUse(CinderException): - message = _("Volume Type %(volume_type_id)s deletion is not allowed with " - "volumes present with the type.") - - -class SnapshotNotFound(NotFound): - message = _("Snapshot %(snapshot_id)s could not be found.") - - -class ServerNotFound(NotFound): - message = _("Instance %(uuid)s could not be found.") - - -class VolumeIsBusy(CinderException): - message = _("deleting volume %(volume_name)s that has snapshot") - - -class SnapshotIsBusy(CinderException): - message = _("deleting snapshot %(snapshot_name)s that has " - "dependent volumes") - - -class ISCSITargetNotFoundForVolume(NotFound): - message = _("No target id found for volume %(volume_id)s.") - - -class InvalidImageRef(Invalid): - message = _("Invalid image href %(image_href)s.") - - -class ImageNotFound(NotFound): - message = _("Image %(image_id)s could not be found.") - - -class ServiceNotFound(NotFound): - - def __init__(self, message=None, **kwargs): - if kwargs.get('host', None): - self.message = _("Service %(service_id)s could not be " - "found on host %(host)s.") - else: - self.message = _("Service %(service_id)s could not be found.") - super(ServiceNotFound, self).__init__(None, **kwargs) - - -class ServiceTooOld(Invalid): - message = _("Service is too old to fulfil this request.") - - -class HostNotFound(NotFound): - message = _("Host %(host)s could not be found.") - - -class SchedulerHostFilterNotFound(NotFound): - message = _("Scheduler Host Filter 
%(filter_name)s could not be found.") - - -class SchedulerHostWeigherNotFound(NotFound): - message = _("Scheduler Host Weigher %(weigher_name)s could not be found.") - - -class InvalidReservationExpiration(Invalid): - message = _("Invalid reservation expiration %(expire)s.") - - -class InvalidQuotaValue(Invalid): - message = _("Change would make usage less than 0 for the following " - "resources: %(unders)s") - - -class InvalidNestedQuotaSetup(CinderException): - message = _("Project quotas are not properly setup for nested quotas: " - "%(reason)s.") - - -class QuotaNotFound(NotFound): - message = _("Quota could not be found") - - -class QuotaResourceUnknown(QuotaNotFound): - message = _("Unknown quota resources %(unknown)s.") - - -class ProjectQuotaNotFound(QuotaNotFound): - message = _("Quota for project %(project_id)s could not be found.") - - -class QuotaClassNotFound(QuotaNotFound): - message = _("Quota class %(class_name)s could not be found.") - - -class QuotaUsageNotFound(QuotaNotFound): - message = _("Quota usage for project %(project_id)s could not be found.") - - -class ReservationNotFound(QuotaNotFound): - message = _("Quota reservation %(uuid)s could not be found.") - - -class OverQuota(CinderException): - message = _("Quota exceeded for resources: %(overs)s") - - -class FileNotFound(NotFound): - message = _("File %(file_path)s could not be found.") - - -class Duplicate(CinderException): - pass - - -class VolumeTypeExists(Duplicate): - message = _("Volume Type %(id)s already exists.") - - -class VolumeTypeAccessExists(Duplicate): - message = _("Volume type access for %(volume_type_id)s / " - "%(project_id)s combination already exists.") - - -class VolumeTypeEncryptionExists(Invalid): - message = _("Volume type encryption for type %(type_id)s already exists.") - - -class VolumeTypeEncryptionNotFound(NotFound): - message = _("Volume type encryption for type %(type_id)s does not exist.") - - -class MalformedRequestBody(CinderException): - message = _("Malformed message body: %(reason)s") - - -class ConfigNotFound(NotFound): - message = _("Could not find config at %(path)s") - - -class ParameterNotFound(NotFound): - message = _("Could not find parameter %(param)s") - - -class PasteAppNotFound(NotFound): - message = _("Could not load paste app '%(name)s' from %(path)s") - - -class NoValidHost(CinderException): - message = _("No valid host was found. %(reason)s") - - -class NoMoreTargets(CinderException): - """No more available targets.""" - pass - - -class QuotaError(CinderException): - message = _("Quota exceeded: code=%(code)s") - code = 413 - headers = {'Retry-After': '0'} - safe = True - - -class VolumeSizeExceedsAvailableQuota(QuotaError): - message = _("Requested volume or snapshot exceeds allowed %(name)s " - "quota. Requested %(requested)sG, quota is %(quota)sG and " - "%(consumed)sG has been consumed.") - - def __init__(self, message=None, **kwargs): - kwargs.setdefault('name', 'gigabytes') - super(VolumeSizeExceedsAvailableQuota, self).__init__( - message, **kwargs) - - -class VolumeSizeExceedsLimit(QuotaError): - message = _("Requested volume size %(size)d is larger than " - "maximum allowed limit %(limit)d.") - - -class VolumeBackupSizeExceedsAvailableQuota(QuotaError): - message = _("Requested backup exceeds allowed Backup gigabytes " - "quota. 
Requested %(requested)sG, quota is %(quota)sG and " - "%(consumed)sG has been consumed.") - - -class VolumeLimitExceeded(QuotaError): - message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for " - "quota '%(name)s'.") - - def __init__(self, message=None, **kwargs): - kwargs.setdefault('name', 'volumes') - super(VolumeLimitExceeded, self).__init__(message, **kwargs) - - -class SnapshotLimitExceeded(QuotaError): - message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") - - -class BackupLimitExceeded(QuotaError): - message = _("Maximum number of backups allowed (%(allowed)d) exceeded") - - -class DuplicateSfVolumeNames(Duplicate): - message = _("Detected more than one volume with name %(vol_name)s") - - -class VolumeTypeCreateFailed(CinderException): - message = _("Cannot create volume_type with " - "name %(name)s and specs %(extra_specs)s") - - -class VolumeTypeUpdateFailed(CinderException): - message = _("Cannot update volume_type %(id)s") - - -class UnknownCmd(VolumeDriverException): - message = _("Unknown or unsupported command %(cmd)s") - - -class MalformedResponse(VolumeDriverException): - message = _("Malformed response to command %(cmd)s: %(reason)s") - - -class FailedCmdWithDump(VolumeDriverException): - message = _("Operation failed with status=%(status)s. Full dump: %(data)s") - - -class InvalidConnectorException(VolumeDriverException): - message = _("Connector doesn't have required information: %(missing)s") - - -class GlanceMetadataExists(Invalid): - message = _("Glance metadata cannot be updated, key %(key)s" - " exists for volume id %(volume_id)s") - - -class GlanceMetadataNotFound(NotFound): - message = _("Glance metadata for volume/snapshot %(id)s cannot be found.") - - -class ExportFailure(Invalid): - message = _("Failed to export for volume: %(reason)s") - - -class RemoveExportException(VolumeDriverException): - message = _("Failed to remove export for volume %(volume)s: %(reason)s") - - -class MetadataCreateFailure(Invalid): - message = _("Failed to create metadata for volume: %(reason)s") - - -class MetadataUpdateFailure(Invalid): - message = _("Failed to update metadata for volume: %(reason)s") - - -class MetadataCopyFailure(Invalid): - message = _("Failed to copy metadata to volume: %(reason)s") - - -class InvalidMetadataType(Invalid): - message = _("The type of metadata: %(metadata_type)s for volume/snapshot " - "%(id)s is invalid.") - - -class ImageCopyFailure(Invalid): - message = _("Failed to copy image to volume: %(reason)s") - - -class BackupInvalidCephArgs(BackupDriverException): - message = _("Invalid Ceph args provided for backup rbd operation") - - -class BackupOperationError(Invalid): - message = _("An error has occurred during backup operation") - - -class BackupMetadataUnsupportedVersion(BackupDriverException): - message = _("Unsupported backup metadata version requested") - - -class BackupVerifyUnsupportedDriver(BackupDriverException): - message = _("Unsupported backup verify driver") - - -class VolumeMetadataBackupExists(BackupDriverException): - message = _("Metadata backup already exists for this volume") - - -class BackupRBDOperationFailed(BackupDriverException): - message = _("Backup RBD operation failed") - - -class EncryptedBackupOperationFailed(BackupDriverException): - message = _("Backup operation of an encrypted volume failed.") - - -class BackupNotFound(NotFound): - message = _("Backup %(backup_id)s could not be found.") - - -class BackupFailedToGetVolumeBackend(NotFound): - message = _("Failed to identify 
volume backend.") - - -class InvalidBackup(Invalid): - message = _("Invalid backup: %(reason)s") - - -class SwiftConnectionFailed(BackupDriverException): - message = _("Connection to swift failed: %(reason)s") - - -class TransferNotFound(NotFound): - message = _("Transfer %(transfer_id)s could not be found.") - - -class VolumeMigrationFailed(CinderException): - message = _("Volume migration failed: %(reason)s") - - -class SSHInjectionThreat(CinderException): - message = _("SSH command injection detected: %(command)s") - - -class QoSSpecsExists(Duplicate): - message = _("QoS Specs %(specs_id)s already exists.") - - -class QoSSpecsCreateFailed(CinderException): - message = _("Failed to create qos_specs: " - "%(name)s with specs %(qos_specs)s.") - - -class QoSSpecsUpdateFailed(CinderException): - message = _("Failed to update qos_specs: " - "%(specs_id)s with specs %(qos_specs)s.") - - -class QoSSpecsNotFound(NotFound): - message = _("No such QoS spec %(specs_id)s.") - - -class QoSSpecsAssociateFailed(CinderException): - message = _("Failed to associate qos_specs: " - "%(specs_id)s with type %(type_id)s.") - - -class QoSSpecsDisassociateFailed(CinderException): - message = _("Failed to disassociate qos_specs: " - "%(specs_id)s with type %(type_id)s.") - - -class QoSSpecsKeyNotFound(NotFound): - message = _("QoS spec %(specs_id)s has no spec with " - "key %(specs_key)s.") - - -class InvalidQoSSpecs(Invalid): - message = _("Invalid qos specs: %(reason)s") - - -class QoSSpecsInUse(CinderException): - message = _("QoS Specs %(specs_id)s is still associated with entities.") - - -class KeyManagerError(CinderException): - message = _("key manager error: %(reason)s") - - -class ManageExistingInvalidReference(CinderException): - message = _("Manage existing volume failed due to invalid backend " - "reference %(existing_ref)s: %(reason)s") - - -class ManageExistingAlreadyManaged(CinderException): - message = _("Unable to manage existing volume. " - "Volume %(volume_ref)s already managed.") - - -class InvalidReplicationTarget(Invalid): - message = _("Invalid Replication Target: %(reason)s") - - -class UnableToFailOver(CinderException): - message = _("Unable to failover to replication target:" - "%(reason)s).") - - -class ReplicationError(CinderException): - message = _("Volume %(volume_id)s replication " - "error: %(reason)s") - - -class ReplicationNotFound(NotFound): - message = _("Volume replication for %(volume_id)s " - "could not be found.") - - -class ManageExistingVolumeTypeMismatch(CinderException): - message = _("Manage existing volume failed due to volume type mismatch: " - "%(reason)s") - - -class ExtendVolumeError(CinderException): - message = _("Error extending volume: %(reason)s") - - -class EvaluatorParseException(Exception): - message = _("Error during evaluator parsing: %(reason)s") - - -class LockCreationFailed(CinderException): - message = _('Unable to create lock. 
Coordination backend not started.') - - -class LockingFailed(CinderException): - message = _('Lock acquisition failed.') - - -UnsupportedObjectError = obj_exc.UnsupportedObjectError -OrphanedObjectError = obj_exc.OrphanedObjectError -IncompatibleObjectVersion = obj_exc.IncompatibleObjectVersion -ReadOnlyFieldError = obj_exc.ReadOnlyFieldError -ObjectActionError = obj_exc.ObjectActionError -ObjectFieldInvalid = obj_exc.ObjectFieldInvalid - - -class CappedVersionUnknown(CinderException): - message = _('Unrecoverable Error: Versioned Objects in DB are capped to ' - 'unknown version %(version)s.') - - -class VolumeGroupNotFound(CinderException): - message = _('Unable to find Volume Group: %(vg_name)s') - - -class VolumeGroupCreationFailed(CinderException): - message = _('Failed to create Volume Group: %(vg_name)s') - - -class VolumeDeviceNotFound(CinderException): - message = _('Volume device not found at %(device)s.') - - -# Driver specific exceptions -# Pure Storage -class PureDriverException(VolumeDriverException): - message = _("Pure Storage Cinder driver failure: %(reason)s") - - -# SolidFire -class SolidFireAPIException(VolumeBackendAPIException): - message = _("Bad response from SolidFire API") - - -class SolidFireDriverException(VolumeDriverException): - message = _("SolidFire Cinder Driver exception") - - -class SolidFireAPIDataException(SolidFireAPIException): - message = _("Error in SolidFire API response: data=%(data)s") - - -class SolidFireAccountNotFound(SolidFireDriverException): - message = _("Unable to locate account %(account_name)s on " - "Solidfire device") - - -class SolidFireRetryableException(VolumeBackendAPIException): - message = _("Retryable SolidFire Exception encountered") - - -# HP 3Par -class Invalid3PARDomain(VolumeDriverException): - message = _("Invalid 3PAR Domain: %(err)s") - - -# RemoteFS drivers -class RemoteFSException(VolumeDriverException): - message = _("Unknown RemoteFS exception") - - -class RemoteFSConcurrentRequest(RemoteFSException): - message = _("A concurrent, possibly contradictory, request " - "has been made.") - - -class RemoteFSNoSharesMounted(RemoteFSException): - message = _("No mounted shares found") - - -class RemoteFSNoSuitableShareFound(RemoteFSException): - message = _("There is no share which can host %(volume_size)sG") - - -# NFS driver -class NfsException(RemoteFSException): - message = _("Unknown NFS exception") - - -class NfsNoSharesMounted(RemoteFSNoSharesMounted): - message = _("No mounted NFS shares found") - - -class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): - message = _("There is no share which can host %(volume_size)sG") - - -# Smbfs driver -class SmbfsException(RemoteFSException): - message = _("Unknown SMBFS exception.") - - -class SmbfsNoSharesMounted(RemoteFSNoSharesMounted): - message = _("No mounted SMBFS shares found.") - - -class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): - message = _("There is no share which can host %(volume_size)sG.") - - -# Gluster driver -class GlusterfsException(RemoteFSException): - message = _("Unknown Gluster exception") - - -class GlusterfsNoSharesMounted(RemoteFSNoSharesMounted): - message = _("No mounted Gluster shares found") - - -class GlusterfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): - message = _("There is no share which can host %(volume_size)sG") - - -# Virtuozzo Storage Driver - -class VzStorageException(RemoteFSException): - message = _("Unknown Virtuozzo Storage exception") - - -class VzStorageNoSharesMounted(RemoteFSNoSharesMounted): - 
message = _("No mounted Virtuozzo Storage shares found") - - -class VzStorageNoSuitableShareFound(RemoteFSNoSuitableShareFound): - message = _("There is no share which can host %(volume_size)sG") - - -# Fibre Channel Zone Manager -class ZoneManagerException(CinderException): - message = _("Fibre Channel connection control failure: %(reason)s") - - -class FCZoneDriverException(CinderException): - message = _("Fibre Channel Zone operation failed: %(reason)s") - - -class FCSanLookupServiceException(CinderException): - message = _("Fibre Channel SAN Lookup failure: %(reason)s") - - -class BrocadeZoningCliException(CinderException): - message = _("Brocade Fibre Channel Zoning CLI error: %(reason)s") - - -class BrocadeZoningHttpException(CinderException): - message = _("Brocade Fibre Channel Zoning HTTP error: %(reason)s") - - -class CiscoZoningCliException(CinderException): - message = _("Cisco Fibre Channel Zoning CLI error: %(reason)s") - - -class NetAppDriverException(VolumeDriverException): - message = _("NetApp Cinder Driver exception.") - - -class EMCVnxCLICmdError(VolumeBackendAPIException): - message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s " - "(Return Code: %(rc)s) (Output: %(out)s).") - - -class EMCSPUnavailableException(EMCVnxCLICmdError): - message = _("EMC VNX Cinder Driver SPUnavailableException: %(cmd)s " - "(Return Code: %(rc)s) (Output: %(out)s).") - - -# ConsistencyGroup -class ConsistencyGroupNotFound(NotFound): - message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.") - - -class InvalidConsistencyGroup(Invalid): - message = _("Invalid ConsistencyGroup: %(reason)s") - - -# CgSnapshot -class CgSnapshotNotFound(NotFound): - message = _("CgSnapshot %(cgsnapshot_id)s could not be found.") - - -class InvalidCgSnapshot(Invalid): - message = _("Invalid CgSnapshot: %(reason)s") - - -# Hitachi Block Storage Driver -class HBSDError(CinderException): - message = _("HBSD error occurs.") - - -class HBSDCmdError(HBSDError): - - def __init__(self, message=None, ret=None, err=None): - self.ret = ret - self.stderr = err - - super(HBSDCmdError, self).__init__(message=message) - - -class HBSDBusy(HBSDError): - message = "Device or resource is busy." - - -class HBSDNotFound(NotFound): - message = _("Storage resource could not be found.") - - -class HBSDVolumeIsBusy(VolumeIsBusy): - message = _("Volume %(volume_name)s is busy.") - - -# Datera driver -class DateraAPIException(VolumeBackendAPIException): - message = _("Bad response from Datera API") - - -# Target drivers -class ISCSITargetCreateFailed(CinderException): - message = _("Failed to create iscsi target for volume %(volume_id)s.") - - -class ISCSITargetRemoveFailed(CinderException): - message = _("Failed to remove iscsi target for volume %(volume_id)s.") - - -class ISCSITargetAttachFailed(CinderException): - message = _("Failed to attach iSCSI target for volume %(volume_id)s.") - - -class ISCSITargetDetachFailed(CinderException): - message = _("Failed to detach iSCSI target for volume %(volume_id)s.") - - -class ISCSITargetHelperCommandFailed(CinderException): - message = _("%(error_message)s") - - -# X-IO driver exception. 
-class XIODriverException(VolumeDriverException): - message = _("X-IO Volume Driver exception!") - - -# Violin Memory drivers -class ViolinInvalidBackendConfig(CinderException): - message = _("Volume backend config is invalid: %(reason)s") - - -class ViolinRequestRetryTimeout(CinderException): - message = _("Backend service retry timeout hit: %(timeout)s sec") - - -class ViolinBackendErr(CinderException): - message = _("Backend reports: %(message)s") - - -class ViolinBackendErrExists(CinderException): - message = _("Backend reports: item already exists") - - -class ViolinBackendErrNotFound(CinderException): - message = _("Backend reports: item not found") - - -# ZFSSA NFS driver exception. -class WebDAVClientError(CinderException): - message = _("The WebDAV request failed. Reason: %(msg)s, " - "Return code/reason: %(code)s, Source Volume: %(src)s, " - "Destination Volume: %(dst)s, Method: %(method)s.") - - -# XtremIO Drivers -class XtremIOAlreadyMappedError(CinderException): - message = _("Volume to Initiator Group mapping already exists") - - -class XtremIOArrayBusy(CinderException): - message = _("System is busy, retry operation.") - - -class XtremIOSnapshotsLimitExceeded(CinderException): - message = _("Exceeded the limit of snapshots per volume") - - -# Infortrend EonStor DS Driver -class InfortrendCliException(CinderException): - message = _("Infortrend CLI exception: %(err)s Param: %(param)s " - "(Return Code: %(rc)s) (Output: %(out)s)") - - -# DOTHILL drivers -class DotHillInvalidBackend(CinderException): - message = _("Backend doesn't exist (%(backend)s)") - - -class DotHillConnectionError(CinderException): - message = _("%(message)s") - - -class DotHillAuthenticationError(CinderException): - message = _("%(message)s") - - -class DotHillNotEnoughSpace(CinderException): - message = _("Not enough space on backend (%(backend)s)") - - -class DotHillRequestError(CinderException): - message = _("%(message)s") - - -class DotHillNotTargetPortal(CinderException): - message = _("No active iSCSI portals with supplied iSCSI IPs") - - -# Sheepdog -class SheepdogError(VolumeBackendAPIException): - message = _("An error has occured in SheepdogDriver. 
(Reason: %(reason)s)") - - -class SheepdogCmdError(SheepdogError): - message = _("(Command: %(cmd)s) " - "(Return Code: %(exit_code)s) " - "(Stdout: %(stdout)s) " - "(Stderr: %(stderr)s)") - - -class MetadataAbsent(CinderException): - message = _("There is no metadata in DB object.") - - -class NotSupportedOperation(Invalid): - message = _("Operation not supported: %(operation)s.") - code = 405 - - -# Hitachi HNAS drivers -class HNASConnError(CinderException): - message = _("%(message)s") - - -# Coho drivers -class CohoException(VolumeDriverException): - message = _("Coho Data Cinder driver failure: %(message)s") - - -# Tegile Storage drivers -class TegileAPIException(VolumeBackendAPIException): - message = _("Unexpected response from Tegile IntelliFlash API") - - -# NexentaStor driver exception -class NexentaException(VolumeDriverException): - message = _("%(message)s") - - -# Google Cloud Storage(GCS) backup driver -class GCSConnectionFailure(BackupDriverException): - message = _("Google Cloud Storage connection failure: %(reason)s") - - -class GCSApiFailure(BackupDriverException): - message = _("Google Cloud Storage api failure: %(reason)s") - - -class GCSOAuth2Failure(BackupDriverException): - message = _("Google Cloud Storage oauth2 failure: %(reason)s") - - -# Kaminario K2 -class KaminarioCinderDriverException(VolumeDriverException): - message = _("KaminarioCinderDriver failure: %(reason)s") - - -class KaminarioRetryableException(VolumeDriverException): - message = _("Kaminario retryable exception: %(reason)s") diff --git a/deployment_scripts/puppet/modules/kaminario/files/exception.sh b/deployment_scripts/puppet/modules/kaminario/files/exception.sh new file mode 100644 index 0000000..a5993e6 --- /dev/null +++ b/deployment_scripts/puppet/modules/kaminario/files/exception.sh @@ -0,0 +1,2 @@ +grep -q -F 'Kaminario' /usr/lib/python2.7/dist-packages/cinder/exception.py || sudo sed -i '$a \ \ \n\nclass KaminarioCinderDriverException(VolumeDriverException):\n\ \message = _("KaminarioCinderDriver failure: %(reason)s")\n\n\nclass KaminarioRetryableException(VolumeDriverException):\n\ \message = _("Kaminario retryable exception: %(reason)s")' /usr/lib/python2.7/dist-packages/cinder/exception.py + diff --git a/deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py b/deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py deleted file mode 100644 index 9e79b80..0000000 --- a/deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py +++ /dev/null @@ -1,1155 +0,0 @@ -# Copyright (c) 2016 by Kaminario Technologies, Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Volume driver for Kaminario K2 all-flash arrays.""" - -import math -import re -import threading -import time - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import units -from oslo_utils import versionutils -import requests -import six - -import cinder -from cinder import exception -from cinder.i18n import _, _LE, _LW, _LI -from cinder import objects -from cinder.objects import fields -from cinder import utils -from cinder.volume.drivers.san import san -from cinder.volume import utils as vol_utils - -krest = importutils.try_import("krest") - -K2_MIN_VERSION = '2.2.0' -K2_LOCK_NAME = 'Kaminario' -MAX_K2_RETRY = 5 -K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER -LOG = logging.getLogger(__name__) - -kaminario1_opts = [ - cfg.StrOpt('kaminario_nodedup_substring', - default='K2-nodedup', - help="If volume-type name contains this substring " - "nodedup volume will be created, otherwise " - "dedup volume wil be created.", - deprecated_for_removal=True, - deprecated_reason="This option is deprecated in favour of " - "'kaminario:thin_prov_type' in extra-specs " - "and will be removed in the next release.")] -kaminario2_opts = [ - cfg.BoolOpt('auto_calc_max_oversubscription_ratio', - default=False, - help="K2 driver will calculate max_oversubscription_ratio " - "on setting this option as True.")] - -CONF = cfg.CONF -CONF.register_opts(kaminario1_opts) - -K2HTTPError = requests.exceptions.HTTPError -K2_RETRY_ERRORS = ("MC_ERR_BUSY", "MC_ERR_BUSY_SPECIFIC", - "MC_ERR_INPROGRESS", "MC_ERR_START_TIMEOUT") - -if krest: - class KrestWrap(krest.EndPoint): - def __init__(self, *args, **kwargs): - self.krestlock = threading.Lock() - super(KrestWrap, self).__init__(*args, **kwargs) - - def _should_retry(self, err_code, err_msg): - if err_code == 400: - for er in K2_RETRY_ERRORS: - if er in err_msg: - LOG.debug("Retry ERROR: %d with status %s", - err_code, err_msg) - return True - return False - - @utils.retry(exception.KaminarioRetryableException, - retries=MAX_K2_RETRY) - def _request(self, method, *args, **kwargs): - try: - LOG.debug("running through the _request wrapper...") - self.krestlock.acquire() - return super(KrestWrap, self)._request(method, - *args, **kwargs) - except K2HTTPError as err: - err_code = err.response.status_code - err_msg = err.response.text - if self._should_retry(err_code, err_msg): - raise exception.KaminarioRetryableException( - reason=six.text_type(err_msg)) - raise - finally: - self.krestlock.release() - - -def kaminario_logger(func): - """Return a function wrapper. - - The wrapper adds log for entry and exit to the function. 
- """ - def func_wrapper(*args, **kwargs): - LOG.debug('Entering %(function)s of %(class)s with arguments: ' - ' %(args)s, %(kwargs)s', - {'class': args[0].__class__.__name__, - 'function': func.__name__, - 'args': args[1:], - 'kwargs': kwargs}) - ret = func(*args, **kwargs) - LOG.debug('Exiting %(function)s of %(class)s ' - 'having return value: %(ret)s', - {'class': args[0].__class__.__name__, - 'function': func.__name__, - 'ret': ret}) - return ret - return func_wrapper - - -class Replication(object): - def __init__(self, config, *args, **kwargs): - self.backend_id = config.get('backend_id') - self.login = config.get('login') - self.password = config.get('password') - self.rpo = config.get('rpo') - - -class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): - VENDOR = "Kaminario" - stats = {} - - def __init__(self, *args, **kwargs): - super(KaminarioCinderDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(san.san_opts) - self.configuration.append_config_values(kaminario2_opts) - self.replica = None - self._protocol = None - - def check_for_setup_error(self): - if krest is None: - msg = _("Unable to import 'krest' python module.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - else: - conf = self.configuration - self.client = KrestWrap(conf.san_ip, - conf.san_login, - conf.san_password, - ssl_validate=False) - if self.replica: - self.target = KrestWrap(self.replica.backend_id, - self.replica.login, - self.replica.password, - ssl_validate=False) - v_rs = self.client.search("system/state") - if hasattr(v_rs, 'hits') and v_rs.total != 0: - ver = v_rs.hits[0].rest_api_version - ver_exist = versionutils.convert_version_to_int(ver) - ver_min = versionutils.convert_version_to_int(K2_MIN_VERSION) - if ver_exist < ver_min: - msg = _("K2 rest api version should be " - ">= %s.") % K2_MIN_VERSION - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - - else: - msg = _("K2 rest api version search failed.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - - @kaminario_logger - def _check_ops(self): - """Ensure that the options we care about are set.""" - required_ops = ['san_ip', 'san_login', 'san_password'] - for attr in required_ops: - if not getattr(self.configuration, attr, None): - raise exception.InvalidInput(reason=_('%s is not set.') % attr) - - replica = self.configuration.safe_get('replication_device') - if replica and isinstance(replica, list): - replica_ops = ['backend_id', 'login', 'password', 'rpo'] - for attr in replica_ops: - if attr not in replica[0]: - msg = _('replication_device %s is not set.') % attr - raise exception.InvalidInput(reason=msg) - self.replica = Replication(replica[0]) - - @kaminario_logger - def do_setup(self, context): - super(KaminarioCinderDriver, self).do_setup(context) - self._check_ops() - - @kaminario_logger - def create_volume(self, volume): - """Volume creation in K2 needs a volume group. 
- - - create a volume group - - create a volume in the volume group - """ - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - prov_type = self._get_is_dedup(volume.get('volume_type')) - try: - LOG.debug("Creating volume group with name: %(name)s, " - "quota: unlimited and dedup_support: %(dedup)s", - {'name': vg_name, 'dedup': prov_type}) - - vg = self.client.new("volume_groups", name=vg_name, quota=0, - is_dedup=prov_type).save() - LOG.debug("Creating volume with name: %(name)s, size: %(size)s " - "GB, volume_group: %(vg)s", - {'name': vol_name, 'size': volume.size, 'vg': vg_name}) - vol = self.client.new("volumes", name=vol_name, - size=volume.size * units.Mi, - volume_group=vg).save() - except Exception as ex: - vg_rs = self.client.search("volume_groups", name=vg_name) - if vg_rs.total != 0: - LOG.debug("Deleting vg: %s for failed volume in K2.", vg_name) - vg_rs.hits[0].delete() - LOG.exception(_LE("Creation of volume %s failed."), vol_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - if self._get_is_replica(volume.volume_type) and self.replica: - self._create_volume_replica(volume, vg, vol, self.replica.rpo) - - @kaminario_logger - def _create_volume_replica(self, volume, vg, vol, rpo): - """Volume replica creation in K2 needs session and remote volume. - - - create a session - - create a volume in the volume group - - """ - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - - rvg_name = self.get_rep_name(vg.name) - rvol_name = self.get_rep_name(vol.name) - - k2peer_rs = self.client.search("replication/peer_k2arrays", - mgmt_host=self.replica.backend_id) - if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: - k2peer = k2peer_rs.hits[0] - else: - msg = _("Unable to find K2peer in source K2:") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - try: - LOG.debug("Creating source session with name: %(sname)s and " - " target session name: %(tname)s", - {'sname': session_name, 'tname': rsession_name}) - src_ssn = self.client.new("replication/sessions") - src_ssn.replication_peer_k2array = k2peer - src_ssn.auto_configure_peer_volumes = "False" - src_ssn.local_volume_group = vg - src_ssn.replication_peer_volume_group_name = rvg_name - src_ssn.remote_replication_session_name = rsession_name - src_ssn.name = session_name - src_ssn.rpo = rpo - src_ssn.save() - LOG.debug("Creating remote volume with name: %s", - rvol_name) - self.client.new("replication/peer_volumes", - local_volume=vol, - name=rvol_name, - replication_session=src_ssn).save() - src_ssn.state = "in_sync" - src_ssn.save() - except Exception as ex: - LOG.exception(_LE("Replication for the volume %s has " - "failed."), vol.name) - self._delete_by_ref(self.client, "replication/sessions", - session_name, 'session') - self._delete_by_ref(self.target, "replication/sessions", - rsession_name, 'remote session') - self._delete_by_ref(self.target, "volumes", - rvol_name, 'remote volume') - self._delete_by_ref(self.client, "volumes", vol.name, "volume") - self._delete_by_ref(self.target, "volume_groups", - rvg_name, "remote vg") - self._delete_by_ref(self.client, "volume_groups", vg.name, "vg") - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def _create_failover_volume_replica(self, volume, vg_name, vol_name): - """Volume replica creation in K2 needs session and remote volume. 
- - - create a session - - create a volume in the volume group - - """ - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - - rvg_name = self.get_rep_name(vg_name) - rvol_name = self.get_rep_name(vol_name) - rvg = self.target.search("volume_groups", name=rvg_name).hits[0] - rvol = self.target.search("volumes", name=rvol_name).hits[0] - k2peer_rs = self.target.search("replication/peer_k2arrays", - mgmt_host=self.configuration.san_ip) - if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: - k2peer = k2peer_rs.hits[0] - else: - msg = _("Unable to find K2peer in source K2:") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - try: - LOG.debug("Creating source session with name: %(sname)s and " - " target session name: %(tname)s", - {'sname': rsession_name, 'tname': session_name}) - tgt_ssn = self.target.new("replication/sessions") - tgt_ssn.replication_peer_k2array = k2peer - tgt_ssn.auto_configure_peer_volumes = "False" - tgt_ssn.local_volume_group = rvg - tgt_ssn.replication_peer_volume_group_name = vg_name - tgt_ssn.remote_replication_session_name = session_name - tgt_ssn.name = rsession_name - tgt_ssn.rpo = self.replica.rpo - tgt_ssn.save() - LOG.debug("Creating remote volume with name: %s", - rvol_name) - self.target.new("replication/peer_volumes", - local_volume=rvol, - name=vol_name, - replication_session=tgt_ssn).save() - tgt_ssn.state = "in_sync" - tgt_ssn.save() - except Exception as ex: - LOG.exception(_LE("Replication for the volume %s has " - "failed."), rvol_name) - self._delete_by_ref(self.target, "replication/sessions", - rsession_name, 'session') - self._delete_by_ref(self.client, "replication/sessions", - session_name, 'remote session') - self._delete_by_ref(self.client, "volumes", vol_name, "volume") - self._delete_by_ref(self.client, "volume_groups", vg_name, "vg") - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - def _delete_by_ref(self, device, url, name, msg): - rs = device.search(url, name=name) - for result in rs.hits: - result.delete() - LOG.debug("Deleting %(msg)s: %(name)s", {'msg': msg, 'name': name}) - - @kaminario_logger - def _failover_volume(self, volume): - """Promoting a secondary volume to primary volume.""" - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - tgt_ssn = self.target.search("replication/sessions", - name=rsession_name).hits[0] - if tgt_ssn.state == 'in_sync': - tgt_ssn.state = 'failed_over' - tgt_ssn.save() - LOG.debug("The target session: %s state is " - "changed to failed_over ", rsession_name) - - @kaminario_logger - def failover_host(self, context, volumes, secondary_id=None): - """Failover to replication target.""" - volume_updates = [] - back_end_ip = None - svc_host = vol_utils.extract_host(self.host, 'backend') - service = objects.Service.get_by_args(context, svc_host, - 'cinder-volume') - - if secondary_id and secondary_id != self.replica.backend_id: - LOG.error(_LE("Kaminario driver received failover_host " - "request, But backend is non replicated device")) - raise exception.UnableToFailOver(reason=_("Failover requested " - "on non replicated " - "backend.")) - - if (service.active_backend_id and - service.active_backend_id != self.configuration.san_ip): - self.snap_updates = [] - rep_volumes = [] - # update status for non-replicated primary volumes - for v in volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - 
if v.replication_status != K2_REP_FAILED_OVER and vol.total: - status = 'available' - if v.volume_attachment: - map_rs = self.client.search("mappings", - volume=vol.hits[0]) - status = 'in-use' - if map_rs.total: - map_rs.hits[0].delete() - volume_updates.append({'volume_id': v['id'], - 'updates': - {'status': status}}) - else: - rep_volumes.append(v) - - # In-sync from secondaray array to primary array - for v in rep_volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - rvol_name = self.get_rep_name(vol_name) - rvol = self.target.search("volumes", name=rvol_name) - session_name = self.get_session_name(v['id']) - rsession_name = self.get_rep_name(session_name) - ssn = self.target.search("replication/sessions", - name=rsession_name) - if ssn.total: - tgt_ssn = ssn.hits[0] - ssn = self.client.search("replication/sessions", - name=session_name) - if ssn.total: - src_ssn = ssn.hits[0] - - if (tgt_ssn.state == 'failed_over' and - tgt_ssn.current_role == 'target' and vol.total and src_ssn): - map_rs = self.client.search("mappings", volume=vol.hits[0]) - if map_rs.total: - map_rs.hits[0].delete() - tgt_ssn.state = 'in_sync' - tgt_ssn.save() - self._check_for_status(src_ssn, 'in_sync') - if (rvol.total and src_ssn.state == 'in_sync' and - src_ssn.current_role == 'target'): - gen_no = self._create_volume_replica_user_snap(self.target, - tgt_ssn) - self.snap_updates.append({'tgt_ssn': tgt_ssn, - 'gno': gen_no, - 'stime': time.time()}) - LOG.debug("The target session: %s state is " - "changed to in sync", rsession_name) - - self._is_user_snap_sync_finished() - - # Delete secondary volume mappings and create snapshot - for v in rep_volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - rvol_name = self.get_rep_name(vol_name) - rvol = self.target.search("volumes", name=rvol_name) - session_name = self.get_session_name(v['id']) - rsession_name = self.get_rep_name(session_name) - ssn = self.target.search("replication/sessions", - name=rsession_name) - if ssn.total: - tgt_ssn = ssn.hits[0] - ssn = self.client.search("replication/sessions", - name=session_name) - if ssn.total: - src_ssn = ssn.hits[0] - if (rvol.total and src_ssn.state == 'in_sync' and - src_ssn.current_role == 'target'): - map_rs = self.target.search("mappings", - volume=rvol.hits[0]) - if map_rs.total: - map_rs.hits[0].delete() - gen_no = self._create_volume_replica_user_snap(self.target, - tgt_ssn) - self.snap_updates.append({'tgt_ssn': tgt_ssn, - 'gno': gen_no, - 'stime': time.time()}) - self._is_user_snap_sync_finished() - # changing source sessions to failed-over - for v in rep_volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - rvol_name = self.get_rep_name(vol_name) - rvol = self.target.search("volumes", name=rvol_name) - session_name = self.get_session_name(v['id']) - rsession_name = self.get_rep_name(session_name) - ssn = self.target.search("replication/sessions", - name=rsession_name) - if ssn.total: - tgt_ssn = ssn.hits[0] - ssn = self.client.search("replication/sessions", - name=session_name) - if ssn.total: - src_ssn = ssn.hits[0] - if (rvol.total and src_ssn.state == 'in_sync' and - src_ssn.current_role == 'target'): - src_ssn.state = 'failed_over' - src_ssn.save() - self._check_for_status(tgt_ssn, 'suspended') - LOG.debug("The target session: %s state is " - "changed to failed over", session_name) - - src_ssn.state = 'in_sync' - src_ssn.save() - LOG.debug("The target 
session: %s state is " - "changed to in sync", session_name) - rep_status = fields.ReplicationStatus.DISABLED - volume_updates.append({'volume_id': v['id'], - 'updates': - {'replication_status': rep_status}}) - - back_end_ip = self.configuration.san_ip - else: - """Failover to replication target.""" - for v in volumes: - vol_name = self.get_volume_name(v['id']) - rv = self.get_rep_name(vol_name) - if self.target.search("volumes", name=rv).total: - self._failover_volume(v) - volume_updates.append( - {'volume_id': v['id'], - 'updates': - {'replication_status': K2_REP_FAILED_OVER}}) - else: - volume_updates.append({'volume_id': v['id'], - 'updates': {'status': 'error', }}) - back_end_ip = self.replica.backend_id - return back_end_ip, volume_updates - - def _create_volume_replica_user_snap(self, k2, sess): - snap = k2.new("snapshots") - snap.is_application_consistent = "False" - snap.replication_session = sess - snap.save() - return snap.generation_number - - def _is_user_snap_sync_finished(self): - # waiting for user snapshot to be synced - while len(self.snap_updates) > 0: - for l in self.snap_updates: - sess = l.get('tgt_ssn') - gno = l.get('gno') - stime = l.get('stime') - sess.refresh() - if (sess.generation_number == gno and - sess.current_snapshot_progress == 100 - and sess.current_snapshot_id is None): - if time.time() - stime > 300: - gen_no = self._create_volume_replica_user_snap( - self.target, - sess) - self.snap_updates.append({'tgt_ssn': sess, - 'gno': gen_no, - 'stime': time.time()}) - self.snap_updates.remove(l) - eventlet.sleep(1) - - @kaminario_logger - def create_volume_from_snapshot(self, volume, snapshot): - """Create volume from snapshot. - - - search for snapshot and retention_policy - - create a view from snapshot and attach view - - create a volume and attach volume - - copy data from attached view to attached volume - - detach volume and view and finally delete view - """ - snap_name = self.get_snap_name(snapshot.id) - view_name = self.get_view_name(volume.id) - vol_name = self.get_volume_name(volume.id) - cview = src_attach_info = dest_attach_info = None - rpolicy = self.get_policy() - properties = utils.brick_get_connector_properties() - LOG.debug("Searching for snapshot: %s in K2.", snap_name) - snap_rs = self.client.search("snapshots", short_name=snap_name) - if hasattr(snap_rs, 'hits') and snap_rs.total != 0: - snap = snap_rs.hits[0] - LOG.debug("Creating a view: %(view)s from snapshot: %(snap)s", - {'view': view_name, 'snap': snap_name}) - try: - cview = self.client.new("snapshots", - short_name=view_name, - source=snap, retention_policy=rpolicy, - is_exposable=True).save() - except Exception as ex: - LOG.exception(_LE("Creating a view: %(view)s from snapshot: " - "%(snap)s failed"), {"view": view_name, - "snap": snap_name}) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - else: - msg = _("Snapshot: %s search failed in K2.") % snap_name - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - - try: - conn = self.initialize_connection(cview, properties) - src_attach_info = self._connect_device(conn) - self.create_volume(volume) - conn = self.initialize_connection(volume, properties) - dest_attach_info = self._connect_device(conn) - vol_utils.copy_volume(src_attach_info['device']['path'], - dest_attach_info['device']['path'], - snapshot.volume.size * units.Ki, - self.configuration.volume_dd_blocksize, - sparse=True) - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - 
self.terminate_connection(volume, properties) - self.terminate_connection(cview, properties) - cview.delete() - except Exception as ex: - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - self.terminate_connection(cview, properties) - self.terminate_connection(volume, properties) - cview.delete() - self.delete_volume(volume) - LOG.exception(_LE("Copy to volume: %(vol)s from view: %(view)s " - "failed"), {"vol": vol_name, "view": view_name}) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def create_cloned_volume(self, volume, src_vref): - """Create a clone from source volume. - - - attach source volume - - create and attach new volume - - copy data from attached source volume to attached new volume - - detach both volumes - """ - clone_name = self.get_volume_name(volume.id) - src_name = self.get_volume_name(src_vref.id) - src_vol = self.client.search("volumes", name=src_name) - src_map = self.client.search("mappings", volume=src_vol) - src_attach_info = dest_attach_info = None - if src_map.total != 0: - msg = _("K2 driver does not support clone of a attached volume. " - "To get this done, create a snapshot from the attached " - "volume and then create a volume from the snapshot.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - try: - properties = utils.brick_get_connector_properties() - conn = self.initialize_connection(src_vref, properties) - src_attach_info = self._connect_device(conn) - self.create_volume(volume) - conn = self.initialize_connection(volume, properties) - dest_attach_info = self._connect_device(conn) - vol_utils.copy_volume(src_attach_info['device']['path'], - dest_attach_info['device']['path'], - src_vref.size * units.Ki, - self.configuration.volume_dd_blocksize, - sparse=True) - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - self.terminate_connection(volume, properties) - self.terminate_connection(src_vref, properties) - except Exception as ex: - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - self.terminate_connection(src_vref, properties) - self.terminate_connection(volume, properties) - self.delete_volume(volume) - LOG.exception(_LE("Create a clone: %s failed."), clone_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def delete_volume(self, volume): - """Volume in K2 exists in a volume group. 
- - - delete the volume - - delete the corresponding volume group - """ - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - try: - if self._get_is_replica(volume.volume_type) and self.replica: - self._delete_volume_replica(volume, vg_name, vol_name) - - LOG.debug("Searching and deleting volume: %s in K2.", vol_name) - vol_rs = self.client.search("volumes", name=vol_name) - if vol_rs.total != 0: - vol_rs.hits[0].delete() - LOG.debug("Searching and deleting vg: %s in K2.", vg_name) - vg_rs = self.client.search("volume_groups", name=vg_name) - if vg_rs.total != 0: - vg_rs.hits[0].delete() - except Exception as ex: - LOG.exception(_LE("Deletion of volume %s failed."), vol_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def _delete_volume_replica(self, volume, vg_name, vol_name): - rvg_name = self.get_rep_name(vg_name) - rvol_name = self.get_rep_name(vol_name) - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - src_ssn = self.client.search('replication/sessions', - name=session_name).hits[0] - tgt_ssn = self.target.search('replication/sessions', - name=rsession_name).hits[0] - src_ssn.state = 'suspended' - src_ssn.save() - self._check_for_status(tgt_ssn, 'suspended') - src_ssn.state = 'idle' - src_ssn.save() - self._check_for_status(tgt_ssn, 'idle') - tgt_ssn.delete() - src_ssn.delete() - - LOG.debug("Searching and deleting snapshots for volume groups:" - "%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name}) - vg = self.client.search('volume_groups', name=vg_name).hits - rvg = self.target.search('volume_groups', name=rvg_name).hits - snaps = self.client.search('snapshots', volume_group=vg).hits - for s in snaps: - s.delete() - rsnaps = self.target.search('snapshots', volume_group=rvg).hits - for s in rsnaps: - s.delete() - - self._delete_by_ref(self.target, "volumes", rvol_name, 'remote volume') - self._delete_by_ref(self.target, "volume_groups", - rvg_name, "remote vg") - - @kaminario_logger - def _delete_failover_volume_replica(self, volume, vg_name, vol_name): - rvg_name = self.get_rep_name(vg_name) - rvol_name = self.get_rep_name(vol_name) - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - tgt_ssn = self.target.search('replication/sessions', - name=rsession_name).hits[0] - tgt_ssn.state = 'idle' - tgt_ssn.save() - tgt_ssn.delete() - - LOG.debug("Searching and deleting snapshots for target volume group " - "and target volume: %(vol)s, %(vg)s in K2.", - {'vol': rvol_name, 'vg': rvg_name}) - rvg = self.target.search('volume_groups', name=rvg_name).hits - rsnaps = self.target.search('snapshots', volume_group=rvg).hits - for s in rsnaps: - s.delete() - - @kaminario_logger - def _check_for_status(self, obj, status): - while obj.state != status: - obj.refresh() - eventlet.sleep(1) - - @kaminario_logger - def get_volume_stats(self, refresh=False): - if refresh: - self.update_volume_stats() - stats = self.stats - stats['storage_protocol'] = self._protocol - stats['driver_version'] = self.VERSION - stats['vendor_name'] = self.VENDOR - backend_name = self.configuration.safe_get('volume_backend_name') - stats['volume_backend_name'] = (backend_name or - self.__class__.__name__) - return stats - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - @kaminario_logger - 
def create_snapshot(self, snapshot): - """Create a snapshot from a volume_group.""" - vg_name = self.get_volume_group_name(snapshot.volume_id) - snap_name = self.get_snap_name(snapshot.id) - rpolicy = self.get_policy() - try: - LOG.debug("Searching volume_group: %s in K2.", vg_name) - vg = self.client.search("volume_groups", name=vg_name).hits[0] - LOG.debug("Creating a snapshot: %(snap)s from vg: %(vg)s", - {'snap': snap_name, 'vg': vg_name}) - self.client.new("snapshots", short_name=snap_name, - source=vg, retention_policy=rpolicy, - is_auto_deleteable=False).save() - except Exception as ex: - LOG.exception(_LE("Creation of snapshot: %s failed."), snap_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - snap_name = self.get_snap_name(snapshot.id) - try: - LOG.debug("Searching and deleting snapshot: %s in K2.", snap_name) - snap_rs = self.client.search("snapshots", short_name=snap_name) - if snap_rs.total != 0: - snap_rs.hits[0].delete() - except Exception as ex: - LOG.exception(_LE("Deletion of snapshot: %s failed."), snap_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def extend_volume(self, volume, new_size): - """Extend volume.""" - vol_name = self.get_volume_name(volume.id) - try: - LOG.debug("Searching volume: %s in K2.", vol_name) - vol = self.client.search("volumes", name=vol_name).hits[0] - vol.size = new_size * units.Mi - LOG.debug("Extending volume: %s in K2.", vol_name) - vol.save() - except Exception as ex: - LOG.exception(_LE("Extending volume: %s failed."), vol_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def update_volume_stats(self): - conf = self.configuration - LOG.debug("Searching system capacity in K2.") - cap = self.client.search("system/capacity").hits[0] - LOG.debug("Searching total volumes in K2 for updating stats.") - total_volumes = self.client.search("volumes").total - 1 - provisioned_vol = cap.provisioned_volumes - if (conf.auto_calc_max_oversubscription_ratio and cap.provisioned - and (cap.total - cap.free) != 0): - ratio = provisioned_vol / float(cap.total - cap.free) - else: - ratio = conf.max_over_subscription_ratio - self.stats = {'QoS_support': False, - 'free_capacity_gb': cap.free / units.Mi, - 'total_capacity_gb': cap.total / units.Mi, - 'thin_provisioning_support': True, - 'sparse_copy_volume': True, - 'total_volumes': total_volumes, - 'thick_provisioning_support': False, - 'provisioned_capacity_gb': provisioned_vol / units.Mi, - 'max_oversubscription_ratio': ratio, - 'kaminario:thin_prov_type': 'dedup/nodedup', - 'replication_enabled': True, - 'kaminario:replication': True} - - @kaminario_logger - def get_initiator_host_name(self, connector): - """Return the initiator host name. - - Valid characters: 0-9, a-z, A-Z, '-', '_' - All other characters are replaced with '_'. 
- Total characters in initiator host name: 32 - """ - return re.sub('[^0-9a-zA-Z-_]', '_', connector.get('host', ''))[:32] - - @kaminario_logger - def get_volume_group_name(self, vid): - """Return the volume group name.""" - return "cvg-{0}".format(vid) - - @kaminario_logger - def get_volume_name(self, vid): - """Return the volume name.""" - return "cv-{0}".format(vid) - - @kaminario_logger - def get_session_name(self, vid): - """Return the volume name.""" - return "ssn-{0}".format(vid) - - @kaminario_logger - def get_snap_name(self, sid): - """Return the snapshot name.""" - return "cs-{0}".format(sid) - - @kaminario_logger - def get_view_name(self, vid): - """Return the view name.""" - return "cview-{0}".format(vid) - - @kaminario_logger - def get_rep_name(self, name): - """Return the corresponding replication names.""" - return "r{0}".format(name) - - @kaminario_logger - def _delete_host_by_name(self, name): - """Deleting host by name.""" - host_rs = self.client.search("hosts", name=name) - if hasattr(host_rs, "hits") and host_rs.total != 0: - host = host_rs.hits[0] - host.delete() - - @kaminario_logger - def get_policy(self): - """Return the retention policy.""" - try: - LOG.debug("Searching for retention_policy in K2.") - return self.client.search("retention_policies", - name="Best_Effort_Retention").hits[0] - except Exception as ex: - LOG.exception(_LE("Retention policy search failed in K2.")) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - - @kaminario_logger - def _get_volume_object(self, volume): - vol_name = self.get_volume_name(volume.id) - if volume.replication_status == K2_REP_FAILED_OVER: - vol_name = self.get_rep_name(vol_name) - LOG.debug("Searching volume : %s in K2.", vol_name) - vol_rs = self.client.search("volumes", name=vol_name) - if not hasattr(vol_rs, 'hits') or vol_rs.total == 0: - msg = _("Unable to find volume: %s from K2.") % vol_name - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - return vol_rs.hits[0] - - @kaminario_logger - def _get_lun_number(self, vol, host): - volsnap = None - LOG.debug("Searching volsnaps in K2.") - volsnap_rs = self.client.search("volsnaps", snapshot=vol) - if hasattr(volsnap_rs, 'hits') and volsnap_rs.total != 0: - volsnap = volsnap_rs.hits[0] - - LOG.debug("Searching mapping of volsnap in K2.") - map_rs = self.client.search("mappings", volume=volsnap, host=host) - return map_rs.hits[0].lun - - def initialize_connection(self, volume, connector): - pass - - @kaminario_logger - def terminate_connection(self, volume, connector): - """Terminate connection of volume from host.""" - # Get volume object - if type(volume).__name__ != 'RestObject': - vol_name = self.get_volume_name(volume.id) - if volume.replication_status == K2_REP_FAILED_OVER: - vol_name = self.get_rep_name(vol_name) - LOG.debug("Searching volume: %s in K2.", vol_name) - volume_rs = self.client.search("volumes", name=vol_name) - if hasattr(volume_rs, "hits") and volume_rs.total != 0: - volume = volume_rs.hits[0] - else: - vol_name = volume.name - - # Get host object. 
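The helpers above define the complete K2 naming scheme: `cvg-`/`cv-`/`ssn-`/`cs-`/`cview-` prefixes keyed by the Cinder UUID, plus an `r` prefix for the replication-side twins. `get_initiator_host_name` additionally squeezes the connector's host name into K2's 32-character `[0-9a-zA-Z-_]` alphabet. Run standalone (the sample host name is made up), the sanitization behaves like this:

```python
import re

def get_initiator_host_name(connector):
    # Replace anything outside 0-9, a-z, A-Z, '-', '_' and cap at 32 chars.
    return re.sub('[^0-9a-zA-Z-_]', '_', connector.get('host', ''))[:32]

print(get_initiator_host_name({'host': 'compute-01.example.com'}))
# -> compute-01_example_com
```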
- host_name = self.get_initiator_host_name(connector) - host_rs = self.client.search("hosts", name=host_name) - if hasattr(host_rs, "hits") and host_rs.total != 0 and volume: - host = host_rs.hits[0] - LOG.debug("Searching and deleting mapping of volume: %(name)s to " - "host: %(host)s", {'host': host_name, 'name': vol_name}) - map_rs = self.client.search("mappings", volume=volume, host=host) - if hasattr(map_rs, "hits") and map_rs.total != 0: - map_rs.hits[0].delete() - if self.client.search("mappings", host=host).total == 0: - LOG.debug("Deleting initiator hostname: %s in K2.", host_name) - host.delete() - else: - LOG.warning(_LW("Host: %s not found on K2."), host_name) - - def k2_initialize_connection(self, volume, connector): - # Get volume object. - if type(volume).__name__ != 'RestObject': - vol = self._get_volume_object(volume) - else: - vol = volume - # Get host object. - host, host_rs, host_name = self._get_host_object(connector) - try: - # Map volume object to host object. - LOG.debug("Mapping volume: %(vol)s to host: %(host)s", - {'host': host_name, 'vol': vol.name}) - mapping = self.client.new("mappings", volume=vol, host=host).save() - except Exception as ex: - if host_rs.total == 0: - self._delete_host_by_name(host_name) - LOG.exception(_LE("Unable to map volume: %(vol)s to host: " - "%(host)s"), {'host': host_name, - 'vol': vol.name}) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - # Get lun number. - if type(volume).__name__ == 'RestObject': - return self._get_lun_number(vol, host) - else: - return mapping.lun - - def _get_host_object(self, connector): - pass - - def _get_is_dedup(self, vol_type): - if vol_type: - specs_val = vol_type.get('extra_specs', {}).get( - 'kaminario:thin_prov_type') - if specs_val == 'nodedup': - return False - elif CONF.kaminario_nodedup_substring in vol_type.get('name'): - LOG.info(_LI("'kaminario_nodedup_substring' option is " - "deprecated in favour of 'kaminario:thin_prov_" - "type' in extra-specs and will be removed in " - "the 10.0.0 release.")) - return False - else: - return True - else: - return True - - def _get_is_replica(self, vol_type): - replica = False - if vol_type and vol_type.get('extra_specs'): - specs = vol_type.get('extra_specs') - if (specs.get('kaminario:replication') == 'enabled' and - self.replica): - replica = True - return replica - - def _get_replica_status(self, vg_name): - vg_rs = self.client.search("volume_groups", name=vg_name) - if vg_rs.total: - vg = vg_rs.hits[0] - if self.client.search("replication/sessions", - local_volume_group=vg).total: - return True - return False - - def manage_existing(self, volume, existing_ref): - vol_name = existing_ref['source-name'] - new_name = self.get_volume_name(volume.id) - vg_new_name = self.get_volume_group_name(volume.id) - vg_name = None - is_dedup = self._get_is_dedup(volume.get('volume_type')) - try: - LOG.debug("Searching volume: %s in K2.", vol_name) - vol = self.client.search("volumes", name=vol_name).hits[0] - vg = vol.volume_group - vg_replica = self._get_replica_status(vg.name) - vol_map = False - if self.client.search("mappings", volume=vol).total != 0: - vol_map = True - if is_dedup != vg.is_dedup or vg_replica or vol_map: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_('Manage volume type invalid.')) - vol.name = new_name - vg_name = vg.name - LOG.debug("Manage new volume name: %s", new_name) - vg.name = vg_new_name - LOG.debug("Manage volume group name: %s", vg_new_name) - vg.save() 
- LOG.debug("Manage volume: %s in K2.", vol_name) - vol.save() - except Exception as ex: - vg_rs = self.client.search("volume_groups", name=vg_new_name) - if hasattr(vg_rs, 'hits') and vg_rs.total != 0: - vg = vg_rs.hits[0] - if vg_name and vg.name == vg_new_name: - vg.name = vg_name - LOG.debug("Updating vg new name to old name: %s ", vg_name) - vg.save() - LOG.exception(_LE("manage volume: %s failed."), vol_name) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=six.text_type(ex.message)) - - def manage_existing_get_size(self, volume, existing_ref): - vol_name = existing_ref['source-name'] - v_rs = self.client.search("volumes", name=vol_name) - if hasattr(v_rs, 'hits') and v_rs.total != 0: - vol = v_rs.hits[0] - size = vol.size / units.Mi - return math.ceil(size) - else: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_('Unable to get size of manage volume.')) - - def after_volume_copy(self, ctxt, volume, new_volume, remote=None): - self.delete_volume(volume) - vg_name_old = self.get_volume_group_name(volume.id) - vol_name_old = self.get_volume_name(volume.id) - vg_name_new = self.get_volume_group_name(new_volume.id) - vol_name_new = self.get_volume_name(new_volume.id) - vg_new = self.client.search("volume_groups", name=vg_name_new).hits[0] - vg_new.name = vg_name_old - vg_new.save() - vol_new = self.client.search("volumes", name=vol_name_new).hits[0] - vol_new.name = vol_name_old - vol_new.save() - - def retype(self, ctxt, volume, new_type, diff, host): - old_type = volume.get('volume_type') - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - vol_rs = self.client.search("volumes", name=vol_name) - if vol_rs.total: - vol = vol_rs.hits[0] - vmap = self.client.search("mappings", volume=vol).total - old_rep_type = self._get_replica_status(vg_name) - new_rep_type = self._get_is_replica(new_type) - new_prov_type = self._get_is_dedup(new_type) - old_prov_type = self._get_is_dedup(old_type) - # Change dedup<->nodedup with add/remove replication is complex in K2 - # since K2 does not have api to change dedup<->nodedup. 
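Because K2 has no API for switching a volume group between dedup and nodedup, the branch below can only toggle replication in place; a provisioning change always needs a migration. Condensed into a plain decision function (a sketch of the logic, not the driver's code), it reads:

```python
def can_retype_in_place(old_dedup, new_dedup, old_replicated, new_replicated):
    """Return True when a K2 retype needs no data movement."""
    if old_dedup != new_dedup:
        # dedup <-> nodedup cannot be changed on the array; migrate instead.
        return False
    # With provisioning unchanged, replication can be added or removed
    # in place; anything else needs '--migration-policy on-demand'.
    return old_replicated != new_replicated
```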
- if new_prov_type == old_prov_type: - if not old_rep_type and new_rep_type: - self._add_replication(volume) - return True - elif old_rep_type and not new_rep_type: - self._delete_replication(volume) - return True - elif not new_rep_type and not old_rep_type: - msg = ("Use '--migration-policy on-demand' to change 'dedup " - "without replication'<->'nodedup without replication'.") - if vol_rs.total and vmap: - msg = "Unattach volume and {0}".format(msg) - LOG.debug(msg) - return False - else: - LOG.error(_LE('Change from type1: %(type1)s to type2: %(type2)s ' - 'is not supported directly in K2.'), - {'type1': old_type, 'type2': new_type}) - return False - - def _add_replication(self, volume): - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - if volume.replication_status == K2_REP_FAILED_OVER: - self._create_failover_volume_replica(volume, vg_name, vol_name) - else: - LOG.debug("Searching volume group with name: %(name)s", - {'name': vg_name}) - vg = self.client.search("volume_groups", name=vg_name).hits[0] - LOG.debug("Searching volume with name: %(name)s", - {'name': vol_name}) - vol = self.client.search("volumes", name=vol_name).hits[0] - self._create_volume_replica(volume, vg, vol, self.replica.rpo) - - def _delete_replication(self, volume): - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - if volume.replication_status == K2_REP_FAILED_OVER: - self._delete_failover_volume_replica(volume, vg_name, vol_name) - else: - self._delete_volume_replica(volume, vg_name, vol_name) - - def _kaminario_disconnect_volume(self, *attach_info): - for info in attach_info: - if (info and info.get('connector') and - info.get('conn', {}).get('data') and info.get('device')): - info['connector'].disconnect_volume(info['conn']['data'], - info['device']) diff --git a/deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py b/deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py deleted file mode 100644 index 202be92..0000000 --- a/deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2016 by Kaminario Technologies, Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Volume driver for Kaminario K2 all-flash arrays.""" -import six - -from oslo_log import log as logging - -from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE -from cinder.objects import fields -from cinder.volume.drivers.kaminario import kaminario_common as common -from cinder.zonemanager import utils as fczm_utils - -K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER -LOG = logging.getLogger(__name__) -kaminario_logger = common.kaminario_logger - - -class KaminarioFCDriver(common.KaminarioCinderDriver): - """Kaminario K2 FC Volume Driver. 
- - Version history: - 1.0.2.0 - Initial driver - """ - - VERSION = '1.0.2.0' - - # ThirdPartySystems wiki page name - CI_WIKI_NAME = "Kaminario_K2_CI" - - @kaminario_logger - def __init__(self, *args, **kwargs): - super(KaminarioFCDriver, self).__init__(*args, **kwargs) - self._protocol = 'FC' - self.lookup_service = fczm_utils.create_lookup_service() - - @fczm_utils.AddFCZone - @kaminario_logger - @utils.synchronized(common.K2_LOCK_NAME, external=True) - def initialize_connection(self, volume, connector): - """Attach K2 volume to host.""" - # Check wwpns in host connector. - if not connector.get('wwpns'): - msg = _("No wwpns found in host connector.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - # Get target wwpns. - target_wwpns = self.get_target_info(volume) - # Map volume. - lun = self.k2_initialize_connection(volume, connector) - # Create initiator-target mapping. - target_wwpns, init_target_map = self._build_initiator_target_map( - connector, target_wwpns) - # To support replication failback - if temp_client: - self.client = temp_client - # Return target volume information. - return {'driver_volume_type': 'fibre_channel', - 'data': {"target_discovered": True, - "target_lun": lun, - "target_wwn": target_wwpns, - "initiator_target_map": init_target_map}} - - @fczm_utils.RemoveFCZone - @kaminario_logger - @utils.synchronized(common.K2_LOCK_NAME, external=True) - def terminate_connection(self, volume, connector, **kwargs): - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - super(KaminarioFCDriver, self).terminate_connection(volume, connector) - properties = {"driver_volume_type": "fibre_channel", "data": {}} - host_name = self.get_initiator_host_name(connector) - host_rs = self.client.search("hosts", name=host_name) - # In terminate_connection, host_entry is deleted if host - # is not attached to any volume - if host_rs.total == 0: - # Get target wwpns. 
- target_wwpns = self.get_target_info(volume) - target_wwpns, init_target_map = self._build_initiator_target_map( - connector, target_wwpns) - properties["data"] = {"target_wwn": target_wwpns, - "initiator_target_map": init_target_map} - # To support replication failback - if temp_client: - self.client = temp_client - return properties - - @kaminario_logger - def get_target_info(self, volume): - LOG.debug("Searching target wwpns in K2.") - fc_ports_rs = self.client.search("system/fc_ports") - target_wwpns = [] - if hasattr(fc_ports_rs, 'hits') and fc_ports_rs.total != 0: - for port in fc_ports_rs.hits: - if port.pwwn: - target_wwpns.append((port.pwwn).replace(':', '')) - if not target_wwpns: - msg = _("Unable to get FC target wwpns from K2.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - return target_wwpns - - @kaminario_logger - def _get_host_object(self, connector): - host_name = self.get_initiator_host_name(connector) - LOG.debug("Searching initiator hostname: %s in K2.", host_name) - host_rs = self.client.search("hosts", name=host_name) - host_wwpns = connector['wwpns'] - if host_rs.total == 0: - try: - LOG.debug("Creating initiator hostname: %s in K2.", host_name) - host = self.client.new("hosts", name=host_name, - type="Linux").save() - except Exception as ex: - LOG.exception(_LE("Unable to create host : %s in K2."), - host_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - else: - # Use existing host. - LOG.debug("Use existing initiator hostname: %s in K2.", host_name) - host = host_rs.hits[0] - # Adding host wwpn. - for wwpn in host_wwpns: - wwpn = ":".join([wwpn[i:i + 2] for i in range(0, len(wwpn), 2)]) - if self.client.search("host_fc_ports", pwwn=wwpn, - host=host).total == 0: - LOG.debug("Adding wwpn: %(wwpn)s to host: " - "%(host)s in K2.", {'wwpn': wwpn, - 'host': host_name}) - try: - self.client.new("host_fc_ports", pwwn=wwpn, - host=host).save() - except Exception as ex: - if host_rs.total == 0: - self._delete_host_by_name(host_name) - LOG.exception(_LE("Unable to add wwpn : %(wwpn)s to " - "host: %(host)s in K2."), - {'wwpn': wwpn, 'host': host_name}) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - return host, host_rs, host_name - - @kaminario_logger - def _build_initiator_target_map(self, connector, all_target_wwns): - """Build the target_wwns and the initiator target map.""" - target_wwns = [] - init_targ_map = {} - - if self.lookup_service is not None: - # use FC san lookup. 
- dev_map = self.lookup_service.get_device_mapping_from_network( - connector.get('wwpns'), - all_target_wwns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - target_wwns = list(set(target_wwns)) - else: - initiator_wwns = connector.get('wwpns', []) - target_wwns = all_target_wwns - - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return target_wwns, init_targ_map diff --git a/deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py b/deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py deleted file mode 100644 index dae1634..0000000 --- a/deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) 2016 by Kaminario Technologies, Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Volume driver for Kaminario K2 all-flash arrays.""" -import six - -from oslo_log import log as logging - -from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE -from cinder.objects import fields -from cinder.volume.drivers.kaminario import kaminario_common as common - -ISCSI_TCP_PORT = "3260" -K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER -LOG = logging.getLogger(__name__) -kaminario_logger = common.kaminario_logger - - -class KaminarioISCSIDriver(common.KaminarioCinderDriver): - """Kaminario K2 iSCSI Volume Driver. - - Version history: - 1.0.2.0 - Initial driver - """ - - VERSION = '1.0.2.0' - - # ThirdPartySystems wiki page name - CI_WIKI_NAME = "Kaminario_K2_CI" - - @kaminario_logger - def __init__(self, *args, **kwargs): - super(KaminarioISCSIDriver, self).__init__(*args, **kwargs) - self._protocol = 'iSCSI' - - @kaminario_logger - @utils.synchronized(common.K2_LOCK_NAME, external=True) - def initialize_connection(self, volume, connector): - """Attach K2 volume to host.""" - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - # Get target_portal and target iqn. - iscsi_portal, target_iqn = self.get_target_info(volume) - # Map volume. - lun = self.k2_initialize_connection(volume, connector) - # To support replication failback - if temp_client: - self.client = temp_client - # Return target volume information. 
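When no FC SAN lookup service is configured, `_build_initiator_target_map` above falls back to flat zoning: every initiator WWPN is mapped to every target WWPN. A minimal illustration of that fallback (the WWPNs are made-up examples):

```python
def build_flat_map(initiator_wwpns, target_wwpns):
    # Fallback zoning: each initiator sees all targets.
    return {initiator: list(target_wwpns) for initiator in initiator_wwpns}

print(build_flat_map(['10000090fa0d6754'],
                     ['50024f4053021b0b', '50024f4053021b0c']))
# -> {'10000090fa0d6754': ['50024f4053021b0b', '50024f4053021b0c']}
```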
- return {"driver_volume_type": "iscsi", - "data": {"target_iqn": target_iqn, - "target_portal": iscsi_portal, - "target_lun": lun, - "target_discovered": True}} - - @kaminario_logger - @utils.synchronized(common.K2_LOCK_NAME, external=True) - def terminate_connection(self, volume, connector, **kwargs): - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - super(KaminarioISCSIDriver, self).terminate_connection(volume, - connector) - # To support replication failback - if temp_client: - self.client = temp_client - - @kaminario_logger - def get_target_info(self, volume): - LOG.debug("Searching first iscsi port ip without wan in K2.") - iscsi_ip_rs = self.client.search("system/net_ips", wan_port="") - iscsi_ip = target_iqn = None - if hasattr(iscsi_ip_rs, 'hits') and iscsi_ip_rs.total != 0: - iscsi_ip = iscsi_ip_rs.hits[0].ip_address - if not iscsi_ip: - msg = _("Unable to get ISCSI IP address from K2.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - iscsi_portal = "{0}:{1}".format(iscsi_ip, ISCSI_TCP_PORT) - LOG.debug("Searching system state for target iqn in K2.") - sys_state_rs = self.client.search("system/state") - - if hasattr(sys_state_rs, 'hits') and sys_state_rs.total != 0: - target_iqn = sys_state_rs.hits[0].iscsi_qualified_target_name - - if not target_iqn: - msg = _("Unable to get target iqn from K2.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - return iscsi_portal, target_iqn - - @kaminario_logger - def _get_host_object(self, connector): - host_name = self.get_initiator_host_name(connector) - LOG.debug("Searching initiator hostname: %s in K2.", host_name) - host_rs = self.client.search("hosts", name=host_name) - """Create a host if not exists.""" - if host_rs.total == 0: - try: - LOG.debug("Creating initiator hostname: %s in K2.", host_name) - host = self.client.new("hosts", name=host_name, - type="Linux").save() - LOG.debug("Adding iqn: %(iqn)s to host: %(host)s in K2.", - {'iqn': connector['initiator'], 'host': host_name}) - iqn = self.client.new("host_iqns", iqn=connector['initiator'], - host=host) - iqn.save() - except Exception as ex: - self._delete_host_by_name(host_name) - LOG.exception(_LE("Unable to create host: %s in K2."), - host_name) - raise exception.KaminarioCinderDriverException( - reason=six.text_type(ex.message)) - else: - LOG.debug("Use existing initiator hostname: %s in K2.", host_name) - host = host_rs.hits[0] - return host, host_rs, host_name diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp b/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp index 3b9022c..ceb35e0 100644 --- a/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp +++ b/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp @@ -5,7 +5,7 @@ $plugin_settings = hiera('cinder_kaminario') if $plugin_settings['scheduler_default_filters'] != '' { - ini_subsetting {"scheduler_default_filters": + ini_subsetting {'scheduler_default_filters': ensure => present, section => 'DEFAULT', key_val_separator => '=', @@ -18,18 +18,18 @@ $plugin_settings = hiera('cinder_kaminario') if $plugin_settings['scheduler_default_weighers'] != '' { cinder_config { - "DEFAULT/scheduler_default_weighers" : value => $plugin_settings['scheduler_default_weighers']; + 
'DEFAULT/scheduler_default_weighers' : value => $plugin_settings['scheduler_default_weighers'];
       }
     }
 
   if $plugin_settings['rpc_response_timeout'] != ''  {
     cinder_config {
-      "DEFAULT/rpc_response_timeout" : value => $plugin_settings['rpc_response_timeout'];
+      'DEFAULT/rpc_response_timeout' : value => $plugin_settings['rpc_response_timeout'];
       }
     }
 
   cinder_config {
-    "DEFAULT/default_volume_type" : value => $default_volume_type
+    'DEFAULT/default_volume_type' : value => $default_volume_type
   }~> Exec[cinder_api]
 
 exec {'cinder_api':
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp b/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp
index ef583e7..c4150e4 100644
--- a/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp
+++ b/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp
@@ -1,39 +1,35 @@
 class kaminario::driver{
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario':
-  ensure => 'directory',
-  owner => 'root',
-  group => 'root',
-  mode => '0755',}
+$source_directory = '/tmp/openstack-cinder-driver/source/kaminario'
+$target_directory = '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario'
+vcsrepo { '/tmp/openstack-cinder-driver':
+  ensure => present,
+  provider => git,
+  source => 'https://github.com/Kaminario/openstack-cinder-driver.git',
+  user => 'root',
+  revision => 'Mitaka',
+  }
+file {$target_directory:
+  ensure => 'directory',
+  recurse => true,
+  source => "file://${source_directory}",
+  }
 
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/__init__.py':
-  mode => '0644',
-  owner => root,
-  group => root,
-  source => 'puppet:///modules/kaminario/__init__.py'}
+file {'/usr/lib/python2.7/dist-packages/cinder/tests/unit/volume/drivers/':
+  ensure => 'directory',
+  recurse => true,
+  source => 'file:///tmp/openstack-cinder-driver/test',
+  }
 
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_common.py':
-  mode => '0644',
-  owner => root,
-  group => root,
-  source => 'puppet:///modules/kaminario/kaminario_common.py'}
-
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_fc.py':
-  mode => '0644',
-  owner => root,
-  group => root,
-  source => 'puppet:///modules/kaminario/kaminario_fc.py'}
-
-file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_iscsi.py':
-  mode => '0644',
-  owner => root,
-  group => root,
-  source => 'puppet:///modules/kaminario/kaminario_iscsi.py'}
-
-file { '/usr/lib/python2.7/dist-packages/cinder/exception.py':
-  mode => '0644',
-  owner => root,
-  group => root,
-  source => 'puppet:///modules/kaminario/exception.py'}
+file { '/tmp/exception.sh':
+  source => 'puppet:///modules/kaminario/exception.sh',
+  recurse => true,
+  mode => '0744',
+  notify => Exec['modify_exception'],
+  }
+exec { 'modify_exception':
+  command => '/tmp/exception.sh',
+  refreshonly => true,
+  }
 }
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/init.pp b/deployment_scripts/puppet/modules/kaminario/manifests/init.pp
index 23ac189..f7960c1 100644
--- a/deployment_scripts/puppet/modules/kaminario/manifests/init.pp
+++ b/deployment_scripts/puppet/modules/kaminario/manifests/init.pp
@@ -30,9 +30,9 @@ $plugin_settings = hiera('cinder_kaminario')
       num => $value
     }
     $minus1 = inline_template('<%= @value.to_i - 1 %>')
-    if "${minus1}" < '0' {
-
-    } else {
+    if $minus1 < '0' {
+
+    } else {
       recursion { "value-${minus1}":
         value => $minus1,
       }
@@ -44,7 +44,7 @@ $plugin_settings = 
hiera('cinder_kaminario') define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio,$goodness_function) { $sec_name = section_name( $storage_ip , $backend_name ) - $config_file = "/etc/cinder/cinder.conf" + $config_file = '/etc/cinder/cinder.conf' if $cinder_node == hiera(user_node_name) { if $add_backend == true { @@ -56,58 +56,58 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag setting => 'enabled_backends', subsetting => $sec_name, subsetting_separator => ',', - }-> + }-> cinder_config { - "$sec_name/volume_backend_name" : value => $backend_name; - "$sec_name/san_ip" : value => $storage_ip; - "$sec_name/san_login" : value => $storage_user; - "$sec_name/san_password" : value => $storage_password; - } + "${sec_name}/volume_backend_name" : value => $backend_name; + "${sec_name}/san_ip" : value => $storage_ip; + "${sec_name}/san_login" : value => $storage_user; + "${sec_name}/san_password" : value => $storage_password; + } if $storage_protocol == 'FC'{ cinder_config { - "$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver"; + "${sec_name}/volume_driver" : value => 'cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver'; } } elsif $storage_protocol == 'ISCSI'{ cinder_config { - "$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver"; + "${sec_name}/volume_driver" : value => 'cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver'; } } if $enable_replication == true { $replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo) cinder_config { - "$sec_name/replication_device" : value => $replication_device; + "${sec_name}/replication_device" : value => $replication_device; } } - + if $enable_multipath == true { cinder_config { - "$sec_name/use_multipath_for_image_xfer" : value => "True"; - "$sec_name/enforce_multipath_for_image_xfer" : value => "True"; - } + "${sec_name}/use_multipath_for_image_xfer" : value => 'True'; + "${sec_name}/enforce_multipath_for_image_xfer" : value => 'True'; + } } if $suppress_logs == true { cinder_config { - "$sec_name/suppress_requests_ssl_warnings" : value => "True"; + "${sec_name}/suppress_requests_ssl_warnings" : value => 'True'; } } if $filter_function != '' { cinder_config { - "$sec_name/filter_function" : value => $filter_function; + "${sec_name}/filter_function" : value => $filter_function; } } if $goodness_function != '' { cinder_config { - "$sec_name/goodness_function" : value => $goodness_function; - } + "${sec_name}/goodness_function" : value => $goodness_function; } - + } + if $oversubscription_ratio == true { cinder_config { - "$sec_name/auto_calc_max_oversubscription_ratio" : value => "True"; + "${sec_name}/auto_calc_max_oversubscription_ratio" : value => 'True'; } } } diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp b/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp index 6fcb046..cf4b18f 100644 --- a/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp +++ b/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp @@ -5,4 +5,6 @@ package { 'krest': ensure => installed, provider => pip, require => Package['python-pip'],} + package { 'git': + ensure 
=> installed,}
 }
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/type.pp b/deployment_scripts/puppet/modules/kaminario/manifests/type.pp
index 43357f9..9c6c782 100644
--- a/deployment_scripts/puppet/modules/kaminario/manifests/type.pp
+++ b/deployment_scripts/puppet/modules/kaminario/manifests/type.pp
@@ -15,42 +15,42 @@ define recursion(
       type_name => $plugin_settings["type_name_${value}"]
     }
     $minus1 = inline_template('<%= @value.to_i - 1 %>')
-    if "${minus1}" < '0' {
-
-    } else {
+    if $minus1 < '0' {
+
+    } else {
       recursion { "value-${minus1}":
         value => $minus1,
       }
-    }
+    }
   }
 }
 
 define kaminario_type ($create_type,$options,$backend_name,$type_name) {
   if $create_type == true {
     case $options {
-      "enable_replication_type": {
+      'enable_replication_type': {
         cinder_type {$type_name:
           ensure => present,
           properties => ["volume_backend_name=${backend_name}",'kaminario:replication=enabled'],
           }
         }
-      "enable_dedup": {
+      'enable_dedup': {
         cinder_type {$type_name:
           ensure => present,
           properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup'],
           }
         }
-      "replication_dedup": {
+      'replication_dedup': {
         cinder_type {$type_name:
           ensure => present,
           properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:replication=enabled'],
           }
         }
-      "default": {
+      'default': {
         cinder_type {$type_name:
           ensure => present,
           properties => ["volume_backend_name=${backend_name}"],
-          }
+          }
         }
       }
diff --git a/deployment_scripts/puppet/modules/multipath/manifests/init.pp b/deployment_scripts/puppet/modules/multipath/manifests/init.pp
index ac0b41f..bda78a1 100644
--- a/deployment_scripts/puppet/modules/multipath/manifests/init.pp
+++ b/deployment_scripts/puppet/modules/multipath/manifests/init.pp
@@ -1,12 +1,21 @@
 class multipath {
+
+include ::nova::params
+
 $multipath_packages = [ 'sg3-utils', 'multipath-tools' ]
 
 package { $multipath_packages: ensure => 'installed' }
 
 nova_config {
-'libvirt/iscsi_use_multipath' : value => True,
-}~> Exec[cinder_volume]
-
-exec {'cinder_volume':
-  command => '/usr/sbin/service nova-compute restart',}
-
+  'libvirt/iscsi_use_multipath' : value => True,
+}
+
+service { 'nova_compute':
+  ensure     => running,
+  name       => $::nova::params::compute_service_name,
+  enable     => true,
+  hasstatus  => true,
+  hasrestart => true,
+}
+
+Nova_config<||> ~> Service['nova_compute']
 }
diff --git a/deployment_tasks.yaml b/deployment_tasks.yaml
index 803ea95..49eafc7 100644
--- a/deployment_tasks.yaml
+++ b/deployment_tasks.yaml
@@ -41,7 +41,7 @@
     type: puppet
     version: 2.1.0
     groups: [compute]
-    requires: [top-role-compute]
+    requires: [top-role-compute,enable_nova_compute_service]
     required_for: [deploy_end]
     parameters:
       puppet_manifest: puppet/manifests/cinder_multipath.pp