Update fuel plugin repo for Kaminario with latest

Kaminario driver files from github

Change-Id: Ib16114525eed6066c19dfc4b2b28f5e2128eb56f
Co-Authored-By: Chaithanya Kopparthi <chaithanyak@biarca.com>
This commit is contained in:
Pradip Rawat 2016-10-16 11:36:29 +05:30
parent be18f78d2d
commit 9550a4d0e6
15 changed files with 90 additions and 2693 deletions

4
.gitreview Normal file
View File

@ -0,0 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/fuel-plugin-cinder-kaminario.git

View File

@ -1,8 +1,8 @@
notice('MODULAR: cinder_kaminario')
class { 'kaminario::driver': }->
class { 'kaminario::krest': }->
class { 'kaminario::driver': }->
class { 'kaminario::config': }~> Exec[cinder_volume]
exec {'cinder_volume':

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,2 @@
grep -q -F 'Kaminario' /usr/lib/python2.7/dist-packages/cinder/exception.py || sudo sed -i '$a \ \ \n\nclass KaminarioCinderDriverException(VolumeDriverException):\n\ \message = _("KaminarioCinderDriver failure: %(reason)s")\n\n\nclass KaminarioRetryableException(VolumeDriverException):\n\ \message = _("Kaminario retryable exception: %(reason)s")' /usr/lib/python2.7/dist-packages/cinder/exception.py

View File

@ -1,196 +0,0 @@
# Copyright (c) 2016 by Kaminario Technologies, Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for Kaminario K2 all-flash arrays."""
import six
from oslo_log import log as logging
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE
from cinder.objects import fields
from cinder.volume.drivers.kaminario import kaminario_common as common
from cinder.zonemanager import utils as fczm_utils
K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
LOG = logging.getLogger(__name__)
kaminario_logger = common.kaminario_logger
class KaminarioFCDriver(common.KaminarioCinderDriver):
    """Kaminario K2 FC Volume Driver.

    Fibre Channel transport layer on top of the shared Kaminario driver
    logic in kaminario_common; adds FC zone management (via the
    AddFCZone/RemoveFCZone decorators) and wwpn-based host objects on
    the K2 array.

    Version history:
    1.0.2.0 - Initial driver
    """
    VERSION = '1.0.2.0'
    # ThirdPartySystems wiki page name
    CI_WIKI_NAME = "Kaminario_K2_CI"

    @kaminario_logger
    def __init__(self, *args, **kwargs):
        super(KaminarioFCDriver, self).__init__(*args, **kwargs)
        self._protocol = 'FC'
        # FC SAN lookup service; may be None when no zone manager is
        # configured, in which case _build_initiator_target_map falls
        # back to mapping every initiator to every target.
        self.lookup_service = fczm_utils.create_lookup_service()

    # Decorator order matters: AddFCZone must be outermost so zoning is
    # applied to the fully-built connection info returned below.
    @fczm_utils.AddFCZone
    @kaminario_logger
    @utils.synchronized(common.K2_LOCK_NAME, external=True)
    def initialize_connection(self, volume, connector):
        """Attach K2 volume to host.

        :param volume: cinder volume object to attach
        :param connector: host connector dict; must contain 'wwpns'
        :returns: fibre_channel connection info dict with target lun,
                  target wwns and the initiator-target map
        :raises: KaminarioCinderDriverException if the connector has no
                 wwpns or target wwpns cannot be read from the array
        """
        # Check wwpns in host connector.
        if not connector.get('wwpns'):
            msg = _("No wwpns found in host connector.")
            LOG.error(msg)
            raise exception.KaminarioCinderDriverException(reason=msg)
        # To support replication failback: when the volume has failed
        # over, temporarily point self.client at the replication target
        # array for the duration of this call, then restore it.
        temp_client = None
        if (hasattr(volume, 'replication_status') and
                volume.replication_status == K2_REP_FAILED_OVER):
            temp_client = self.client
            self.client = self.target
        # Get target wwpns.
        target_wwpns = self.get_target_info(volume)
        # Map volume.
        lun = self.k2_initialize_connection(volume, connector)
        # Create initiator-target mapping.
        target_wwpns, init_target_map = self._build_initiator_target_map(
            connector, target_wwpns)
        # To support replication failback
        if temp_client:
            self.client = temp_client
        # Return target volume information.
        return {'driver_volume_type': 'fibre_channel',
                'data': {"target_discovered": True,
                         "target_lun": lun,
                         "target_wwn": target_wwpns,
                         "initiator_target_map": init_target_map}}

    # RemoveFCZone outermost: zones are torn down only after the host
    # mapping has been removed on the array.
    @fczm_utils.RemoveFCZone
    @kaminario_logger
    @utils.synchronized(common.K2_LOCK_NAME, external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        """Detach K2 volume from host and return unzoning info.

        Returns an empty data dict while the host still has other
        volumes attached; once the host entry is gone from the array,
        returns the target wwns and initiator-target map so the zone
        manager can remove the zones.
        """
        # To support replication failback (see initialize_connection).
        temp_client = None
        if (hasattr(volume, 'replication_status') and
                volume.replication_status == K2_REP_FAILED_OVER):
            temp_client = self.client
            self.client = self.target
        super(KaminarioFCDriver, self).terminate_connection(volume, connector)
        properties = {"driver_volume_type": "fibre_channel", "data": {}}
        host_name = self.get_initiator_host_name(connector)
        host_rs = self.client.search("hosts", name=host_name)
        # In terminate_connection, host_entry is deleted if host
        # is not attached to any volume
        if host_rs.total == 0:
            # Get target wwpns.
            target_wwpns = self.get_target_info(volume)
            target_wwpns, init_target_map = self._build_initiator_target_map(
                connector, target_wwpns)
            properties["data"] = {"target_wwn": target_wwpns,
                                  "initiator_target_map": init_target_map}
        # To support replication failback
        if temp_client:
            self.client = temp_client
        return properties

    @kaminario_logger
    def get_target_info(self, volume):
        """Return the list of FC target wwpns exposed by the K2 array.

        Wwpns are returned without ':' separators, as expected by the
        FC connection info format.

        :raises: KaminarioCinderDriverException when no wwpns are found
        """
        LOG.debug("Searching target wwpns in K2.")
        fc_ports_rs = self.client.search("system/fc_ports")
        target_wwpns = []
        if hasattr(fc_ports_rs, 'hits') and fc_ports_rs.total != 0:
            for port in fc_ports_rs.hits:
                if port.pwwn:
                    target_wwpns.append((port.pwwn).replace(':', ''))
        if not target_wwpns:
            msg = _("Unable to get FC target wwpns from K2.")
            LOG.error(msg)
            raise exception.KaminarioCinderDriverException(reason=msg)
        return target_wwpns

    @kaminario_logger
    def _get_host_object(self, connector):
        """Find or create the K2 host object for this connector.

        Ensures every wwpn in the connector is registered as a
        host_fc_port on the host.

        :returns: tuple (host object, original host search result,
                  host name); host_rs.total == 0 indicates the host was
                  created by this call.
        """
        host_name = self.get_initiator_host_name(connector)
        LOG.debug("Searching initiator hostname: %s in K2.", host_name)
        host_rs = self.client.search("hosts", name=host_name)
        host_wwpns = connector['wwpns']
        if host_rs.total == 0:
            try:
                LOG.debug("Creating initiator hostname: %s in K2.", host_name)
                host = self.client.new("hosts", name=host_name,
                                       type="Linux").save()
            except Exception as ex:
                LOG.exception(_LE("Unable to create host : %s in K2."),
                              host_name)
                raise exception.KaminarioCinderDriverException(
                    reason=six.text_type(ex.message))
        else:
            # Use existing host.
            LOG.debug("Use existing initiator hostname: %s in K2.", host_name)
            host = host_rs.hits[0]
        # Adding host wwpn. Connector wwpns are plain hex; K2 stores
        # them colon-separated every two characters, so convert before
        # searching/creating.
        for wwpn in host_wwpns:
            wwpn = ":".join([wwpn[i:i + 2] for i in range(0, len(wwpn), 2)])
            if self.client.search("host_fc_ports", pwwn=wwpn,
                                  host=host).total == 0:
                LOG.debug("Adding wwpn: %(wwpn)s to host: "
                          "%(host)s in K2.", {'wwpn': wwpn,
                                              'host': host_name})
                try:
                    self.client.new("host_fc_ports", pwwn=wwpn,
                                    host=host).save()
                except Exception as ex:
                    # Roll back the host entry only if it was created
                    # by this call (pre-existing hosts are left alone).
                    if host_rs.total == 0:
                        self._delete_host_by_name(host_name)
                    LOG.exception(_LE("Unable to add wwpn : %(wwpn)s to "
                                      "host: %(host)s in K2."),
                                  {'wwpn': wwpn, 'host': host_name})
                    raise exception.KaminarioCinderDriverException(
                        reason=six.text_type(ex.message))
        return host, host_rs, host_name

    @kaminario_logger
    def _build_initiator_target_map(self, connector, all_target_wwns):
        """Build the target_wwns and the initiator target map."""
        target_wwns = []
        init_targ_map = {}
        if self.lookup_service is not None:
            # use FC san lookup: map each initiator only to targets on
            # the same fabric, de-duplicating as we go.
            dev_map = self.lookup_service.get_device_mapping_from_network(
                connector.get('wwpns'),
                all_target_wwns)
            for fabric_name in dev_map:
                fabric = dev_map[fabric_name]
                target_wwns += fabric['target_port_wwn_list']
                for initiator in fabric['initiator_port_wwn_list']:
                    if initiator not in init_targ_map:
                        init_targ_map[initiator] = []
                    init_targ_map[initiator] += fabric['target_port_wwn_list']
                    init_targ_map[initiator] = list(set(
                        init_targ_map[initiator]))
            target_wwns = list(set(target_wwns))
        else:
            # No lookup service: assume full connectivity — every
            # initiator maps to every target wwn.
            initiator_wwns = connector.get('wwpns', [])
            target_wwns = all_target_wwns
            for initiator in initiator_wwns:
                init_targ_map[initiator] = target_wwns
        return target_wwns, init_targ_map

View File

@ -1,137 +0,0 @@
# Copyright (c) 2016 by Kaminario Technologies, Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for Kaminario K2 all-flash arrays."""
import six
from oslo_log import log as logging
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE
from cinder.objects import fields
from cinder.volume.drivers.kaminario import kaminario_common as common
ISCSI_TCP_PORT = "3260"
K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
LOG = logging.getLogger(__name__)
kaminario_logger = common.kaminario_logger
class KaminarioISCSIDriver(common.KaminarioCinderDriver):
    """Kaminario K2 iSCSI Volume Driver.

    iSCSI transport layer on top of the shared Kaminario driver logic
    in kaminario_common; discovers the array's portal IP and target iqn
    and registers iqn-based host objects on the K2 array.

    Version history:
    1.0.2.0 - Initial driver
    """
    VERSION = '1.0.2.0'
    # ThirdPartySystems wiki page name
    CI_WIKI_NAME = "Kaminario_K2_CI"

    @kaminario_logger
    def __init__(self, *args, **kwargs):
        super(KaminarioISCSIDriver, self).__init__(*args, **kwargs)
        self._protocol = 'iSCSI'

    @kaminario_logger
    @utils.synchronized(common.K2_LOCK_NAME, external=True)
    def initialize_connection(self, volume, connector):
        """Attach K2 volume to host.

        :param volume: cinder volume object to attach
        :param connector: host connector dict (must carry 'initiator')
        :returns: iscsi connection info dict with target iqn, portal
                  and lun
        """
        # To support replication failback: when the volume has failed
        # over, temporarily point self.client at the replication target
        # array for the duration of this call, then restore it.
        temp_client = None
        if (hasattr(volume, 'replication_status') and
                volume.replication_status == K2_REP_FAILED_OVER):
            temp_client = self.client
            self.client = self.target
        # Get target_portal and target iqn.
        iscsi_portal, target_iqn = self.get_target_info(volume)
        # Map volume.
        lun = self.k2_initialize_connection(volume, connector)
        # To support replication failback
        if temp_client:
            self.client = temp_client
        # Return target volume information.
        return {"driver_volume_type": "iscsi",
                "data": {"target_iqn": target_iqn,
                         "target_portal": iscsi_portal,
                         "target_lun": lun,
                         "target_discovered": True}}

    @kaminario_logger
    @utils.synchronized(common.K2_LOCK_NAME, external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        """Detach K2 volume from host (delegates to the common driver)."""
        # To support replication failback (see initialize_connection).
        temp_client = None
        if (hasattr(volume, 'replication_status') and
                volume.replication_status == K2_REP_FAILED_OVER):
            temp_client = self.client
            self.client = self.target
        super(KaminarioISCSIDriver, self).terminate_connection(volume,
                                                               connector)
        # To support replication failback
        if temp_client:
            self.client = temp_client

    @kaminario_logger
    def get_target_info(self, volume):
        """Return (iscsi_portal, target_iqn) for the K2 array.

        The portal is the first iscsi port IP without a WAN port,
        joined with the standard iSCSI TCP port.

        :raises: KaminarioCinderDriverException when no IP or iqn can
                 be read from the array
        """
        LOG.debug("Searching first iscsi port ip without wan in K2.")
        iscsi_ip_rs = self.client.search("system/net_ips", wan_port="")
        iscsi_ip = target_iqn = None
        if hasattr(iscsi_ip_rs, 'hits') and iscsi_ip_rs.total != 0:
            iscsi_ip = iscsi_ip_rs.hits[0].ip_address
        if not iscsi_ip:
            msg = _("Unable to get ISCSI IP address from K2.")
            LOG.error(msg)
            raise exception.KaminarioCinderDriverException(reason=msg)
        iscsi_portal = "{0}:{1}".format(iscsi_ip, ISCSI_TCP_PORT)
        LOG.debug("Searching system state for target iqn in K2.")
        sys_state_rs = self.client.search("system/state")
        if hasattr(sys_state_rs, 'hits') and sys_state_rs.total != 0:
            target_iqn = sys_state_rs.hits[0].iscsi_qualified_target_name
        if not target_iqn:
            msg = _("Unable to get target iqn from K2.")
            LOG.error(msg)
            raise exception.KaminarioCinderDriverException(reason=msg)
        return iscsi_portal, target_iqn

    @kaminario_logger
    def _get_host_object(self, connector):
        """Find or create the K2 host object (with its iqn) for this
        connector.

        :returns: tuple (host object, original host search result,
                  host name); host_rs.total == 0 indicates the host was
                  created by this call.
        """
        host_name = self.get_initiator_host_name(connector)
        LOG.debug("Searching initiator hostname: %s in K2.", host_name)
        host_rs = self.client.search("hosts", name=host_name)
        """Create a host if not exists."""
        if host_rs.total == 0:
            try:
                LOG.debug("Creating initiator hostname: %s in K2.", host_name)
                host = self.client.new("hosts", name=host_name,
                                       type="Linux").save()
                LOG.debug("Adding iqn: %(iqn)s to host: %(host)s in K2.",
                          {'iqn': connector['initiator'], 'host': host_name})
                iqn = self.client.new("host_iqns", iqn=connector['initiator'],
                                      host=host)
                iqn.save()
            except Exception as ex:
                # Best-effort rollback of the freshly created host
                # before surfacing the failure.
                self._delete_host_by_name(host_name)
                LOG.exception(_LE("Unable to create host: %s in K2."),
                              host_name)
                raise exception.KaminarioCinderDriverException(
                    reason=six.text_type(ex.message))
        else:
            LOG.debug("Use existing initiator hostname: %s in K2.", host_name)
            host = host_rs.hits[0]
        return host, host_rs, host_name

View File

@ -5,7 +5,7 @@ $plugin_settings = hiera('cinder_kaminario')
if $plugin_settings['scheduler_default_filters'] != ''
{
ini_subsetting {"scheduler_default_filters":
ini_subsetting {'scheduler_default_filters':
ensure => present,
section => 'DEFAULT',
key_val_separator => '=',
@ -18,18 +18,18 @@ $plugin_settings = hiera('cinder_kaminario')
if $plugin_settings['scheduler_default_weighers'] != ''
{
cinder_config {
"DEFAULT/scheduler_default_weighers" : value => $plugin_settings['scheduler_default_weighers'];
'DEFAULT/scheduler_default_weighers' : value => $plugin_settings['scheduler_default_weighers'];
}
}
if $plugin_settings['rpc_response_timeout'] != ''
{
cinder_config {
"DEFAULT/rpc_response_timeout" : value => $plugin_settings['rpc_response_timeout'];
'DEFAULT/rpc_response_timeout' : value => $plugin_settings['rpc_response_timeout'];
}
}
cinder_config {
"DEFAULT/default_volume_type" : value => $default_volume_type
'DEFAULT/default_volume_type' : value => $default_volume_type
}~> Exec[cinder_api]
exec {'cinder_api':

View File

@ -1,39 +1,35 @@
class kaminario::driver{
file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',}
$source_directory = '/tmp/openstack-cinder-driver/source/kaminario'
$target_directory = '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario'
vcsrepo { '/tmp/openstack-cinder-driver':
ensure => present,
provider => git,
source => 'https://github.com/Kaminario/openstack-cinder-driver.git',
user => 'root',
revision => 'Mitaka',
}
file {$target_directory:
ensure => 'directory',
recurse => true,
source => "file:///${source_directory}",
}
file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/__init__.py':
mode => '0644',
owner => root,
group => root,
source => 'puppet:///modules/kaminario/__init__.py'}
file {'/usr/lib/python2.7/dist-packages/cinder/tests/unit/volume/drivers/':
ensure => 'file',
recurse => true,
source => 'file:///tmp/openstack-cinder-driver/test',
}
file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_common.py':
mode => '0644',
owner => root,
group => root,
source => 'puppet:///modules/kaminario/kaminario_common.py'}
file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_fc.py':
mode => '0644',
owner => root,
group => root,
source => 'puppet:///modules/kaminario/kaminario_fc.py'}
file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_iscsi.py':
mode => '0644',
owner => root,
group => root,
source => 'puppet:///modules/kaminario/kaminario_iscsi.py'}
file { '/usr/lib/python2.7/dist-packages/cinder/exception.py':
mode => '0644',
owner => root,
group => root,
source => 'puppet:///modules/kaminario/exception.py'}
file { '/tmp/exception.sh':
source => 'puppet:///modules/kaminario/exception.sh',
recurse => true,
mode => '0744',
notify => Exec['modify_exception'],
}
exec { 'modify_exception':
command => '/tmp/exception.sh',
refreshonly => true,
}
}

View File

@ -30,9 +30,9 @@ $plugin_settings = hiera('cinder_kaminario')
num => $value
}
$minus1 = inline_template('<%= @value.to_i - 1 %>')
if "${minus1}" < '0' {
} else {
if $minus1 < '0' {
} else {
recursion { "value-${minus1}":
value => $minus1,
}
@ -44,7 +44,7 @@ $plugin_settings = hiera('cinder_kaminario')
define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio,$goodness_function) {
$sec_name = section_name( $storage_ip , $backend_name )
$config_file = "/etc/cinder/cinder.conf"
$config_file = '/etc/cinder/cinder.conf'
if $cinder_node == hiera(user_node_name) {
if $add_backend == true {
@ -56,58 +56,58 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag
setting => 'enabled_backends',
subsetting => $sec_name,
subsetting_separator => ',',
}->
}->
cinder_config {
"$sec_name/volume_backend_name" : value => $backend_name;
"$sec_name/san_ip" : value => $storage_ip;
"$sec_name/san_login" : value => $storage_user;
"$sec_name/san_password" : value => $storage_password;
}
"${sec_name}/volume_backend_name" : value => $backend_name;
"${sec_name}/san_ip" : value => $storage_ip;
"${sec_name}/san_login" : value => $storage_user;
"${sec_name}/san_password" : value => $storage_password;
}
if $storage_protocol == 'FC'{
cinder_config {
"$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver";
"${sec_name}/volume_driver" : value => 'cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver';
}
}
elsif $storage_protocol == 'ISCSI'{
cinder_config {
"$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver";
"${sec_name}/volume_driver" : value => 'cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver';
}
}
if $enable_replication == true {
$replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo)
cinder_config {
"$sec_name/replication_device" : value => $replication_device;
"${sec_name}/replication_device" : value => $replication_device;
}
}
if $enable_multipath == true {
cinder_config {
"$sec_name/use_multipath_for_image_xfer" : value => "True";
"$sec_name/enforce_multipath_for_image_xfer" : value => "True";
}
"${sec_name}/use_multipath_for_image_xfer" : value => 'True';
"${sec_name}/enforce_multipath_for_image_xfer" : value => 'True';
}
}
if $suppress_logs == true {
cinder_config {
"$sec_name/suppress_requests_ssl_warnings" : value => "True";
"${sec_name}/suppress_requests_ssl_warnings" : value => 'True';
}
}
if $filter_function != '' {
cinder_config {
"$sec_name/filter_function" : value => $filter_function;
"${sec_name}/filter_function" : value => $filter_function;
}
}
if $goodness_function != '' {
cinder_config {
"$sec_name/goodness_function" : value => $goodness_function;
}
"${sec_name}/goodness_function" : value => $goodness_function;
}
}
if $oversubscription_ratio == true {
cinder_config {
"$sec_name/auto_calc_max_oversubscription_ratio" : value => "True";
"${sec_name}/auto_calc_max_oversubscription_ratio" : value => 'True';
}
}
}

View File

@ -5,4 +5,6 @@ package { 'krest':
ensure => installed,
provider => pip,
require => Package['python-pip'],}
package { 'git':
ensure => installed,}
}

View File

@ -15,42 +15,42 @@ define recursion(
type_name => $plugin_settings["type_name_${value}"]
}
$minus1 = inline_template('<%= @value.to_i - 1 %>')
if "${minus1}" < '0' {
} else {
if $minus1 < '0' {
} else {
recursion { "value-${minus1}":
value => $minus1,
}
}
}
}
}
define kaminario_type ($create_type,$options,$backend_name,$type_name) {
if $create_type == true {
case $options {
"enable_replication_type": {
'enable_replication_type': {
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}",'kaminario:replication=enabled'],
}
}
"enable_dedup": {
'enable_dedup': {
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup'],
}
}
"replication_dedup": {
'replication_dedup': {
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:replication=enabled'],
}
}
"default": {
'default': {
cinder_type {$type_name:
ensure => present,
properties => ["volume_backend_name=${backend_name}"],
}
}
}
}

View File

@ -1,12 +1,21 @@
class multipath {
include ::nova::params
$multipath_packages = [ 'sg3-utils', 'multipath-tools' ]
package { $multipath_packages: ensure => 'installed' }
nova_config {
'libvirt/iscsi_use_multipath' : value => True,
}~> Exec[cinder_volume]
exec {'cinder_volume':
command => '/usr/sbin/service nova-compute restart',}
'libvirt/iscsi_use_multipath' : value => True,
}
service { 'nova_compute':
ensure => running,
name => $::nova::params::compute_service_name,
enable => true,
hasstatus => true,
hasrestart => true,
}
Nova_config<||> ~> Service['nova-compute']
}

View File

@ -41,7 +41,7 @@
type: puppet
version: 2.1.0
groups: [compute]
requires: [top-role-compute]
requires: [top-role-compute,enable_nova_compute_service]
required_for: [deploy_end]
parameters:
puppet_manifest: puppet/manifests/cinder_multipath.pp