Fix syntax issues reported by puppet-lint

Change-Id: I739186518398928f47fa63b4c687dca8f19e02b9
Author: Andrey Pavlov, 2016-08-05 16:44:55 +03:00
parent a9e89dec39
commit f2138fe7dd
14 changed files with 233 additions and 233 deletions
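
In puppet-lint terms, the changes below are the usual style autocorrections: arrows (=>) aligned to the longest attribute in each resource block, double quotes replaced by single quotes where no interpolation occurs, variables unwrapped from needless string quoting, class names referenced in absolute (::-prefixed) form, and stray whitespace removed. A sweep like this can normally be reproduced with puppet-lint's autofix mode; a minimal sketch, assuming puppet-lint (plus the absolute_classname plugin for the :: changes) is installed, and with a hypothetical manifest path:

    puppet-lint --fix deployment_scripts/puppet/manifests/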

View File

@@ -16,14 +16,14 @@ if $scaleio['metadata']['enabled'] {
     true    => $scaleio['gateway_ip'],
     default => hiera('management_vip')
   }
-  class {'scaleio_openstack::cinder':
-    ensure => present,
-    gateway_user => $::gateway_user,
-    gateway_password => $scaleio['password'],
-    gateway_ip => $gateway_ip,
-    gateway_port => $::gateway_port,
-    protection_domains => $scaleio['protection_domain'],
-    storage_pools => $::storage_pools,
-    provisioning_type => $provisioning_type,
+  class {'::scaleio_openstack::cinder':
+    ensure             => present,
+    gateway_user       => $::gateway_user,
+    gateway_password   => $scaleio['password'],
+    gateway_ip         => $gateway_ip,
+    gateway_port       => $::gateway_port,
+    protection_domains => $scaleio['protection_domain'],
+    storage_pools      => $::storage_pools,
+    provisioning_type  => $provisioning_type,
   }
 }

View File

@@ -6,12 +6,12 @@ define mdm_standby() {
   $ip = $title
   notify {"Configure Standby MDM ${ip}": } ->
   scaleio::mdm {"Standby MDM ${ip}":
-    ensure => 'present',
-    ensure_properties => 'present',
-    sio_name => $ip,
-    role => 'manager',
-    ips => $ip,
-    management_ips => $ip,
+    ensure            => 'present',
+    ensure_properties => 'present',
+    sio_name          => $ip,
+    role              => 'manager',
+    ips               => $ip,
+    management_ips    => $ip,
   }
 }
@@ -19,12 +19,12 @@ define mdm_tb() {
   $ip = $title
   notify {"Configure Tie-Breaker MDM ${ip}": } ->
   scaleio::mdm {"Tie-Breaker MDM ${ip}":
-    ensure => 'present',
-    ensure_properties => 'present',
-    sio_name => $ip,
-    role => 'tb',
-    ips => $ip,
-    management_ips => undef,
+    ensure            => 'present',
+    ensure_properties => 'present',
+    sio_name          => $ip,
+    role              => 'tb',
+    ips               => $ip,
+    management_ips    => undef,
   }
 }
@@ -37,7 +37,7 @@ define storage_pool_ensure(
   $rmcache_passthrough_pools,
   $rmcache_cached_pools,
 ) {
-  $parsed_pool_name = split($title, ":")
+  $parsed_pool_name = split($title, ':')
   $protection_domain = $parsed_pool_name[0]
   $sp_name = $parsed_pool_name[1]
   if $::scaleio_storage_pools and $::scaleio_storage_pools != '' {
@@ -77,7 +77,7 @@ define storage_pool_ensure(
     }
   } else {
     notify {"Skip storage pool ${sp_name} because it is already exists in ${::scaleio_storage_pools}": }
-  }
+  }
 }
 define protection_domain_ensure(
@@ -116,7 +116,7 @@ define sds_ensure(
 ) {
   $sds_name = $title
   $protection_domain = $sds_to_pd_map[$sds_name]
-  $sds_node_ = filter_nodes($sds_nodes, 'name', $sds_name)
+  $sds_node_ = filter_nodes($sds_nodes, 'name', $sds_name)
   $sds_node = $sds_node_[0]
   #ips for data path traffic
   $storage_ips = $sds_node['storage_address']
@@ -128,7 +128,7 @@ define sds_ensure(
     fail("TODO: behaviour changed: address becomes comma-separated list ${storage_ips} or ${mgmt_ips}, so it is needed to add the generation of ip roles")
   }
   if $mgmt_ips == $storage_ips {
-    $sds_ips = "${storage_ips}"
+    $sds_ips = $storage_ips
     $sds_ip_roles = 'all'
   }
   else {
@@ -166,30 +166,30 @@ define sds_ensure(
   }
   notify { "sds ${sds_name}: pools:devices:rfcache: '${sds_pools}': '${sds_device}': '${sds_rfcache_devices}'": } ->
   scaleio::sds {$sds_name:
-    ensure => 'present',
-    sio_name => $sds_name,
-    protection_domain => $protection_domain,
-    ips => $sds_ips,
-    ip_roles => $sds_ip_roles,
-    storage_pools => $sds_pools,
-    device_paths => $sds_device,
-    rfcache_devices => $sds_rfcache_devices,
+    ensure            => 'present',
+    sio_name          => $sds_name,
+    protection_domain => $protection_domain,
+    ips               => $sds_ips,
+    ip_roles          => $sds_ip_roles,
+    storage_pools     => $sds_pools,
+    device_paths      => $sds_device,
+    rfcache_devices   => $sds_rfcache_devices,
   }
 }
 define cleanup_sdc () {
   $sdc_ip = $title
   scaleio::sdc {"Remove SDC ${sdc_ip}":
-    ensure => 'absent',
-    ip => $sdc_ip,
+    ensure => 'absent',
+    ip     => $sdc_ip,
   }
 }
 define cleanup_sds () {
   $sds_name = $title
   scaleio::sds {"Remove SDS ${sds_name}":
-    ensure => 'absent',
-    sio_name => $sds_name,
+    ensure   => 'absent',
+    sio_name => $sds_name,
   }
 }
@@ -205,11 +205,11 @@ if $scaleio['metadata']['enabled'] {
   $use_plugin_roles = $scaleio['enable_sds_role']
   if ! $use_plugin_roles {
     $storage_nodes = filter_nodes($all_nodes, 'role', 'compute')
-    if $scaleio['sds_on_controller'] {
-      $controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
+    if $scaleio['sds_on_controller'] {
+      $controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
       $pr_controller_nodes = filter_nodes($all_nodes, 'role', 'primary-controller')
       $sds_nodes = concat(concat($pr_controller_nodes, $controller_nodes), $storage_nodes)
-    } else {
+    } else {
       $sds_nodes = $storage_nodes
     }
   } else {
@@ -217,7 +217,7 @@ if $scaleio['metadata']['enabled'] {
   }
   $sds_nodes_names = keys(nodes_to_hash($sds_nodes, 'name', 'internal_address'))
   $sds_nodes_count = count($sds_nodes_names)
-  $sdc_nodes =concat(filter_nodes($all_nodes, 'role', 'compute'), filter_nodes($all_nodes, 'role', 'cinder'))
+  $sdc_nodes = concat(filter_nodes($all_nodes, 'role', 'compute'), filter_nodes($all_nodes, 'role', 'cinder'))
   $sdc_nodes_ips = values(nodes_to_hash($sdc_nodes, 'name', 'internal_address'))
   $mdm_ip_array = split($::managers_ips, ',')
   $tb_ip_array = split($::tb_ips, ',')
@@ -235,13 +235,13 @@ if $scaleio['metadata']['enabled'] {
   $standby_ips = delete($mdm_ip_array, $mdm_ip_array[0])
   if $mdm_count < 3 or $tb_count == 1 {
     $cluster_mode = 3
-    $slave_names = join(values_at($standby_ips, "0-0"), ',')
-    $tb_names = join(values_at($tb_ip_array, "0-0"), ',')
+    $slave_names = join(values_at($standby_ips, '0-0'), ',')
+    $tb_names = join(values_at($tb_ip_array, '0-0'), ',')
   } else {
     $cluster_mode = 5
     # incase of switch 3 to 5 nodes add only standby mdm/tb
-    $to_add_slaves = difference(values_at($standby_ips, "0-1"), intersection(values_at($standby_ips, "0-1"), split($::scaleio_mdm_ips, ',')))
-    $to_add_tb = difference(values_at($tb_ip_array, "0-1"), intersection(values_at($tb_ip_array, "0-1"), split($::scaleio_tb_ips, ',')))
+    $to_add_slaves = difference(values_at($standby_ips, '0-1'), intersection(values_at($standby_ips, '0-1'), split($::scaleio_mdm_ips, ',')))
+    $to_add_tb = difference(values_at($tb_ip_array, '0-1'), intersection(values_at($tb_ip_array, '0-1'), split($::scaleio_tb_ips, ',')))
     $slave_names = join($to_add_slaves, ',')
     $tb_names = join($to_add_tb, ',')
   }
@@ -250,7 +250,7 @@ if $scaleio['metadata']['enabled'] {
   # parse config from centralized DB if exists
   if $::scaleio_sds_config and $::scaleio_sds_config != '' {
     $sds_devices_config = parsejson($::scaleio_sds_config)
-  }
+  }
   else {
     $sds_devices_config = undef
   }
@@ -262,7 +262,7 @@ if $scaleio['metadata']['enabled'] {
   }
   if $scaleio['storage_pools'] and $scaleio['storage_pools'] != '' {
     # if storage pools come from settings remove probable trailing commas
-    $pools_array = split($scaleio['storage_pools'], ',')
+    $pools_array = split($scaleio['storage_pools'], ',')
     $pools = join($pools_array, ',')
   } else {
     $pools_array = get_pools_from_sds_config($sds_devices_config)
@@ -365,11 +365,11 @@ if $scaleio['metadata']['enabled'] {
     } ->
     mdm_tb{$tb_ip_array:} ->
     scaleio::cluster {'Configure cluster mode':
-      ensure => 'present',
-      cluster_mode => $cluster_mode,
-      slave_names => $slave_names,
-      tb_names => $tb_names,
-      require => Scaleio::Login['Normal'],
+      ensure       => 'present',
+      cluster_mode => $cluster_mode,
+      slave_names  => $slave_names,
+      tb_names     => $tb_names,
+      require      => Scaleio::Login['Normal'],
     }
   }
   protection_domain_ensure {$protection_domain_array:
@@ -384,13 +384,13 @@ if $scaleio['metadata']['enabled'] {
     require => Scaleio::Login['Normal'],
   } ->
   sds_ensure {$to_add_sds_names:
-    sds_nodes => $sds_nodes,
-    sds_to_pd_map => $sds_to_pd_map,
-    storage_pools => $pools,
-    device_paths => $paths,
-    rfcache_devices => $rfcache_devices,
-    sds_devices_config => $sds_devices_config,
-    require => Protection_domain_ensure[$protection_domain_array],
+    sds_nodes          => $sds_nodes,
+    sds_to_pd_map      => $sds_to_pd_map,
+    storage_pools      => $pools,
+    device_paths       => $paths,
+    rfcache_devices    => $rfcache_devices,
+    sds_devices_config => $sds_devices_config,
+    require            => Protection_domain_ensure[$protection_domain_array],
   }
   if $capacity_high_alert_threshold and $capacity_critical_alert_threshold {
     scaleio::cluster {'Configure alerts':
@@ -412,19 +412,19 @@ if $scaleio['metadata']['enabled'] {
     notify {"Not Master MDM IP ${master_mdm}": }
   }
   file_line {'SCALEIO_mdm_ips':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_mdm_ips=",
-    line => "SCALEIO_mdm_ips=${::managers_ips}",
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_mdm_ips=',
+    line   => "SCALEIO_mdm_ips=${::managers_ips}",
   } ->
   # forbid requesting sdc/sds from discovery facters,
   # this is a workaround of the ScaleIO problem -
   # these requests hangs in some reason if cluster is in degraded state
   file_line {'SCALEIO_discovery_allowed':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_discovery_allowed=",
-    line => "SCALEIO_discovery_allowed=no",
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_discovery_allowed=',
+    line   => 'SCALEIO_discovery_allowed=no',
   }
 } else {

View File

@@ -12,23 +12,23 @@ if $scaleio['metadata']['enabled'] {
   $discovered_tbs_ips = join($current_tbs, ',')
   notify {"ScaleIO cluster: discovery: discovered_managers_ips='${discovered_managers_ips}', discovered_tbs_ips='${discovered_tbs_ips}'": } ->
   file_line {'SCALEIO_mdm_ips':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_mdm_ips=",
-    line => "SCALEIO_mdm_ips=${discovered_mdms_ips}",
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_mdm_ips=',
+    line   => "SCALEIO_mdm_ips=${discovered_mdms_ips}",
   } ->
   file_line {'SCALEIO_managers_ips':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_managers_ips=",
-    line => "SCALEIO_managers_ips=${discovered_managers_ips}",
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_managers_ips=',
+    line   => "SCALEIO_managers_ips=${discovered_managers_ips}",
   } ->
   file_line {'SCALEIO_tb_ips':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_tb_ips=",
-    line => "SCALEIO_tb_ips=${discovered_tbs_ips}",
-  }
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_tb_ips=',
+    line   => "SCALEIO_tb_ips=${discovered_tbs_ips}",
+  }
 } else {
   notify{'Skip configuring cluster because of using existing cluster': }
 }

View File

@@ -3,11 +3,11 @@
 define env_fact($role, $fact, $value) {
   file_line { "Append a SCALEIO_${role}_${fact} line to /etc/environment":
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_${role}_${fact}=",
-    line => "SCALEIO_${role}_${fact}=${value}",
-  }
+    ensure => present,
+    path   => '/etc/environment',
+    match  => "^SCALEIO_${role}_${fact}=",
+    line   => "SCALEIO_${role}_${fact}=${value}",
+  }
 }
 $scaleio = hiera('scaleio')
@@ -19,10 +19,10 @@ if $scaleio['metadata']['enabled'] {
   # can be found for example in astute docker container during deloyment) should be set to high values.
   # It'll be invoked only if /tmp/scaleio_debug file exists on particular node and you can use
   # "touch /tmp/go" when you're ready to resume.
-  exec { "Wait on debug interrupt: use touch /tmp/go to resume":
+  exec { 'Wait on debug interrupt: use touch /tmp/go to resume':
     command => "bash -c 'while [ ! -f /tmp/go ]; do :; done'",
-    path => [ '/bin/' ],
-    onlyif => "ls /tmp/scaleio_debug",
+    path    => [ '/bin/' ],
+    onlyif  => 'ls /tmp/scaleio_debug',
   }
   case $::osfamily {
     'RedHat': {
@@ -43,32 +43,32 @@ if $scaleio['metadata']['enabled'] {
     # Existing ScaleIO cluster attaching
     notify{'Use existing ScaleIO cluster': }
     env_fact{"Environment fact: role gateway, ips: ${scaleio['gateway_ip']}":
-      role => 'gateway',
-      fact => 'ips',
+      role  => 'gateway',
+      fact  => 'ips',
       value => $scaleio['gateway_ip']
     } ->
     env_fact{"Environment fact: role gateway, user: ${scaleio['gateway_user']}":
-      role => 'gateway',
-      fact => 'user',
+      role  => 'gateway',
+      fact  => 'user',
       value => $scaleio['gateway_user']
     } ->
     env_fact{"Environment fact: role gateway, password: ${scaleio['password']}":
-      role => 'gateway',
-      fact => 'password',
+      role  => 'gateway',
+      fact  => 'password',
       value => $scaleio['password']
     } ->
     env_fact{"Environment fact: role gateway, port: ${scaleio['gateway_port']}":
-      role => 'gateway',
-      fact => 'port',
+      role  => 'gateway',
+      fact  => 'port',
       value => $scaleio['gateway_port']
     } ->
    env_fact{"Environment fact: role storage, pools: ${scaleio['existing_storage_pools']}":
-      role => 'storage',
-      fact => 'pools',
+      role  => 'storage',
+      fact  => 'pools',
       value => $scaleio['existing_storage_pools']
     }
     # mdm_ips are requested from gateways in separate manifest because no way to pass args to facter
-  }
+  }
   else {
     # New ScaleIO cluster deployment
     notify{'Deploy ScaleIO cluster': }
@@ -86,7 +86,7 @@ if $scaleio['metadata']['enabled'] {
   if ! $use_plugin_roles {
     $controller_sds_count = $scaleio['sds_on_controller'] ? {
       true    => count($controller_ips_array),
-      default => 0
+      default => 0
     }
     $total_sds_count = count(filter_nodes($all_nodes, 'role', 'compute')) + $controller_sds_count
     if $total_sds_count < 3 {
@@ -94,7 +94,7 @@ if $scaleio['metadata']['enabled'] {
     }
   } else {
     $tier1_sds_count = count(filter_nodes($all_nodes, 'role', 'scaleio-storage-tier1'))
-    $tier2_sds_count = count(filter_nodes($all_nodes, 'role', 'scaleio-storage-tier2'))
+    $tier2_sds_count = count(filter_nodes($all_nodes, 'role', 'scaleio-storage-tier2'))
     if $tier1_sds_count != 0 and $tier1_sds_count < 3 {
       $sds_check_msg = 'There are less than 3 nodes with Scaleio Storage Tier1 role.'
     }
@@ -103,11 +103,11 @@ if $scaleio['metadata']['enabled'] {
     }
   }
   if $sds_check_msg {
-    if ! $scaleio['skip_checks'] {
+    if ! $scaleio['skip_checks'] {
       fail($sds_check_msg)
     } else{
       warning($sds_check_msg)
-    }
+    }
   }
   $nodes = filter_nodes($all_nodes, 'name', $::hostname)
   if ! empty(concat(filter_nodes($nodes, 'role', 'controller'), filter_nodes($nodes, 'role', 'primary-controller'))) {
@@ -121,51 +121,51 @@ if $scaleio['metadata']['enabled'] {
     }
     if $::sds_storage_small_devices {
       if ! $scaleio['skip_checks'] {
-        fail("Storage devices minimal size is 100GB. The following devices do not meet this requirement ${::sds_storage_small_devices}")
+        fail("Storage devices minimal size is 100GB. The following devices do not meet this requirement ${::sds_storage_small_devices}")
       } else {
-        warning("Storage devices minimal size is 100GB. The following devices do not meet this requirement ${::sds_storage_small_devices}")
+        warning("Storage devices minimal size is 100GB. The following devices do not meet this requirement ${::sds_storage_small_devices}")
       }
     }
     # mdm ips and tb ips must be emtpy to avoid queries from ScaleIO about SDC/SDS,
     # the next task (cluster discovering) will set them into correct values.
     env_fact{'Environment fact: mdm ips':
-      role => 'mdm',
-      fact => 'ips',
+      role  => 'mdm',
+      fact  => 'ips',
       value => ''
     } ->
     env_fact{'Environment fact: managers ips':
-      role => 'managers',
-      fact => 'ips',
+      role  => 'managers',
+      fact  => 'ips',
       value => ''
     } ->
    env_fact{'Environment fact: tb ips':
-      role => 'tb',
-      fact => 'ips',
+      role  => 'tb',
+      fact  => 'ips',
       value => ''
     } ->
     env_fact{'Environment fact: gateway ips':
-      role => 'gateway',
-      fact => 'ips',
+      role  => 'gateway',
+      fact  => 'ips',
       value => $ctrl_ips
     } ->
     env_fact{'Environment fact: controller ips':
-      role => 'controller',
-      fact => 'ips',
+      role  => 'controller',
+      fact  => 'ips',
       value => $ctrl_ips
     } ->
     env_fact{'Environment fact: role gateway, user: admin':
-      role => 'gateway',
-      fact => 'user',
+      role  => 'gateway',
+      fact  => 'user',
       value => 'admin'
     } ->
     env_fact{'Environment fact: role gateway, port: 4443':
-      role => 'gateway',
-      fact => 'port',
+      role  => 'gateway',
+      fact  => 'port',
       value => 4443
     } ->
     env_fact{"Environment fact: role storage, pools: ${scaleio['storage_pools']}":
-      role => 'storage',
-      fact => 'pools',
+      role  => 'storage',
+      fact  => 'pools',
       value => $scaleio['storage_pools']
     }
   }

View File

@@ -3,11 +3,11 @@
 #TODO: move it from this file and from environment.pp into modules
 define env_fact($role, $fact, $value) {
   file_line { "Append a SCALEIO_${role}_${fact} line to /etc/environment":
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_${role}_${fact}=",
-    line => "SCALEIO_${role}_${fact}=${value}",
-  }
+    ensure => present,
+    path   => '/etc/environment',
+    match  => "^SCALEIO_${role}_${fact}=",
+    line   => "SCALEIO_${role}_${fact}=${value}",
+  }
 }
 $scaleio = hiera('scaleio')
@@ -18,8 +18,8 @@ if $scaleio['metadata']['enabled'] {
     fail('Cannot request MDM IPs from existing cluster. Check Gateway address/port and user name with password.')
   }
   env_fact{"Environment fact: role mdm, ips from existing cluster ${ips}":
-    role => 'controller',
-    fact => 'ips',
+    role  => 'controller',
+    fact  => 'ips',
     value => $ips
   }
 }

View File

@@ -12,29 +12,29 @@ if $scaleio['metadata']['enabled'] {
     }
     Haproxy::Service { use_include => true }
     Haproxy::Balancermember { use_include => true }
-    class {'scaleio::gateway_server':
+    class {'::scaleio::gateway_server':
       ensure   => 'present',
       mdm_ips  => $::managers_ips,
       password => $scaleio['password'],
     } ->
     notify { "Configure Haproxy for Gateway nodes: ${gw_ips}": } ->
     openstack::ha::haproxy_service { 'scaleio-gateway':
-      order => 201,
-      server_names => $gw_ips,
-      ipaddresses => $gw_ips,
-      listen_port => $::gateway_port,
-      public_virtual_ip => hiera('public_vip'),
-      internal_virtual_ip => hiera('management_vip'),
-      define_backups => true,
-      public => true,
-      haproxy_config_options => $haproxy_config_options,
-      balancermember_options => 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3',
-    }
+      order                  => 201,
+      server_names           => $gw_ips,
+      ipaddresses            => $gw_ips,
+      listen_port            => $::gateway_port,
+      public_virtual_ip      => hiera('public_vip'),
+      internal_virtual_ip    => hiera('management_vip'),
+      define_backups         => true,
+      public                 => true,
+      haproxy_config_options => $haproxy_config_options,
+      balancermember_options => 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3',
+    }
   } else {
     fail('Empty MDM IPs configuration')
   }
 }
} else {
  notify{'Skip deploying gateway server because of using existing cluster': }
}

View File

@@ -4,10 +4,10 @@ $scaleio = hiera('scaleio')
 if $scaleio['metadata']['enabled'] {
   if ! $scaleio['existing_cluster'] {
     $node_ips = split($::ip_address_array, ',')
-    if ! empty(intersection(split($::controller_ips, ','), $node_ips))
+    if ! empty(intersection(split($::controller_ips, ','), $node_ips))
     {
-      notify {"Mdm server installation": } ->
-      class {'scaleio::mdm_server':
+      notify {'Mdm server installation': } ->
+      class {'::scaleio::mdm_server':
         ensure => 'present',
       }
     } else {

View File

@@ -31,11 +31,11 @@ if $scaleio['metadata']['enabled'] {
   }
   $password = $scaleio['password']
   notify {"Controller server is_manager=${is_manager} master_mdm_name=${master_mdm_name} master_ip=${master_ip}": } ->
-  class {'scaleio::mdm_server':
-    ensure => 'present',
-    is_manager => $is_manager,
-    master_mdm_name => $master_mdm_name,
-    mdm_ips => $master_ip,
+  class {'::scaleio::mdm_server':
+    ensure          => 'present',
+    is_manager      => $is_manager,
+    master_mdm_name => $master_mdm_name,
+    mdm_ips         => $master_ip,
   }
   if $old_password != $password {
     if $master_mdm_name {
@@ -44,16 +44,16 @@ if $scaleio['metadata']['enabled'] {
         require => Class['scaleio::mdm_server']
       } ->
       scaleio::cluster {'Set password':
-        password => $old_password,
-        new_password => $password,
-        before => File_line['Append a SCALEIO_mdm_password line to /etc/environment']
+        password     => $old_password,
+        new_password => $password,
+        before       => File_line['Append a SCALEIO_mdm_password line to /etc/environment']
      }
    }
    file_line {'Append a SCALEIO_mdm_password line to /etc/environment':
-      ensure => present,
-      path => '/etc/environment',
-      match => "^SCALEIO_mdm_password=",
-      line => "SCALEIO_mdm_password=${password}",
+      ensure => present,
+      path   => '/etc/environment',
+      match  => '^SCALEIO_mdm_password=',
+      line   => "SCALEIO_mdm_password=${password}",
    }
  }
 } else {

View File

@@ -13,17 +13,17 @@ if $scaleio['metadata']['enabled'] {
     $provisioning_type = undef
   }
   $gateway_ip = $scaleio['existing_cluster'] ? {
-    true => $scaleio['gateway_ip'],
+    true    => $scaleio['gateway_ip'],
     default => hiera('management_vip')
   }
-  class {'scaleio_openstack::nova':
-    ensure => present,
-    gateway_user => $::gateway_user,
-    gateway_password => $scaleio['password'],
-    gateway_ip => $gateway_ip,
-    gateway_port => $::gateway_port,
-    protection_domains => $scaleio['protection_domain'],
-    storage_pools => $::storage_pools,
-    provisioning_type => $provisioning_type,
-  }
+  class {'::scaleio_openstack::nova':
+    ensure             => present,
+    gateway_user       => $::gateway_user,
+    gateway_password   => $scaleio['password'],
+    gateway_ip         => $gateway_ip,
+    gateway_port       => $::gateway_port,
+    protection_domains => $scaleio['protection_domain'],
+    storage_pools      => $::storage_pools,
+    provisioning_type  => $provisioning_type,
+  }
 }

View File

@@ -10,25 +10,25 @@ define apply_flavor(
       $flavor_name = $parsed_name[0]
       $flavor = $flavors_hash[$flavor_name]
       scaleio_openstack::flavor {$resource_name:
-        ensure => present,
-        name => $resource_name,
-        storage_pool => $flavor['storage_pool'],
-        id => $flavor['id'],
-        ram_size => $flavor['ram_size'],
-        vcpus => $flavor['vcpus'],
-        disk_size => $flavor['disk_size'],
-        ephemeral_size => $flavor['ephemeral_size'],
-        swap_size => $flavor['swap_size'],
-        rxtx_factor => $flavor['rxtx_factor'],
-        is_public => $flavor['is_public'],
-        provisioning => $flavor['provisioning'],
+        ensure         => present,
+        name           => $resource_name,
+        storage_pool   => $flavor['storage_pool'],
+        id             => $flavor['id'],
+        ram_size       => $flavor['ram_size'],
+        vcpus          => $flavor['vcpus'],
+        disk_size      => $flavor['disk_size'],
+        ephemeral_size => $flavor['ephemeral_size'],
+        swap_size      => $flavor['swap_size'],
+        rxtx_factor    => $flavor['rxtx_factor'],
+        is_public      => $flavor['is_public'],
+        provisioning   => $flavor['provisioning'],
      }
    } else {
      scaleio_openstack::flavor {$resource_name:
-        ensure => absent,
-        name => $resource_name,
+        ensure => absent,
+        name   => $resource_name,
      }
    }
  }
}
 $scaleio = hiera('scaleio')
@@ -37,7 +37,7 @@ if $scaleio['metadata']['enabled'] {
   if ! empty(filter_nodes(filter_nodes($all_nodes, 'name', $::hostname), 'role', 'primary-controller')) {
     if $scaleio['storage_pools'] and $scaleio['storage_pools'] != '' {
       # if storage pools come from settings remove probable trailing commas
-      $pools_array = split($scaleio['storage_pools'], ',')
+      $pools_array = split($scaleio['storage_pools'], ',')
     } else {
       $pools_array = get_pools_from_sds_config($sds_devices_config)
     }

View File

@@ -5,8 +5,8 @@
 define cleanup_mdm () {
   $mdm_name = $title
   scaleio::mdm {"Remove MDM ${mdm_name}":
-    ensure => 'absent',
-    sio_name => $mdm_name,
+    ensure   => 'absent',
+    sio_name => $mdm_name,
   }
 }
@@ -49,7 +49,7 @@ if $scaleio['metadata']['enabled'] {
     $new_tb_ips = join(concat($tbs_present_tmp, values_at($available_nodes, "${first_tb_index}-${last_tb_index}")), ',')
   } else {
     $new_tb_ips = join($tbs_present, ',')
-  }
+  }
   if $to_add_mdm_count > 0 and count($available_nodes) >= $to_add_mdm_count {
     $last_mdm_index = $to_add_mdm_count - 1
     $mdms_present_tmp = intersection($current_mdms, $controller_ips_array) # use tmp because concat modifys first param
@@ -57,7 +57,7 @@ if $scaleio['metadata']['enabled'] {
   } else {
     $new_mdms_ips = join($mdms_present, ',')
   }
-  $is_primary_controller = !empty(filter_nodes(filter_nodes($all_nodes, 'name', $::hostname), 'role', 'primary-controller'))
+  $is_primary_controller = !empty(filter_nodes(filter_nodes($all_nodes, 'name', $::hostname), 'role', 'primary-controller'))
   notify {"ScaleIO cluster: resize: controller_ips_array='${controller_ips_array}', current_mdms='${current_mdms}', current_tbs='${current_tbs}'": }
   if !empty($mdms_absent) or !empty($tbs_absent) {
     notify {"ScaleIO cluster: change: mdms_present='${mdms_present}', mdms_absent='${mdms_absent}', tbs_present='${tbs_present}', tbs_absent='${tbs_absent}'": }
@@ -70,12 +70,12 @@ if $scaleio['metadata']['enabled'] {
       password => $scaleio['password']
     } ->
     scaleio::cluster {'Resize cluster mode to 1_node and remove other MDMs':
-      ensure => 'absent',
-      cluster_mode => 1,
-      slave_names => $slaves_names,
-      tb_names => $::scaleio_tb_ips,
-      require => Scaleio::Login['Normal'],
-      before => File_line['SCALEIO_mdm_ips']
+      ensure       => 'absent',
+      cluster_mode => 1,
+      slave_names  => $slaves_names,
+      tb_names     => $::scaleio_tb_ips,
+      require      => Scaleio::Login['Normal'],
+      before       => File_line['SCALEIO_mdm_ips']
     } ->
     cleanup_mdm {$to_remove_mdms:
       before => File_line['SCALEIO_mdm_ips']
@@ -87,31 +87,31 @@ if $scaleio['metadata']['enabled'] {
     notify {'ScaleIO cluster: resize: nothing to resize': }
   }
   file_line {'SCALEIO_mdm_ips':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_mdm_ips=",
-    line => "SCALEIO_mdm_ips=${mdms_present_str}",
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_mdm_ips=',
+    line   => "SCALEIO_mdm_ips=${mdms_present_str}",
   } ->
   file_line {'SCALEIO_managers_ips':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_managers_ips=",
-    line => "SCALEIO_managers_ips=${new_mdms_ips}",
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_managers_ips=',
+    line   => "SCALEIO_managers_ips=${new_mdms_ips}",
   } ->
   file_line {'SCALEIO_tb_ips':
-    ensure => present,
-    path => '/etc/environment',
-    match => "^SCALEIO_tb_ips=",
-    line => "SCALEIO_tb_ips=${new_tb_ips}",
+    ensure => present,
+    path   => '/etc/environment',
+    match  => '^SCALEIO_tb_ips=',
+    line   => "SCALEIO_tb_ips=${new_tb_ips}",
   }
   # only primary-controller needs discovery of sds/sdc
   if $is_primary_controller {
     file_line {'SCALEIO_discovery_allowed':
-      ensure => present,
-      path => '/etc/environment',
-      match => "^SCALEIO_discovery_allowed=",
-      line => "SCALEIO_discovery_allowed=yes",
-      require => File_line['SCALEIO_tb_ips']
+      ensure  => present,
+      path    => '/etc/environment',
+      match   => '^SCALEIO_discovery_allowed=',
+      line    => 'SCALEIO_discovery_allowed=yes',
+      require => File_line['SCALEIO_tb_ips']
     }
   }
 } else {

View File

@@ -4,11 +4,11 @@
 $scaleio = hiera('scaleio')
 if $scaleio['metadata']['enabled'] {
   if ! $::controller_ips {
-    fail('Empty Controller IPs configuration')
+    fail('Empty Controller IPs configuration')
   }
-  class {'scaleio::sdc_server':
-    ensure => 'present',
-    mdm_ip => $::controller_ips,
+  class {'::scaleio::sdc_server':
+    ensure => 'present',
+    mdm_ip => $::controller_ips,
   }
 }

View File

@@ -2,8 +2,8 @@
 $scaleio = hiera('scaleio')
 if $scaleio['metadata']['enabled'] {
-  class {'scaleio::sdc_server':
-    ensure => 'present',
-    mdm_ip => undef,
+  class {'::scaleio::sdc_server':
+    ensure => 'present',
+    mdm_ip => undef,
   }
 }

View File

@@ -5,14 +5,14 @@ define sds_device_cleanup() {
   $device = $title
   exec { "device ${device} cleanup":
     command => "bash -c 'for i in \$(parted ${device} print | awk \"/^ [0-9]+/ {print(\\\$1)}\"); do parted ${device} rm \$i; done'",
-    path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
+    path    => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
   }
 }
 # Just install packages
 $scaleio = hiera('scaleio')
 if $scaleio['metadata']['enabled'] {
-  if ! $scaleio['existing_cluster'] {
+  if ! $scaleio['existing_cluster'] {
     $use_plugin_roles = $scaleio['enable_sds_role']
     if ! $use_plugin_roles {
       #it is supposed that task is run on compute or controller
@@ -46,9 +46,9 @@ if $scaleio['metadata']['enabled'] {
       sds_device_cleanup {$devices:
         before => Class['Scaleio::Sds_server']
       } ->
-      class {'scaleio::sds_server':
-        ensure => 'present',
-        xcache => $use_xcache,
+      class {'::scaleio::sds_server':
+        ensure => 'present',
+        xcache => $use_xcache,
       }
     } else {
       # save devices in shared DB
@@ -73,30 +73,30 @@ if $scaleio['metadata']['enabled'] {
       } else {
         $use_xcache = 'absent'
       }
-      $sds_name = $::hostname
+      $sds_name = $::hostname
       $sds_config = {
         "${sds_name}" => {
           'devices' => {
-            'tier1' => "${tier1_devices}",
-            'tier2' => "${tier2_devices}",
-            'tier3' => "${tier3_devices}",
+            'tier1' => $tier1_devices,
+            'tier2' => $tier2_devices,
+            'tier3' => $tier3_devices,
          },
-          'rfcache_devices' => "${rfcache_devices}",
+          'rfcache_devices' => $rfcache_devices,
        }
      }
      # convert hash to string and add escaping of qoutes
-      $sds_config_str = regsubst(regsubst(inline_template('<%= @sds_config.to_s %>'), '=>', ":", 'G'), '"', '\"', 'G')
+      $sds_config_str = regsubst(regsubst(inline_template('<%= @sds_config.to_s %>'), '=>', ':', 'G'), '"', '\"', 'G')
      $galera_host = hiera('management_vip')
      $mysql_opts = hiera('mysql')
      $mysql_password = $mysql_opts['root_password']
-      $sql_connect = "mysql -h ${galera_host} -uroot -p${mysql_password}"
+      $sql_connect = "mysql -h ${galera_host} -uroot -p${mysql_password}"
      $db_query = 'CREATE DATABASE IF NOT EXISTS scaleio; USE scaleio'
      $table_query = 'CREATE TABLE IF NOT EXISTS sds (name VARCHAR(64), PRIMARY KEY(name), value TEXT(1024))'
      $update_query = "INSERT INTO sds (name, value) VALUES ('${sds_name}', '${sds_config_str}') ON DUPLICATE KEY UPDATE value='${sds_config_str}'"
      $sql_query = "${sql_connect} -e \"${db_query}; ${table_query}; ${update_query};\""
-      class {'scaleio::sds_server':
-        ensure => 'present',
-        xcache => $use_xcache,
+      class {'::scaleio::sds_server':
+        ensure => 'present',
+        xcache => $use_xcache,
      } ->
      package {'mysql-client':
        ensure => present,