Non-hyper-converged deployment support

Change-Id: I7dd94514c0802ea7764d4d5d249bbd21f18eb2db
Author: alexey-mr  2016-09-06 13:58:10 +03:00
parent c59f44c019
commit 3fd9fb1fca
8 changed files with 191 additions and 43 deletions


@@ -205,13 +205,17 @@ if $scaleio['metadata']['enabled'] {
 if ! empty(filter_nodes(filter_nodes($all_nodes, 'name', $::hostname), 'role', 'primary-controller')) {
   $use_plugin_roles = $scaleio['enable_sds_role']
   if ! $use_plugin_roles {
-    $storage_nodes = filter_nodes($all_nodes, 'role', 'compute')
-    if $scaleio['sds_on_controller'] {
-      $controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
-      $pr_controller_nodes = filter_nodes($all_nodes, 'role', 'primary-controller')
-      $sds_nodes = concat(concat($pr_controller_nodes, $controller_nodes), $storage_nodes)
+    if $scaleio['hyper_converged_deployment'] {
+      $storage_nodes = filter_nodes($all_nodes, 'role', 'compute')
+      if $scaleio['sds_on_controller'] {
+        $controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
+        $pr_controller_nodes = filter_nodes($all_nodes, 'role', 'primary-controller')
+        $sds_nodes = concat(concat($pr_controller_nodes, $controller_nodes), $storage_nodes)
+      } else {
+        $sds_nodes = $storage_nodes
+      }
     } else {
-      $sds_nodes = $storage_nodes
+      $sds_nodes = filter_nodes($all_nodes, 'role', 'scaleio')
     }
   } else {
     $sds_nodes = concat(filter_nodes($all_nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($all_nodes, 'role', 'scaleio-storage-tier2'))
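In both modes, SDS membership reduces to filtering the hiera nodes list by role. A minimal Ruby model of the selection, assuming the nodes list is an array of hashes with 'name' and 'role' keys as in Fuel's hiera('nodes'); the helper below is illustrative, not part of the plugin:

# Illustrative model of the SDS node selection above; 'nodes' mimics
# hiera('nodes'): one hash per node-role assignment.
def sds_nodes(nodes, hyper_converged:, sds_on_controller:)
  if hyper_converged
    roles = ['compute']
    roles += ['primary-controller', 'controller'] if sds_on_controller
    nodes.select { |n| roles.include?(n['role']) }
  else
    # non-hyper-converged: only nodes with the dedicated ScaleIO role
    nodes.select { |n| n['role'] == 'scaleio' }
  end
end

nodes = [
  { 'name' => 'node-1', 'role' => 'primary-controller' },
  { 'name' => 'node-2', 'role' => 'compute' },
  { 'name' => 'node-3', 'role' => 'scaleio' },
]
p sds_nodes(nodes, hyper_converged: false, sds_on_controller: true)
# => [{"name"=>"node-3", "role"=>"scaleio"}]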


@@ -86,11 +86,15 @@ if $scaleio['metadata']['enabled'] {
   # Check SDS count
   $use_plugin_roles = $scaleio['enable_sds_role']
   if ! $use_plugin_roles {
-    $controller_sds_count = $scaleio['sds_on_controller'] ? {
-      true => count($controller_ips_array),
-      default => 0
-    }
-    $total_sds_count = count(filter_nodes($all_nodes, 'role', 'compute')) + $controller_sds_count
+    if $scaleio['hyper_converged_deployment'] {
+      $controller_sds_count = $scaleio['sds_on_controller'] ? {
+        true => count($controller_ips_array),
+        default => 0
+      }
+      $total_sds_count = count(filter_nodes($all_nodes, 'role', 'compute')) + $controller_sds_count
+    } else {
+      $total_sds_count = count(filter_nodes($all_nodes, 'role', 'scaleio'))
+    }
     if $total_sds_count < 3 {
       $sds_check_msg = 'There should be at least 3 nodes with SDSs, either add Compute node or use Controllers as SDS.'
     }
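The check above enforces a minimum of three SDS nodes, and the tally depends on the mode. A worked example of the arithmetic, using a hypothetical cluster of 2 controllers and 2 computes with no dedicated ScaleIO-role nodes:

# Hypothetical cluster: 2 controllers, 2 computes, 0 'scaleio'-role nodes.
controllers, computes, scaleio_nodes = 2, 2, 0
sds_on_controller = true

hyper_converged_total = computes + (sds_on_controller ? controllers : 0)  # => 4
dedicated_total       = scaleio_nodes                                     # => 0

puts hyper_converged_total >= 3  # true: check passes
puts dedicated_total >= 3        # false: need nodes with the ScaleIO role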


@@ -15,14 +15,19 @@ notice('MODULAR: scaleio: sds_server')
 $scaleio = hiera('scaleio')
 if $scaleio['metadata']['enabled'] {
   if ! $scaleio['existing_cluster'] {
+    $all_nodes = hiera('nodes')
+    $nodes = filter_nodes($all_nodes, 'name', $::hostname)
     $use_plugin_roles = $scaleio['enable_sds_role']
     if ! $use_plugin_roles {
-      #it is supposed that task is run on compute or controller
-      $node_ips = split($::ip_address_array, ',')
-      $is_sds_server = empty(intersection(split($::controller_ips, ','), $node_ips)) or $scaleio['sds_on_controller']
+      if $scaleio['hyper_converged_deployment'] {
+        $is_controller = !empty(concat(filter_nodes($nodes, 'role', 'primary-controller'), filter_nodes($nodes, 'role', 'controller')))
+        $is_sds_on_controller = $is_controller and $scaleio['sds_on_controller']
+        $is_sds_on_compute = !empty(filter_nodes($nodes, 'role', 'compute'))
+        $is_sds_server = $is_sds_on_controller or $is_sds_on_compute
+      } else {
+        $is_sds_server = !empty(filter_nodes($nodes, 'role', 'scaleio'))
+      }
     } else {
-      $all_nodes = hiera('nodes')
-      $nodes = filter_nodes($all_nodes, 'name', $::hostname)
       $is_sds_server = ! empty(concat(
         concat(filter_nodes($nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($nodes, 'role', 'scaleio-storage-tier2')),
         filter_nodes($nodes, 'role', 'scaleio-storage-tier3')))


@@ -23,11 +23,12 @@
 # ---------------------------------------------------------------------------------
 # | Name                                    | Description
 # |--------------------------------------------------------------------------------
-# | scaleio_sds_ips    | Comma separated list of SDS IPs.
-# | scaleio_sds_names  | Comma separated list of SDS names.
-# | scaleio_sdc_ips    | Comma separated list of SDC IPs,
-# |                    |   it is a list of management IPs, not storage IPs.
+# | scaleio_sds_ips                         | Comma separated list of SDS IPs.
+# | scaleio_sds_names                       | Comma separated list of SDS names.
+# | scaleio_sdc_ips                         | Comma separated list of SDC IPs,
+# |                                         |   it is a list of management IPs, not storage IPs.
+# | scaleio_sds_with_protection_domain_list | Comma separated list of SDSes
+# |                                         |   with their Protection domains: sds1,pd1,sds2,pd2,..
 # Facts about MDM from Gateway:
 # (It requests them from Gateway via curl and requires the fact 'gateway_ips'.
 # A user is 'admin' by default, or the fact 'gateway_user' if it exists.
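The new fact packs SDS/protection-domain pairs into one flat comma-separated string. A minimal Ruby sketch of unpacking it on the consumer side (the fact name is from the table above; sample values are hypothetical):

# 'sds1,pd1,sds2,pd2,..' pairs each SDS name with its protection domain.
fact = 'node-1,default,node-2,default,node-3,default_2'
pairs = fact.split(',').each_slice(2).to_h
# => {"node-1"=>"default", "node-2"=>"default", "node-3"=>"default_2"}
pairs.each { |sds, pd| puts "SDS #{sds} belongs to protection domain #{pd}" }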


@@ -1,16 +1,19 @@
 require 'date'
 require 'facter'
-$scaleio_tier1_guid = 'f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e'
-$scaleio_tier2_guid = 'd5321bb3-1098-433e-b4f5-216712fcd06f'
-$scaleio_tier3_guid = '97987bfc-a9ba-40f3-afea-13e1a228e492'
-$scaleio_rfcache_guid = '163ddeea-95dd-4af0-a329-140623590c47'
+$scaleio_storage_guid = '5e9bd278-9919-4db3-9756-4b82c7e9df52'
+## Experimental:
+#$scaleio_tier1_guid = 'f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e'
+#$scaleio_tier2_guid = 'd5321bb3-1098-433e-b4f5-216712fcd06f'
+#$scaleio_tier3_guid = '97987bfc-a9ba-40f3-afea-13e1a228e492'
+#$scaleio_rfcache_guid = '163ddeea-95dd-4af0-a329-140623590c47'
 $scaleio_tiers = {
-  'tier1' => $scaleio_tier1_guid,
-  'tier2' => $scaleio_tier2_guid,
-  'tier3' => $scaleio_tier3_guid,
-  'rfcache' => $scaleio_rfcache_guid,
+  'all' => $scaleio_storage_guid,
+  # 'tier1' => $scaleio_tier1_guid,
+  # 'tier2' => $scaleio_tier2_guid,
+  # 'tier3' => $scaleio_tier3_guid,
+  # 'rfcache' => $scaleio_rfcache_guid,
 }
 $scaleio_log_file = "/var/log/fuel-plugin-scaleio.log"
@@ -47,11 +50,12 @@ end
 Facter.add('sds_storage_small_devices') do
   setcode do
     result = nil
+    disks0 = Facter.value('sds_storage_devices_all')
     disks1 = Facter.value('sds_storage_devices_tier1')
     disks2 = Facter.value('sds_storage_devices_tier2')
     disks3 = Facter.value('sds_storage_devices_tier3')
     if disks1 or disks2 or disks3
-      disks = [disks1, disks2, disks3].join(',')
+      disks = [disks0, disks1, disks2, disks3].join(',')
     end
     if disks
       devices = disks.split(',')
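Ruby's Array#join stringifies nil elements to empty strings, so when a tier fact is unset the aggregated list can carry empty entries. A standalone sketch of the behavior (outside Facter, with sample values):

disks0 = '/dev/sdb,/dev/sdc'  # sample value of sds_storage_devices_all
disks1 = nil                  # tier1 fact not set on this node

p [disks0, disks1].join(',')            # => "/dev/sdb,/dev/sdc," (trailing comma)
p [disks0, disks1].join(',').split(',') # trailing empty entry is dropped by split
p [disks0, disks1].compact.join(',')    # => "/dev/sdb,/dev/sdc" (avoids it entirely)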


@@ -3,16 +3,6 @@ attributes:
   # Settings group can be one of "general", "security", "compute", "network",
   # "storage", "logging", "openstack_services" and "other".
   group: 'storage'
-  enable_sds_role:
-    type: "checkbox"
-    value: false
-    label: "Use ScaleIO SDS role."
-    description: "Enables role based deployment."
-    weight: 5
-    restrictions:
-      - condition: "true"
-        action: hide
   existing_cluster:
     type: "checkbox"
@@ -73,7 +63,10 @@ attributes:
     type: "text"
     value: "default"
     label: "Protection domain"
-    description: "Name of first protection domain. Next domains will get names like default_2, default_3."
+    description:
+      Name of the first protection domain. In case of auto-scaling, next domains will get names like default_2, default_3.
+      Auto-scaling works if the Use Existing Cluster option is disabled. The next domain is created
+      when the number of SDSes reaches the limit set in Maximum number of nodes in one protection domain.
     weight: 70
     regex:
       source: '^(\w+){1}((,){1}(?=\w+))*'
@@ -84,8 +77,9 @@ attributes:
     value: "100"
     label: "Maximum number of nodes in one protection domain"
     description:
-      If number of nodes gets lasrgert than this threshould new protection domain will be created.
-      Note, in that case it is needed to add at least 3 new nodes with Storage role to make new domain operationable.
+      If the number of nodes gets larger than this threshold, a new protection domain will be created.
+      Note, in that case at least 3 new nodes have to be added to make the new domain operational.
+      In case of hyper-converged deployment they should be compute nodes, otherwise ScaleIO nodes.
     weight: 75
     regex:
       source: '^[1-9]{1}[0-9]*$'
@@ -94,6 +88,16 @@ attributes:
       - condition: "settings:scaleio.existing_cluster.value == true"
        action: hide
+  enable_sds_role:
+    type: "checkbox"
+    value: false
+    label: "Experimental role-based deployment"
+    description: "Hidden option to disable the experimental feature"
+    weight: 5
+    restrictions:
+      - condition: "true"
+        action: hide
   storage_pools:
     type: "text"
     value: "default"
@@ -135,6 +139,18 @@ attributes:
       - condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.enable_sds_role.value == true"
        action: hide
+  hyper_converged_deployment:
+    type: "checkbox"
+    value: true
+    label: "Hyper-converged deployment"
+    description:
+      Deploy the SDS component on all compute nodes automatically and, optionally, on controller nodes.
+      If the option is disabled, SDS will be deployed only on the nodes with the ScaleIO role.
+    weight: 103
+    restrictions:
+      - condition: "settings:scaleio.existing_cluster.value == true"
+        action: hide
   sds_on_controller:
     type: "checkbox"
     value: true
@@ -142,7 +158,7 @@ attributes:
     description: "Setup SDS-es on controller nodes."
     weight: 105
     restrictions:
-      - condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.enable_sds_role.value == true"
+      - condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.hyper_converged_deployment.value == false or settings:scaleio.enable_sds_role.value == true"
        action: hide
   provisioning_type:
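The restriction conditions are boolean expressions over other settings' values; with the defaults above, sds_on_controller stays visible only while a new, hyper-converged deployment is selected. A simplified Ruby model of that evaluation (not Fuel's actual expression parser):

settings = {
  'existing_cluster'           => false,  # defaults from environment_config.yaml
  'hyper_converged_deployment' => true,
  'enable_sds_role'            => false,
}

# Mirrors: existing_cluster == true or hyper_converged_deployment == false
#          or enable_sds_role == true  ->  hide the checkbox
hide_sds_on_controller =
  settings['existing_cluster'] ||
  !settings['hyper_converged_deployment'] ||
  settings['enable_sds_role']

puts hide_sds_on_controller ? 'hidden' : 'visible'  # => visible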

node_roles.yaml (new file)

@@ -0,0 +1,30 @@
+scaleio:
+  name: "ScaleIO"
+  description: "ScaleIO"
+  update_required:
+    - primary-controller
+  restrictions:
+    - condition: "settings:scaleio.hyper_converged_deployment.value == true"
+      message:
+        The ScaleIO role is available only in the non-hyper-converged deployment mode.
+        To select the non-hyper-converged mode, navigate to the ScaleIO plugin settings and un-check the appropriate checkbox.
+  metadata:
+    group: 'storage'
+
+## Experimental features: disabled in production
+# scaleio-storage-tier1:
+#   name: "ScaleIO Storage Tier1"
+#   description: "Devices of a node with this role will be assigned to the storage pool tier1. If both tier roles are assigned, devices will be split according to plugin settings."
+#   limits:
+#     min: 3
+#   update_required:
+#     - controller
+#     - primary-controller
+#
+# scaleio-storage-tier2:
+#   name: "ScaleIO Storage Tier2"
+#   description: "Devices of a node with this role will be assigned to the storage pool tier2. If both tier roles are assigned, devices will be split according to plugin settings."
+#   limits:
+#     min: 3
+#   update_required:
+#     - controller
+#     - primary-controller

volumes.yaml (new file)

@@ -0,0 +1,84 @@
+volumes:
+  - id: "scaleio"
+    type: "partition"
+    min_size:
+      generator: "calc_gb_to_mb"
+      generator_args: [0]
+    label: "ScaleIO"
+    name: "ScaleIO"
+    mount: "none"
+    file_system: "none"
+    partition_guid: "5e9bd278-9919-4db3-9756-4b82c7e9df52"
+## Experimental features: disabled in production
+#  - id: "scaleio-storage-tier1"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO Tier1"
+#    name: "ScaleIOTier1"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e"
+#  - id: "scaleio-storage-tier2"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO Tier2"
+#    name: "ScaleIOTier2"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "d5321bb3-1098-433e-b4f5-216712fcd06f"
+#  - id: "scaleio-storage-tier3"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO Tier3"
+#    name: "ScaleIOTier3"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "97987bfc-a9ba-40f3-afea-13e1a228e492"
+#  - id: "scaleio-storage-rfcache"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO RFCache"
+#    name: "ScaleIORFCache"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "163ddeea-95dd-4af0-a329-140623590c47"
+volumes_roles_mapping:
+  scaleio:
+    - {allocate_size: "min", id: "os"}
+    - {allocate_size: "full-disk", id: "scaleio"}
+## Experimental features: disabled in production
+#  scaleio-storage-tier1:
+#    - {allocate_size: "min", id: "os"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
+#  scaleio-storage-tier2:
+#    - {allocate_size: "min", id: "os"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
+#  scaleio-storage-tier3:
+#    - {allocate_size: "min", id: "os"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
+#  scaleio-storage-rfcache:
+#    - {allocate_size: "min", id: "os"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#    - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
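The partition_guid of the scaleio volume matches $scaleio_storage_guid in the facts file above, which is how the plugin can recognize its partitions on a node. A hedged Ruby sketch of GUID-based discovery, assuming lsblk reports partition type GUIDs via its PARTTYPE column; this illustrates the idea only and is not the plugin's actual fact code:

SCALEIO_STORAGE_GUID = '5e9bd278-9919-4db3-9756-4b82c7e9df52'

# List block devices whose partition-type GUID marks them as ScaleIO storage.
# `lsblk -o NAME,PARTTYPE -rn` prints "name parttype" pairs, one per line.
devices = `lsblk -o NAME,PARTTYPE -rn`.lines.filter_map do |line|
  name, parttype = line.split
  "/dev/#{name}" if parttype && parttype.casecmp?(SCALEIO_STORAGE_GUID)
end
puts devices.join(',')  # comma-separated, like the sds_storage_devices_* facts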