Non-hyper-converged deployment support
Change-Id: I7dd94514c0802ea7764d4d5d249bbd21f18eb2db
This commit is contained in:
parent
c59f44c019
commit
3fd9fb1fca
|
@ -205,13 +205,17 @@ if $scaleio['metadata']['enabled'] {
|
||||||
if ! empty(filter_nodes(filter_nodes($all_nodes, 'name', $::hostname), 'role', 'primary-controller')) {
|
if ! empty(filter_nodes(filter_nodes($all_nodes, 'name', $::hostname), 'role', 'primary-controller')) {
|
||||||
$use_plugin_roles = $scaleio['enable_sds_role']
|
$use_plugin_roles = $scaleio['enable_sds_role']
|
||||||
if ! $use_plugin_roles {
|
if ! $use_plugin_roles {
|
||||||
$storage_nodes = filter_nodes($all_nodes, 'role', 'compute')
|
if $scaleio['hyper_converged_deployment'] {
|
||||||
if $scaleio['sds_on_controller'] {
|
$storage_nodes = filter_nodes($all_nodes, 'role', 'compute')
|
||||||
$controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
|
if $scaleio['sds_on_controller'] {
|
||||||
$pr_controller_nodes = filter_nodes($all_nodes, 'role', 'primary-controller')
|
$controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
|
||||||
$sds_nodes = concat(concat($pr_controller_nodes, $controller_nodes), $storage_nodes)
|
$pr_controller_nodes = filter_nodes($all_nodes, 'role', 'primary-controller')
|
||||||
|
$sds_nodes = concat(concat($pr_controller_nodes, $controller_nodes), $storage_nodes)
|
||||||
|
} else {
|
||||||
|
$sds_nodes = $storage_nodes
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
$sds_nodes = $storage_nodes
|
$sds_nodes = filter_nodes($all_nodes, 'role', 'scaleio')
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
$sds_nodes = concat(filter_nodes($all_nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($all_nodes, 'role', 'scaleio-storage-tier2'))
|
$sds_nodes = concat(filter_nodes($all_nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($all_nodes, 'role', 'scaleio-storage-tier2'))
|
||||||
|
|
|
@ -86,11 +86,15 @@ if $scaleio['metadata']['enabled'] {
|
||||||
# Check SDS count
|
# Check SDS count
|
||||||
$use_plugin_roles = $scaleio['enable_sds_role']
|
$use_plugin_roles = $scaleio['enable_sds_role']
|
||||||
if ! $use_plugin_roles {
|
if ! $use_plugin_roles {
|
||||||
$controller_sds_count = $scaleio['sds_on_controller'] ? {
|
if $scaleio['hyper_converged_deployment'] {
|
||||||
true => count($controller_ips_array),
|
$controller_sds_count = $scaleio['sds_on_controller'] ? {
|
||||||
default => 0
|
true => count($controller_ips_array),
|
||||||
|
default => 0
|
||||||
|
}
|
||||||
|
$total_sds_count = count(filter_nodes($all_nodes, 'role', 'compute')) + $controller_sds_count
|
||||||
|
} else {
|
||||||
|
$total_sds_count = count(filter_nodes($all_nodes, 'role', 'scaleio'))
|
||||||
}
|
}
|
||||||
$total_sds_count = count(filter_nodes($all_nodes, 'role', 'compute')) + $controller_sds_count
|
|
||||||
if $total_sds_count < 3 {
|
if $total_sds_count < 3 {
|
||||||
$sds_check_msg = 'There should be at least 3 nodes with SDSs, either add Compute node or use Controllers as SDS.'
|
$sds_check_msg = 'There should be at least 3 nodes with SDSs, either add Compute node or use Controllers as SDS.'
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,14 +15,19 @@ notice('MODULAR: scaleio: sds_server')
|
||||||
$scaleio = hiera('scaleio')
|
$scaleio = hiera('scaleio')
|
||||||
if $scaleio['metadata']['enabled'] {
|
if $scaleio['metadata']['enabled'] {
|
||||||
if ! $scaleio['existing_cluster'] {
|
if ! $scaleio['existing_cluster'] {
|
||||||
|
$all_nodes = hiera('nodes')
|
||||||
|
$nodes = filter_nodes($all_nodes, 'name', $::hostname)
|
||||||
$use_plugin_roles = $scaleio['enable_sds_role']
|
$use_plugin_roles = $scaleio['enable_sds_role']
|
||||||
if ! $use_plugin_roles {
|
if ! $use_plugin_roles {
|
||||||
#it is supposed that task is run on compute or controller
|
if $scaleio['hyper_converged_deployment'] {
|
||||||
$node_ips = split($::ip_address_array, ',')
|
$is_controller = !empty(concat(filter_nodes($nodes, 'role', 'primary-controller'), filter_nodes($nodes, 'role', 'controller')))
|
||||||
$is_sds_server = empty(intersection(split($::controller_ips, ','), $node_ips)) or $scaleio['sds_on_controller']
|
$is_sds_on_controller = $is_controller and $scaleio['sds_on_controller']
|
||||||
|
$is_sds_on_compute = !empty(filter_nodes($nodes, 'role', 'compute'))
|
||||||
|
$is_sds_server = $is_sds_on_controller or $is_sds_on_compute
|
||||||
|
} else {
|
||||||
|
$is_sds_server = !empty(filter_nodes($nodes, 'role', 'scaleio'))
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
$all_nodes = hiera('nodes')
|
|
||||||
$nodes = filter_nodes($all_nodes, 'name', $::hostname)
|
|
||||||
$is_sds_server = ! empty(concat(
|
$is_sds_server = ! empty(concat(
|
||||||
concat(filter_nodes($nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($nodes, 'role', 'scaleio-storage-tier2')),
|
concat(filter_nodes($nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($nodes, 'role', 'scaleio-storage-tier2')),
|
||||||
filter_nodes($nodes, 'role', 'scaleio-storage-tier3')))
|
filter_nodes($nodes, 'role', 'scaleio-storage-tier3')))
|
||||||
|
|
|
@ -23,11 +23,12 @@
|
||||||
# ---------------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------------
|
||||||
# | Name | Description
|
# | Name | Description
|
||||||
# |--------------------------------------------------------------------------------
|
# |--------------------------------------------------------------------------------
|
||||||
# | scaleio_sds_ips | Comma separated list of SDS IPs.
|
# | scaleio_sds_ips | Comma separated list of SDS IPs.
|
||||||
# | scaleio_sds_names | Comma separated list of SDS names.
|
# | scaleio_sds_names | Comma separated list of SDS names.
|
||||||
# | scaleio_sdc_ips | Comma separated list of SDC IPs,
|
# | scaleio_sdc_ips | Comma separated list of SDC IPs,
|
||||||
# | | it is list of management IPs, not storage IPs.
|
# | | it is list of management IPs, not storage IPs.
|
||||||
|
# | scaleio_sds_with_protection_domain_list | Comma separated list of SDS
|
||||||
|
# | | with its Protection domains: sds1,pd1,sds2,pd2,..
|
||||||
# Facts about MDM from Gateway:
|
# Facts about MDM from Gateway:
|
||||||
# (It requests them from Gateway via curl and requires the fact 'gateway_ips'.
|
# (It requests them from Gateway via curl and requires the fact 'gateway_ips'.
|
||||||
# A user is 'admin' by default or the fact 'gateway_user' if it exists.
|
# A user is 'admin' by default or the fact 'gateway_user' if it exists.
|
||||||
|
|
|
@ -1,16 +1,19 @@
|
||||||
require 'date'
|
require 'date'
|
||||||
require 'facter'
|
require 'facter'
|
||||||
|
|
||||||
$scaleio_tier1_guid = 'f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e'
|
$scaleio_storage_guid = '5e9bd278-9919-4db3-9756-4b82c7e9df52'
|
||||||
$scaleio_tier2_guid = 'd5321bb3-1098-433e-b4f5-216712fcd06f'
|
## Experimental:
|
||||||
$scaleio_tier3_guid = '97987bfc-a9ba-40f3-afea-13e1a228e492'
|
#$scaleio_tier1_guid = 'f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e'
|
||||||
$scaleio_rfcache_guid = '163ddeea-95dd-4af0-a329-140623590c47'
|
#$scaleio_tier2_guid = 'd5321bb3-1098-433e-b4f5-216712fcd06f'
|
||||||
|
#$scaleio_tier3_guid = '97987bfc-a9ba-40f3-afea-13e1a228e492'
|
||||||
|
#$scaleio_rfcache_guid = '163ddeea-95dd-4af0-a329-140623590c47'
|
||||||
|
|
||||||
$scaleio_tiers = {
|
$scaleio_tiers = {
|
||||||
'tier1' => $scaleio_tier1_guid,
|
'all' => $scaleio_storage_guid,
|
||||||
'tier2' => $scaleio_tier2_guid,
|
# 'tier1' => $scaleio_tier1_guid,
|
||||||
'tier3' => $scaleio_tier3_guid,
|
# 'tier2' => $scaleio_tier2_guid,
|
||||||
'rfcache' => $scaleio_rfcache_guid,
|
# 'tier3' => $scaleio_tier3_guid,
|
||||||
|
# 'rfcache' => $scaleio_rfcache_guid,
|
||||||
}
|
}
|
||||||
|
|
||||||
$scaleio_log_file = "/var/log/fuel-plugin-scaleio.log"
|
$scaleio_log_file = "/var/log/fuel-plugin-scaleio.log"
|
||||||
|
@ -47,11 +50,12 @@ end
|
||||||
Facter.add('sds_storage_small_devices') do
|
Facter.add('sds_storage_small_devices') do
|
||||||
setcode do
|
setcode do
|
||||||
result = nil
|
result = nil
|
||||||
|
disks0 = Facter.value('sds_storage_devices_all')
|
||||||
disks1 = Facter.value('sds_storage_devices_tier1')
|
disks1 = Facter.value('sds_storage_devices_tier1')
|
||||||
disks2 = Facter.value('sds_storage_devices_tier2')
|
disks2 = Facter.value('sds_storage_devices_tier2')
|
||||||
disks3 = Facter.value('sds_storage_devices_tier3')
|
disks3 = Facter.value('sds_storage_devices_tier3')
|
||||||
if disks1 or disks2 or disks3
|
if disks1 or disks2 or disks3
|
||||||
disks = [disks1, disks2, disks3].join(',')
|
disks = [disks0, disks1, disks2, disks3].join(',')
|
||||||
end
|
end
|
||||||
if disks
|
if disks
|
||||||
devices = disks.split(',')
|
devices = disks.split(',')
|
||||||
|
|
|
@ -3,16 +3,6 @@ attributes:
|
||||||
# Settings group can be one of "general", "security", "compute", "network",
|
# Settings group can be one of "general", "security", "compute", "network",
|
||||||
# "storage", "logging", "openstack_services" and "other".
|
# "storage", "logging", "openstack_services" and "other".
|
||||||
group: 'storage'
|
group: 'storage'
|
||||||
|
|
||||||
enable_sds_role:
|
|
||||||
type: "checkbox"
|
|
||||||
value: false
|
|
||||||
label: "Use ScaleIO SDS role."
|
|
||||||
description: "Enables role based deployment."
|
|
||||||
weight: 5
|
|
||||||
restrictions:
|
|
||||||
- condition: "true"
|
|
||||||
action: hide
|
|
||||||
|
|
||||||
existing_cluster:
|
existing_cluster:
|
||||||
type: "checkbox"
|
type: "checkbox"
|
||||||
|
@ -73,7 +63,10 @@ attributes:
|
||||||
type: "text"
|
type: "text"
|
||||||
value: "default"
|
value: "default"
|
||||||
label: "Protection domain"
|
label: "Protection domain"
|
||||||
description: "Name of first protection domain. Next domains will get names like default_2, default_3."
|
description:
|
||||||
|
Name of first protection domain. In case of auto-scaling next domains will get names like default_2, default_3.
|
||||||
|
Auto-scaling works if the Use Existing Cluster option is disabled. Next domain is created
|
||||||
|
if number of SDS-es reaches the limit in the setting Maximum number of nodes in one protection domain.
|
||||||
weight: 70
|
weight: 70
|
||||||
regex:
|
regex:
|
||||||
source: '^(\w+){1}((,){1}(?=\w+))*'
|
source: '^(\w+){1}((,){1}(?=\w+))*'
|
||||||
|
@ -84,8 +77,9 @@ attributes:
|
||||||
value: "100"
|
value: "100"
|
||||||
label: "Maximum number of nodes in one protection domain"
|
label: "Maximum number of nodes in one protection domain"
|
||||||
description:
|
description:
|
||||||
If number of nodes gets lasrgert than this threshould new protection domain will be created.
|
If number of nodes gets larger than this threshold then new protection domain will be created.
|
||||||
Note, in that case it is needed to add at least 3 new nodes with Storage role to make new domain operationable.
|
Note, in that case it is needed to add at least 3 new nodes to make the new domain operational.
|
||||||
|
In case of hyper-converged deployment they should be compute nodes, otherwise - the ScaleIO nodes.
|
||||||
weight: 75
|
weight: 75
|
||||||
regex:
|
regex:
|
||||||
source: '^[1-9]{1}[0-9]*$'
|
source: '^[1-9]{1}[0-9]*$'
|
||||||
|
@ -94,6 +88,16 @@ attributes:
|
||||||
- condition: "settings:scaleio.existing_cluster.value == true"
|
- condition: "settings:scaleio.existing_cluster.value == true"
|
||||||
action: hide
|
action: hide
|
||||||
|
|
||||||
|
enable_sds_role:
|
||||||
|
type: "checkbox"
|
||||||
|
value: false
|
||||||
|
label: "Experimental role-based deployment"
|
||||||
|
description: "Hidden option to disable experimental feature"
|
||||||
|
weight: 5
|
||||||
|
restrictions:
|
||||||
|
- condition: "true"
|
||||||
|
action: hide
|
||||||
|
|
||||||
storage_pools:
|
storage_pools:
|
||||||
type: "text"
|
type: "text"
|
||||||
value: "default"
|
value: "default"
|
||||||
|
@ -135,6 +139,18 @@ attributes:
|
||||||
- condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.enable_sds_role.value == true"
|
- condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.enable_sds_role.value == true"
|
||||||
action: hide
|
action: hide
|
||||||
|
|
||||||
|
hyper_converged_deployment:
|
||||||
|
type: "checkbox"
|
||||||
|
value: true
|
||||||
|
label: "Hyper-converged deployment"
|
||||||
|
description:
|
||||||
|
Deploy SDS component on all compute nodes automatically and optionally on controller nodes.
|
||||||
|
If the option disabled then SDS will be deployed only on the nodes with the ScaleIO role.
|
||||||
|
weight: 103
|
||||||
|
restrictions:
|
||||||
|
- condition: "settings:scaleio.existing_cluster.value == true"
|
||||||
|
action: hide
|
||||||
|
|
||||||
sds_on_controller:
|
sds_on_controller:
|
||||||
type: "checkbox"
|
type: "checkbox"
|
||||||
value: true
|
value: true
|
||||||
|
@ -142,7 +158,7 @@ attributes:
|
||||||
description: "Setup SDS-es on controller nodes."
|
description: "Setup SDS-es on controller nodes."
|
||||||
weight: 105
|
weight: 105
|
||||||
restrictions:
|
restrictions:
|
||||||
- condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.enable_sds_role.value == true"
|
- condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.hyper_converged_deployment.value == false or settings:scaleio.enable_sds_role.value == true"
|
||||||
action: hide
|
action: hide
|
||||||
|
|
||||||
provisioning_type:
|
provisioning_type:
|
||||||
|
|
|
@ -0,0 +1,30 @@
|
||||||
|
scaleio:
|
||||||
|
name: "ScaleIO"
|
||||||
|
description: "ScaleIO"
|
||||||
|
update_required:
|
||||||
|
- primary-controller
|
||||||
|
restrictions:
|
||||||
|
- condition: "settings:scaleio.hyper_converged_deployment.value == true"
|
||||||
|
message:
|
||||||
|
ScaleIO role is available only in the non-hyper-converged deployment mode.
|
||||||
|
To select non hyper-converged mode navigate to the ScaleIO plugin settings and un-check appropriate checkbox.
|
||||||
|
metadata:
|
||||||
|
group: 'storage'
|
||||||
|
## Experimental features: disabled in production
|
||||||
|
# scaleio-storage-tier1:
|
||||||
|
# name: "ScaleIO Storage Tier1"
|
||||||
|
# description: "Devices of a node with this role will be assigned to the storage pool tier1. If both tier roles are assigned, devices will be split according to plugin settings."
|
||||||
|
# limits:
|
||||||
|
# min: 3
|
||||||
|
# update_required:
|
||||||
|
# - controller
|
||||||
|
# - primary-controller
|
||||||
|
#
|
||||||
|
# scaleio-storage-tier2:
|
||||||
|
# name: "ScaleIO Storage Tier2"
|
||||||
|
# description: "Devices of a node with this role will be assigned to the storage pool tier2. If both tier roles are assigned, devices will be split according to plugin settings."
|
||||||
|
# limits:
|
||||||
|
# min: 3
|
||||||
|
# update_required:
|
||||||
|
# - controller
|
||||||
|
# - primary-controller
|
|
@ -0,0 +1,84 @@
|
||||||
|
volumes:
|
||||||
|
- id: "scaleio"
|
||||||
|
type: "partition"
|
||||||
|
min_size:
|
||||||
|
generator: "calc_gb_to_mb"
|
||||||
|
generator_args: [0]
|
||||||
|
label: "ScaleIO"
|
||||||
|
name: "ScaleIO"
|
||||||
|
mount: "none"
|
||||||
|
file_system: "none"
|
||||||
|
partition_guid: "5e9bd278-9919-4db3-9756-4b82c7e9df52"
|
||||||
|
|
||||||
|
## Experimental features: disabled in production
|
||||||
|
# - id: "scaleio-storage-tier1"
|
||||||
|
# type: "partition"
|
||||||
|
# min_size:
|
||||||
|
# generator: "calc_gb_to_mb"
|
||||||
|
# generator_args: [0]
|
||||||
|
# label: "ScaleIO Tier1"
|
||||||
|
# name: "ScaleIOTier1"
|
||||||
|
# mount: "none"
|
||||||
|
# file_system: "none"
|
||||||
|
# partition_guid: "f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e"
|
||||||
|
# - id: "scaleio-storage-tier2"
|
||||||
|
# type: "partition"
|
||||||
|
# min_size:
|
||||||
|
# generator: "calc_gb_to_mb"
|
||||||
|
# generator_args: [0]
|
||||||
|
# label: "ScaleIO Tier2"
|
||||||
|
# name: "ScaleIOTier2"
|
||||||
|
# mount: "none"
|
||||||
|
# file_system: "none"
|
||||||
|
# partition_guid: "d5321bb3-1098-433e-b4f5-216712fcd06f"
|
||||||
|
# - id: "scaleio-storage-tier3"
|
||||||
|
# type: "partition"
|
||||||
|
# min_size:
|
||||||
|
# generator: "calc_gb_to_mb"
|
||||||
|
# generator_args: [0]
|
||||||
|
# label: "ScaleIO Tier3"
|
||||||
|
# name: "ScaleIOTier3"
|
||||||
|
# mount: "none"
|
||||||
|
# file_system: "none"
|
||||||
|
# partition_guid: "97987bfc-a9ba-40f3-afea-13e1a228e492"
|
||||||
|
# - id: "scaleio-storage-rfcache"
|
||||||
|
# type: "partition"
|
||||||
|
# min_size:
|
||||||
|
# generator: "calc_gb_to_mb"
|
||||||
|
# generator_args: [0]
|
||||||
|
# label: "ScaleIO RFCache"
|
||||||
|
# name: "ScaleIORFCache"
|
||||||
|
# mount: "none"
|
||||||
|
# file_system: "none"
|
||||||
|
# partition_guid: "163ddeea-95dd-4af0-a329-140623590c47"
|
||||||
|
|
||||||
|
volumes_roles_mapping:
|
||||||
|
scaleio:
|
||||||
|
- {allocate_size: "min", id: "os"}
|
||||||
|
- {allocate_size: "full-disk", id: "scaleio"}
|
||||||
|
|
||||||
|
## Experimental features: disabled in production
|
||||||
|
# scaleio-storage-tier1:
|
||||||
|
# - {allocate_size: "min", id: "os"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
|
||||||
|
# scaleio-storage-tier2:
|
||||||
|
# - {allocate_size: "min", id: "os"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
|
||||||
|
# scaleio-storage-tier3:
|
||||||
|
# - {allocate_size: "min", id: "os"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
|
||||||
|
# scaleio-storage-rfcache:
|
||||||
|
# - {allocate_size: "min", id: "os"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
|
||||||
|
# - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
|
Loading…
Reference in New Issue