From 3fd9fb1fcaced891acbb85ff097227f562406f22 Mon Sep 17 00:00:00 2001
From: alexey-mr
Date: Tue, 6 Sep 2016 13:58:10 +0300
Subject: [PATCH] Non-hyper-converged deployment support

Change-Id: I7dd94514c0802ea7764d4d5d249bbd21f18eb2db
---
 .../puppet/manifests/cluster.pp              | 16 ++--
 .../puppet/manifests/environment.pp          | 12 ++-
 .../puppet/manifests/sds_server.pp           | 15 ++--
 .../scaleio_fuel/lib/facter/cluster_info.rb  | 11 +--
 .../modules/scaleio_fuel/lib/facter/disks.rb | 22 +++--
 environment_config.yaml                      | 44 ++++++----
 node_roles.yaml                              | 30 +++++++
 volumes.yaml                                 | 84 +++++++++++++++++++
 8 files changed, 191 insertions(+), 43 deletions(-)
 create mode 100644 node_roles.yaml
 create mode 100644 volumes.yaml

diff --git a/deployment_scripts/puppet/manifests/cluster.pp b/deployment_scripts/puppet/manifests/cluster.pp
index 8a8caf1..54fc775 100644
--- a/deployment_scripts/puppet/manifests/cluster.pp
+++ b/deployment_scripts/puppet/manifests/cluster.pp
@@ -205,13 +205,17 @@ if $scaleio['metadata']['enabled'] {
   if ! empty(filter_nodes(filter_nodes($all_nodes, 'name', $::hostname), 'role', 'primary-controller')) {
     $use_plugin_roles = $scaleio['enable_sds_role']
     if ! $use_plugin_roles {
-      $storage_nodes = filter_nodes($all_nodes, 'role', 'compute')
-      if $scaleio['sds_on_controller'] {
-        $controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
-        $pr_controller_nodes = filter_nodes($all_nodes, 'role', 'primary-controller')
-        $sds_nodes = concat(concat($pr_controller_nodes, $controller_nodes), $storage_nodes)
+      if $scaleio['hyper_converged_deployment'] {
+        $storage_nodes = filter_nodes($all_nodes, 'role', 'compute')
+        if $scaleio['sds_on_controller'] {
+          $controller_nodes = filter_nodes($all_nodes, 'role', 'controller')
+          $pr_controller_nodes = filter_nodes($all_nodes, 'role', 'primary-controller')
+          $sds_nodes = concat(concat($pr_controller_nodes, $controller_nodes), $storage_nodes)
+        } else {
+          $sds_nodes = $storage_nodes
+        }
       } else {
-        $sds_nodes = $storage_nodes
+        $sds_nodes = filter_nodes($all_nodes, 'role', 'scaleio')
       }
     } else {
       $sds_nodes = concat(filter_nodes($all_nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($all_nodes, 'role', 'scaleio-storage-tier2'))
diff --git a/deployment_scripts/puppet/manifests/environment.pp b/deployment_scripts/puppet/manifests/environment.pp
index 3224466..0d23906 100644
--- a/deployment_scripts/puppet/manifests/environment.pp
+++ b/deployment_scripts/puppet/manifests/environment.pp
@@ -86,11 +86,15 @@ if $scaleio['metadata']['enabled'] {
     # Check SDS count
     $use_plugin_roles = $scaleio['enable_sds_role']
     if ! $use_plugin_roles {
-      $controller_sds_count = $scaleio['sds_on_controller'] ? {
-        true => count($controller_ips_array),
-        default => 0
+      if $scaleio['hyper_converged_deployment'] {
+        $controller_sds_count = $scaleio['sds_on_controller'] ? {
+          true => count($controller_ips_array),
+          default => 0
+        }
+        $total_sds_count = count(filter_nodes($all_nodes, 'role', 'compute')) + $controller_sds_count
+      } else {
+        $total_sds_count = count(filter_nodes($all_nodes, 'role', 'scaleio'))
       }
-      $total_sds_count = count(filter_nodes($all_nodes, 'role', 'compute')) + $controller_sds_count
       if $total_sds_count < 3 {
         $sds_check_msg = 'There should be at least 3 nodes with SDSs, either add Compute node or use Controllers as SDS.'
       }
diff --git a/deployment_scripts/puppet/manifests/sds_server.pp b/deployment_scripts/puppet/manifests/sds_server.pp
index 2691c84..0e181da 100644
--- a/deployment_scripts/puppet/manifests/sds_server.pp
+++ b/deployment_scripts/puppet/manifests/sds_server.pp
@@ -15,14 +15,19 @@ notice('MODULAR: scaleio: sds_server')
 $scaleio = hiera('scaleio')
 if $scaleio['metadata']['enabled'] {
   if ! $scaleio['existing_cluster'] {
+    $all_nodes = hiera('nodes')
+    $nodes = filter_nodes($all_nodes, 'name', $::hostname)
     $use_plugin_roles = $scaleio['enable_sds_role']
     if ! $use_plugin_roles {
-      #it is supposed that task is run on compute or controller
-      $node_ips = split($::ip_address_array, ',')
-      $is_sds_server = empty(intersection(split($::controller_ips, ','), $node_ips)) or $scaleio['sds_on_controller']
+      if $scaleio['hyper_converged_deployment'] {
+        $is_controller = !empty(concat(filter_nodes($nodes, 'role', 'primary-controller'), filter_nodes($nodes, 'role', 'controller')))
+        $is_sds_on_controller = $is_controller and $scaleio['sds_on_controller']
+        $is_sds_on_compute = !empty(filter_nodes($nodes, 'role', 'compute'))
+        $is_sds_server = $is_sds_on_controller or $is_sds_on_compute
+      } else {
+        $is_sds_server = !empty(filter_nodes($nodes, 'role', 'scaleio'))
+      }
     } else {
-      $all_nodes = hiera('nodes')
-      $nodes = filter_nodes($all_nodes, 'name', $::hostname)
       $is_sds_server = ! empty(concat(
         concat(filter_nodes($nodes, 'role', 'scaleio-storage-tier1'), filter_nodes($nodes, 'role', 'scaleio-storage-tier2')),
         filter_nodes($nodes, 'role', 'scaleio-storage-tier3')))
diff --git a/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/cluster_info.rb b/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/cluster_info.rb
index 69dc38c..d9d8786 100644
--- a/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/cluster_info.rb
+++ b/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/cluster_info.rb
@@ -23,11 +23,12 @@
 # ---------------------------------------------------------------------------------
 # | Name                                     | Description
 # |--------------------------------------------------------------------------------
-# | scaleio_sds_ips   | Comma separated list of SDS IPs.
-# | scaleio_sds_names | Comma separated list of SDS names.
-# | scaleio_sdc_ips   | Comma separated list of SDC IPs,
-# |                   | it is list of management IPs, not storage IPs.
-
+# | scaleio_sds_ips                          | Comma separated list of SDS IPs.
+# | scaleio_sds_names                        | Comma separated list of SDS names.
+# | scaleio_sdc_ips                          | Comma separated list of SDC IPs,
+# |                                          | it is list of management IPs, not storage IPs.
+# | scaleio_sds_with_protection_domain_list  | Comma separated list of SDS
+# |                                          | with their Protection domains: sds1,pd1,sds2,pd2,..
 # Facts about MDM from Gateway:
 # (It requests them from Gateway via curl and requires the fact 'gateway_ips'.
 # An user is 'admin' by default or the fact 'gateway_user' if it exists.
diff --git a/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/disks.rb b/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/disks.rb
index b58a992..f0d72db 100644
--- a/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/disks.rb
+++ b/deployment_scripts/puppet/modules/scaleio_fuel/lib/facter/disks.rb
@@ -1,16 +1,19 @@
 require 'date'
 require 'facter'
 
-$scaleio_tier1_guid = 'f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e'
-$scaleio_tier2_guid = 'd5321bb3-1098-433e-b4f5-216712fcd06f'
-$scaleio_tier3_guid = '97987bfc-a9ba-40f3-afea-13e1a228e492'
-$scaleio_rfcache_guid = '163ddeea-95dd-4af0-a329-140623590c47'
+$scaleio_storage_guid = '5e9bd278-9919-4db3-9756-4b82c7e9df52'
+## Experimental:
+#$scaleio_tier1_guid = 'f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e'
+#$scaleio_tier2_guid = 'd5321bb3-1098-433e-b4f5-216712fcd06f'
+#$scaleio_tier3_guid = '97987bfc-a9ba-40f3-afea-13e1a228e492'
+#$scaleio_rfcache_guid = '163ddeea-95dd-4af0-a329-140623590c47'
 
 $scaleio_tiers = {
-  'tier1' => $scaleio_tier1_guid,
-  'tier2' => $scaleio_tier2_guid,
-  'tier3' => $scaleio_tier3_guid,
-  'rfcache' => $scaleio_rfcache_guid,
+  'all' => $scaleio_storage_guid,
+#  'tier1' => $scaleio_tier1_guid,
+#  'tier2' => $scaleio_tier2_guid,
+#  'tier3' => $scaleio_tier3_guid,
+#  'rfcache' => $scaleio_rfcache_guid,
 }
 
 $scaleio_log_file = "/var/log/fuel-plugin-scaleio.log"
@@ -47,11 +50,12 @@ end
 Facter.add('sds_storage_small_devices') do
   setcode do
     result = nil
+    disks0 = Facter.value('sds_storage_devices_all')
     disks1 = Facter.value('sds_storage_devices_tier1')
     disks2 = Facter.value('sds_storage_devices_tier2')
     disks3 = Facter.value('sds_storage_devices_tier3')
     if disks1 or disks2 or disks3
-      disks = [disks1, disks2, disks3].join(',')
+      disks = [disks0, disks1, disks2, disks3].join(',')
     end
     if disks
       devices = disks.split(',')
diff --git a/environment_config.yaml b/environment_config.yaml
index 9d0e5f4..9d7e6dc 100644
--- a/environment_config.yaml
+++ b/environment_config.yaml
@@ -3,16 +3,6 @@ attributes:
     # Settings group can be one of "general", "security", "compute", "network",
     # "storage", "logging", "openstack_services" and "other".
     group: 'storage'
-
-  enable_sds_role:
-    type: "checkbox"
-    value: false
-    label: "Use ScaleIO SDS role."
-    description: "Enables role based deployment."
-    weight: 5
-    restrictions:
-      - condition: "true"
-        action: hide
 
   existing_cluster:
     type: "checkbox"
@@ -73,7 +63,10 @@ attributes:
     type: "text"
     value: "default"
     label: "Protection domain"
-    description: "Name of first protection domain. Next domains will get names like default_2, default_3."
+    description:
+      Name of the first protection domain. In case of auto-scaling, next domains will get names like default_2, default_3.
+      Auto-scaling works if the Use Existing Cluster option is disabled. The next domain is created when the number
+      of SDS-es reaches the limit from the 'Maximum number of nodes in one protection domain' setting.
     weight: 70
     regex:
       source: '^(\w+){1}((,){1}(?=\w+))*'
@@ -84,8 +77,9 @@ attributes:
     value: "100"
     label: "Maximum number of nodes in one protection domain"
    description:
-      If number of nodes gets lasrgert than this threshould new protection domain will be created.
-      Note, in that case it is needed to add at least 3 new nodes with Storage role to make new domain operationable.
+      If the number of nodes gets larger than this threshold, a new protection domain will be created.
+      Note that in this case at least 3 new nodes have to be added to make the new domain operational.
+      In case of hyper-converged deployment they should be compute nodes, otherwise ScaleIO nodes.
     weight: 75
     regex:
       source: '^[1-9]{1}[0-9]*$'
@@ -94,6 +88,16 @@ attributes:
       - condition: "settings:scaleio.existing_cluster.value == true"
         action: hide
 
+  enable_sds_role:
+    type: "checkbox"
+    value: false
+    label: "Experimental role-based deployment"
+    description: "Hidden option to disable experimental feature"
+    weight: 5
+    restrictions:
+      - condition: "true"
+        action: hide
+
   storage_pools:
     type: "text"
     value: "default"
@@ -135,6 +139,18 @@ attributes:
       - condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.enable_sds_role.value == true"
        action: hide
 
+  hyper_converged_deployment:
+    type: "checkbox"
+    value: true
+    label: "Hyper-converged deployment"
+    description:
+      Deploy SDS components on all compute nodes automatically and optionally on controller nodes.
+      If the option is disabled, SDS will be deployed only on the nodes with the ScaleIO role.
+    weight: 103
+    restrictions:
+      - condition: "settings:scaleio.existing_cluster.value == true"
+        action: hide
+
   sds_on_controller:
     type: "checkbox"
     value: true
@@ -142,7 +158,7 @@ attributes:
     description: "Setup SDS-es on controller nodes."
     weight: 105
     restrictions:
-      - condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.enable_sds_role.value == true"
+      - condition: "settings:scaleio.existing_cluster.value == true or settings:scaleio.hyper_converged_deployment.value == false or settings:scaleio.enable_sds_role.value == true"
         action: hide
 
   provisioning_type:
diff --git a/node_roles.yaml b/node_roles.yaml
new file mode 100644
index 0000000..d57ba77
--- /dev/null
+++ b/node_roles.yaml
@@ -0,0 +1,30 @@
+scaleio:
+  name: "ScaleIO"
+  description: "ScaleIO"
+  update_required:
+    - primary-controller
+  restrictions:
+    - condition: "settings:scaleio.hyper_converged_deployment.value == true"
+      message:
+        ScaleIO role is available only in the non-hyper-converged deployment mode.
+        To select non-hyper-converged mode, navigate to the ScaleIO plugin settings and un-check the appropriate checkbox.
+  metadata:
+    group: 'storage'
+## Experimental features: disabled in production
+# scaleio-storage-tier1:
+#   name: "ScaleIO Storage Tier1"
+#   description: "Devices of a node with this role will be assigned to the storage pool tier1. If both tier roles are assigned, devices will be split according to plugin settings."
+#   limits:
+#     min: 3
+#   update_required:
+#     - controller
+#     - primary-controller
+#
+# scaleio-storage-tier2:
+#   name: "ScaleIO Storage Tier2"
+#   description: "Devices of a node with this role will be assigned to the storage pool tier2. If both tier roles are assigned, devices will be split according to plugin settings."
+#   limits:
+#     min: 3
+#   update_required:
+#     - controller
+#     - primary-controller
diff --git a/volumes.yaml b/volumes.yaml
new file mode 100644
index 0000000..c60729a
--- /dev/null
+++ b/volumes.yaml
@@ -0,0 +1,84 @@
+volumes:
+  - id: "scaleio"
+    type: "partition"
+    min_size:
+      generator: "calc_gb_to_mb"
+      generator_args: [0]
+    label: "ScaleIO"
+    name: "ScaleIO"
+    mount: "none"
+    file_system: "none"
+    partition_guid: "5e9bd278-9919-4db3-9756-4b82c7e9df52"
+
+## Experimental features: disabled in production
+#  - id: "scaleio-storage-tier1"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO Tier1"
+#    name: "ScaleIOTier1"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "f2e81bdc-99b3-4bf6-a68f-dc794da6cd8e"
+#  - id: "scaleio-storage-tier2"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO Tier2"
+#    name: "ScaleIOTier2"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "d5321bb3-1098-433e-b4f5-216712fcd06f"
+#  - id: "scaleio-storage-tier3"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO Tier3"
+#    name: "ScaleIOTier3"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "97987bfc-a9ba-40f3-afea-13e1a228e492"
+#  - id: "scaleio-storage-rfcache"
+#    type: "partition"
+#    min_size:
+#      generator: "calc_gb_to_mb"
+#      generator_args: [0]
+#    label: "ScaleIO RFCache"
+#    name: "ScaleIORFCache"
+#    mount: "none"
+#    file_system: "none"
+#    partition_guid: "163ddeea-95dd-4af0-a329-140623590c47"
+
+volumes_roles_mapping:
+  scaleio:
+    - {allocate_size: "min", id: "os"}
+    - {allocate_size: "full-disk", id: "scaleio"}
+
+## Experimental features: disabled in production
+# scaleio-storage-tier1:
+#   - {allocate_size: "min", id: "os"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
+# scaleio-storage-tier2:
+#   - {allocate_size: "min", id: "os"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
+# scaleio-storage-tier3:
+#   - {allocate_size: "min", id: "os"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
+# scaleio-storage-rfcache:
+#   - {allocate_size: "min", id: "os"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier1"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier2"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-tier3"}
+#   - {allocate_size: "full-disk", id: "scaleio-storage-rfcache"}
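
Note on the new fact documented in cluster_info.rb: scaleio_sds_with_protection_domain_list flattens SDS/protection-domain pairs into a single comma separated string (sds1,pd1,sds2,pd2,..). Below is a minimal Ruby sketch of how a consumer of that fact could turn the string back into a per-SDS mapping; the helper name sds_to_protection_domain and the sample node names are illustrative only and are not part of this patch.

  # Illustrative only: turn the flattened "sds1,pd1,sds2,pd2,.." fact value
  # into a { sds_name => protection_domain } hash.
  def sds_to_protection_domain(fact_value)
    return {} if fact_value.nil? || fact_value.empty?
    # Hash[*list] pairs every SDS name with the domain name that follows it
    Hash[*fact_value.split(',')]
  end

  # Made-up sample value, not output of the plugin:
  example = 'node-1,default,node-2,default,node-3,default_2'
  puts sds_to_protection_domain(example).inspect
  # => {"node-1"=>"default", "node-2"=>"default", "node-3"=>"default_2"}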