Create disk partitions on Ubuntu.

Now we create a partition /dev/sdb1 instead of using the whole disk.
Also, as we need to reboot the node after modifying disk partitions,
we moved from fpb 1.0.0 to fpb 2.0.0.

Change-Id: I8b5bdde546858d1e4ad9fc30719415453a7268ab
Guillaume Thouvenin 2015-03-09 15:37:49 +01:00
parent 302599edd4
commit 3422f3c244
11 changed files with 130 additions and 136 deletions
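
In effect, the change stops handing whole disks to LVM and first creates a
single LVM-flagged partition per disk. A minimal sketch of the resulting flow
(device names are illustrative, not taken from this commit):

    # Before: the whole disk became the physical volume.
    pvcreate /dev/sdb

    # After: label the disk, create one partition over the free space,
    # flag it for LVM, and use the partition as the physical volume.
    parted -s /dev/sdb mklabel gpt
    parted -s /dev/sdb mkpart primary 0% 100%
    parted -s /dev/sdb set 1 lvm on
    pvcreate /dev/sdb1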


@@ -3,11 +3,11 @@ $fuel_settings = parseyaml(file('/etc/astute.yaml'))
 if $fuel_settings['elasticsearch_kibana']['node_name'] == $fuel_settings['user_node_name'] {
   $disks = regsubst($fuel_settings['elasticsearch_kibana']['dedicated_disks'], '([a-z]+)', '/dev/\1', 'G')
 
-  class { 'disk_management':
-    disks     => split($disks, ','),
-    directory => $fuel_settings['elasticsearch_kibana']['data_dir'],
-    lv_name   => "es",
-    vg_name   => "data",
+  $array_disks = split($disks, ',')
+
+  class { 'disk_management': }
+
+  disk_management::partition { $array_disks:
+    require => Class['disk_management']
   }
 }


@@ -0,0 +1,21 @@
+$fuel_settings = parseyaml(file('/etc/astute.yaml'))
+
+if $fuel_settings['elasticsearch_kibana']['node_name'] == $fuel_settings['user_node_name'] {
+  $directory = $fuel_settings['elasticsearch_kibana']['data_dir']
+  $disks = split($::unallocated_pvs, ',')
+
+  validate_array($disks)
+
+  if empty($disks) {
+    file { $directory:
+      ensure => "directory",
+    }
+  } else {
+    disk_management::lvm_fs { $directory:
+      disks   => $disks,
+      lv_name => "es",
+      vg_name => "data",
+    }
+  }
+}


@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Use this script if you want to allocate a new partition.
+# Ubuntu and CentOS are not configured the same way by Fuel. CentOS is doing
+# RAID 1 with /boot on all disks so we need to deal with that.
+#
+# $1 -> The disk (example: "/dev/sdb")
+
+set -eux
+
+DISK=$1
+PARTED="$(which parted 2>/dev/null) -s -m"
+
+if ${PARTED} ${DISK} p | grep -q "unrecognised disk label"; then
+    # We need to create a new label
+    ${PARTED} ${DISK} mklabel gpt
+fi
+
+# We take the free space at the end of the disk.
+FREESPACE=$(${PARTED} ${DISK} unit s p free | grep "free" | tail -1 | awk -F: '{print $2, $3}')
+if [[ -z "${FREESPACE}" ]]; then
+    echo "Failed to find free space"
+    exit 1
+fi
+
+# If you create a partition on a mounted disk, this command returns 1,
+# so we need a different way to catch the error.
+if ${PARTED} ${DISK} unit s mkpart primary ${FREESPACE} | grep -q "^Error"; then
+    echo "Failed to create a new primary partition"
+    exit 1
+fi
+
+# Get the ID of the new partition and set its LVM flag.
+# As when we create a new partition, if you run this command on a mounted
+# FS the kernel fails to re-read the partition table and the command
+# returns 1 even in case of success.
+PARTID=$(${PARTED} ${DISK} p | tail -1 | awk -F: '{print $1}')
+if ${PARTED} ${DISK} set ${PARTID} lvm on | grep -q "^Error"; then
+    echo "Failed to set the lvm flag on partition ${PARTID}."
+    exit 1
+fi
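
For reference, the machine-readable parted output the script greps typically
looks like this on an empty disk (hypothetical device and sizes; the exact
columns can vary between parted versions):

    $ parted -s -m /dev/sdb unit s p free
    BYT;
    /dev/sdb:41943040s:scsi:512:512:gpt:QEMU HARDDISK;
    1:34s:41942973s:41942940s:free;

The last "free" row is the one kept by "tail -1", and awk's fields $2 and $3
("34s 41942973s") become the start and end sectors passed to mkpart.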


@@ -1,39 +0,0 @@
-#!/bin/bash
-#
-# Use this script if you want to allocate a new partition that is already used
-# in RAID. It is the case for example with the current deployment of CentOS.
-#
-# $1 is the disk (for example: /dev/sdc)
-# $2 is the RAID device: default is "/dev/md0"
-
-set -eux
-
-DISK=$1
-RAID=${2:-/dev/md0}
-
-MDADM=$(which mdadm 2>/dev/null)
-PARTED=$(which parted 2>/dev/null)
-PARTPROBE=$(which partprobe 2>/dev/null)
-
-function add_new_partition {
-    FREESPACE=$(${PARTED} "$1" unit s p free | grep "Free Space" | awk '{print $1, $2}')
-    if [[ -z "${FREESPACE}" ]]
-    then
-        echo "Failed to find free space"
-        exit 1
-    fi
-    ${PARTED} -s -- $1 unit s mkpart primary ${FREESPACE} &> /dev/null
-}
-
-# Get the partition involved in the RAID.
-PARTITION=$(${MDADM} -D ${RAID} | grep "active" | grep ${DISK} | awk '{print $7}')
-
-# Remove the partition from the RAID.
-$MDADM $RAID --fail $PARTITION --remove $PARTITION &>/dev/null
-
-# Create a new partition.
-add_new_partition $DISK
-
-# Add the partition back to the RAID.
-$MDADM --add $RAID $PARTITION


@@ -6,19 +6,21 @@ devices = Dir.entries('/sys/block/').select do |d|
   File.exist?( "/sys/block/#{ d }/device" )
 end
 
-devices.each do |device|
-  device = "/dev/#{ device }"
-  # Filter only partitions flagged as LVM
-  lvm_partitions = Facter::Util::Resolution.exec(
-    "parted -s -m #{ device } print 2>/dev/null").scan(/^(\d+):.+:lvm;$/).flatten
-  lvm_partitions.each do |x|
-    # Filter only partitions which haven't been created yet
-    pvs = Facter::Util::Resolution.exec(
-      "pvs --noheadings #{ device }#{ x } 2>/dev/null")
-    if pvs.nil? then
-      unallocated_pvs.push("#{ device }#{ x }")
+if Facter::Util::Resolution.which("parted") and Facter::Util::Resolution.which('pvs') then
+  devices.each do |device|
+    device = "/dev/#{ device }"
+    # Filter only partitions flagged as LVM
+    lvm_partitions = Facter::Util::Resolution.exec(
+      "parted -s -m #{ device } print 2>/dev/null").scan(/^(\d+):.+:lvm;$/).flatten
+    lvm_partitions.each do |x|
+      # Filter only partitions which haven't been created yet
+      pvs = Facter::Util::Resolution.exec(
+        "pvs --noheadings #{ device }#{ x } 2>/dev/null")
+      if pvs.nil? then
+        unallocated_pvs.push("#{ device }#{ x }")
+      end
     end
   end
 end
 
-Facter.add("unallocated_pvs") { setcode { unallocated_pvs } }
+Facter.add("unallocated_pvs") { setcode { unallocated_pvs.sort.join(',') } }
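
The fact now reports a sorted, comma-separated string instead of a Ruby array,
which is what the new setup_esdir.pp manifest splits on. A hypothetical value
on a node with two LVM-flagged partitions not yet used as physical volumes:

    $ facter -p unallocated_pvs
    /dev/sdb1,/dev/sdc1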


@@ -1,53 +1,19 @@
-# == Class: disk_management
-#
-# The disk_management class will create a logical volume above the disks
-# given as parameter and mount the directory on this volume.
-#
-# === Parameters
-#
-# [*disks*]
-#   The disks to use to create the physical volumes.
-#
-# [*directory*]
-#   The name of the directory that will be mounted on the created logical
-#   volumes.
-#
-# === Examples
-#
-#  class { 'disk_management':
-#    disks     => ['/dev/sdb', '/dev/sdc'],
-#    directory => "/data",
-#  }
-#
-# === Authors
-#
-# Guillaume Thouvenin <gthouvenin@mirantis.com>
-#
-# === Copyright
-#
-# Copyright 2015 Mirantis Inc, unless otherwise noted.
-#
 class disk_management (
-  $disks,
-  $directory,
-  $lv_name,
-  $vg_name,
-) {
-  # CentOS is deployed with a /boot in RAID 1. We create a new partition with
-  # an ID 4. Until we improve this we need to deal with it.
-  $usedisks = $::operatingsystem ? {
-    CentOS => regsubst($disks, '/dev/([a-z]+)', '/dev/\14', 'G'),
-    Ubuntu => $disks
-  }
-
-  disk_management::partition { $disks:
-  }
-
-  disk_management::lvm_fs { $directory:
-    disks   => $usedisks,
-    lv_name => $lv_name,
-    vg_name => $vg_name,
-    require => Disk_management::Partition[$disks],
-  }
+  $script          = $disk_management::params::script,
+  $puppet_source   = $disk_management::params::puppet_source,
+  $script_location = $disk_management::params::script_location,
+) inherits disk_management::params {
+
+  package { 'parted':
+    ensure => installed,
+  }
+
+  file { $script_location:
+    ensure  => 'file',
+    source  => $puppet_source,
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0700',
+    require => Package['parted'],
+  }
 }


@@ -0,0 +1,5 @@
+class disk_management::params {
+  $script          = "add_partition.sh"
+  $puppet_source   = "puppet:///modules/disk_management/${script}"
+  $script_location = "/usr/local/bin/${script}"
+}


@@ -1,32 +1,12 @@
 define disk_management::partition {
-  $disk = $title
-  $script = "/usr/local/bin/add_partition_on_raid.sh"
-  $cmd = "${script} ${disk}"
-
-  case $::osfamily {
-    'RedHat': {
-      # CentOS deploys /boot into a RAID on all available disks. So in
-      # this case we need to create a new partition instead of using the
-      # whole disks as we do for Debian family.
-      package { 'parted':
-        ensure => installed,
-      }
-
-      file { $script:
-        ensure  => 'file',
-        source  => 'puppet:///modules/disk_management/add_partition_on_raid.sh',
-        owner   => 'root',
-        group   => 'root',
-        mode    => '0700',
-        require => Package['parted'],
-      }
-
-      exec { 'run_script':
-        command => $cmd,
-        require => File[$script],
-      }
-    }
-  }
+  include disk_management::params
+
+  $disk   = $title
+  $script = $disk_management::params::script_location
+  $cmd    = "${script} ${disk}"
+
+  exec { $title:
+    command => $cmd,
+  }
 }


@@ -8,14 +8,11 @@ attributes:
     type: "text"
   dedicated_disks:
-    value: 'sdb'
+    value: ''
     label: 'Dedicated disks'
-    description: 'Comma-separated list of disk devices used to store Elasticsearch data (for instance "sdb,sdc")'
+    description: 'Comma-separated list of disk devices used to store Elasticsearch data (for instance "sda,sdb"). Leave it empty to use "/"'
     weight: 20
     type: "text"
-    regex:
-      source: '\S'
-      error: "Invalid disk name"
   # Parameter hidden in the UI on purpose
   data_dir:


@@ -3,11 +3,18 @@ name: elasticsearch_kibana
 # Human-readable name for your plugin
 title: The Elasticsearch-Kibana Server Plugin
 # Plugin version
-version: 6.1.0
+version: '6.1.0'
 # Description
 description: Deploy Elasticsearch server and the Kibana web interface.
 # Required fuel version
 fuel_version: ['6.1']
+# Licences
+licenses: ['Apache License Version 2.0']
+# Specify author or company name
+authors: ['Mirantis Inc.']
+# A link to the plugin homepage
+homepage: 'https://github.com/stackforge/fuel-plugin-elasticsearch-kibana'
+groups: []
 # The plugin is compatible with releases in the list
 releases:
@@ -23,4 +30,4 @@ releases:
     repository_path: repositories/centos
 # Version of plugin package
-package_version: '1.0.0'
+package_version: '2.0.0'


@@ -14,6 +14,20 @@
     puppet_modules: puppet/modules
     timeout: 600
 
+- role: ['base-os']
+  stage: post_deployment
+  type: reboot
+  parameters:
+    timeout: 600
+
+- role: ['base-os']
+  stage: post_deployment
+  type: puppet
+  parameters:
+    puppet_manifest: puppet/manifests/setup_esdir.pp
+    puppet_modules: puppet/modules
+    timeout: 600
+
 - role: ['base-os']
   stage: post_deployment
   type: puppet