Create disk partitions on Ubuntu.

Now we create a partition (for example /dev/sdb1) instead of using the
whole disk. Also, because we need to reboot the node after modifying the
disk partitions, we moved from fpb 1.0.0 to fpb 2.0.0.

Change-Id: I8b5bdde546858d1e4ad9fc30719415453a7268ab
Guillaume Thouvenin 2015-03-09 15:37:49 +01:00
parent 302599edd4
commit 3422f3c244
11 changed files with 130 additions and 136 deletions
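For reference, the new add_partition.sh introduced below boils down to a few parted calls. A minimal sketch of that sequence, assuming a spare /dev/sdb and an example start sector (both are illustrative values, not taken from the commit):

    parted -s -m /dev/sdb mklabel gpt               # only when the disk has no label yet
    parted -s -m /dev/sdb mkpart primary 2048s 100%  # use the free space at the end of the disk
    parted -s -m /dev/sdb set 1 lvm on               # the partition is then picked up as an LVM PV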


@@ -3,11 +3,11 @@ $fuel_settings = parseyaml(file('/etc/astute.yaml'))
 if $fuel_settings['elasticsearch_kibana']['node_name'] == $fuel_settings['user_node_name'] {
   $disks = regsubst($fuel_settings['elasticsearch_kibana']['dedicated_disks'], '([a-z]+)', '/dev/\1', 'G')
+  $array_disks = split($disks, ',')
-  class { 'disk_management':
-    disks => split($disks, ','),
-    directory => $fuel_settings['elasticsearch_kibana']['data_dir'],
-    lv_name => "es",
-    vg_name => "data",
+  class { 'disk_management': }
+  disk_management::partition { $array_disks:
+    require => Class['disk_management']
   }
 }


@@ -0,0 +1,21 @@
+$fuel_settings = parseyaml(file('/etc/astute.yaml'))
+
+if $fuel_settings['elasticsearch_kibana']['node_name'] == $fuel_settings['user_node_name'] {
+  $directory = $fuel_settings['elasticsearch_kibana']['data_dir']
+  $disks = split($::unallocated_pvs, ',')
+
+  validate_array($disks)
+
+  if empty($disks) {
+    file { $directory:
+      ensure => "directory",
+    }
+  } else {
+    disk_management::lvm_fs { $directory:
+      disks => $disks,
+      lv_name => "es",
+      vg_name => "data",
+    }
+  }
+}
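disk_management::lvm_fs itself is not touched by this change, so its exact behaviour is not shown here. With lv_name "es" and vg_name "data" it is expected to do roughly the following for each unallocated PV; this is only a hedged sketch, with the device, filesystem type and mount point as assumptions:

    pvcreate /dev/sdb1                    # each entry from $::unallocated_pvs
    vgcreate data /dev/sdb1
    lvcreate -l 100%FREE -n es data
    mkfs.ext4 /dev/data/es                # filesystem type is an assumption
    mount /dev/data/es /opt/es-data       # mount point comes from data_dir; the value here is an example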


@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# Use this script if you want to allocate a new partition.
+# Ubuntu and CentOS are not configured the same way by Fuel: CentOS puts
+# /boot in a RAID 1 on all disks, so we need to deal with that.
+#
+# $1 -> The disk (example: "/dev/sdb")
+
+set -eux
+
+DISK=$1
+PARTED="$(which parted 2>/dev/null) -s -m"
+
+if ${PARTED} ${DISK} p | grep -q "unrecognised disk label"; then
+  # We need to create a new label
+  ${PARTED} ${DISK} mklabel gpt
+fi
+
+# We take the free space at the end of the disk.
+FREESPACE=$(${PARTED} ${DISK} unit s p free | grep "free" | tail -1 | awk -F: '{print $2, $3}')
+if [[ -z "${FREESPACE}" ]]; then
+  echo "Failed to find free space"
+  exit 1
+fi
+
+# If you create a partition on a mounted disk, this command returns 1,
+# so we need a different way to catch the error.
+if ${PARTED} ${DISK} unit s mkpart primary ${FREESPACE} | grep -q "^Error"; then
+  echo "Failed to create a new primary partition"
+  exit 1
+fi
+
+# Get the ID of the new partition and set the LVM flag on it.
+# As with the partition creation above, if you run this command on a mounted
+# FS the kernel fails to re-read the partition table and the command returns 1
+# even in case of success.
+PARTID=$(${PARTED} ${DISK} p | tail -1 | awk -F: '{print $1}')
+if ${PARTED} ${DISK} set ${PARTID} lvm on | grep -q "^Error"; then
+  echo "Failed to set the lvm flag on partition ${PARTID}."
+  exit 1
+fi
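A hypothetical invocation on a dedicated data disk (the /usr/local/bin path comes from disk_management::params below; /dev/sdb is only an example device):

    /usr/local/bin/add_partition.sh /dev/sdb
    parted -s -m /dev/sdb print    # the new partition should now carry the lvm flag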


@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# Use this script if you want to allocate a new partition that is already used
-# in RAID. It is the case for example with the current deployment of CentOS.
-#
-# $1 is the disk (for example: /dev/sdc)
-# $2 is the raid : default is "/dev/md0"
-
-set -eux
-
-DISK=$1
-RAID=${2:-/dev/md0}
-
-MDADM=$(which mdadm 2>/dev/null)
-PARTED=$(which parted 2>/dev/null)
-PARTPROBE=$(which partprobe 2>/dev/null)
-
-function add_new_partition {
-  FREESPACE=$(${PARTED} "$1" unit s p free | grep "Free Space" | awk '{print $1, $2}')
-  if [[ -z "${FREESPACE}" ]]
-  then
-    echo "Failed to find free space"
-    exit 1
-  fi
-  ${PARTED} -s -- $1 unit s mkpart primary ${FREESPACE} &> /dev/null
-}
-
-# Get the partition involved into RAID.
-PARTITION=$(${MDADM} -D ${RAID} | grep "active" | grep ${DISK} | awk '{print $7}')
-
-# Remove the partition from RAID.
-$MDADM $RAID --fail $PARTITION --remove $PARTITION &>/dev/null
-
-# Create a new partition
-add_new_partition $DISK
-
-# Add the partition that belongs to the raid.
-$MDADM --add $RAID $PARTITION


@@ -6,19 +6,21 @@ devices = Dir.entries('/sys/block/').select do |d|
   File.exist?( "/sys/block/#{ d }/device" )
 end
-devices.each do |device|
-  device = "/dev/#{ device }"
-  # Filter only partitions flagged as LVM
-  lvm_partitions = Facter::Util::Resolution.exec(
-    "parted -s -m #{ device } print 2>/dev/null").scan(/^(\d+):.+:lvm;$/).flatten
-  lvm_partitions.each do |x|
-    # Filter only partitions which haven't been created yet
-    pvs = Facter::Util::Resolution.exec(
-      "pvs --noheadings #{ device }#{ x } 2>/dev/null")
-    if pvs.nil? then
-      unallocated_pvs.push("#{ device }#{ x }")
+if Facter::Util::Resolution.which("parted") and Facter::Util::Resolution.which('pvs') then
+  devices.each do |device|
+    device = "/dev/#{ device }"
+    # Filter only partitions flagged as LVM
+    lvm_partitions = Facter::Util::Resolution.exec(
+      "parted -s -m #{ device } print 2>/dev/null").scan(/^(\d+):.+:lvm;$/).flatten
+    lvm_partitions.each do |x|
+      # Filter only partitions which haven't been created yet
+      pvs = Facter::Util::Resolution.exec(
+        "pvs --noheadings #{ device }#{ x } 2>/dev/null")
+      if pvs.nil? then
+        unallocated_pvs.push("#{ device }#{ x }")
+      end
     end
   end
 end
-Facter.add("unallocated_pvs") { setcode { unallocated_pvs } }
+Facter.add("unallocated_pvs") { setcode { unallocated_pvs.sort.join(',') } }


@@ -1,53 +1,19 @@
-# == Class: disk_management
-#
-# The disk_management class will create a logical volume above the disks
-# given as parameter and mount the direcory on this volume.
-#
-# === Parameters
-#
-# [*disks*]
-#   The disks to use to create the physical volumes.
-#
-# [*directory*]
-#   The name of the directory that will be mount on created logical volumes.
-#
-# === Examples
-#
-#  class { 'disk_management':
-#    disks => ['/dev/sdb', '/dev/sdc'],
-#    directory => "/data",
-#  }
-#
-# === Authors
-#
-# Guillaume Thouvenin <gthouvenin@mirantis.com>
-#
-# === copyright
-#
-# Copyright 2015 Mirantis Inc, unless otherwise noted.
-#
 class disk_management (
-  $disks,
-  $directory,
-  $lv_name,
-  $vg_name,
-) {
+  $script = $disk_management::params::script,
+  $puppet_source = $disk_management::params::puppet_source,
+  $script_location = $disk_management::params::script_location,
+) inherits disk_management::params {
-  # CentOS is deployed with a /boot in RAID 1. We create a new partition with
-  # an ID 4. Until we improve this we need to deal with it.
-  $usedisks = $::operatingsystem ? {
-    CentOS => regsubst($disks, '/dev/([a-z]+)', '/dev/\14', 'G'),
-    Ubuntu => $disks
+  package { 'parted':
+    ensure => installed,
   }
-  disk_management::partition { $disks:
+  file { $script_location:
+    ensure => 'file',
+    source => $puppet_source,
+    owner => 'root',
+    group => 'root',
+    mode => '0700',
+    require => Package['parted'],
   }
-  disk_management::lvm_fs { $directory:
-    disks => $usedisks,
-    lv_name => $lv_name,
-    vg_name => $vg_name,
-    require => Disk_management::Partition[$disks],
-  }
 }


@@ -0,0 +1,5 @@
+class disk_management::params {
+  $script = "add_partition.sh"
+  $puppet_source = "puppet:///modules/disk_management/${script}"
+  $script_location = "/usr/local/bin/${script}"
+}


@@ -1,32 +1,12 @@
 define disk_management::partition {
-  $disk = $title
-  $script = "/usr/local/bin/add_partition_on_raid.sh"
-  $cmd = "${script} ${disk}"
+  include disk_management::params
-  case $::osfamily {
-    'RedHat': {
-      # CentOS deploys /boot into a RAID on all available disks. So in
-      # this case we need to create a new partition instead of using the whole
-      # disks as we do for Debian family.
+  $disk = $title
+  $script = $disk_management::params::script_location
+  $cmd = "${script} ${disk}"
-      package { 'parted':
-        ensure => installed,
-      }
-      file { $script:
-        ensure => 'file',
-        source => 'puppet:///modules/disk_management/add_partition_on_raid.sh',
-        owner => 'root',
-        group => 'root',
-        mode => '0700',
-        require => Package['parted'],
-      }
-      exec { 'run_script':
-        command => $cmd,
-        require => File[$script],
-      }
-    }
+  exec { $title:
+    command => $cmd,
   }
 }
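To exercise the new class and define outside of a full deployment, something along these lines should work; this is only a sketch, assuming the module path shipped with the plugin and an example device:

    puppet apply --modulepath=puppet/modules -e \
      "class { 'disk_management': }
       disk_management::partition { '/dev/sdb': require => Class['disk_management'] }"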


@@ -8,14 +8,11 @@ attributes:
     type: "text"
   dedicated_disks:
-    value: 'sdb'
+    value: ''
     label: 'Dedicated disks'
-    description: 'Comma-separated list of disk devices used to store Elasticsearch data (for instance "sdb,sdc")'
+    description: 'Comma-separated list of disk devices used to store Elasticsearch data (for instance "sda,sdb"). Leaving it empty means using "/"'
     weight: 20
     type: "text"
-    regex:
-      source: '\S'
-      error: "Invalid disk name"
   # Parameter hidden in the UI on purpose
   data_dir:


@@ -3,11 +3,18 @@ name: elasticsearch_kibana
 # Human-readable name for your plugin
 title: The Elasticsearch-Kibana Server Plugin
 # Plugin version
-version: 6.1.0
+version: '6.1.0'
 # Description
 description: Deploy Elasticsearch server and the Kibana web interface.
 # Required fuel version
 fuel_version: ['6.1']
+# Licences
+licenses: ['Apache License Version 2.0']
+# Specify author or company name
+authors: ['Mirantis Inc.']
+# A link to the plugin homepage
+homepage: 'https://github.com/stackforge/fuel-plugin-elasticsearch-kibana'
+groups: []
 # The plugin is compatible with releases in the list
 releases:

@@ -23,4 +30,4 @@ releases:
     repository_path: repositories/centos
 # Version of plugin package
-package_version: '1.0.0'
+package_version: '2.0.0'
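Since package_version moves to 2.0.0, the plugin has to be rebuilt with a fuel-plugin-builder release that understands that format. From the plugin root the usual invocation would be something like:

    fpb --check ./
    fpb --build ./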


@@ -14,6 +14,20 @@
     puppet_modules: puppet/modules
     timeout: 600
+- role: ['base-os']
+  stage: post_deployment
+  type: reboot
+  parameters:
+    timeout: 600
+- role: ['base-os']
+  stage: post_deployment
+  type: puppet
+  parameters:
+    puppet_manifest: puppet/manifests/setup_esdir.pp
+    puppet_modules: puppet/modules
+    timeout: 600
 - role: ['base-os']
   stage: post_deployment
   type: puppet