Refactor to pcmk_* resources

Use pcmk_* resources from the new pacemaker module
instead of the old cs_* resources from the upstream
corosync module and the custom cs_rsc_* resources
from the duct-tape-patched corosync module.
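
For illustration, a hedged before/after sketch of the
rename (the resource and agent names here are made up;
the parameter renames mirror the hunks below, where
provided_by becomes primitive_provider and ms_metadata
becomes complex_metadata):

```puppet
# Before: cs_resource from the patched corosync module.
cs_resource { 'p_foo':                  # 'p_foo' is a hypothetical name
  ensure          => present,
  primitive_class => 'ocf',
  provided_by     => 'fuel',
  primitive_type  => 'foo-agent',       # hypothetical OCF agent
  ms_metadata     => { 'interleave' => 'true' },
}

# After: pcmk_resource from the new pacemaker module.
pcmk_resource { 'p_foo':
  ensure             => 'present',
  primitive_class    => 'ocf',
  primitive_provider => 'fuel',         # was provided_by
  primitive_type     => 'foo-agent',
  complex_metadata   => { 'interleave' => 'true' },  # was ms_metadata
}
```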

The new pacemaker module is rewritten from scratch
and provides improved performance, transparency, and
debuggability. It uses its own XML engine and depends
on neither the "pcs" nor the "crm" application.

"corosync" modules will still be used to install the
corosync cluster, and the "pacemaker" modules will
be used to work with the cluster's configuration.

Refactor pacemaker services to use the
pacemaker::service wrapper. It controls primitive
creation and startup in a uniform way and creates
ocf_handler helpers for manual primitive management.
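
A minimal usage sketch, assuming a hypothetical service
name and OCF agent (the parameter set mirrors the call
sites changed below):

```puppet
pacemaker::service { 'p_example':
  prefix             => false,  # title already carries the p_ prefix
  primitive_class    => 'ocf',
  primitive_provider => 'fuel',
  primitive_type     => 'example-agent',  # hypothetical agent
  metadata           => { 'migration-threshold' => '3' },
  parameters         => { 'user' => 'example' },
  operations         => {
    'monitor' => { 'interval' => '20', 'timeout' => '10' },
  },
}
```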

Add the run_failed_log feature to fuel_noop_tests
to replay failed tests after they have been fixed.

Change-Id: Ifd5813c657031cdd7b86368cd971742483a5b8a1
Related-Bug: 1414583
Related-Bug: 1453773
Related-Bug: 1482248
Vladimir Kuklin 2016-02-29 16:53:55 +03:00 committed by Alex Schultz
parent 574820d062
commit 2f2239025a
80 changed files with 587 additions and 6057 deletions

View File

@@ -41,3 +41,4 @@ puppet/tftp
puppet/vcsrepo
puppet/xinetd
puppet/rabbitmq
puppet/pacemaker

View File

@@ -26,6 +26,11 @@ mod 'stdlib',
:git => 'https://github.com/fuel-infra/puppetlabs-stdlib.git',
:ref => '4.9.0'
# Pull in puppet-pacemaker modules
mod 'pacemaker',
:git => 'https://github.com/fuel-infra/puppet-pacemaker.git',
:ref => '1.0.0'
# Pull in puppetlabs-concat
mod 'concat',
:git => 'https://github.com/fuel-infra/puppetlabs-concat.git',

View File

@@ -1,5 +1,5 @@
class ceilometer_ha::agent::central inherits ceilometer::agent::central {
pacemaker_wrappers::service { $::ceilometer::params::agent_central_service_name :
pacemaker::service { $::ceilometer::params::agent_central_service_name :
primitive_type => 'ceilometer-agent-central',
metadata => { 'resource-stickiness' => '1' },
parameters => { 'user' => 'ceilometer' },

View File

@@ -1,5 +1,5 @@
class ceilometer_ha::alarm::evaluator inherits ceilometer::alarm::evaluator {
pacemaker_wrappers::service { $::ceilometer::params::alarm_evaluator_service_name :
pacemaker::service { $::ceilometer::params::alarm_evaluator_service_name :
primitive_type => 'ceilometer-alarm-evaluator',
metadata => { 'resource-stickiness' => '1' },
parameters => { 'user' => 'ceilometer' },

View File

@@ -4,10 +4,9 @@ fixtures:
corosync: "#{source_dir}/../corosync"
openstack: "#{source_dir}/../openstack"
stdlib: "#{source_dir}/../stdlib"
pacemaker: "#{source_dir}/../pacemaker"
pacemaker_wrappers: "#{source_dir}/../pacemaker_wrappers"
ntp: "#{source_dir}/../ntp"
heat: "#{source_dir}/../heat"
openstacklib: "#{source_dir}/../openstacklib"
inifile: "#{source_dir}/../inifile"
mysql: "#{source_dir}/../mysql"
pacemaker: "#{source_dir}/../pacemaker"

View File

@@ -8,6 +8,8 @@
class cluster::aodh_evaluator {
include ::aodh::params
$service_name = $::aodh::params::evaluator_service_name
# migration-threshold is the number of tries to
# start the resource on each controller node
$metadata = {
@@ -30,11 +32,16 @@ class cluster::aodh_evaluator {
},
}
pacemaker_wrappers::service { $::aodh::params::evaluator_service_name:
primitive_type => 'aodh-evaluator',
metadata => $metadata,
parameters => { 'user' => 'aodh' },
operations => $operations,
$primitive_type = 'aodh-evaluator'
$parameters = { 'user' => 'aodh' }
pacemaker::service { $service_name :
primitive_type => $primitive_type,
metadata => $metadata,
parameters => $parameters,
operations => $operations
}
Pcmk_resource["p_${service_name}"] ->
Service[$service_name]
}

View File

@@ -0,0 +1,86 @@
class cluster::conntrackd_ocf (
$vrouter_name,
$bind_address,
$mgmt_bridge,
) {
$service_name = 'p_conntrackd'
case $operatingsystem {
'Centos': { $conntrackd_package = 'conntrack-tools' }
'Ubuntu': { $conntrackd_package = 'conntrackd' }
}
package { $conntrackd_package:
ensure => 'installed',
} ->
file { '/etc/conntrackd/conntrackd.conf':
content => template('cluster/conntrackd.conf.erb'),
} ->
service { $service_name :
ensure => 'running',
enable => true,
}
$primitive_class = 'ocf'
$primitive_provider = 'fuel'
$primitive_type = 'ns_conntrackd'
$metadata = {
'migration-threshold' => 'INFINITY',
'failure-timeout' => '180s'
}
$parameters = {
'bridge' => $mgmt_bridge,
}
$complex_type = 'master'
$complex_metadata = {
'notify' => 'true',
'ordered' => 'false',
'interleave' => 'true',
'clone-node-max' => '1',
'master-max' => '1',
'master-node-max' => '1',
'target-role' => 'Master'
}
$operations = {
'monitor' => {
'interval' => '30',
'timeout' => '60'
},
'monitor:Master' => {
'role' => 'Master',
'interval' => '27',
'timeout' => '60'
},
}
pacemaker::service { $service_name :
prefix => false,
primitive_class => $primitive_class,
primitive_provider => $primitive_provider,
primitive_type => $primitive_type,
metadata => $metadata,
parameters => $parameters,
complex_type => $complex_type,
complex_metadata => $complex_metadata,
operations => $operations,
}
pcmk_colocation { "conntrackd-with-${vrouter_name}-vip":
first => "vip__vrouter_${vrouter_name}",
second => 'master_p_conntrackd:Master',
}
File['/etc/conntrackd/conntrackd.conf'] ->
Pcmk_resource[$service_name] ->
Service[$service_name] ->
Pcmk_colocation["conntrackd-with-${vrouter_name}-vip"]
# Workaround to ensure log is rotated properly
file { '/etc/logrotate.d/conntrackd':
content => template('openstack/95-conntrackd.conf.erb'),
}
Package[$conntrackd_package] -> File['/etc/logrotate.d/conntrackd']
}

View File

@@ -24,16 +24,16 @@ define cluster::corosync::cs_service (
}
if $primary {
cs_resource { "p_${service_name}":
ensure => present,
primitive_class => 'ocf',
provided_by => 'fuel',
primitive_type => $ocf_script,
complex_type => $csr_complex_type,
ms_metadata => $csr_ms_metadata,
parameters => $csr_parameters,
metadata => $csr_metadata,
operations => {
pcmk_resource { "p_${service_name}":
ensure => 'present',
primitive_class => 'ocf',
primitive_provider => 'fuel',
primitive_type => $ocf_script,
complex_type => $csr_complex_type,
complex_metadata => $csr_ms_metadata,
parameters => $csr_parameters,
metadata => $csr_metadata,
operations => {
'monitor' => {
'interval' => $csr_mon_intr,
'timeout' => $csr_mon_timeout
@@ -48,7 +48,7 @@
}
}
}
Cs_resource["p_${service_name}"] -> Service[$service_true_title]
Pcmk_resource["p_${service_name}"] -> Service[$service_true_title]
}
if ! $package_name {
warning('Cluster::corosync::cs_service: Without package definition can\'t protect service for autostart correctly.')

View File

@@ -1,30 +0,0 @@
# not a doc string
#TODO (bogdando) move to extras ha wrappers
define cluster::corosync::cs_with_service (
$first,
$second,
$cib = undef,
$score = 'INFINITY',
$order = true,
)
{
cs_rsc_colocation { "${second}-with-${first}":
ensure => present,
cib => $cib,
primitives => [$second, $first],
score => $score,
}
if $order {
cs_rsc_order { "${second}-after-${first}":
ensure => present,
cib => $cib,
first => $first,
second => $second,
score => $score,
require => Cs_rsc_colocation["${second}-with-${first}"]
}
}
}

View File

@@ -2,60 +2,63 @@
#
# Configure OCF service for DNS managed by corosync/pacemaker
#
class cluster::dns_ocf ( $primary_controller ) {
$service_name = 'p_dns'
if $primary_controller {
cs_resource { $service_name:
ensure => present,
primitive_class => 'ocf',
provided_by => 'fuel',
primitive_type => 'ns_dns',
complex_type => 'clone',
ms_metadata => {
'interleave' => 'true',
},
metadata => {
'migration-threshold' => '3',
'failure-timeout' => '120',
},
parameters => {
'ns' => 'vrouter',
},
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '10'
},
'start' => {
'interval' => '0',
'timeout' => '30'
},
'stop' => {
'interval' => '0',
'timeout' => '30'
},
},
} ->
cs_rsc_colocation { 'dns-with-vrouter-ns':
ensure => present,
score => 'INFINITY',
primitives => [
"clone_${service_name}",
"clone_p_vrouter"
],
}
Cs_resource[$service_name] ~> Service[$service_name]
class cluster::dns_ocf {
$service_name = 'p_dns'
$primitive_class = 'ocf'
$primitive_provider = 'fuel'
$primitive_type = 'ns_dns'
$complex_type = 'clone'
$complex_metadata = {
'interleave' => 'true',
}
$metadata = {
'migration-threshold' => '3',
'failure-timeout' => '120',
}
$parameters = {
'ns' => 'vrouter',
}
$operations = {
'monitor' => {
'interval' => '20',
'timeout' => '10'
},
'start' => {
'timeout' => '30'
},
'stop' => {
'timeout' => '30'
},
}
pacemaker::service { $service_name :
primitive_class => $primitive_class,
primitive_provider => $primitive_provider,
primitive_type => $primitive_type,
complex_type => $complex_type,
complex_metadata => $complex_metadata,
metadata => $metadata,
parameters => $parameters,
operations => $operations,
prefix => false,
}
pcmk_colocation { 'dns-with-vrouter-ns' :
ensure => 'present',
score => 'INFINITY',
first => "clone_p_vrouter",
second => "clone_${service_name}",
}
Pcmk_resource[$service_name] ->
Pcmk_colocation['dns-with-vrouter-ns'] ->
Service[$service_name]
service { $service_name:
name => $service_name,
enable => true,
ensure => 'running',
enable => true,
hasstatus => true,
hasrestart => true,
provider => 'pacemaker',
}
}

View File

@@ -6,69 +6,64 @@ class cluster::haproxy_ocf (
$debug = false,
$other_networks = false,
) inherits cluster::haproxy {
$primitive_type = 'ns_haproxy'
$complex_type = 'clone'
$ms_metadata = {
$complex_metadata = {
'interleave' => true,
}
$metadata = {
'migration-threshold' => '3',
'failure-timeout' => '120',
}
$parameters = {
'ns' => 'haproxy',
'debug' => $debug,
'other_networks' => $other_networks,
}
$operations = {
'monitor' => {
'interval' => '30',
'timeout' => '60'
},
'start' => {
'interval' => '0',
'timeout' => '60'
},
'stop' => {
'interval' => '0',
'timeout' => '60'
},
'start' => {
'timeout' => '60'
},
'stop' => {
'timeout' => '60'
},
}
pacemaker_wrappers::service { $service_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
ms_metadata => $ms_metadata,
complex_type => $complex_type,
prefix => false,
pacemaker::service { $service_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
complex_metadata => $complex_metadata,
complex_type => $complex_type,
prefix => false,
}
cs_rsc_colocation { 'vip_public-with-haproxy':
ensure => present,
pcmk_colocation { 'vip_public-with-haproxy':
ensure => 'present',
score => 'INFINITY',
primitives => [
"vip__public",
"clone_${service_name}"
],
}
cs_rsc_colocation { 'vip_management-with-haproxy':
ensure => present,
score => 'INFINITY',
primitives => [
"vip__management",
"clone_${service_name}"
],
first => "clone_${service_name}",
second => "vip__public",
}
Cs_resource[$service_name] -> Cs_rsc_colocation['vip_public-with-haproxy'] -> Service[$service_name]
Cs_resource[$service_name] -> Cs_rsc_colocation['vip_management-with-haproxy'] -> Service[$service_name]
pcmk_colocation { 'vip_management-with-haproxy':
ensure => 'present',
score => 'INFINITY',
first => "clone_${service_name}",
second => 'vip__management',
}
Pcmk_resource[$service_name] ->
Service[$service_name] ->
Pcmk_colocation['vip_public-with-haproxy']
Pcmk_resource[$service_name] ->
Service[$service_name] ->
Pcmk_colocation['vip_management-with-haproxy']
}

View File

@@ -42,12 +42,12 @@ class cluster::heat_engine {
'interleave' => true,
}
pacemaker_wrappers::service { $::heat::params::engine_service_name :
primitive_type => $primitive_type,
metadata => $metadata,
complex_type => 'clone',
ms_metadata => $ms_metadata,
operations => $operations,
pacemaker::service { $::heat::params::engine_service_name :
primitive_type => $primitive_type,
metadata => $metadata,
complex_type => 'clone',
complex_metadata => $ms_metadata,
operations => $operations,
}
}

View File

@@ -33,12 +33,12 @@ class cluster::mysql (
$service_name = 'mysqld'
if $primary_controller {
cs_resource { "p_${service_name}":
ensure => present,
primitive_class => 'ocf',
provided_by => 'fuel',
primitive_type => 'mysql-wss',
complex_type => 'clone',
pcmk_resource { "p_${service_name}":
ensure => 'present',
primitive_class => 'ocf',
primitive_provider => 'fuel',
primitive_type => 'mysql-wss',
complex_type => 'clone',
parameters => {
'config' => $mysql_config,
'test_user' => $mysql_user,
@@ -61,7 +61,7 @@ class cluster::mysql (
},
}
Cs_resource["p_${service_name}"] ~>
Pcmk_resource["p_${service_name}"] ~>
Service[$service_name]
}

View File

@@ -6,7 +6,7 @@ class cluster::ntp_ocf inherits ntp::params {
$primitive_type = 'ns_ntp'
$complex_type = 'clone'
$ms_metadata = {
$complex_metadata = {
'interleave' => 'true',
}
@@ -34,26 +34,26 @@ class cluster::ntp_ocf inherits ntp::params {
},
}
pacemaker_wrappers::service { $service_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
ms_metadata => $ms_metadata,
complex_type => $complex_type,
prefix => true,
}
cs_rsc_colocation { 'ntp-with-vrouter-ns' :
pcmk_colocation { 'ntp-with-vrouter-ns' :
ensure => 'present',
score => 'INFINITY',
primitives => [
"clone_p_$service_name",
"clone_p_vrouter",
],
first => "clone_p_$service_name",
second => "clone_p_vrouter",
}
Cs_resource["p_${service_name}"] -> Cs_rsc_colocation['ntp-with-vrouter-ns'] -> Service[$service_name]
pacemaker::service { $service_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
complex_metadata => $complex_metadata,
complex_type => $complex_type,
prefix => true,
}
Pcmk_resource["p_${service_name}"] ->
Pcmk_colocation['ntp-with-vrouter-ns'] ->
Service['ntp']
if ! defined(Service[$service_name]) {
service { $service_name:

View File

@@ -1,4 +1,4 @@
# == Class: pacemaker_wrappers::rabbitmq
# == Class: cluster::rabbitmq_ocf
#
# Overrides rabbitmq service provider as a pacemaker
#
@@ -63,7 +63,7 @@
# Defaults to empty string
#
class pacemaker_wrappers::rabbitmq (
class cluster::rabbitmq_ocf (
$primitive_type = 'rabbitmq-server',
$service_name = $::rabbitmq::service_name,
$port = $::rabbitmq::port,
@@ -106,7 +106,7 @@ class pacemaker_wrappers::rabbitmq (
'resource-stickiness' => '100',
}
$ms_metadata = {
$complex_metadata = {
'notify' => 'true',
# We shouldn't enable ordered start for parallel start of RA.
'ordered' => 'false',
@@ -155,11 +155,11 @@ class pacemaker_wrappers::rabbitmq (
},
}
pacemaker_wrappers::service { $service_name :
pacemaker::service { $service_name :
primitive_type => $primitive_type,
complex_type => 'master',
complex_metadata => $complex_metadata,
metadata => $metadata,
ms_metadata => $ms_metadata,
operations => $operations,
parameters => $parameters,
# ocf_script_file => $ocf_script_file,

View File

@@ -30,34 +30,32 @@ class cluster::sysinfo (
$min_disk_free = '100M',
$disk_unit = 'M',
$monitor_interval = '15s',
$monitor_ensure = present,
$monitor_ensure = 'present',
) {
# NOTE: We do not use a clone resource here as disks may be different per host
cs_resource { "sysinfo_${::fqdn}":
ensure => $monitor_ensure,
primitive_class => 'ocf',
provided_by => 'pacemaker',
primitive_type => 'SysInfo',
parameters => {
pcmk_resource { "sysinfo_${::fqdn}" :
ensure => $monitor_ensure,
primitive_class => 'ocf',
primitive_provider => 'pacemaker',
primitive_type => 'SysInfo',
parameters => {
'disks' => join(any2array($disks), ' '),
'min_disk_free' => $min_disk_free,
'disk_unit' => $disk_unit,
},
operations => { 'monitor' => { 'interval' => $monitor_interval } },
operations => { 'monitor' => { 'interval' => $monitor_interval } },
}
# Have service migrate if health turns red from the failed disk check
cs_property { 'node-health-strategy':
ensure => present,
pcmk_property { 'node-health-strategy':
ensure => $monitor_ensure,
value => 'migrate-on-red',
provider => 'crm',
}
cs_rsc_location { "sysinfo-on-${::fqdn}":
primitive => "sysinfo_${::fqdn}",
node_name => $::fqdn,
node_score => 'INFINITY',
cib => "sysinfo_${::fqdn}",
pcmk_location { "sysinfo-on-${::fqdn}":
ensure => $monitor_ensure,
primitive => "sysinfo_${::fqdn}",
node => $::fqdn,
score => 'INFINITY',
}
}

View File

@@ -95,12 +95,12 @@ define cluster::virtual_ip (
enable => true,
}
pacemaker_wrappers::service { $vip_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
prefix => false,
pacemaker::service { $vip_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
prefix => false,
}
# I'm running before this other vip
@@ -109,19 +109,17 @@ define cluster::virtual_ip (
if $colocation_before {
$colocation_before_vip_name = "vip__${colocation_before}"
$colocation_before_constraint_name = "${colocation_before_vip_name}-with-${vip_name}"
cs_rsc_colocation { $colocation_before_constraint_name :
pcmk_colocation { $colocation_before_constraint_name :
ensure => 'present',
score => 'INFINITY',
primitives => [
$colocation_before_vip_name,
$vip_name,
],
first => $vip_name,
second => $colocation_before_vip_name,
}
Cs_resource <| title == $vip_name |> -> Cs_resource <| title == $colocation_before_vip_name |>
Pcmk_resource <| title == $vip_name |> -> Pcmk_resource <| title == $colocation_before_vip_name |>
Service <| title == $vip_name |> -> Service <| title == $colocation_before_vip_name |>
Service <| title == $colocation_before_vip_name |> -> Cs_rsc_colocation[$colocation_before_constraint_name]
Service <| title == $vip_name |> -> Cs_rsc_colocation[$colocation_before_constraint_name]
Service <| title == $colocation_before_vip_name |> -> Pcmk_colocation[$colocation_before_constraint_name]
Service <| title == $vip_name |> -> Pcmk_colocation[$colocation_before_constraint_name]
}
# I'm running after this other vip
@@ -130,19 +128,17 @@ define cluster::virtual_ip (
if $colocation_after {
$colocation_after_vip_name = "vip__${colocation_after}"
$colocation_after_constraint_name = "${vip_name}-with-${colocation_after_vip_name}"
cs_rsc_colocation { $colocation_after_constraint_name :
pcmk_colocation { $colocation_after_constraint_name :
ensure => 'present',
score => 'INFINITY',
primitives => [
$vip_name,
$colocation_after_vip_name,
],
first => $colocation_after_vip_name,
second => $vip_name,
}
Cs_resource <| title == $colocation_after_vip_name |> -> Cs_resource <| title == $vip_name |>
Pcmk_resource <| title == $colocation_after_vip_name |> -> Pcmk_resource <| title == $vip_name |>
Service <| title == $colocation_after_vip_name |> -> Service <| title == $vip_name |>
Service <| title == $colocation_after_vip_name |> -> Cs_rsc_colocation[$colocation_after_constraint_name]
Service <| title == $vip_name |> -> Cs_rsc_colocation[$colocation_after_constraint_name]
Service <| title == $colocation_after_vip_name |> -> Pcmk_colocation[$colocation_after_constraint_name]
Service <| title == $vip_name |> -> Pcmk_colocation[$colocation_after_constraint_name]
}
}

View File

@@ -1,59 +1,64 @@
define cluster::virtual_ip_ping (
$host_list = '127.0.0.1',
) {
$vip_name = $title
$vip_name = $name
$service_name = "ping_${vip_name}"
$location_name = "loc_ping_${vip_name}"
cs_resource { "ping_${vip_name}":
ensure => present,
primitive_class => 'ocf',
provided_by => 'pacemaker',
primitive_type => 'ping',
parameters => {
'host_list' => $host_list,
'multiplier' => '1000',
'dampen' => '30s',
'timeout' => '3s',
},
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '30',
},
},
complex_type => 'clone',
$primitive_class = 'ocf'
$primitive_provider = 'pacemaker'
$primitive_type = 'ping'
$parameters = {
'host_list' => $host_list,
'multiplier' => '1000',
'dampen' => '30s',
'timeout' => '3s',
}
$operations = {
'monitor' => {
'interval' => '20',
'timeout' => '30',
},
}
$complex_type = 'clone'
service { "ping_${vip_name}":
service { $service_name :
ensure => 'running',
enable => true,
provider => 'pacemaker',
}
cs_rsc_location { "loc_ping_${vip_name}":
pacemaker::service { $service_name :
prefix => false,
primitive_class => $primitive_class,
primitive_provider => $primitive_provider,
primitive_type => $primitive_type,
parameters => $parameters,
operations => $operations,
complex_type => $complex_type,
}
pcmk_location { $location_name :
primitive => $vip_name,
cib => "ping_${vip_name}",
rules => [
{
score => '-inf',
boolean => 'or',
date_expressions => [],
expressions => [
'score' => '-inf',
'expressions' => [
{
attribute => 'pingd',
operation => 'not_defined',
'attribute' => "pingd",
'operation' => 'not_defined',
'value' => 'or',
},
{
attribute => "pingd",
operation =>'lte',
value => '0',
'attribute' => "pingd",
'operation' => 'lte',
'value' => '0',
},
],
},
],
}
Cs_resource["ping_${vip_name}"] ->
Cs_rsc_location["loc_ping_${vip_name}"] ->
Service["ping_${vip_name}"]
Pcmk_resource[$service_name] ->
Pcmk_location[$location_name] ->
Service[$service_name]
}

View File

@@ -6,10 +6,9 @@ class cluster::vrouter_ocf (
$other_networks = false,
) {
$service_name = 'p_vrouter'
$primitive_type = 'ns_vrouter'
$complex_type = 'clone'
$ms_metadata = {
$complex_metadata = {
'interleave' => true,
}
$metadata = {
@@ -43,13 +42,13 @@ class cluster::vrouter_ocf (
provider => 'pacemaker',
}
pacemaker_wrappers::service { $service_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
ms_metadata => $ms_metadata,
complex_type => $complex_type,
prefix => false,
pacemaker::service { $service_name :
primitive_type => $primitive_type,
parameters => $parameters,
metadata => $metadata,
operations => $operations,
complex_metadata => $complex_metadata,
complex_type => $complex_type,
prefix => false,
}
}

View File

@@ -2,61 +2,35 @@ require 'spec_helper'
describe 'cluster::dns_ocf' do
let(:default_params) { {
} }
let(:default_params) do
{}
end
shared_examples_for 'dns_ocf configuration' do
let :params do
default_params
end
context 'with default params' do
it_raises 'a Puppet::Error', /primary_controller/
end
it 'configures with the params params' do
should contain_class('cluster::dns_ocf')
context 'with primary_controller = true' do
let :params do
default_params.merge!({
:primary_controller => true,
})
end
should contain_pcmk_resource('p_dns')
it 'configures with the params params' do
should contain_class('cluster::dns_ocf')
should contain_cs_resource('p_dns').with_before('Cs_rsc_colocation[dns-with-vrouter-ns]')
should contain_cs_rsc_colocation('dns-with-vrouter-ns').with(
:ensure => 'present',
:score => 'INFINITY',
:primitives => [ 'clone_p_dns', 'clone_p_vrouter' ])
should contain_service('p_dns').with(
:name => 'p_dns',
:enable => true,
:ensure => 'running',
:hasstatus => true,
:hasrestart => true,
:provider => 'pacemaker')
end
end
should contain_pcmk_colocation('dns-with-vrouter-ns').with(
:ensure => 'present',
:score => 'INFINITY',
:first => 'clone_p_vrouter',
:second => 'clone_p_dns'
).that_requires('Pcmk_resource[p_dns]')
context 'with primary_controller = false' do
let :params do
default_params.merge!({
:primary_controller => false,
})
end
it 'configures with the params params' do
should_not contain_cs_resource('p_dns')
should_not contain_cs_rsc_colocation('dns-with-vrouter-ns')
should contain_service('p_dns').with(
:name => 'p_dns',
:enable => true,
:ensure => 'running',
:hasstatus => true,
:hasrestart => true,
:provider => 'pacemaker')
end
should contain_service('p_dns').with(
:name => 'p_dns',
:enable => true,
:ensure => 'running',
:hasstatus => true,
:hasrestart => true,
:provider => 'pacemaker',
).that_requires('Pcmk_colocation[dns-with-vrouter-ns]')
end
end

View File

@@ -16,14 +16,14 @@ describe 'cluster::heat_engine' do
}
it 'configures a heat engine pacemaker service' do
should contain_pacemaker_wrappers__service(platform_params[:engine_service_name]).with(
should contain_pacemaker__service(platform_params[:engine_service_name]).with(
:primitive_type => 'heat-engine',
:metadata => {
'resource-stickiness' => '1',
'migration-threshold' => '3'
},
:complex_type => 'clone',
:ms_metadata => {
:complex_metadata => {
'interleave' => true
},
:operations => {

View File

@@ -15,7 +15,7 @@ describe 'cluster::mysql' do
end
it 'configures a cs_resource' do
should contain_cs_resource('p_mysqld').with(
should contain_pcmk_resource('p_mysqld').with(
:ensure => 'present',
:parameters => {
'config' => '/etc/mysql/my.cnf',
@@ -24,7 +24,7 @@ describe 'cluster::mysql' do
'socket' =>'/var/run/mysqld/mysqld.sock'
}
)
should contain_cs_resource('p_mysqld').that_notifies('Service[mysqld]')
should contain_pcmk_resource('p_mysqld').that_notifies('Service[mysqld]')
end
it 'creates init-file with grants' do

View File

@@ -6,11 +6,12 @@ describe 'cluster::ntp_ocf' do
it 'configures with the default params' do
should contain_class('cluster::ntp_ocf')
should contain_cs_resource('p_ntp').with_before(["Cs_rsc_colocation[ntp-with-vrouter-ns]", "Service[ntp]"])
should contain_cs_rsc_colocation('ntp-with-vrouter-ns').with(
should contain_pcmk_resource('p_ntp').with_before(["Pcmk_colocation[ntp-with-vrouter-ns]", "Service[ntp]"])
should contain_pcmk_colocation('ntp-with-vrouter-ns').with(
:ensure => 'present',
:score => 'INFINITY',
:primitives => [ 'clone_p_ntp', 'clone_p_vrouter' ])
:first => 'clone_p_ntp',
:second => 'clone_p_vrouter')
end
end

View File

@@ -29,5 +29,4 @@ fixtures:
pacemaker: "#{source_dir}/../pacemaker"
haproxy: "#{source_dir}/../haproxy"
ceilometer_ha: "#{source_dir}/../ceilometer_ha"
pacemaker_wrappers: "#{source_dir}/../pacemaker_wrappers"
mellanox_openstack: "#{source_dir}/../mellanox_openstack"

View File

@@ -24,15 +24,16 @@ class openstack::corosync (
anchor {'corosync':}
Anchor['corosync'] -> Cs_property<||>
Anchor['corosync'] ->
Pcmk_property<||>
Class['::corosync']->Cs_shadow<||>
Class['::corosync']->Cs_property<||>->Cs_resource<||>
Cs_property<||>->Cs_shadow<||>
Class['::corosync']->
Pcmk_property<||>->
Pcmk_resource<||>
Cs_property['no-quorum-policy']->
Cs_property['stonith-enabled']->
Cs_property['start-failure-is-fatal']
Pcmk_property['no-quorum-policy']->
Pcmk_property['stonith-enabled']->
Pcmk_property['start-failure-is-fatal']
if $corosync_version == '2' {
$version_real = '1'
@@ -60,28 +61,27 @@
debug => false,
} -> Anchor['corosync-done']
Cs_property {
ensure => present,
provider => 'crm',
Pcmk_property {
ensure => 'present',
}
cs_property { 'no-quorum-policy':
pcmk_property { 'no-quorum-policy':
value => $quorum_policy,
} -> Anchor['corosync-done']
cs_property { 'stonith-enabled':
pcmk_property { 'stonith-enabled':
value => $stonith,
} -> Anchor['corosync-done']
cs_property { 'start-failure-is-fatal':
pcmk_property { 'start-failure-is-fatal':
value => false,
} -> Anchor['corosync-done']
cs_property { 'symmetric-cluster':
pcmk_property { 'symmetric-cluster':
value => false,
} -> Anchor['corosync-done']
cs_property { 'cluster-recheck-interval':
pcmk_property { 'cluster-recheck-interval':
value => $cluster_recheck_interval,
} -> Anchor['corosync-done']

View File

@@ -49,9 +49,8 @@ describe 'openstack::corosync' do
'symmetric-cluster' => false,
'cluster-recheck-interval' => p[:cluster_recheck_interval],
}.each do |prop, val|
should contain_cs_property(prop).with(
should contain_pcmk_property(prop).with(
:ensure => 'present',
:provider => 'crm',
:value => val,
).that_comes_before('Anchor[corosync-done]')
end

View File

@@ -25,6 +25,5 @@ if $network_metadata['vips']["vrouter_${vrouter_name}"]['namespace'] {
} ->
class { 'cluster::dns_ocf':
primary_controller => $primary_controller,
}
}

View File

@@ -116,23 +116,23 @@ class { 'nova::network::neutron':
neutron_admin_auth_url => "${admin_identity_uri}/v3",
}
cs_resource { "p_nova_compute_ironic":
ensure => present,
primitive_class => 'ocf',
provided_by => 'fuel',
primitive_type => 'nova-compute',
pcmk_resource { "p_nova_compute_ironic":
ensure => 'present',
primitive_class => 'ocf',
primitive_provider => 'fuel',
primitive_type => 'nova-compute',
metadata => {
resource-stickiness => '1'
'resource-stickiness' => '1'
},
parameters => {
config => "/etc/nova/nova.conf",
pid => "/var/run/nova/nova-compute-ironic.pid",
additional_parameters => "--config-file=/etc/nova/nova-compute.conf",
'config' => "/etc/nova/nova.conf",
'pid' => "/var/run/nova/nova-compute-ironic.pid",
'additional_parameters' => "--config-file=/etc/nova/nova-compute.conf",
},
operations => {
monitor => { timeout => '30', interval => '60' },
start => { timeout => '30' },
stop => { timeout => '30' }
monitor => { 'timeout' => '30', 'interval' => '60' },
start => { 'timeout' => '30' },
stop => { 'timeout' => '30' }
}
}

View File

@@ -165,7 +165,7 @@ if $queue_provider == 'rabbitmq' {
}
if ($use_pacemaker) {
class { 'pacemaker_wrappers::rabbitmq':
class { 'cluster::rabbitmq_ocf':
command_timeout => $command_timeout,
debug => $debug,
erlang_cookie => $erlang_cookie,

View File

@@ -3,80 +3,14 @@ notice('MODULAR: conntrackd.pp')
$network_metadata = hiera_hash('network_metadata', {})
prepare_network_config(hiera_hash('network_scheme', {}))
$vrouter_name = hiera('vrouter_name', 'pub')
$bind_address = get_network_role_property('mgmt/vip', 'ipaddr')
$mgmt_bridge = get_network_role_property('mgmt/vip', 'interface')
case $operatingsystem {
Centos: { $conntrackd_package = 'conntrack-tools' }
Ubuntu: { $conntrackd_package = 'conntrackd' }
}
# If VIP has namespace set to 'false' or 'undef' then we do not configure
# it under corosync cluster. So we should not configure colocation with it.
if $network_metadata['vips']["vrouter_${vrouter_name}"]['namespace'] {
### CONNTRACKD for CentOS 6 doesn't work under namespaces ##
if $operatingsystem == 'Ubuntu' {
$bind_address = get_network_role_property('mgmt/vip', 'ipaddr')
$mgmt_bridge = get_network_role_property('mgmt/vip', 'interface')
package { $conntrackd_package:
ensure => installed,
} ->
file { '/etc/conntrackd/conntrackd.conf':
content => template('cluster/conntrackd.conf.erb'),
} ->
cs_resource {'p_conntrackd':
ensure => present,
primitive_class => 'ocf',
provided_by => 'fuel',
primitive_type => 'ns_conntrackd',
metadata => {
'migration-threshold' => 'INFINITY',
'failure-timeout' => '180s'
},
parameters => {
'bridge' => $mgmt_bridge,
},
complex_type => 'master',
ms_metadata => {
'notify' => 'true',
'ordered' => 'false',
'interleave' => 'true',
'clone-node-max' => '1',
'master-max' => '1',
'master-node-max' => '1',
'target-role' => 'Master'
},
operations => {
'monitor' => {
'interval' => '30',
'timeout' => '60'
},
'monitor:Master' => {
'role' => 'Master',
'interval' => '27',
'timeout' => '60'
},
},
}
cs_colocation { "conntrackd-with-${vrouter_name}-vip":
primitives => [ 'master_p_conntrackd:Master', "vip__vrouter_${vrouter_name}" ],
}
File['/etc/conntrackd/conntrackd.conf'] -> Cs_resource['p_conntrackd'] -> Service['p_conntrackd'] -> Cs_colocation["conntrackd-with-${vrouter_name}-vip"]
service { 'p_conntrackd':
ensure => 'running',
enable => true,
provider => 'pacemaker',
}
# Workaround to ensure log is rotated properly
file { '/etc/logrotate.d/conntrackd':
content => template('openstack/95-conntrackd.conf.erb'),
}
Package[$conntrackd_package] -> File['/etc/logrotate.d/conntrackd']
# CONNTRACKD for CentOS 6 doesn't work under namespaces
if $operatingsystem == 'Ubuntu' {
class { 'cluster::conntrackd_ocf' :
vrouter_name => $vrouter_name,
bind_address => $bind_address,
mgmt_bridge => $mgmt_bridge,
}
}

View File

@@ -1,4 +0,0 @@
fixtures:
symlinks:
pacemaker: "#{source_dir}"
stdlib: "#{source_dir}/../stdlib"

View File

@@ -1,3 +0,0 @@
Gemfile.lock
spec/fixtures
.bundle

View File

@@ -1,2 +0,0 @@
-f doc
--color

View File

@@ -1,35 +0,0 @@
language: ruby
bundler_args: --without development
before_install:
- gem update --system 2.1.11
- gem --version
script: "bundle exec rake spec SPEC_OPTS='--format documentation'"
rvm:
- 1.8.7
- 1.9.3
- 2.0.0
env:
- PUPPET_GEM_VERSION="~> 2.7.0"
- PUPPET_GEM_VERSION="~> 3.0.0"
- PUPPET_GEM_VERSION="~> 3.1.0"
- PUPPET_GEM_VERSION="~> 3.2.0"
- PUPPET_GEM_VERSION="~> 3.3.0"
- PUPPET_GEM_VERSION="~> 3.4.0"
matrix:
exclude:
- rvm: 1.9.3
env: PUPPET_GEM_VERSION="~> 2.7.0"
- rvm: 2.0.0
env: PUPPET_GEM_VERSION="~> 2.7.0"
- rvm: 2.0.0
env: PUPPET_GEM_VERSION="~> 3.0.0"
- rvm: 2.0.0
env: PUPPET_GEM_VERSION="~> 3.1.0"
- rvm: 1.8.7
env: PUPPET_GEM_VERSION="~> 3.2.0"
- rvm: 1.8.7
env: PUPPET_GEM_VERSION="~> 3.3.0"
- rvm: 1.8.7
env: PUPPET_GEM_VERSION="~> 3.4.0"
notifications:
email: false

View File

@@ -1,17 +0,0 @@
source 'https://rubygems.org'
group :development, :test do
gem 'rake', :require => false
gem 'rspec-puppet', :require => false
gem 'puppetlabs_spec_helper', :require => false
gem 'puppet-lint', :require => false
gem 'pry'
end
if puppetversion = ENV['PUPPET_GEM_VERSION']
gem 'puppet', puppetversion, :require => false
else
gem 'puppet', :require => false
end
# vim:ft=ruby

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2013 Puppet Labs
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,108 +0,0 @@
Fuel module for Pacemaker
=========================
These manifests are derived from the puppetlabs-corosync module v0.1.0.
Basic usage
-----------
Configuring primitives
------------------------
The resources that Pacemaker will manage are referred to as primitives.
These are things like virtual IPs or services like drbd, nginx, and apache.
*To assign a VIP to a network interface to be used by Nginx*
```puppet
cs_resource { 'nginx_vip':
primitive_class => 'ocf',
primitive_type => 'IPaddr2',
provided_by => 'heartbeat',
parameters => { 'ip' => '172.16.210.100', 'cidr_netmask' => '24' },
operations => { 'monitor' => { 'interval' => '10s' } },
}
```
*Make Pacemaker manage and monitor the state of Nginx using a custom OCF agent*
```puppet
cs_resource { 'nginx_service':
primitive_class => 'ocf',
primitive_type => 'nginx_fixed',
provided_by => 'pacemaker',
operations => {
'monitor' => { 'interval' => '10s', 'timeout' => '30s' },
'start' => { 'interval' => '0', 'timeout' => '30s', 'on-fail' => 'restart' }
},
require => Cs_primitive['nginx_vip'],
}
```
*Make Pacemaker manage and monitor the state of Apache using a LSB agent*
```puppet
cs_resource { 'nginx_service':
primitive_class => 'lsb',
primitive_type => 'apache2',
provided_by => 'heartbeat',
operations => {
'monitor' => { 'interval' => '10s', 'timeout' => '30s' },
'start' => { 'interval' => '0', 'timeout' => '30s', 'on-fail' => 'restart' }
},
require => Cs_primitive['apache2_vip'],
}
```
*You can also specify a multi-state resource such as clone or master*
```puppet
cs_resource {'nginx_service':
primitive_class => 'lsb',
provided_by => 'heartbeat',
operations => {
'monitor' => { 'interval' => '10s', 'timeout' => '30s' },
'start' => { 'interval' => '0', 'timeout' => '30s', 'on-fail' => 'restart' }
},
require => Cs_primitive['apache2_vip'],
multistate_hash => { 'type' => 'clone', 'name' => 'nginx_clone' },
ms_metadata => { 'interleave' => 'true' },
}
```
Dependencies
------------
Tested and built on Ubuntu 12.04; Corosync 1.4.2 is validated to function.
Notes
-----
Contributors
------------
* Mirantis Inc.
* [See Puppet Labs Github](https://github.com/puppetlabs/puppetlabs-corosync/graphs/contributors)
Copyright and License
---------------------
Copyright (C) 2012 [Puppet Labs](https://www.puppetlabs.com/) Inc
Puppet Labs can be contacted at: info@puppetlabs.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1 +0,0 @@
require 'puppetlabs_spec_helper/rake_tasks'

View File

@@ -1,238 +0,0 @@
require File.join File.dirname(__FILE__), '../pacemaker_base.rb'
require 'pp'
Puppet::Type.type(:cs_resource).provide(:crm, :parent => Puppet::Provider::Pacemaker) do
desc 'Specific provider for a rather specific type since I currently have no
plan to abstract pacemaker vs. keepalived. Primitives in
Pacemaker are the thing we desire to monitor; websites, ipaddresses,
databases, etc, etc. Here we manage the creation and deletion of
these primitives. We will accept a hash for what Pacemaker calls
operations and parameters. A hash is used instead of constructing a
better model since these values can be almost anything.'
commands :cibadmin => 'cibadmin'
commands :crm_shadow => 'crm_shadow'
commands :crm => 'crm'
commands :pcs => 'pcs'
commands :crm_diff => 'crm_diff'
commands :crm_attribute => 'crm_attribute'
# parse CIB XML and create the array of found primitives
# @return [Array<Puppet::Provider::Crm>]
def self.instances
block_until_ready
instances = []
raw, status = dump_cib
doc = REXML::Document.new(raw)
REXML::XPath.each(doc, '//primitive') do |e|
items = e.attributes
primitive = {
:ensure => :present,
:name => items['id'].to_s,
:primitive_class => items['class'].to_s,
:primitive_type => items['type'].to_s,
:provided_by => items['provider'].to_s,
}
primitive[:parameters] = {}
primitive[:operations] = {}
primitive[:metadata] = {}
primitive[:ms_metadata] = {}
if e.elements['instance_attributes']
e.elements['instance_attributes'].each_element do |i|
primitive[:parameters].store i.attributes['name'].to_s, i.attributes['value'].to_s
end
end
if e.elements['meta_attributes']
e.elements['meta_attributes'].each_element do |m|
primitive[:metadata].store m.attributes['name'].to_s, m.attributes['value'].to_s
end
end
if e.elements['operations']
e.elements['operations'].each_element do |o|
op_name = o.attributes['name'].to_s
op_name += ":#{o.attributes['role']}" if o.attributes['role']
primitive[:operations][op_name] = {}
o.attributes.each do |k,v|
next if k == 'name'
next if k == 'id'
primitive[:operations][op_name].store k.to_s, v.to_s
end
if o.elements['instance_attributes']
o.elements['instance_attributes'].each_element do |inst|
primitive[:operations][op_name].store inst.attributes['name'].to_s, inst.attributes['value'].to_s
end
end
end
end
if e.parent.name == 'master' or e.parent.name == 'clone'
primitive[:complex_type] = e.parent.name
if e.parent.elements['meta_attributes']
e.parent.elements['meta_attributes'].each_element do |m|
primitive[:ms_metadata].store m.attributes['name'].to_s, m.attributes['value'].to_s
end
end
end
instances << new(primitive)
end
instances
end
# Create just adds our resource to the property_hash and flush will take care
# of actually doing the work.
def create
debug "Call: create on cs_resource '#{@resource[:name]}'"
@property_hash = {
:name => @resource[:name],
:ensure => :present,
:primitive_class => @resource[:primitive_class],
:provided_by => @resource[:provided_by],
:primitive_type => @resource[:primitive_type],
:complex_type => @resource[:complex_type],
}
@property_hash[:parameters] = @resource[:parameters] if @resource[:parameters]
@property_hash[:operations] = @resource[:operations] if @resource[:operations]
@property_hash[:metadata] = @resource[:metadata] if @resource[:metadata]
@property_hash[:ms_metadata] = @resource[:ms_metadata] if @resource[:ms_metadata]
@property_hash[:cib] = @resource[:cib] if @resource[:cib]
end
# Unlike create we actually immediately delete the item. Pacemaker forces us
# to "stop" the primitive before we are able to remove it.
def destroy
debug "Call: destroy on cs_resource '#{@resource[:name]}'"
pcs 'resource', 'disable', @resource[:name]
pcs 'resource', 'cleanup', @resource[:name]
pcs 'resource', 'delete', @resource[:name]
end
# Getters that obtain the parameters and operations defined in our primitive
# that have been populated by prefetch or instances (depends on if you're using
# puppet resource or not).
def parameters
@property_hash[:parameters]
end
def operations
@property_hash[:operations]
end
def metadata
@property_hash[:metadata]
end
def ms_metadata
@property_hash[:ms_metadata]
end
def complex_type
@property_hash[:complex_type]
end
# Our setters for parameters and operations. Setters are used when the
# resource already exists so we just update the current value in the
# property_hash and doing this marks it to be flushed.
def parameters=(should)
Puppet.debug "Set paramemter:\n#{should.pretty_inspect}"
@property_hash[:parameters] = should
end
def operations=(should)
Puppet.debug "Set operations:\n#{should.pretty_inspect}"
@property_hash[:operations] = should
end
def metadata=(should)
Puppet.debug "Set metadata:\n#{should.pretty_inspect}"
@property_hash[:metadata] = should
end
def ms_metadata=(should)
Puppet.debug "Set ms_metadata:\n#{should.pretty_inspect}"
@property_hash[:ms_metadata] = should
end
def complex_type=(should)
Puppet.debug "Set complex_type:\n#{should.pretty_inspect}"
# try to change complex_type of the existing primitive by deleting and recreating it later
if should != @property_hash[:complex_type]
self.destroy
end
@property_hash[:complex_type] = should
end
# Flush is triggered on anything that has been detected as being
# modified in the property_hash. It generates a temporary file with
# the updates that need to be made. The temporary file is then used
# as stdin for the crm command. We have to do a bit of munging of our
# operations and parameters hash to eventually flatten them into a string
# that can be used by the crm command.
def flush
debug "Call: flush on cs_resource '#{@resource[:name]}'"
return if @property_hash.empty?
self.class.block_until_ready
unless @property_hash[:operations].empty?
operations = ''
@property_hash[:operations].each do |o|
op_namerole = o[0].to_s.split(':')
if op_namerole[1]
o[1]['role'] = o[1]['role'] || op_namerole[1] # Hash['role'] has more priority, than Name
end
operations << "op #{op_namerole[0]} "
o[1].each_pair do |k,v|
operations << "#{k}='#{v}' " unless v.empty?
end
end
end
unless @property_hash[:parameters].empty?
parameters = @property_hash[:parameters].reject{|k,v| v.empty?}.map{|k,v| "#{k}='#{v}'"}.join(' ')
parameters = (parameters.empty? ? nil : "params #{parameters}")
end
unless @property_hash[:metadata].empty?
metadatas = @property_hash[:metadata].reject{|k,v| v.empty?}.map{|k,v| "#{k}='#{v}'"}.join(' ')
metadatas = (metadatas.empty? ? nil : "meta #{metadatas}")
end
updated = 'primitive '
updated << "#{@property_hash[:name]} #{@property_hash[:primitive_class]}:"
updated << "#{@property_hash[:provided_by]}:" if @property_hash[:provided_by]
updated << "#{@property_hash[:primitive_type]} "
updated << "#{operations} " unless operations.nil?
updated << "#{parameters} " unless parameters.nil?
updated << "#{metadatas} " unless metadatas.nil?
if @property_hash[:complex_type]
complex_name = "#{@property_hash[:complex_type]}_#{@property_hash[:name]}"
crm_cmd_type = @property_hash[:complex_type].to_s == 'master' ? 'ms' : 'clone'
debug "Creating '#{crm_cmd_type}' parent named '#{complex_name}' for #{@property_hash[:name]} resource"
updated << "\n"
updated << " #{crm_cmd_type} #{complex_name} #{@property_hash[:name]} "
unless @property_hash[:ms_metadata].empty?
ms_metadatas = @property_hash[:ms_metadata].reject{|k,v| v.empty?}.map{|k,v| "#{k}='#{v}'"}.join(' ')
unless ms_metadatas.empty?
updated << "meta #{ms_metadatas}"
end
end
end
debug("Will update tmpfile with '#{updated}'")
Tempfile.open('puppet_crm_update') do |tmpfile|
tmpfile.write(updated)
tmpfile.flush
apply_changes(@resource[:name], tmpfile, 'resource')
end
end
end

View File

@@ -1,115 +0,0 @@
require 'pathname'
require Pathname.new(__FILE__).dirname.dirname.expand_path + 'pacemaker_base'
Puppet::Type.type(:cs_rsc_colocation).provide(:crm, :parent => Puppet::Provider::Pacemaker) do
desc 'Specific provider for a rather specific type since I currently have no plan to
abstract pacemaker vs. keepalived. This provider will check the state
of current primitive colocations on the system; add, delete, or adjust various
aspects.'
# Path to the crm binary for interacting with the cluster configuration.
# Decided to just go with relative.
commands :cibadmin => 'cibadmin'
commands :crm_shadow => 'crm_shadow'
commands :crm => 'crm'
commands :crm_diff => 'crm_diff'
commands :crm_attribute => 'crm_attribute'
def self.instances
block_until_ready
instances = []
#cmd = [ command(:crm), 'configure', 'show', 'xml' ]
raw, status = dump_cib
doc = REXML::Document.new(raw)
doc.root.elements['configuration'].elements['constraints'].each_element('rsc_colocation') do |e|
items = e.attributes
if items['rsc-role']
rsc = "#{items['rsc']}:#{items['rsc-role']}"
else
rsc = items['rsc']
end
if items ['with-rsc-role']
with_rsc = "#{items['with-rsc']}:#{items['with-rsc-role']}"
else
with_rsc = items['with-rsc']
end
colocation_instance = {
:name => items['id'],
:ensure => :present,
:primitives => [rsc, with_rsc],
:score => items['score'],
:provider => self.name
}
instances << new(colocation_instance)
end
instances
end
# Create just adds our resource to the property_hash and flush will take care
# of actually doing the work.
def create
@property_hash = {
:name => @resource[:name],
:ensure => :present,
:primitives => @resource[:primitives],
:score => @resource[:score],
:cib => @resource[:cib],
}
end
# Unlike create we actually immediately delete the item.
def destroy
debug('Removing colocation')
crm('configure', 'delete', @resource[:name])
@property_hash.clear
end
# Getter that obtains the primitives array for us that should have
# been populated by prefetch or instances (depends on if you're using
# puppet resource or not).
def primitives
@property_hash[:primitives]
end
# Getter that obtains our score that should have been populated by
# prefetch or instances (depends on if you're using puppet resource or not).
def score
@property_hash[:score]
end
# Our setters for the primitives array and score. Setters are used when the
# resource already exists so we just update the current value in the property
# hash and doing this marks it to be flushed.
def primitives=(should)
@property_hash[:primitives] = should
end
def score=(should)
@property_hash[:score] = should
end
# Flush is triggered on anything that has been detected as being
# modified in the property_hash. It generates a temporary file with
# the updates that need to be made. The temporary file is then used
# as stdin for the crm command.
def flush
unless @property_hash.empty?
self.class.block_until_ready
updated = "colocation "
updated << "#{@property_hash[:name]} #{@property_hash[:score]}: #{@property_hash[:primitives].join(' ')}"
Tempfile.open('puppet_crm_update') do |tmpfile|
tmpfile.write(updated.rstrip)
tmpfile.flush
apply_changes(@resource[:name],tmpfile,'colocation')
end
end
end
end

View File

@@ -1,82 +0,0 @@
require 'pathname' # JJM WORK_AROUND #14073
require Pathname.new(__FILE__).dirname.dirname.expand_path + 'pacemaker_base'
Puppet::Type.type(:cs_rsc_defaults).provide(:crm, :parent => Puppet::Provider::Pacemaker) do
desc 'Specific provider for a rather specific type since I currently have no plan to
abstract pacemaker vs. keepalived. This provider will check the state
of Pacemaker resource defaults.'
# Path to the crm binary for interacting with the cluster configuration.
commands :crm => 'crm'
commands :cibadmin => 'cibadmin'
commands :crm_attribute => 'crm_attribute'
def self.instances
block_until_ready
instances = []
cmd = [ command(:crm), 'configure', 'show', 'xml' ]
raw, status = Puppet::Util::SUIDManager.run_and_capture(cmd)
doc = REXML::Document.new(raw)
meta_attributes = doc.root.elements['configuration/rsc_defaults/meta_attributes']
return instances unless meta_attributes
meta_attributes.each_element do |e|
items = e.attributes
rsc_defaults = { :name => items['name'], :value => items['value'] }
rsc_defaults_instance = {
:name => rsc_defaults[:name],
:ensure => :present,
:value => rsc_defaults[:value],
:provider => self.name
}
instances << new(rsc_defaults_instance)
end
instances
end
# Create just adds our resource to the property_hash and flush will take care
# of actually doing the work.
def create
@property_hash = {
:name => @resource[:name],
:ensure => :present,
:value => @resource[:value],
}
end
# Unlike create we actually immediately delete the item.
def destroy
debug('Removing resource default')
cibadmin('--scope', 'rsc_defaults', '--delete', '--xpath', "//nvpair[@name='#{resource[:name]}']")
@property_hash.clear
end
# Getter that obtains the value that should have been populated by
# prefetch or instances (depends on whether you're using puppet
# resource or not).
def value
@property_hash[:value]
end
# Our setter for the value. Setters are used when the resource already
# exists so we just update the current value in the property hash and
# doing this marks it to be flushed.
def value=(should)
@property_hash[:value] = should
end
# Flush is triggered on anything that has been detected as being
# modified in the property_hash. It generates a temporary file with
# the updates that need to be made. The temporary file is then used
# as stdin for the crm command.
def flush
unless @property_hash.empty?
# clear this on properties, in case it's set from a previous
# run of a different pacemaker type
ENV['CIB_shadow'] = nil
crm('configure', 'rsc_defaults', "#{@property_hash[:name]}=#{@property_hash[:value]}")
end
end
end
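For rsc_defaults the whole flush step is a single crm invocation. A sketch of the command line it produces, with a hypothetical name and value:

# Sketch: the crm call flush issues for one resource default (values hypothetical).
name, value = 'resource-stickiness', '100'
cmd = ['crm', 'configure', 'rsc_defaults', "#{name}=#{value}"]
puts cmd.join(' ')
# => crm configure rsc_defaults resource-stickiness=100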

View File

@ -1,228 +0,0 @@
require 'pathname'
require Pathname.new(__FILE__).dirname.dirname.expand_path + 'pacemaker_base'
Puppet::Type.type(:cs_rsc_location).provide(:crm, :parent => Puppet::Provider::Pacemaker) do
desc 'Specific provider for a rather specific type since I currently have no plan to
abstract pacemaker vs. keepalived. This provider will check the state
of current primitive colocations on the system; add, delete, or adjust various
aspects.'
# Path to the crm binary for interacting with the cluster configuration.
# Decided to just go with relative.
commands :cibadmin => 'cibadmin'
commands :crm_shadow => 'crm_shadow'
commands :crm => 'crm'
commands :crm_diff => 'crm_diff'
commands :crm_attribute => 'crm_attribute'
def self.instances
block_until_ready
instances = []
#cmd = [ command(:crm), 'configure', 'show', 'xml' ]
raw, status = dump_cib
doc = REXML::Document.new(raw)
doc.root.elements['configuration'].elements['constraints'].each_element('rsc_location') do |e|
items = e.attributes
#
# if ! e.elements['primitive'].nil?
# e.each_element do |p|
# primitives << p.attributes['id']
# end
# end
rules = []
if ! items['node'].nil?
node_name = items['node'].to_s
node_score = items['score'].to_s
elsif ! e.elements['rule'].nil?
e.each_element('rule') do |r|
boolean_op = (r.attributes['boolean-op'] || 'and').to_s
score = r.attributes['score']
rule={:boolean => boolean_op, :score => score,
:expressions => [], :date_expressions => [] }
r.each_element('expression') do |expr|
expr_attrs=Hash.new
expr_id = expr.attributes['id']
expr.attributes.reject{|key,value| key=='id' }.each{|key,value| expr_attrs[key.to_sym] = value }
rule[:expressions] << expr_attrs
end
r.each_element('date_expression') do |date_expr|
date_expr_hash={}
if date_expr.attributes['operation'] == 'date_spec'
date_expr_hash[:date_spec] = date_expr.elements[1].attributes.reject{|key,value| key=='id' }
elsif date_expr.attributes['operation'] == 'in_range' and !date_expr.elements['duration'].nil?
date_expr_hash[:duration] = date_expr.elements[1].attributes.reject{|key,value| key=='id' }
end
date_expr_hash.merge!({ :operation => date_expr.attributes['operation'].to_s,
:start=> date_expr.attributes['start'].to_s,
:end => date_expr.attributes['end'].to_s})
rule[:date_expressions] << convert_to_sym(date_expr_hash)
end
rules << rule
end
end
location_instance = {
:name => items['id'],
:ensure => :present,
:primitive => items['rsc'],
:node_score => node_score,
:node_name => node_name,
:rules => rules,
:provider => self.name
}
instances << new(location_instance)
end
instances
end
# Create just adds our resource to the property_hash and flush will take care
# of actually doing the work.
def create
@property_hash = {
:name => @resource[:name],
:ensure => :present,
:primitive => @resource[:primitive],
:node_name => @resource[:node_name],
:node_score => @resource[:node_score],
:rules => @resource[:rules],
:cib => @resource[:cib],
}
end
# Unlike create we actually immediately delete the item.
def destroy
debug('Removing location')
crm('configure', 'delete', @resource[:name])
@property_hash.clear
end
# Getter that obtains the primitives array for us that should have
# been populated by prefetch or instances (depends on whether you're
# using puppet resource or not).
def primitive
@property_hash[:primitive]
end
# Getter that obtains our node score that should have been populated by
# prefetch or instances (depends on whether you're using puppet resource or not).
def node_score
@property_hash[:node_score]
end
def rules
@property_hash[:rules]
end
def node_name
@property_hash[:node_name]
end
# Our setters for the primitive, rules, node name and node score. Setters
# are used when the resource already exists so we just update the current
# value in the property hash and doing this marks it to be flushed.
def rules=(should)
@property_hash[:rules] = should
end
def primitive=(should)
@property_hash[:primitive] = should
end
def node_score=(should)
@property_hash[:node_score] = should
end
def node_name=(should)
@property_hash[:node_name] = should
end
# Flush is triggered on anything that has been detected as being
# modified in the property_hash. It generates a temporary file with
# the updates that need to be made. The temporary file is then used
# as stdin for the crm command.
def flush
unless @property_hash.empty?
self.class.block_until_ready
updated = "location "
updated << "#{@property_hash[:name]} #{@property_hash[:primitive]} "
if !@property_hash[:node_name].nil?
updated << "#{@property_hash[:node_score]}: "
updated << "#{@property_hash[:node_name]}"
elsif !@property_hash[:rules].nil?
debug("Evaluating #{@property_hash.inspect}")
@property_hash[:rules].each do |rule_hash|
updated << "rule "
#updated << "$id-ref = #{rule_hash[:id_ref]}"
updated << "$role = #{rule_hash[:role]} " if !rule_hash[:role].nil?
updated << "#{rule_hash[:score]}: "
if !rule_hash[:expressions].nil?
rule_hash[:expressions].each do |expr|
updated << "#{expr[:attribute]} "
updated << "#{expr[:type]}:" if !expr[:type].nil?
updated << "#{expr[:operation]} "
updated << "#{expr[:value]} " if !expr[:value].nil?
end
end
if !rule_hash[:date_expressions].nil?
rule_hash[:date_expressions].each do |date_expr|
updated << "date "
if !date_expr[:date_spec].nil?
updated << "date_spec "
date_expr[:date_spec].each{|key,value| updated << "#{key}=#{value} " }
else
updated << "#{date_expr[:operation]} "
if date_expr[:operation] == 'in_range'
updated << "start=#{date_expr[:start]} "
if date_expr[:duration].nil?
updated << "end=#{date_expr[:end]} "
else
date_expr[:duration].each{|key,value| updated << "#{key}=#{value} " }
end
elsif date_expr[:operation] == 'gt'
updated << "#{date_expr[:start]} "
elsif date_expr[:operation] == 'lt'
updated << "#{date_expr[:end]} "
end
end
end
end
rule_number = 0
rule_number += rule_hash[:expressions].size if !rule_hash[:expressions].nil?
rule_number += rule_hash[:date_expressions].size if !rule_hash[:date_expressions].nil?
updated << "#{rule_hash[:boolean].to_s} " if rule_number > 1
end
end
debug("creating location with command\n #{updated}\n")
Tempfile.open('puppet_crm_update') do |tmpfile|
tmpfile.write(updated.rstrip)
tmpfile.flush
apply_changes(@resource[:name],tmpfile,'location')
end
end
end
end
def convert_to_sym(hash)
if hash.is_a? Hash
hash.inject({}) do |memo,(key,value)|
value = convert_to_sym(value)
if value.is_a?(Array)
value.collect! do |arr_el|
convert_to_sym(arr_el)
end
end
memo[key.to_sym] = value
memo
end
else
hash
end
end
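convert_to_sym above recursively symbolizes the keys of the rule hashes parsed from the CIB. A condensed, standalone restatement; symbolize is a hypothetical name, and it skips the array handling of the original:

# Sketch: recursively convert string hash keys to symbols.
def symbolize(data)
  return data unless data.is_a? Hash
  data.inject({}) do |memo, (key, value)|
    memo[key.to_sym] = symbolize(value)
    memo
  end
end

rule = { 'operation' => 'in_range', 'start' => '2016-01-01',
         'duration' => { 'hours' => '12' } }
p symbolize(rule)
# => {:operation=>"in_range", :start=>"2016-01-01", :duration=>{:hours=>"12"}}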

View File

@ -1,123 +0,0 @@
require 'pathname'
require Pathname.new(__FILE__).dirname.dirname.expand_path + 'pacemaker_base'
Puppet::Type.type(:cs_rsc_order).provide(:crm, :parent => Puppet::Provider::Pacemaker) do
desc 'Specific provider for a rather specific type since I currently have no plan to
abstract pacemaker vs. keepalived. This provider will check the state
of current primitive start orders on the system; add, delete, or adjust various
aspects.'
# Path to the crm binary for interacting with the cluster configuration.
commands :cibadmin => 'cibadmin'
commands :crm_shadow => 'crm_shadow'
commands :crm => 'crm'
commands :crm_diff => 'crm_diff'
commands :crm_attribute => 'crm_attribute'
def self.instances
block_until_ready
instances = []
#cmd = [ command(:crm), 'configure', 'show', 'xml' ]
raw, status = dump_cib
doc = REXML::Document.new(raw)
doc.root.elements['configuration'].elements['constraints'].each_element('rsc_order') do |e|
items = e.attributes
if items['first-action']
first = "#{items['first']}:#{items['first-action']}"
else
first = items['first']
end
if items['then-action']
second = "#{items['then']}:#{items['then-action']}"
else
second = items['then']
end
order_instance = {
:name => items['id'],
:ensure => :present,
:first => first,
:second => second,
:score => items['score'],
:provider => self.name
}
instances << new(order_instance)
end
instances
end
# Create just adds our resource to the property_hash and flush will take care
# of actually doing the work.
def create
@property_hash = {
:name => @resource[:name],
:ensure => :present,
:first => @resource[:first],
:second => @resource[:second],
:score => @resource[:score],
:cib => @resource[:cib],
}
end
# Unlike create we actually immediately delete the item.
def destroy
debug('Removing order directive')
crm('configure', 'delete', @resource[:name])
@property_hash.clear
end
# Getters that obtain the first and second primitives and score in our
# ordering definition that have been populated by prefetch or instances
# (depends on whether you're using puppet resource or not).
def first
@property_hash[:first]
end
def second
@property_hash[:second]
end
def score
@property_hash[:score]
end
# Our setters for the first and second primitives and score. Setters are
# used when the resource already exists so we just update the current value
# in the property hash and doing this marks it to be flushed.
def first=(should)
@property_hash[:first] = should
end
def second=(should)
@property_hash[:second] = should
end
def score=(should)
@property_hash[:score] = should
end
# Flush is triggered on anything that has been detected as being
# modified in the property_hash. It generates a temporary file with
# the updates that need to be made. The temporary file is then used
# as stdin for the crm command.
def flush
unless @property_hash.empty?
self.class.block_until_ready
updated = 'order '
updated << "#{@property_hash[:name]} #{@property_hash[:score]}: "
updated << "#{@property_hash[:first]} #{@property_hash[:second]}"
Tempfile.open('puppet_crm_update') do |tmpfile|
tmpfile.write(updated.rstrip)
tmpfile.flush
apply_changes(@resource[:name],tmpfile,'order')
end
end
end
end
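As with colocations, flush here assembles one crm 'order' directive from the property hash. A sketch with hypothetical values:

# Sketch: the crm 'order' directive flush assembles (values hypothetical).
property_hash = { :name => 'mysql_before_keystone', :score => 'INF',
                  :first => 'p_mysql:promote', :second => 'p_keystone:start' }
updated = "order #{property_hash[:name]} #{property_hash[:score]}: " \
          "#{property_hash[:first]} #{property_hash[:second]}"
puts updated
# => order mysql_before_keystone INF: p_mysql:promote p_keystone:start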

View File

@ -1,161 +0,0 @@
require 'pp'
require 'open3'
require 'rexml/document'
include REXML
class Puppet::Provider::Pacemaker < Puppet::Provider
def self.dump_cib
self.block_until_ready
stdout = Open3.popen3("#{command(:crm)} configure show xml")[1].read
return stdout, nil
end
def try_command(command,resource_name,should=nil,cib=nil,timeout=120)
cmd = "#{command(:crm)} configure #{command} #{resource_name} #{should} ".rstrip
env = {}
if cib
env["CIB_shadow"]=cib.to_s
end
Timeout::timeout(timeout) do
debug("Issuing #{cmd} for CIB #{cib} ")
loop do
break if exec_withenv(cmd,env) == 0
sleep 2
end
end
end
def exec_withenv(cmd,env=nil)
self.class.exec_withenv(cmd,env)
end
def self.exec_withenv(cmd,env=nil)
old_env = nil
if env
old_env = ENV.to_hash if env
ENV.update(env)
end
out = `#{cmd} 2>&1`
rc = $?.exitstatus
if ! $?.success?
debug("Command '#{cmd}' failed, and return RC=#{rc}, output:")
out.split("\n").map{|l| debug(l)}
end
if ! env.nil?
# remove all keys, existing only in "additional" env
env.keys.each do |k|
ENV[k] = nil
end
end
ENV.update(old_env) if old_env
return rc
end
# Pacemaker takes a while to build the initial CIB configuration once the
# service is started for the first time. This provides us a way to wait
# until we're up so we can make changes that don't disappear in to a black
# hole.
def self.block_until_ready(timeout = 120)
cmd = "#{command(:crm_attribute)} --type crm_config --query --name dc-version 2>/dev/null"
Timeout::timeout(timeout) do
until exec_withenv(cmd) == 0
debug('Pacemaker not ready, retrying')
sleep 2
end
# Sleeping a spare two since it seems that dc-version is returning before
# it is really ready to take config changes, but it is close enough.
# Probably need to find a better way to check for readiness.
sleep 2
end
end
def self.prefetch(resources)
instances.each do |prov|
if res = resources[prov.name.to_s]
res.provider = prov
end
end
end
def exists?
self.class.block_until_ready
Puppet.debug "Call exists? on cs_resource '#{@resource[:name]}'"
out = !(@property_hash[:ensure] == :absent or @property_hash.empty?)
Puppet.debug "Return: #{out}"
Puppet.debug "Current state:\n#{@property_hash.pretty_inspect}" if @property_hash.any?
out
end
def get_scope(type)
case type
when 'resource'
scope='resources'
when /^(colocation|order|location)$/
scope='constraints'
when 'rsc_defaults'
scope='rsc_defaults'
else
fail('unknown resource type')
end
return scope
end
def apply_changes(res_name,tmpfile,res_type)
env={}
shadow_name="#{res_type}_#{res_name}"
original_cib="/tmp/#{shadow_name}_orig.xml"
new_cib="/tmp/#{shadow_name}_new.xml"
begin
debug('trying to delete old shadow if exists')
crm_shadow("-b","-f","-D",shadow_name)
rescue Puppet::ExecutionFailure
debug('delete failed but proceeding anyway')
end
if !get_scope(res_type).nil?
cibadmin_scope = "-o #{get_scope(res_type)}"
else
cibadmin_scope = nil
end
crm_shadow("-b","-c",shadow_name)
env["CIB_shadow"] = shadow_name
orig_status = exec_withenv("#{command(:cibadmin)} #{cibadmin_scope} -Q > #{original_cib}", env)
#cibadmin returns code 6 if scope is empty
#in this case write empty file
if orig_status == 6 or File.read(original_cib).empty?
cur_scope=Element.new('cib')
cur_scope.add_element('configuration')
cur_scope.add_element(get_scope(res_type))
emptydoc=Document.new(cur_scope.to_s)
File.open(original_cib,'w') { |f| emptydoc.write(f) }
end
exec_withenv("#{command(:crm)} configure load update #{tmpfile.path}",env)
exec_withenv("#{command(:cibadmin)} #{cibadmin_scope} -Q > #{new_cib}",env)
patch = Open3.popen3("#{command(:crm_diff)} --original #{original_cib} --new #{new_cib}")[1].read
if patch.empty?
debug("no difference - nothing to apply")
return
end
xml_patch = Document.new(patch)
xml_patch.root.attributes.delete 'digest'
wrap_cib=Element.new('cib')
wrap_configuration=Element.new('configuration')
wrap_cib.add_element(wrap_configuration)
wrap_cib_a=Marshal.load(Marshal.dump(wrap_cib))
wrap_cib_r=Marshal.load(Marshal.dump(wrap_cib))
diff_a=XPath.first(xml_patch,'//diff-added')
diff_r=XPath.first(xml_patch,'//diff-removed')
diff_a_elements=diff_a.elements
diff_r_elements=diff_r.elements
wrap_configuration_a=XPath.first(wrap_cib_a,'//configuration')
wrap_configuration_r=XPath.first(wrap_cib_r,'//configuration')
diff_a_elements.each {|element| wrap_configuration_a.add_element(element)}
diff_r_elements.each {|element| wrap_configuration_r.add_element(element)}
diff_a.add_element(wrap_cib_a)
diff_r.add_element(wrap_cib_r)
cibadmin '--patch', '--sync-call', '--xml-text', xml_patch
end
end
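block_until_ready above polls the cluster until a DC is elected before any configuration change is attempted. A standalone sketch of the same polling pattern; ready? is a hypothetical stub standing in for the crm_attribute dc-version query:

require 'timeout'

# ready? stands in for "crm_attribute --type crm_config --query --name dc-version".
def ready?
  @tries = (@tries || 0) + 1
  @tries >= 3
end

# Poll until ready, giving up after the timeout, just like block_until_ready.
Timeout.timeout(120) do
  until ready?
    puts 'Pacemaker not ready, retrying'
    sleep 2
  end
end
puts 'Pacemaker is ready'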

View File

@ -1,791 +0,0 @@
require 'rexml/document'
class Puppet::Provider::Pacemaker_common < Puppet::Provider
@raw_cib = nil
@cib = nil
@primitives = nil
@primitives_structure = nil
RETRY_COUNT = 100
RETRY_STEP = 6
# get a raw CIB from cibadmin
# or from a debug file if raw_cib_file is set
# @return [String] cib xml
def raw_cib
@raw_cib = cibadmin '-Q'
if @raw_cib == '' or not @raw_cib
fail 'Could not dump CIB XML using "cibadmin -Q" command!'
end
@raw_cib
end
# create a new REXML CIB document
# @return [REXML::Document] at '/'
def cib
return @cib if @cib
@cib = REXML::Document.new(raw_cib)
end
# reset all saved variables to obtain new data
def cib_reset
# Puppet.debug 'Reset CIB memoization'
@raw_cib = nil
@cib = nil
@primitives = nil
@primitives_structure = nil
@nodes_structure = nil
@node_ids = nil
end
# get the lrm_rsc_op sections from an lrm_resource CIB section
# @param lrm_resource [REXML::Element]
# at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource[@id="resource-name"]/lrm_rsc_op
# @return [REXML::Element]
def cib_section_lrm_rsc_ops(lrm_resource)
return unless lrm_resource.is_a? REXML::Element
REXML::XPath.match lrm_resource, 'lrm_rsc_op'
end
# get node_state CIB section
# @return [REXML::Element] at /cib/status/node_state
def cib_section_nodes_state
REXML::XPath.match cib, '//node_state'
end
# get nodes CIB section
# @return [REXML::Element] at /cib/configuration/nodes
def cib_section_node_ids
REXML::XPath.match cib, '/cib/configuration/nodes/*'
end
# get primitives CIB section
# @return [Array<REXML::Element>] at /cib/configuration/resources/primitive
def cib_section_primitives
REXML::XPath.match cib, '//primitive'
end
# get the lrm_resource sections from an lrm CIB section
# @param lrm [REXML::Element]
# at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource
# @return [REXML::Element]
def cib_section_lrm_resources(lrm)
return unless lrm.is_a? REXML::Element
REXML::XPath.match lrm, 'lrm_resources/lrm_resource'
end
# get all 'rsc_location', 'rsc_order' and 'rsc_colocation' sections from CIB
# @return [Array<REXML::Element>] at /cib/configuration/constraints/*
def cib_section_constraints
REXML::XPath.match cib, '//constraints/*'
end
# determine the status of a single operation
# @param op [Hash<String => String>]
# @return ['start','stop','master',nil]
def operation_status(op)
# skip pending ops
# we should wait for the status to become known
return if op['op-status'] == '-1'
if op['operation'] == 'monitor'
# for monitor operation status is determined by its rc-code
# 0 - start, 8 - master, 7 - stop, else - error
case op['rc-code']
when '0'
'start'
when '7'
'stop'
when '8'
'master'
else
# not entirely correct but count failed monitor as 'stop'
'stop'
end
elsif %w(start stop promote).include? op['operation']
# for start/stop/promote status is set if op was successful
# use master instead of promote
return unless %w(0 7 8).include? op['rc-code']
if op['operation'] == 'promote'
'master'
else
op['operation']
end
else
# other operations are irrelevant
nil
end
end
# determine resource status by parsing last operations
# @param ops [Array<Hash>]
# @return ['start','stop','master',nil]
# nil means that status is unknown
def determine_primitive_status(ops)
status = nil
ops.each do |op|
op_status = operation_status op
status = op_status if op_status
end
status
end
# check if operations have some failed operations
# that should be cleaned up later
# @param ops [Array<Hash>]
# @return [TrueClass,FalseClass]
def failed_operations_found?(ops)
ops.each do |op|
# skip incomplete ops
next unless op['op-status'] == '0'
# skip useless ops
next unless %w(start stop monitor promote).include? op['operation']
# are there failed start, stop
if %w(start stop promote).include? op['operation']
return true if op['rc-code'] != '0'
end
# are there failed monitors
if op['operation'] == 'monitor'
return true unless %w(0 7 8).include? op['rc-code']
end
end
false
end
# convert an element's attributes to a hash
# @param element [REXML::Element]
# @return [Hash<String => String>]
def attributes_to_hash(element)
hash = {}
element.attributes.each do |a, v|
hash.store a.to_s, v.to_s
end
hash
end
# convert an element's children to a hash
# of their attributes, keyed by the given attribute
# @param element [REXML::Element]
# @param key <String>
# @return [Hash<String => String>]
def elements_to_hash(element, key, tag = nil)
elements = {}
children = element.get_elements tag
return elements unless children
children.each do |child|
child_structure = attributes_to_hash child
name = child_structure[key]
next unless name
elements.store name, child_structure
end
elements
end
# decode lrm_resources section of CIB
# @param lrm_resources [REXML::Element]
# @return [Hash<String => Hash>]
def decode_lrm_resources(lrm_resources)
resources = {}
lrm_resources.each do |lrm_resource|
resource = attributes_to_hash lrm_resource
id = resource['id']
next unless id
lrm_rsc_ops = cib_section_lrm_rsc_ops lrm_resource
next unless lrm_rsc_ops
ops = decode_lrm_rsc_ops lrm_rsc_ops
resource.store 'ops', ops
resource.store 'status', determine_primitive_status(ops)
resource.store 'failed', failed_operations_found?(ops)
resources.store id, resource
end
resources
end
# decode lrm_rsc_ops section of the resource's CIB
# @param lrm_rsc_ops [REXML::Element]
# @return [Array<Hash>]
def decode_lrm_rsc_ops(lrm_rsc_ops)
ops = []
lrm_rsc_ops.each do |lrm_rsc_op|
op = attributes_to_hash lrm_rsc_op
next unless op['call-id']
ops << op
end
ops.sort { |a,b| a['call-id'].to_i <=> b['call-id'].to_i }
end
# get nodes structure with resources and their statuses
# @return [Hash<String => Hash>]
def nodes
return @nodes_structure if @nodes_structure
@nodes_structure = {}
cib_section_nodes_state.each do |node_state|
node = attributes_to_hash node_state
node_name = node['uname']
next unless node_name
lrm = node_state.elements['lrm']
next unless lrm
lrm_resources = cib_section_lrm_resources lrm
next unless lrm_resources
resources = decode_lrm_resources lrm_resources
node.store 'primitives', resources
@nodes_structure.store node_name, node
end
@nodes_structure
end
# decode a single constraint element to the data structure
# @param element [REXML::Element]
# @return [Hash<String => String>]
def decode_constraint(element)
return unless element.is_a? REXML::Element
return unless element.attributes['id']
return unless element.name
constraint_structure = attributes_to_hash element
constraint_structure.store 'type', element.name
constraint_structure
end
# get all location constraints found in the CIB
# @return [Hash<String => Hash>]
def constraint_locations
locations = {}
cib_section_constraints.each do |constraint|
constraint_structure = decode_constraint constraint
next unless constraint_structure
next unless constraint_structure['id']
next unless constraint_structure['type'] == 'rsc_location'
constraint_structure.delete 'type'
locations.store constraint_structure['id'], constraint_structure
end
locations
end
# the nodes structure
# uname => node attributes
# @return [Hash<String => Hash>]
def node_ids
return @node_ids if @node_ids
@node_ids = {}
cib_section_node_ids.each do |node_block|
node = attributes_to_hash node_block
next unless node['id'] and node['uname']
@node_ids.store node['uname'], node
end
@node_ids
end
# get primitives configuration structure with primitives and their attributes
# @return [Hash<String => Hash>]
def primitives
return @primitives_structure if @primitives_structure
@primitives_structure = {}
cib_section_primitives.each do |primitive|
primitive_structure = {}
id = primitive.attributes['id']
next unless id
primitive_structure.store 'name', id
primitive.attributes.each do |k, v|
primitive_structure.store k.to_s, v
end
if primitive.parent.name and primitive.parent.attributes['id']
parent_structure = {
'id' => primitive.parent.attributes['id'],
'type' => primitive.parent.name
}
primitive_structure.store 'name', parent_structure['id']
primitive_structure.store 'parent', parent_structure
end
instance_attributes = primitive.elements['instance_attributes']
if instance_attributes
instance_attributes_structure = elements_to_hash instance_attributes, 'name', 'nvpair'
primitive_structure.store 'instance_attributes', instance_attributes_structure
end
meta_attributes = primitive.elements['meta_attributes']
if meta_attributes
meta_attributes_structure = elements_to_hash meta_attributes, 'name', 'nvpair'
primitive_structure.store 'meta_attributes', meta_attributes_structure
end
operations = primitive.elements['operations']
if operations
operations_structure = elements_to_hash operations, 'id', 'op'
primitive_structure.store 'operations', operations_structure
end
@primitives_structure.store id, primitive_structure
end
@primitives_structure
end
# check if primitive is clone or multistate
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
def primitive_is_complex?(primitive)
return unless primitive_exists? primitive
primitives[primitive].key? 'parent'
end
# check if primitive is clone
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
def primitive_is_clone?(primitive)
is_complex = primitive_is_complex? primitive
return is_complex unless is_complex
primitives[primitive]['parent']['type'] == 'clone'
end
# check if primitive is multistate
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
def primitive_is_multistate?(primitive)
is_complex = primitive_is_complex? primitive
return is_complex unless is_complex
primitives[primitive]['parent']['type'] == 'master'
end
# disable this primitive
# @param primitive [String]
def disable_primitive(primitive)
retry_command {
pcs 'resource', 'disable', primitive
}
end
alias :stop_primitive :disable_primitive
# enable this primitive
# @param primitive [String]
def enable_primitive(primitive)
retry_command {
pcs 'resource', 'enable', primitive
}
end
alias :start_primitive :enable_primitive
# ban this primitive
# @param primitive [String]
def ban_primitive(primitive, node = '')
retry_command {
pcs 'resource', 'ban', primitive, node
}
end
# move this primitive
# @param primitive [String]
def move_primitive(primitive, node = '')
retry_command {
pcs 'resource', 'move', primitive, node
}
end
# unban/unmove this primitive
# @param primitive [String]
def unban_primitive(primitive, node = '')
retry_command {
pcs 'resource', 'clear', primitive, node
}
end
alias :clear_primitive :unban_primitive
alias :unmove_primitive :unban_primitive
# cleanup this primitive
# @param primitive [String]
def cleanup_primitive(primitive, node = nil)
opts = ['--cleanup', "--resource=#{primitive}"]
opts << "--node=#{node}" if node
retry_command {
crm_resource opts
}
end
# manage this primitive
# @param primitive [String]
def manage_primitive(primitive)
retry_command {
pcs 'resource', 'manage', primitive
}
end
# unmanage this primitive
# @param primitive [String]
def unmanage_primitive(primitive)
retry_command {
pcs 'resource', 'unmanage', primitive
}
end
# set no-quorum-policy of the cluster
# @param value [String]
def no_quorum_policy(value)
retry_command {
pcs 'property', 'set', "no-quorum-policy=#{value}"
}
end
# set maintenance-mode of the cluster
# @param value [TrueClass,FalseClass]
def maintenance_mode(value)
retry_command {
pcs 'property', 'set', "maintenance-mode=#{value}"
}
end
# add a location constraint
# @param primitive [String] the primitive's name
# @param node [String] the node's name
# @param score [Numeric,String] score value
def constraint_location_add(primitive, node, score = 100)
id = "#{primitive}-on-#{node}"
xml = <<-EOF
<diff>
<diff-added>
<cib>
<configuration>
<constraints>
<rsc_location id="#{id}" node="#{node}" rsc="#{primitive}" score="#{score}" __crm_diff_marker__="added:top"/>
</constraints>
</configuration>
</cib>
</diff-added>
</diff>
EOF
retry_command {
cibadmin '--patch', '--sync-call', '--xml-text', xml
}
end
# remove a location constraint
# @param primitive [String] the primitive's name
# @param node [String] the node's name
def constraint_location_remove(primitive, node)
id = "#{primitive}-on-#{node}"
retry_command {
pcs 'constraint', 'location', 'remove', id
}
end
# check if location constraint exists
# @param primitive [String] the primitive's name
# @param node [String] the node's name
def constraint_location_exists?(primitive, node)
id = "#{primitive}-on-#{node}"
constraint_locations.key? id
end
# get the status of a primitive on the entire cluster
# or on a node if the node name param is given
# @param primitive [String]
# @param node [String]
# @return [String]
def primitive_status(primitive, node = nil)
if node
nodes.
fetch(node, {}).
fetch('primitives',{}).
fetch(primitive, {}).
fetch('status', nil)
else
statuses = []
nodes.each do |k,v|
status = v.fetch('primitives',{}).
fetch(primitive, {}).
fetch('status', nil)
statuses << status
end
status_values = {
'stop' => 0,
'start' => 1,
'master' => 2,
}
statuses.max_by do |status|
return unless status
status_values[status]
end
end
end
# generate report of primitive statuses by node
# mostly for debugging
# @return [Hash]
def primitives_status_by_node
report = {}
return unless nodes.is_a? Hash
nodes.each do |node_name, node_data|
primitives_of_node = node_data['primitives']
next unless primitives_of_node.is_a? Hash
primitives_of_node.each do |primitive, primitive_data|
primitive_status = primitive_data['status']
report[primitive] = {} unless report[primitive].is_a? Hash
report[primitive][node_name] = primitive_status
end
end
report
end
# form a cluster status report for debugging
# @return [String]
def get_cluster_debug_report
report = "\n"
primitives_status_by_node.each do |primitive, data|
primitive_name = primitive
primitive_name = primitives[primitive]['name'] if primitives[primitive]['name']
primitive_type = 'Simple'
primitive_type = 'Cloned' if primitive_is_clone? primitive
primitive_type = 'Multistate' if primitive_is_multistate? primitive
primitive_status = primitive_status primitive
report += "-> #{primitive_type} primitive '#{primitive_name}' global status: #{primitive_status}"
report += ' (UNMANAGE)' unless primitive_is_managed? primitive
report += "\n"
report += ' ' if data.any?
nodes = []
data.keys.sort.each do |node_name|
node_status = data.fetch node_name
node_block = "#{node_name}: #{node_status}"
node_block += ' (FAIL)' if primitive_has_failures? primitive, node_name
nodes << node_block
end
report += nodes.join ' | '
report += "\n"
end
report
end
# does this primitive have failed operations?
# @param primitive [String] primitive name
# @param node [String] on this node if given
# @return [TrueClass,FalseClass]
def primitive_has_failures?(primitive, node = nil)
return unless primitive_exists? primitive
if node
nodes.
fetch(node, {}).
fetch('primitives',{}).
fetch(primitive, {}).
fetch('failed', nil)
else
nodes.each do |k,v|
failed = v.fetch('primitives',{}).
fetch(primitive, {}).
fetch('failed', nil)
return true if failed
end
false
end
end
# determine if a primitive is running on the entire cluster
# or on a node if the node name param is given
# @param primitive [String] primitive id
# @param node [String] on this node if given
# @return [TrueClass,FalseClass]
def primitive_is_running?(primitive, node = nil)
return unless primitive_exists? primitive
status = primitive_status primitive, node
return status unless status
%w(start master).include? status
end
# check if primitive is running as a master
# either anywhere or on the given node
# @param primitive [String] primitive id
# @param node [String] on this node if given
# @return [TrueClass,FalseClass]
def primitive_has_master_running?(primitive, node = nil)
is_multistate = primitive_is_multistate? primitive
return is_multistate unless is_multistate
status = primitive_status primitive, node
return status unless status
status == 'master'
end
# return service status value expected by Puppet
# puppet wants :running or :stopped symbol
# @param primitive [String] primitive id
# @param node [String] on this node if given
# @return [:running,:stopped]
def get_primitive_puppet_status(primitive, node = nil)
if primitive_is_running? primitive, node
:running
else
:stopped
end
end
# return service enabled status value expected by Puppet
# puppet wants :true or :false symbols
# @param primitive [String]
# @return [:true,:false]
def get_primitive_puppet_enable(primitive)
if primitive_is_managed? primitive
:true
else
:false
end
end
# check if primitive exists in the configuration
# @param primitive primitive id or name
def primitive_exists?(primitive)
primitives.key? primitive
end
# determine if primitive is managed
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
# TODO: will not work correctly if cluster is in management mode
def primitive_is_managed?(primitive)
return unless primitive_exists? primitive
is_managed = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('is-managed', {}).fetch('value', 'true')
is_managed == 'true'
end
# determine if primitive has target-state started
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
# TODO: will not work correctly if target state is set globally to stopped
def primitive_is_started?(primitive)
return unless primitive_exists? primitive
target_role = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('target-role', {}).fetch('value', 'Started')
target_role == 'Started'
end
# check if pacemaker is online
# and we can work with it
# @return [TrueClass,FalseClass]
def is_online?
begin
dc_version = crm_attribute '-q', '--type', 'crm_config', '--query', '--name', 'dc-version'
return false unless dc_version
return false if dc_version.empty?
return false unless cib_section_nodes_state
true
rescue Puppet::ExecutionFailure
false
end
end
# retry the given command until it runs without errors
# or for RETRY_COUNT times with RETRY_STEP sec step
# print cluster status report on fail
# returns normal command output on success
# @return [String]
def retry_command
(0..RETRY_COUNT).each do
begin
out = yield
rescue Puppet::ExecutionFailure => e
Puppet.debug "Command failed: #{e.message}"
sleep RETRY_STEP
else
return out
end
end
Puppet.debug get_cluster_debug_report if is_online?
fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!"
end
# retry the given block until it returns true
# or for RETRY_COUNT times with RETRY_STEP sec step
# print cluster status report on fail
def retry_block_until_true
(0..RETRY_COUNT).each do
return if yield
sleep RETRY_STEP
end
Puppet.debug get_cluster_debug_report if is_online?
fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!"
end
# wait for pacemaker to become online
def wait_for_online
Puppet.debug "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for Pacemaker to become online"
retry_block_until_true do
is_online?
end
Puppet.debug 'Pacemaker is online'
end
# wait until we can get a known status of the primitive
# @param primitive [String] primitive name
def wait_for_status(primitive, node = nil)
msg = "Wait for known status of '#{primitive}'"
msg += " on node '#{node}'" if node
Puppet.debug msg
retry_block_until_true do
cib_reset
primitive_status(primitive) != nil
end
msg = "Primitive '#{primitive}' has status '#{primitive_status primitive}'"
msg += " on node '#{node}'" if node
Puppet.debug msg
end
# wait for primitive to start
# if node is given then start on this node
# @param primitive [String] primitive id
# @param node [String] on this node if given
def wait_for_start(primitive, node = nil)
message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start"
message += " on node '#{node}'" if node
Puppet.debug get_cluster_debug_report
Puppet.debug message
retry_block_until_true do
cib_reset
primitive_is_running? primitive, node
end
Puppet.debug get_cluster_debug_report
message = "Service '#{primitive}' have started"
message += " on node '#{node}'" if node
Puppet.debug message
end
# wait for primitive to start as a master
# if node is given then start as a master on this node
# @param primitive [String] primitive id
# @param node [String] on this node if given
def wait_for_master(primitive, node = nil)
message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start master"
message += " on node '#{node}'" if node
Puppet.debug get_cluster_debug_report
Puppet.debug message
retry_block_until_true do
cib_reset
primitive_has_master_running? primitive, node
end
Puppet.debug get_cluster_debug_report
message = "Service '#{primitive}' have started master"
message += " on node '#{node}'" if node
Puppet.debug message
end
# wait for primitive to stop
# if node is given then stop on this node
# @param primitive [String] primitive id
# @param node [String] on this node if given
def wait_for_stop(primitive, node = nil)
message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to stop"
message += " on node '#{node}'" if node
Puppet.debug get_cluster_debug_report
Puppet.debug message
retry_block_until_true do
cib_reset
result = primitive_is_running? primitive, node
result.is_a? FalseClass
end
Puppet.debug get_cluster_debug_report
message = "Service '#{primitive}' was stopped"
message += " on node '#{node}'" if node
Puppet.debug message
end
end
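Most of the status decoding above hinges on mapping monitor rc-codes onto resource states: 0 means started, 7 stopped, 8 master, and anything else is counted as stopped. A runnable sketch of that mapping:

# Sketch: the monitor rc-code mapping used by operation_status.
MONITOR_STATUS = { '0' => 'start', '7' => 'stop', '8' => 'master' }

def monitor_status(rc_code)
  # failed monitors are counted as 'stop', as in the original
  MONITOR_STATUS.fetch(rc_code, 'stop')
end

%w(0 7 8 1).each do |rc|
  puts "rc-code #{rc} => #{monitor_status(rc)}"
end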

View File

@ -1,319 +0,0 @@
require File.join File.dirname(__FILE__), '../pacemaker_common.rb'
Puppet::Type.type(:pcmk_nodes).provide(:ruby, :parent => Puppet::Provider::Pacemaker_common) do
commands 'cmapctl' => '/usr/sbin/corosync-cmapctl'
commands 'cibadmin' => '/usr/sbin/cibadmin'
commands 'crm_node' => '/usr/sbin/crm_node'
commands 'crm_attribute' => '/usr/sbin/crm_attribute'
def node_name
return @node_name if @node_name
@node_name = crm_node('-n').chomp.strip
end
def cmapctl_nodelist
cmapctl '-b', 'nodelist.node'
end
def cmapctl_safe(*args)
if @resource[:debug]
debug (['cmapctl'] + args).join ' '
return
end
begin
cmapctl *args
rescue => e
info "Command failed: #{e.message}"
end
end
def cibadmin_safe(*args)
if @resource[:debug]
debug (['cibadmin'] + args).join ' '
return
end
begin
cibadmin *args
rescue => e
info "Command failed: #{e.message}"
end
end
def crm_node_safe(*args)
if @resource[:debug]
debug (['crm_node'] + args).join ' '
return
end
begin
crm_node *args
rescue => e
info "Command failed: #{e.message}"
end
end
###################################
def nodes_data
@resource[:nodes]
end
def corosync_nodes_data
@resource[:corosync_nodes]
end
def pacemaker_nodes_data
@resource[:pacemaker_nodes]
end
###################################
def corosync_nodes_state
return @corosync_nodes_data if @corosync_nodes_data
@corosync_nodes_data = {}
cmapctl_nodelist.split("\n").each do |line|
if line =~ %r(^nodelist\.node\.(\d+)\.nodeid\s+\(u32\)\s+=\s+(\d+))
node_number = $1
node_id = $2
@corosync_nodes_data[node_number] = {} unless @corosync_nodes_data[node_number]
@corosync_nodes_data[node_number]['id'] = node_id
@corosync_nodes_data[node_number]['number'] = node_number
end
if line =~ %r(^nodelist\.node\.(\d+)\.ring(\d+)_addr\s+\(str\)\s+=\s+(\S+))
node_number = $1
node_ip_addr = $3
@corosync_nodes_data[node_number] = {} unless @corosync_nodes_data[node_number]
@corosync_nodes_data[node_number]['ip'] = node_ip_addr
end
end
@corosync_nodes_data
end
def corosync_nodes_structure
return @corosync_nodes_structure if @corosync_nodes_structure
@corosync_nodes_structure = {}
corosync_nodes_state.each do |number, node|
id = node['id']
ip = node['ip']
next unless id and ip
@corosync_nodes_structure.store id, ip
end
@corosync_nodes_structure
end
def pcmk_nodes_reset
@corosync_nodes_structure = nil
@corosync_nodes_data = nil
@pacemaker_nodes_structure = nil
@node_name = nil
end
###
def change_fqdn_to_name?
begin
return false if nodes_data.keys.include? node_name
return true if nodes_data.keys.map { |fqdn| fqdn.split('.').first }.include? node_name
false
rescue
false
end
end
def change_fqdn_to_name
debug 'Changing Pacemaker node names from FQDNs to Hostnames'
nodes = {}
@resource[:nodes].each do |fqdn, ip|
name = fqdn.split('.').first
nodes.store name, ip
end
@resource[:nodes] = nodes
@resource.set_corosync_nodes
@resource.set_pacemaker_nodes
pcmk_nodes_reset
end
###
def pacemaker_nodes_structure
@pacemaker_nodes_structure = {}
node_ids.each do |name, node|
id = node['id']
next unless name and id
@pacemaker_nodes_structure.store name, id
end
@pacemaker_nodes_structure
end
def next_corosync_node_number
number = corosync_nodes_state.inject(0) do |max, node|
number = node.last['number'].to_i
max = number if number > max
max
end
number += 1
number.to_s
end
def remove_pacemaker_node(node_name)
debug "Remove pacemaker node: '#{node_name}'"
remove_pacemaker_crm_node node_name
remove_pacemaker_node_record node_name
remove_pacemaker_node_state node_name
purge_node_locations node_name
end
def add_pacemaker_node(node_name)
debug "Add pacemaker node: '#{node_name}'"
node_id = nodes_data.fetch(node_name, {}).fetch 'id', nil
fail "Could not get all the data for the new pacemaker node '#{node_name}'!" unless node_id
add_pacemaker_node_record node_name, node_id
add_pacemaker_node_state node_name, node_id
end
def remove_pacemaker_crm_node(node_name)
crm_node_safe '--force', '--remove', node_name
end
def remove_pacemaker_node_record(node_name)
cibadmin_safe '--delete', '--scope', 'nodes', '--xml-text', "<node uname='#{node_name}'/>"
end
def remove_pacemaker_node_state(node_name)
cibadmin_safe '--delete', '--scope', 'status', '--xml-text', "<node_state uname='#{node_name}'/>"
end
def remove_location_constraint(constraint_id)
cibadmin_safe '--delete', '--scope', 'constraints', '--xml-text', "<rsc_location id='#{constraint_id}'/>"
end
def add_pacemaker_node_record(node_name, node_id)
patch = <<-eos
<diff>
<diff-added>
<cib>
<configuration>
<nodes>
<node id="#{node_id}" uname="#{node_name}" __crm_diff_marker__="added:top"/>
</nodes>
</configuration>
</cib>
</diff-added>
</diff>
eos
cibadmin_safe '--patch', '--sync-call', '--xml-text', patch
end
def add_pacemaker_node_state(node_name, node_id)
patch = <<-eos
<diff>
<diff-added>
<cib>
<status>
<node_state id="#{node_id}" uname="#{node_name}" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member" __crm_diff_marker__="added:top"/>
</status>
</cib>
</diff-added>
</diff>
eos
cibadmin_safe '--patch', '--sync-call', '--xml-text', patch
end
def remove_corosync_node(node_id)
debug "Remove corosync node: '#{node_id}'"
node_number = nil
corosync_nodes_state.each do |number, node|
node_number = number if node['id'] == node_id
end
fail "Could not get node_number of node id: '#{node_id}'!" unless node_number
remove_corosync_node_record node_number
pcmk_nodes_reset
end
def remove_corosync_node_record(node_number)
begin
cmapctl_safe '-D', "nodelist.node.#{node_number}"
rescue => e
debug "Failed: #{e.message}"
end
end
def add_corosync_node(node_id)
debug "Add corosync node: '#{node_id}'"
node_ip = corosync_nodes_data.fetch node_id, nil
node_number = next_corosync_node_number
fail "Could not get all the data for the new corosync node '#{node_name}'!" unless node_id and node_ip and node_number
add_corosync_node_record node_number, node_ip, node_id
pcmk_nodes_reset
end
def add_corosync_node_record(node_number=nil, node_addr=nil, node_id=nil, ring_number=0)
cmapctl_safe '-s', "nodelist.node.#{node_number}.nodeid", 'u32', "#{node_id}"
cmapctl_safe '-s', "nodelist.node.#{node_number}.ring#{ring_number}_addr", 'str', "#{node_addr}"
end
def purge_node_locations(node_name)
debug "Call: purge all location constraints for node: '#{node_name}'"
constraint_locations.each do |constraint_id, constraint|
next unless constraint['node'] == node_name
remove_location_constraint constraint_id
end
end
#################
def corosync_nodes
debug 'Call: corosync_nodes'
wait_for_online
change_fqdn_to_name if change_fqdn_to_name?
debug "Return: #{corosync_nodes_structure.inspect}"
corosync_nodes_structure
end
def corosync_nodes=(expected_nodes)
debug "Call: corosync_nodes='#{expected_nodes.inspect}'"
existing_nodes = corosync_nodes_structure
if @resource[:remove_corosync_nodes]
existing_nodes.each do |existing_node_id, existing_node_ip|
next if expected_nodes[existing_node_id] == existing_node_ip
remove_corosync_node existing_node_id
end
end
if @resource[:add_corosync_nodes]
expected_nodes.each do |expected_node_id, expected_node_ip|
next if existing_nodes[expected_node_id] == expected_node_ip
add_corosync_node expected_node_id
end
end
end
def pacemaker_nodes
debug 'Call: pacemaker_nodes'
wait_for_online
change_fqdn_to_name if change_fqdn_to_name?
debug "Return: #{pacemaker_nodes_structure.inspect}"
pacemaker_nodes_structure
end
def pacemaker_nodes=(expected_nodes)
debug "Call: pacemaker_nodes='#{expected_nodes.inspect}'"
existing_nodes = pacemaker_nodes_structure
if @resource[:remove_pacemaker_nodes]
existing_nodes.each do |existing_node_name, existing_node_id|
next if expected_nodes[existing_node_name] == existing_node_id
remove_pacemaker_node existing_node_name
end
end
if @resource[:add_pacemaker_nodes]
expected_nodes.each do |expected_node_name, expected_node_id|
next if existing_nodes[expected_node_name] == expected_node_id
add_pacemaker_node expected_node_name
end
end
end
end
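corosync_nodes_state above scrapes corosync-cmapctl output with two regular expressions. A sketch of that parsing against a canned sample; the sample follows the 'corosync-cmapctl -b nodelist.node' format, but the ids and addresses are hypothetical:

# Sketch: parse nodelist.node output into { number => { 'id' =>, 'ip' => } }.
sample = <<-EOF
nodelist.node.0.nodeid (u32) = 1
nodelist.node.0.ring0_addr (str) = 192.168.0.1
nodelist.node.1.nodeid (u32) = 2
nodelist.node.1.ring0_addr (str) = 192.168.0.2
EOF

nodes = {}
sample.split("\n").each do |line|
  if line =~ %r(^nodelist\.node\.(\d+)\.nodeid\s+\(u32\)\s+=\s+(\d+))
    (nodes[$1] ||= {})['id'] = $2
  elsif line =~ %r(^nodelist\.node\.(\d+)\.ring(\d+)_addr\s+\(str\)\s+=\s+(\S+))
    (nodes[$1] ||= {})['ip'] = $3
  end
end
p nodes
# => {"0"=>{"id"=>"1", "ip"=>"192.168.0.1"}, "1"=>{"id"=>"2", "ip"=>"192.168.0.2"}}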

View File

@ -1,209 +0,0 @@
require File.join File.dirname(__FILE__), '../pacemaker_common.rb'
Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Pacemaker_common do
has_feature :enableable
has_feature :refreshable
commands :uname => 'uname'
commands :pcs => 'pcs'
commands :crm_resource => 'crm_resource'
commands :crm_attribute => 'crm_attribute'
commands :cibadmin => 'cibadmin'
# hostname of the current node
# @return [String]
def hostname
return @hostname if @hostname
@hostname = (uname '-n').chomp.strip
end
# original name passed from the type
# @return [String]
def title
@resource[:name]
end
# primitive name with 'p_' added if needed
# @return [String]
def name
return @name if @name
primitive_name = title
if primitive_exists? primitive_name
Puppet.debug "Primitive with title '#{primitive_name}' was found in CIB"
@name = primitive_name
return @name
end
primitive_name = "p_#{primitive_name}"
if primitive_exists? primitive_name
Puppet.debug "Using '#{primitive_name}' name instead of '#{title}'"
@name = primitive_name
return @name
end
fail "Primitive '#{title}' was not found in CIB!"
end
# full name of the primitive
# if resource is complex use group name
# @return [String]
def full_name
return @full_name if @full_name
if primitive_is_complex? name
full_name = primitives[name]['name']
Puppet.debug "Using full name '#{full_name}' for complex primitive '#{name}'"
@full_name = full_name
else
@full_name = name
end
end
# name of the basic service without 'p_' prefix
# used to disable the basic service
# @return [String]
def basic_service_name
return @basic_service_name if @basic_service_name
if name.start_with? 'p_'
basic_service_name = name.gsub /^p_/, ''
Puppet.debug "Using '#{basic_service_name}' as the basic service name for primitive '#{name}'"
@basic_service_name = basic_service_name
else
@basic_service_name = name
end
end
# cleanup a primitive and
# wait for cleanup to finish
def cleanup
cleanup_primitive full_name, hostname
wait_for_status name
end
# called by Puppet to determine if the service
# is running on the local node
# @return [:running,:stopped]
def status
wait_for_online
Puppet.debug "Call: 'status' for Pacemaker service '#{name}' on node '#{hostname}'"
cib_reset
out = get_primitive_puppet_status name, hostname
out = :stopped unless constraint_location_exists? full_name, hostname
Puppet.debug get_cluster_debug_report
Puppet.debug "Return: '#{out}' (#{out.class})"
out
end
# called by Puppet to start the service
def start
Puppet.debug "Call 'start' for Pacemaker service '#{name}' on node '#{hostname}'"
enable unless primitive_is_managed? name
disable_basic_service
constraint_location_add full_name, hostname
unban_primitive name, hostname
start_primitive full_name
start_primitive name
cleanup
if primitive_is_multistate? name
Puppet.debug "Choose master start for Pacemaker service '#{name}'"
wait_for_master name
else
Puppet.debug "Choose global start for Pacemaker service '#{name}'"
wait_for_start name
end
end
# called by Puppet to stop the service
def stop
Puppet.debug "Call 'stop' for Pacemaker service '#{name}' on node '#{hostname}'"
enable unless primitive_is_managed? name
cleanup
if primitive_is_complex? name
Puppet.debug "Choose local stop for Pacemaker service '#{name}' on node '#{hostname}'"
ban_primitive name, hostname
wait_for_stop name, hostname
else
Puppet.debug "Choose global stop for Pacemaker service '#{name}'"
stop_primitive name
wait_for_stop name
end
end
# called by Puppet to restart the service
def restart
Puppet.debug "Call 'restart' for Pacemaker service '#{name}' on node '#{hostname}'"
unless primitive_is_running? name, hostname
Puppet.info "Pacemaker service '#{name}' is not running on node '#{hostname}'. Skipping restart!"
return
end
begin
stop
rescue
nil
ensure
start
end
end
# called by Puppet to enable the service
def enable
Puppet.debug "Call 'enable' for Pacemaker service '#{name}' on node '#{hostname}'"
manage_primitive name
end
# called by Puppet to disable the service
def disable
Puppet.debug "Call 'disable' for Pacemaker service '#{name}' on node '#{hostname}'"
unmanage_primitive name
end
alias :manual_start :disable
# called by Puppet to determine if the service is enabled
# @return [:true,:false]
def enabled?
Puppet.debug "Call 'enabled?' for Pacemaker service '#{name}' on node '#{hostname}'"
out = get_primitive_puppet_enable name
Puppet.debug "Return: '#{out}' (#{out.class})"
out
end
# create an extra provider instance to deal with the basic service
# the provider will be chosen to match the current system
# @return [Puppet::Type::Service::Provider]
def extra_provider(provider_name = nil)
return @extra_provider if @extra_provider
begin
param_hash = {}
param_hash.store :name, basic_service_name
param_hash.store :provider, provider_name if provider_name
type = Puppet::Type::Service.new param_hash
@extra_provider = type.provider
rescue => e
Puppet.warning "Could not get extra provider for Pacemaker primitive '#{name}': #{e.message}"
@extra_provider = nil
end
end
# disable and stop the basic service
def disable_basic_service
return unless extra_provider
begin
if extra_provider.enableable? and extra_provider.enabled? == :true
Puppet.info "Disable basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'"
extra_provider.disable
else
Puppet.info "Basic service '#{extra_provider.name}' is disabled as reported by '#{extra_provider.class.name}' provider"
end
if extra_provider.status == :running
Puppet.info "Stop basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'"
extra_provider.stop
else
Puppet.info "Basic service '#{extra_provider.name}' is stopped as reported by '#{extra_provider.class.name}' provider"
end
rescue => e
Puppet.warning "Could not disable basic service for Pacemaker primitive '#{name}' using '#{extra_provider.class.name}' provider: #{e.message}"
end
end
end
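The name method above resolves a Puppet service title against the CIB, preferring the exact title and falling back to a 'p_' prefixed primitive. A standalone sketch of that lookup order; KNOWN_PRIMITIVES is a hypothetical stand-in for the CIB contents:

# Sketch: resolve a service title to a primitive name, trying the 'p_' fallback.
KNOWN_PRIMITIVES = %w(p_haproxy p_mysql vip__public)

def resolve(title)
  return title if KNOWN_PRIMITIVES.include?(title)
  prefixed = "p_#{title}"
  return prefixed if KNOWN_PRIMITIVES.include?(prefixed)
  raise "Primitive '#{title}' was not found in CIB!"
end

puts resolve('haproxy')     # => p_haproxy
puts resolve('vip__public') # => vip__public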

View File

@ -1,208 +0,0 @@
module Puppet
newtype(:cs_resource) do
@doc = "Type for manipulating Pacemaker/Pacemaker primitives. Primitives
are probably the most important building block when creating highly
available clusters using Pacemaker and Pacemaker. Each primitive defines
an application, ip address, or similar to monitor and maintain. These
managed primitives are maintained using what is called a resource agent.
These resource agents have a concept of class, type, and subsystem that
provides the functionality. Regretibly these pieces of vocabulary
clash with those used in Puppet so to overcome the name clashing the
property and parameter names have been qualified a bit for clarity.
More information on primitive definitions can be found at the following
link:
* http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_adding_a_resource.html"
ensurable
newparam(:name) do
desc "Name identifier of primitive. This value needs to be unique
across the entire Corosync/Pacemaker configuration since it doesn't have
the concept of name spaces per type."
isnamevar
end
newparam(:primitive_class) do
desc "Pacemaker class of the primitive. Examples of classes are lsb or ocf.
Lsb funtions a lot like the init provider in Puppet for services, an init
script is ran periodically on each host to identify status, or to start
and stop a particular application. Ocf of the other hand is a script with
meta-data and stucture that is specific to Pacemaker and Pacemaker."
isrequired
end
newparam(:primitive_type) do
desc "Pacemaker primitive type. Type generally matches to the specific
'thing' your managing, i.e. ip address or vhost. Though, they can be
completely arbitarily named and manage any number of underlying
applications or resources."
isrequired
end
newparam(:provided_by) do
desc "Pacemaker primitive provider. All resource agents used in a primitve
have something that provides them to the system, be it the Pacemaker or
redhat plugins...they're not always obvious though so currently you're
left to understand Pacemaker enough to figure it out. Usually, if it isn't
obvious it is because there is only one provider for the resource agent.
To find the list of providers for a resource agent run the following
from the command line on a host that has Pacemaker installed:
* `crm configure ra providers <ra> <class>`"
isrequired
end
newparam(:cib) do
desc "Pacemaker applies its configuration immediately. Using a CIB allows
you to group multiple primitives and relationships to be applied at
once. This can be necessary to insert complex configurations into
Pacemaker correctly.
This parameter sets the CIB this primitive should be created in. A
cs_shadow resource with a title of the same name as this value should
also be added to your manifest."
end
# Our parameters and operations properties must be hashes.
newproperty(:parameters) do
desc "A hash of params for the primitive. Parameters in a primitive are
used by the underlying resource agent, each class using them slightly
differently. In ocf scripts they are exported and pulled into the
script as variables to be used. Since the list of these parameters
is completely arbitrary and their validity is not enforced we simply
defer defining a model and just accept a hash."
validate do |value|
unless value.is_a? Hash
raise Puppet::Error, "Puppet::Type::Cs_Primitive: parameters property must be a hash."
end
end
munge do |value|
stringify(value.reject{|k,v| ['', 'nil', 'undef', 'none'].include? v.to_s.downcase})
end
defaultto Hash.new
end
newproperty(:operations) do
desc "A hash of operations for the primitive. Operations defined in a
primitive are a little more predictable as they are commonly things like
monitor or start and their values are in seconds. Since each resource
agent can define its own set of operations we are going to defer again
and just accept a hash. There may be room to model this one but it
would require a review of all resource agents to see if each operation
is valid."
validate do |value|
unless value.is_a? Hash
raise Puppet::Error, "Puppet::Type::Cs_Primitive: operations property must be a hash."
end
end
munge do |value|
stringify value
end
defaultto Hash.new
end
newproperty(:metadata) do
desc "A hash of metadata for the primitive. A primitive can have a set of
metadata that doesn't affect the underlying Pacemaker type/provider but
affects the concept of a resource. This metadata is similar to Puppet's
resources resource and some meta-parameters: they change resource
behavior but have no effect on the data that is synced or manipulated."
validate do |value|
unless value.is_a? Hash
raise Puppet::Error, "Puppet::Type::Cs_Primitive: metadata property must be a hash."
end
end
munge do |value|
stringify value
end
defaultto Hash.new
def insync?(is)
status_metadata = %w(target-role is-managed)
is_without_state = is.reject do |k, v|
status_metadata.include? k.to_s
end
should_without_state = should.reject do |k, v|
status_metadata.include? k.to_s
end
is_without_state == should_without_state
end
end
newproperty(:ms_metadata) do
desc "A hash of metadata for the multistate state."
validate do |value|
unless value.is_a? Hash
raise Puppet::Error, "Puppet::Type::Cs_Primitive: ms_metadata property must be a hash"
end
end
munge do |value|
stringify value
end
defaultto Hash.new
def insync?(is)
status_metadata = %w(target-role is-managed)
is_without_state = is.reject do |k, v|
status_metadata.include? k.to_s
end
should_without_state = should.reject do |k, v|
status_metadata.include? k.to_s
end
is_without_state == should_without_state
end
end
newproperty(:complex_type) do
desc "Designates if the primitive is capable of being managed in a multistate
state. This will create a new ms or clone resource in your Pacemaker config and add
this primitive to it. Concequently Pacemaker will be helpful and update all
your colocation and order resources too but Puppet won't. Hash contains
two key-value pairs: type (master, clone) and its name (${type}_{$primitive_name})
by default"
newvalues('clone', 'master')
end
autorequire(:cs_shadow) do
autos = []
if @parameters[:cib]
Puppet.debug("#{@parameters[:cib].value}")
autos << @parameters[:cib].value
end
autos
end
autorequire(:service) do
%w(corosync pacemaker)
end
validate do
unless self[:ms_metadata].empty? or self[:complex_type]
raise Puppet::Error, 'You should not use ms_metadata if your resource is not clone or master!'
end
end
end
end
# Recursively convert all keys and values of a data structure to strings.
def stringify(data)
if data.is_a? Hash
new_data = {}
data.each do |key, value|
new_data.store stringify(key), stringify(value)
end
data.clear
data.merge! new_data
elsif data.is_a? Array
data.map! do |element|
stringify element
end
else
data.to_s
end
end
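
# A minimal usage sketch (illustrative only): the primitive_class and
# primitive_type parameter names are assumed from the part of this type
# outside this hunk; the values mirror the p_haproxy resource in the CIB
# fixture later in this diff.
#
#   cs_resource { 'p_haproxy':
#     ensure          => present,
#     primitive_class => 'ocf',
#     provided_by     => 'fuel',
#     primitive_type  => 'ns_haproxy',
#     complex_type    => 'clone',
#     ms_metadata     => { 'interleave' => 'true' },
#     parameters      => { 'ns' => 'haproxy' },
#     metadata        => { 'migration-threshold' => '3', 'failure-timeout' => '120' },
#     operations      => {
#       'monitor' => { 'interval' => '20', 'timeout' => '10' },
#       'start'   => { 'interval' => '0', 'timeout' => '30' },
#       'stop'    => { 'interval' => '0', 'timeout' => '30' },
#     },
#   }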

View File

@@ -1,91 +0,0 @@
module Puppet
newtype(:cs_rsc_colocation) do
@doc = "Type for manipulating pacemaker/pacemaker colocation. Colocation
is the grouping together of a set of primitives so that they travel
together when one of them fails. For instance, if a web server vhost
is colocated with a specific ip address and the web server software
crashes, the ip address with migrate to the new host with the vhost.
More information on Pacemaker colocation can be found here:
* http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_ensuring_resources_run_on_the_same_host.html"
ensurable
newparam(:name) do
desc "Identifier of the colocation entry. This value needs to be unique
across the entire Pacemaker/Pacemaker configuration since it doesn't have
the concept of name spaces per type."
isnamevar
end
newproperty(:primitives, :array_matching => :all) do
desc "Two Pacemaker primitives to be grouped together. Colocation groups
come in twos. Property will raise an error if
you do not provide a two value array."
def should=(value)
super
if value.is_a? Array
raise Puppet::Error, "Puppet::Type::Cs_Colocation: The primitives property must be a two value array." unless value.size == 2
@should
else
raise Puppet::Error, "Puppet::Type::Cs_Colocation: The primitives property must be a two value array."
@should
end
end
isrequired
end
newparam(:cib) do
desc "Pacemaker applies its configuration immediately. Using a CIB allows
you to group multiple primitives and relationships to be applied at
once. This can be necessary to insert complex configurations into
Pacemaker correctly.
This parameter sets the CIB this colocation should be created in. A
cs_shadow resource with a title of the same name as this value should
also be added to your manifest."
end
newproperty(:score) do
desc "The priority of this colocation. Primitives can be a part of
multiple colocation groups and so there is a way to control which
primitives get priority when forcing the move of other primitives.
This value can be an integer but is often defined as the string
INFINITY."
defaultto 'INFINITY'
validate do |value|
begin
Integer(value) if value !~ /^([+-])?(inf|INFINITY)$/
rescue ArgumentError
raise Puppet::Error, "score parameter is invalid, should be +/- INFINITY (or inf) or an Integer"
end
end
isrequired
end
autorequire(:cs_shadow) do
autos = []
autos << @parameters[:cib].value if !@parameters[:cib].nil?
autos
end
autorequire(:service) do
%w(corosync pacemaker)
end
autorequire(:cs_resource) do
autos = []
@parameters[:primitives].should.each do |val|
autos << val
end
autos
end
end
end
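
# A minimal usage sketch (illustrative; mirrors the
# vip_management-with-haproxy constraint in the CIB fixture later in this
# diff):
#
#   cs_rsc_colocation { 'vip_management-with-haproxy':
#     ensure     => present,
#     primitives => ['vip__management', 'clone_p_haproxy'],
#     score      => 'INFINITY',
#   }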

View File

@@ -1,35 +0,0 @@
module Puppet
newtype(:cs_rsc_defaults) do
@doc = "Type for manipulating pacemaker configuration rsc_defaults.
Besides the configuration file that is managed by the module the contains
all these related Pacemaker types and providers, there is a set of cluster
rsc_defaults that can be set and saved inside the CIB (A CIB being a set of
configuration that is synced across the cluster, it can be exported as XML
for processing and backup). The type is pretty simple interface for
setting key/value pairs or removing them completely. Removing them will
result in them taking on their default value.
More information on cluster properties can be found here:
* http://clusterlabs.org/doc/en-US/Pacemaker/1.1-plugin/html/Clusters_from_Scratch/ch05s03s02.html"
ensurable
newparam(:name) do
desc "Name identifier of this rsc_defaults. Simply the name of the cluster
rsc_defaults. Happily most of these are unique."
isnamevar
end
newproperty(:value) do
desc "Value of the rsc_defaults. It is expected that this will be a single
value but we aren't validating string vs. integer vs. boolean because
cluster rsc_resources can range the gambit."
end
autorequire(:service) do
[ 'pacemaker' ]
end
end
end
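
# A minimal usage sketch (illustrative; 'resource-stickiness' is a standard
# Pacemaker rsc_defaults option, not taken from this diff):
#
#   cs_rsc_defaults { 'resource-stickiness':
#     ensure => present,
#     value  => '1',
#   }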

View File

@@ -1,92 +0,0 @@
module Puppet
newtype(:cs_rsc_location) do
@doc = "Type for manipulating pacemaker rsc_location with rules.
Location is the set of rules defining the place where resource will be run.
More information on Pacemaker location can be found here:
* http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_ensuring_resources_run_on_the_same_host.html"
ensurable
newparam(:name) do
desc "Identifier of the location entry. This value needs to be unique
across the entire Pacemaker/Pacemaker configuration since it doesn't have
the concept of name spaces per type."
isnamevar
end
newproperty(:primitive) do
desc "Pacemaker primitive being managed."
isrequired
end
newparam(:cib) do
desc "Pacemaker applies its configuration immediately. Using a CIB allows
you to group multiple primitives and relationships to be applied at
once. This can be necessary to insert complex configurations into
Pacemaker correctly.
This parameter sets the CIB this location should be created in. A
cs_shadow resource with a title of the same name as this value should
also be added to your manifest."
end
newproperty(:node_score) do
desc "The score for the node"
validate do |value|
begin
Integer(value) if value !~ /^([+-])?(inf|INFINITY)$/
rescue ArgumentError
raise Puppet::Error, "score parameter is invalid, should be +/- INFINITY (or inf) or an Integer"
end
end
end
newproperty(:rules, :array_matching=>:all) do
desc "Specify rules for location"
munge do |rule|
rule['score'].gsub! 'inf', 'INFINITY'
convert_to_sym(rule)
end
end
newproperty(:node_name) do
desc "The node for which to apply node_score"
end
autorequire(:cs_shadow) do
rv = []
rv << @parameters[:cib].value if !@parameters[:cib].nil?
rv
end
autorequire(:service) do
%w(corosync pacemaker)
end
autorequire(:cs_resource) do
[ @parameters[:primitive].value ]
end
end
end
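# Recursively convert hash keys to symbols, descending into nested values
# and array elements; non-hash leaves are returned unchanged.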
def convert_to_sym(hash)
if hash.is_a? Hash
hash.inject({}) do |memo,(key,value)|
value = convert_to_sym(value)
if value.is_a?(Array)
value.collect! do |arr_el|
convert_to_sym(arr_el)
end
end
memo[key.to_sym] = value
memo
end
else
hash
end
end
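
# A minimal usage sketch (illustrative; mirrors the
# vip__management_on_node-1 constraint in the CIB fixture later in this
# diff):
#
#   cs_rsc_location { 'vip__management_on_node-1':
#     ensure     => present,
#     primitive  => 'vip__management',
#     node_name  => 'node-1',
#     node_score => '100',
#   }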

View File

@@ -1,71 +0,0 @@
module Puppet
newtype(:cs_rsc_order) do
@doc = "Type for manipulating Pacemkaer ordering entries. Order
entries are another type of constraint that can be put on sets of
primitives but unlike colocation, order does matter. These designate
the order at which you need specific primitives to come into a desired
state before starting up a related primitive.
More information can be found at the following link:
* http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_controlling_resource_start_stop_ordering.html"
ensurable
newparam(:name) do
desc "Name identifier of this ordering entry. This value needs to be unique
across the entire Pacemaker/Pacemaker configuration since it doesn't have
the concept of name spaces per type."
isnamevar
end
newproperty(:first) do
desc "First Pacemaker primitive."
end
newproperty(:second) do
desc "Second Pacemaker primitive."
end
newparam(:cib) do
desc "Pacemaker applies its configuration immediately. Using a CIB allows
you to group multiple primitives and relationships to be applied at
once. This can be necessary to insert complex configurations into
Pacemaker correctly.
This parameter sets the CIB this order should be created in. A
cs_shadow resource with a title of the same name as this value should
also be added to your manifest."
end
newproperty(:score) do
desc "The priority of the this ordered grouping. Primitives can be a part
of multiple order groups and so there is a way to control which
primitives get priority when forcing the order of state changes on
other primitives. This value can be an integer but is often defined
as the string INFINITY."
defaultto 'INFINITY'
end
autorequire(:cs_shadow) do
rv = []
rv << @parameters[:cib].value if !@parameters[:cib].nil?
rv
end
autorequire(:service) do
%w(corosync pacemaker)
end
autorequire(:cs_resource) do
autos = []
autos << @parameters[:first].should
autos << @parameters[:second].should
autos
end
end
end
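
# A minimal usage sketch (illustrative; mirrors the
# p_neutron-dhcp-agent-after-clone_p_neutron-plugin-openvswitch-agent
# constraint in the CIB fixture later in this diff):
#
#   cs_rsc_order { 'p_neutron-dhcp-agent-after-clone_p_neutron-plugin-openvswitch-agent':
#     ensure => present,
#     first  => 'clone_p_neutron-plugin-openvswitch-agent',
#     second => 'p_neutron-dhcp-agent',
#     score  => 'INFINITY',
#   }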

View File

@@ -1,123 +0,0 @@
module Puppet
newtype(:pcmk_nodes) do
desc %q(Add and remove cluster nodes)
newparam(:name) do
isnamevar
end
newparam(:debug) do
desc %q(Don't actually make changes)
defaultto false
end
newparam(:nodes) do
desc <<-eos
Nodes data structure:
{
'node-1' => { 'id' => '1', 'ip' => '192.168.0.1'},
'node-2' => { 'id' => '2', 'ip' => '192.168.0.2'},
}
eos
validate do |value|
unless value.is_a? Hash and value.any?
fail 'Nodes should be a non-empty hash!'
end
end
end
newproperty(:corosync_nodes) do
desc <<-eos
Corosync_nodes data structure:
{
# 'id' => 'ip',
'1' => '192.168.0.1',
'2' => '192.168.0.2',
}
eos
defaultto { @resource.set_corosync_nodes }
def insync?(is)
is == should
end
def is_to_s(is)
is.inspect
end
def should_to_s(should)
should.inspect
end
end
newproperty(:pacemaker_nodes) do
desc <<-eos
Pacemaker_nodes data structure:
{
# 'name' => 'id',
'node-1' => '1',
'node-2' => '2',
}
eos
defaultto { @resource.set_pacemaker_nodes }
def insync?(is)
is == should
end
def is_to_s(is)
is.inspect
end
def should_to_s(should)
should.inspect
end
end
newparam(:add_pacemaker_nodes) do
defaultto true
end
newparam(:remove_pacemaker_nodes) do
defaultto true
end
newparam(:add_corosync_nodes) do
defaultto true
end
newparam(:remove_corosync_nodes) do
defaultto true
end
def validate
fail 'No corosync_nodes!' unless self[:corosync_nodes].is_a? Hash and self[:corosync_nodes].any?
fail 'No pacemaker_nodes!' unless self[:pacemaker_nodes].is_a? Hash and self[:pacemaker_nodes].any?
end
def set_corosync_nodes
return unless self[:nodes].respond_to? :each
corosync_nodes = {}
self[:nodes].each do |name, node|
id = node['id']
ip = node['ip']
next unless id and ip
corosync_nodes.store id, ip
end
self[:corosync_nodes] = corosync_nodes
end
def set_pacemaker_nodes
return unless self[:nodes].respond_to? :each
pacemaker_nodes = {}
self[:nodes].each do |name, node|
id = node['id']
next unless id and name
pacemaker_nodes.store name, id
end
self[:pacemaker_nodes] = pacemaker_nodes
end
end
end
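
# A minimal usage sketch (illustrative): corosync_nodes and pacemaker_nodes
# default to values derived from the nodes parameter, so a declaration only
# needs the nodes hash.
#
#   pcmk_nodes { 'pacemaker':
#     nodes => {
#       'node-1' => { 'id' => '1', 'ip' => '192.168.0.1' },
#       'node-2' => { 'id' => '2', 'ip' => '192.168.0.2' },
#     },
#   }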

View File

@@ -1,7 +0,0 @@
--format
s
--colour
--loadby
mtime
--backtrace
--trace

View File

@@ -1 +0,0 @@
require 'puppetlabs_spec_helper/module_spec_helper'

View File

@@ -1,470 +0,0 @@
<cib epoch="622" num_updates="11" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.7" have-quorum="1" dc-uuid="node-1" cib-last-written="Wed Nov 5 10:54:20 2014" update-origin="node-2" update-client="cibadmin">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-42f2063"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="classic openais (with plugin)"/>
<nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="3"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
<nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1415124915"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="node-1">
<instance_attributes id="nodes-node-1">
<nvpair id="nodes-node-1-gtid" name="gtid" value="b65eb4b3-644a-11e4-afd3-9335a5b6ec3f:80680"/>
</instance_attributes>
</node>
<node id="2" uname="node-2">
<instance_attributes id="nodes-node-2">
<nvpair id="nodes-node-2-gtid" name="gtid" value="b65eb4b3-644a-11e4-afd3-9335a5b6ec3f:80645"/>
</instance_attributes>
</node>
<node id="3" uname="node-3"/>
</nodes>
<resources>
<primitive class="ocf" id="vip__public" provider="fuel" type="ns_IPaddr2">
<instance_attributes id="vip__public-instance_attributes">
<nvpair id="vip__public-instance_attributes-nic" name="nic" value="br-ex"/>
<nvpair id="vip__public-instance_attributes-iflabel" name="iflabel" value="ka"/>
<nvpair id="vip__public-instance_attributes-iptables_comment" name="iptables_comment" value="masquerade-for-public-net"/>
<nvpair id="vip__public-instance_attributes-ns_veth" name="ns_veth" value="hapr-p"/>
<nvpair id="vip__public-instance_attributes-base_veth" name="base_veth" value="br-ex-hapr"/>
<nvpair id="vip__public-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
<nvpair id="vip__public-instance_attributes-gateway" name="gateway" value="link"/>
<nvpair id="vip__public-instance_attributes-iptables_stop_rules" name="iptables_stop_rules" value="iptables -t mangle -D PREROUTING -i br-ex-hapr -j MARK --set-mark 0x2a ; iptables -t nat -D POSTROUTING -m mark --mark 0x2a ! -o br-ex -j MASQUERADE"/>
<nvpair id="vip__public-instance_attributes-ns" name="ns" value="haproxy"/>
<nvpair id="vip__public-instance_attributes-iptables_start_rules" name="iptables_start_rules" value="iptables -t mangle -I PREROUTING -i br-ex-hapr -j MARK --set-mark 0x2a ; iptables -t nat -I POSTROUTING -m mark --mark 0x2a ! -o br-ex -j MASQUERADE"/>
<nvpair id="vip__public-instance_attributes-ip" name="ip" value="10.108.1.2"/>
<nvpair id="vip__public-instance_attributes-gateway_metric" name="gateway_metric" value="10"/>
</instance_attributes>
<meta_attributes id="vip__public-meta_attributes">
<nvpair id="vip__public-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="vip__public-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
<nvpair id="vip__public-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="vip__public-monitor-3" interval="3" name="monitor" timeout="30"/>
<op id="vip__public-start-0" interval="0" name="start" timeout="30"/>
<op id="vip__public-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
<clone id="clone_ping_vip__public">
<primitive class="ocf" id="ping_vip__public" provider="pacemaker" type="ping">
<instance_attributes id="ping_vip__public-instance_attributes">
<nvpair id="ping_vip__public-instance_attributes-dampen" name="dampen" value="30s"/>
<nvpair id="ping_vip__public-instance_attributes-timeout" name="timeout" value="3s"/>
<nvpair id="ping_vip__public-instance_attributes-multiplier" name="multiplier" value="1000"/>
<nvpair id="ping_vip__public-instance_attributes-host_list" name="host_list" value="10.108.1.1"/>
</instance_attributes>
<operations>
<op id="ping_vip__public-monitor-20" interval="20" name="monitor" timeout="30"/>
</operations>
</primitive>
</clone>
<primitive class="ocf" id="vip__management" provider="fuel" type="ns_IPaddr2">
<instance_attributes id="vip__management-instance_attributes">
<nvpair id="vip__management-instance_attributes-nic" name="nic" value="br-mgmt"/>
<nvpair id="vip__management-instance_attributes-iflabel" name="iflabel" value="ka"/>
<nvpair id="vip__management-instance_attributes-iptables_comment" name="iptables_comment" value="masquerade-for-management-net"/>
<nvpair id="vip__management-instance_attributes-ns_veth" name="ns_veth" value="hapr-m"/>
<nvpair id="vip__management-instance_attributes-base_veth" name="base_veth" value="br-mgmt-hapr"/>
<nvpair id="vip__management-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
<nvpair id="vip__management-instance_attributes-gateway" name="gateway" value="link"/>
<nvpair id="vip__management-instance_attributes-iptables_stop_rules" name="iptables_stop_rules" value="iptables -t mangle -D PREROUTING -i br-mgmt-hapr -j MARK --set-mark 0x2b ; iptables -t nat -D POSTROUTING -m mark --mark 0x2b ! -o br-mgmt -j MASQUERADE"/>
<nvpair id="vip__management-instance_attributes-ns" name="ns" value="haproxy"/>
<nvpair id="vip__management-instance_attributes-iptables_start_rules" name="iptables_start_rules" value="iptables -t mangle -I PREROUTING -i br-mgmt-hapr -j MARK --set-mark 0x2b ; iptables -t nat -I POSTROUTING -m mark --mark 0x2b ! -o br-mgmt -j MASQUERADE"/>
<nvpair id="vip__management-instance_attributes-ip" name="ip" value="10.108.2.2"/>
<nvpair id="vip__management-instance_attributes-gateway_metric" name="gateway_metric" value="20"/>
</instance_attributes>
<meta_attributes id="vip__management-meta_attributes">
<nvpair id="vip__management-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="vip__management-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
<nvpair id="vip__management-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="vip__management-monitor-3" interval="3" name="monitor" timeout="30"/>
<op id="vip__management-start-0" interval="0" name="start" timeout="30"/>
<op id="vip__management-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
<master id="master_p_rabbitmq-server">
<meta_attributes id="master_p_rabbitmq-server-meta_attributes">
<nvpair id="master_p_rabbitmq-server-meta_attributes-notify" name="notify" value="true"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-master-node-max" name="master-node-max" value="1"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-ordered" name="ordered" value="false"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-target-role" name="target-role" value="Master"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-master-max" name="master-max" value="1"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_rabbitmq-server" provider="fuel" type="rabbitmq-server">
<instance_attributes id="p_rabbitmq-server-instance_attributes">
<nvpair id="p_rabbitmq-server-instance_attributes-node_port" name="node_port" value="5673"/>
</instance_attributes>
<meta_attributes id="p_rabbitmq-server-meta_attributes">
<nvpair id="p_rabbitmq-server-meta_attributes-migration-threshold" name="migration-threshold" value="INFINITY"/>
<nvpair id="p_rabbitmq-server-meta_attributes-failure-timeout" name="failure-timeout" value="60s"/>
</meta_attributes>
<operations>
<op id="p_rabbitmq-server-promote-0" interval="0" name="promote" timeout="120"/>
<op id="p_rabbitmq-server-monitor-30" interval="30" name="monitor" timeout="60"/>
<op id="p_rabbitmq-server-start-0" interval="0" name="start" timeout="120"/>
<op id="p_rabbitmq-server-monitor-27" interval="27" name="monitor" role="Master" timeout="60"/>
<op id="p_rabbitmq-server-stop-0" interval="0" name="stop" timeout="60"/>
<op id="p_rabbitmq-server-notify-0" interval="0" name="notify" timeout="60"/>
<op id="p_rabbitmq-server-demote-0" interval="0" name="demote" timeout="60"/>
</operations>
</primitive>
</master>
<clone id="clone_p_neutron-plugin-openvswitch-agent">
<meta_attributes id="clone_p_neutron-plugin-openvswitch-agent-meta_attributes">
<nvpair id="clone_p_neutron-plugin-openvswitch-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-plugin-openvswitch-agent" provider="fuel" type="neutron-agent-ovs">
<instance_attributes id="p_neutron-plugin-openvswitch-agent-instance_attributes">
<nvpair id="p_neutron-plugin-openvswitch-agent-instance_attributes-plugin_config" name="plugin_config" value="/etc/neutron/plugin.ini"/>
</instance_attributes>
<operations>
<op id="p_neutron-plugin-openvswitch-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-plugin-openvswitch-agent-start-0" interval="0" name="start" timeout="80"/>
<op id="p_neutron-plugin-openvswitch-agent-stop-0" interval="0" name="stop" timeout="80"/>
</operations>
</primitive>
</clone>
<primitive class="ocf" id="p_neutron-dhcp-agent" provider="fuel" type="neutron-agent-dhcp">
<meta_attributes id="p_neutron-dhcp-agent-meta_attributes">
<nvpair id="p_neutron-dhcp-agent-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_neutron-dhcp-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-dhcp-agent-start-0" interval="0" name="start" timeout="60"/>
<op id="p_neutron-dhcp-agent-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
<primitive id="p_heat-engine" class="ocf" provider="fuel" type="heat-engine">
<meta_attributes id="p_heat-engine-meta_attributes">
<nvpair id="p_heat-engine-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_heat-engine-monitor-20" interval="20" name="monitor" timeout="30"/>
<op id="p_heat-engine-start-0" interval="0" name="start" timeout="60"/>
<op id="p_heat-engine-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
<clone id="clone_p_neutron-metadata-agent">
<meta_attributes id="clone_p_neutron-metadata-agent-meta_attributes">
<nvpair id="clone_p_neutron-metadata-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-metadata-agent" provider="fuel" type="neutron-agent-metadata">
<operations>
<op id="p_neutron-metadata-agent-monitor-60" interval="60" name="monitor" timeout="10"/>
<op id="p_neutron-metadata-agent-start-0" interval="0" name="start" timeout="30"/>
<op id="p_neutron-metadata-agent-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
</clone>
<clone id="clone_p_neutron-l3-agent">
<meta_attributes id="clone_p_neutron-l3-agent-meta_attributes">
<nvpair id="clone_p_neutron-l3-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-l3-agent" provider="fuel" type="neutron-agent-l3">
<instance_attributes id="p_neutron-l3-agent-instance_attributes">
<nvpair id="p_neutron-l3-agent-instance_attributes-syslog" name="syslog" value="true"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-plugin_config" name="plugin_config" value="/etc/neutron/l3_agent.ini"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-debug" name="debug" value="true"/>
</instance_attributes>
<operations>
<op id="p_neutron-l3-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-l3-agent-start-0" interval="0" name="start" timeout="60"/>
<op id="p_neutron-l3-agent-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
</clone>
<clone id="clone_p_mysql">
<primitive class="ocf" id="p_mysql" provider="fuel" type="mysql-wss">
<instance_attributes id="p_mysql-instance_attributes">
<nvpair id="p_mysql-instance_attributes-socket" name="socket" value="/var/run/mysqld/mysqld.sock"/>
<nvpair id="p_mysql-instance_attributes-test_passwd" name="test_passwd" value="password"/>
<nvpair id="p_mysql-instance_attributes-test_user" name="test_user" value="wsrep_sst"/>
</instance_attributes>
<operations>
<op id="p_mysql-monitor-120" interval="120" name="monitor" timeout="115"/>
<op id="p_mysql-start-0" interval="0" name="start" timeout="475"/>
<op id="p_mysql-stop-0" interval="0" name="stop" timeout="175"/>
</operations>
</primitive>
<meta_attributes id="clone_p_mysql-meta_attributes">
<nvpair id="clone_p_mysql-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</clone>
<primitive class="ocf" id="p_ceilometer-alarm-evaluator" provider="fuel" type="ceilometer-alarm-evaluator">
<instance_attributes id="p_ceilometer-alarm-evaluator-instance_attributes">
<nvpair id="p_ceilometer-alarm-evaluator-instance_attributes-user" name="user" value="ceilometer"/>
</instance_attributes>
<meta_attributes id="p_ceilometer-alarm-evaluator-meta_attributes">
<nvpair id="p_ceilometer-alarm-evaluator-meta_attributes-target-role" name="target-role" value="stopped"/>
</meta_attributes>
<operations>
<op id="p_ceilometer-alarm-evaluator-monitor-20" interval="20" name="monitor" timeout="30"/>
<op id="p_ceilometer-alarm-evaluator-start-0" interval="0" name="start" timeout="360"/>
<op id="p_ceilometer-alarm-evaluator-stop-0" interval="0" name="stop" timeout="360"/>
</operations>
</primitive>
<primitive class="ocf" id="p_ceilometer-agent-central" provider="fuel" type="ceilometer-agent-central">
<instance_attributes id="p_ceilometer-agent-central-instance_attributes">
<nvpair id="p_ceilometer-agent-central-instance_attributes-user" name="user" value="ceilometer"/>
</instance_attributes>
<meta_attributes id="p_ceilometer-agent-central-meta_attributes">
<nvpair id="p_ceilometer-agent-central-meta_attributes-target-role" name="target-role" value="stopped"/>
<nvpair id="p_ceilometer-agent-central-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_ceilometer-agent-central-monitor-20" interval="20" name="monitor" timeout="30"/>
<op id="p_ceilometer-agent-central-start-0" interval="0" name="start" timeout="360"/>
<op id="p_ceilometer-agent-central-stop-0" interval="0" name="stop" timeout="360"/>
</operations>
</primitive>
<clone id="clone_p_haproxy">
<meta_attributes id="clone_p_haproxy-meta_attributes">
<nvpair id="clone_p_haproxy-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_haproxy" provider="fuel" type="ns_haproxy">
<instance_attributes id="p_haproxy-instance_attributes">
<nvpair id="p_haproxy-instance_attributes-ns" name="ns" value="haproxy"/>
</instance_attributes>
<meta_attributes id="p_haproxy-meta_attributes">
<nvpair id="p_haproxy-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="p_haproxy-meta_attributes-failure-timeout" name="failure-timeout" value="120"/>
</meta_attributes>
<operations>
<op id="p_haproxy-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_haproxy-start-0" interval="0" name="start" timeout="30"/>
<op id="p_haproxy-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
</clone>
</resources>
<constraints>
<rsc_location id="loc_ping_vip__public" rsc="vip__public">
<rule boolean-op="or" id="loc_ping_vip__public-rule" score="-INFINITY">
<expression attribute="pingd" id="loc_ping_vip__public-expression" operation="not_defined"/>
<expression attribute="pingd" id="loc_ping_vip__public-expression-0" operation="lte" value="0"/>
</rule>
</rsc_location>
<rsc_location id="clone_ping_vip__public_on_node-1" node="node-1" rsc="clone_ping_vip__public" score="100"/>
<rsc_location id="vip__management_on_node-1" node="node-1" rsc="vip__management" score="100"/>
<rsc_colocation id="p_neutron-dhcp-agent-with-clone_p_neutron-plugin-openvswitch-agent" rsc="p_neutron-dhcp-agent" score="INFINITY" with-rsc="clone_p_neutron-plugin-openvswitch-agent"/>
<rsc_order first="clone_p_neutron-plugin-openvswitch-agent" id="p_neutron-dhcp-agent-after-clone_p_neutron-plugin-openvswitch-agent" score="INFINITY" then="p_neutron-dhcp-agent"/>
<rsc_location id="master_p_rabbitmq-server_on_node-1" node="node-1" rsc="master_p_rabbitmq-server" score="100"/>
<rsc_colocation id="vip_management-with-haproxy" rsc="vip__management" score="INFINITY" with-rsc="clone_p_haproxy"/>
<rsc_colocation id="vip_public-with-haproxy" rsc="vip__public" score="INFINITY" with-rsc="clone_p_haproxy"/>
<rsc_location id="p_neutron-dhcp-agent_on_node-1" node="node-1" rsc="p_neutron-dhcp-agent" score="100"/>
<rsc_location id="clone_p_neutron-l3-agent_on_node-1" node="node-1" rsc="clone_p_neutron-l3-agent" score="100"/>
<rsc_location id="clone_p_neutron-metadata-agent_on_node-1" node="node-1" rsc="clone_p_neutron-metadata-agent" score="100"/>
<rsc_location id="vip__public_on_node-1" node="node-1" rsc="vip__public" score="100"/>
<rsc_location id="clone_p_mysql_on_node-1" node="node-1" rsc="clone_p_mysql" score="100"/>
<rsc_location id="clone_p_haproxy_on_node-1" node="node-1" rsc="clone_p_haproxy" score="100"/>
<rsc_location id="clone_p_neutron-plugin-openvswitch-agent_on_node-1" node="node-1" rsc="clone_p_neutron-plugin-openvswitch-agent" score="100"/>
<rsc_location id="p_heat-engine_on_node-1" node="node-1" rsc="p_heat-engine" score="100"/>
<rsc_location id="vip__public_on_node-3" node="node-3" rsc="vip__public" score="100"/>
<rsc_location id="vip__public_on_node-2" node="node-2" rsc="vip__public" score="100"/>
<rsc_location id="clone_ping_vip__public_on_node-3" node="node-3" rsc="clone_ping_vip__public" score="100"/>
<rsc_location id="clone_ping_vip__public_on_node-2" node="node-2" rsc="clone_ping_vip__public" score="100"/>
<rsc_location id="vip__management_on_node-3" node="node-3" rsc="vip__management" score="100"/>
<rsc_location id="vip__management_on_node-2" node="node-2" rsc="vip__management" score="100"/>
<rsc_location id="clone_p_mysql_on_node-2" node="node-2" rsc="clone_p_mysql" score="100"/>
<rsc_location id="master_p_rabbitmq-server_on_node-2" node="node-2" rsc="master_p_rabbitmq-server" score="100"/>
<rsc_location id="clone_p_haproxy_on_node-2" node="node-2" rsc="clone_p_haproxy" score="100"/>
</constraints>
</configuration>
<status>
<node_state id="1" uname="node-1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="do_state_transition">
<transient_attributes id="node-1">
<instance_attributes id="status-node-1">
<nvpair id="status-node-1-probe_complete" name="probe_complete" value="true"/>
<nvpair id="status-node-1-pingd" name="pingd" value="1000"/>
<nvpair id="status-node-1-master-p_rabbitmq-server" name="master-p_rabbitmq-server" value="1000"/>
<nvpair id="status-node-1-fail-count-p_neutron-dhcp-agent" name="fail-count-p_neutron-dhcp-agent" value="5"/>
<nvpair id="status-node-1-last-failure-p_neutron-dhcp-agent" name="last-failure-p_neutron-dhcp-agent" value="1415184394"/>
<nvpair id="status-node-1-fail-count-p_heat-engine" name="fail-count-p_heat-engine" value="5"/>
<nvpair id="status-node-1-last-failure-p_heat-engine" name="last-failure-p_heat-engine" value="1415184394"/>
<nvpair id="status-node-1-rabbit-master" name="rabbit-master" value="true"/>
<nvpair id="status-node-1-rabbit-start-time" name="rabbit-start-time" value="1415184397"/>
</instance_attributes>
</transient_attributes>
<lrm id="1">
<lrm_resources>
<lrm_resource id="p_neutron-plugin-openvswitch-agent" type="neutron-agent-ovs" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_failure_0" operation_key="p_neutron-plugin-openvswitch-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="14:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;14:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="269" rc-code="0" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="17" queue-time="0" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_monitor_20000" operation_key="p_neutron-plugin-openvswitch-agent_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="56:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;56:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="272" rc-code="0" op-status="0" interval="20000" last-rc-change="1415124890" exec-time="18" queue-time="0" op-digest="cb8604dd2f221b4ea8d2b0670feb8819"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-alarm-evaluator" type="ceilometer-alarm-evaluator" class="ocf" provider="fuel">
<lrm_rsc_op id="p_ceilometer-alarm-evaluator_last_0" operation_key="p_ceilometer-alarm-evaluator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="8:32:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;8:32:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="102" rc-code="7" op-status="0" interval="0" last-run="1415123313" last-rc-change="1415123313" exec-time="9" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="p_heat-engine" type="heat-engine" class="ocf" provider="fuel">
<lrm_rsc_op id="p_heat-engine_last_0" operation_key="p_heat-engine_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="62:648:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;62:648:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="456" rc-code="0" op-status="0" interval="0" last-run="1415184593" last-rc-change="1415184593" exec-time="8162" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="p_heat-engine_last_failure_0" operation_key="p_heat-engine_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="60:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="2:1;60:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="387" rc-code="1" op-status="2" interval="0" last-run="1415184334" last-rc-change="1415184334" exec-time="60001" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="p_heat-engine_monitor_20000" operation_key="p_heat-engine_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="64:649:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;64:649:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="459" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184601" exec-time="20" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-agent-central" type="ceilometer-agent-central" class="ocf" provider="fuel">
<lrm_rsc_op id="p_ceilometer-agent-central_last_0" operation_key="p_ceilometer-agent-central_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="8:33:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;8:33:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="107" rc-code="7" op-status="0" interval="0" last-run="1415123321" last-rc-change="1415123321" exec-time="9" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="vip__management" type="ns_IPaddr2" class="ocf" provider="fuel">
<lrm_rsc_op id="vip__management_last_0" operation_key="vip__management_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="15:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;15:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="146" rc-code="0" op-status="0" interval="0" last-run="1415123419" last-rc-change="1415123419" exec-time="1225" queue-time="0" op-digest="88dd3ef5610eee85fdb12cf6731c0720"/>
<lrm_rsc_op id="vip__management_monitor_3000" operation_key="vip__management_monitor_3000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;16:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="158" rc-code="0" op-status="0" interval="3000" last-rc-change="1415123420" exec-time="2077" queue-time="0" op-digest="7e62e225befb35aa4e6bf834801d9954"/>
</lrm_resource>
<lrm_resource id="ping_vip__public" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_vip__public_last_0" operation_key="ping_vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="4:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;4:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="25" rc-code="0" op-status="0" interval="0" last-run="1415123088" last-rc-change="1415123088" exec-time="2009" queue-time="0" op-digest="606e53800773938f91e1c261bb4c725c"/>
<lrm_rsc_op id="ping_vip__public_monitor_20000" operation_key="ping_vip__public_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="5:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;5:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="28" rc-code="0" op-status="0" interval="20000" last-rc-change="1415123090" exec-time="2009" queue-time="0" op-digest="873bd0812a107843932917e76b49de81"/>
</lrm_resource>
<lrm_resource id="p_neutron-l3-agent" type="neutron-agent-l3" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-l3-agent_last_0" operation_key="p_neutron-l3-agent_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="72:101:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;72:101:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="311" rc-code="0" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="1303" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
<lrm_rsc_op id="p_neutron-l3-agent_last_failure_0" operation_key="p_neutron-l3-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="14:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;14:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="292" rc-code="0" op-status="0" interval="0" last-run="1415124901" last-rc-change="1415124901" exec-time="25" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
<lrm_rsc_op id="p_neutron-l3-agent_monitor_20000" operation_key="p_neutron-l3-agent_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="72:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;72:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="317" rc-code="0" op-status="0" interval="20000" last-rc-change="1415124913" exec-time="49" queue-time="0" op-digest="c5692788618f2567fcca9a9865023acd"/>
</lrm_resource>
<lrm_resource id="p_neutron-metadata-agent" type="neutron-agent-metadata" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-metadata-agent_last_failure_0" operation_key="p_neutron-metadata-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="13:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;13:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="309" rc-code="0" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="18" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="p_neutron-metadata-agent_monitor_60000" operation_key="p_neutron-metadata-agent_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="65:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;65:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="315" rc-code="0" op-status="0" interval="60000" last-rc-change="1415124913" exec-time="42" queue-time="0" op-digest="19240b0a103493c96459e91c1a816b50"/>
</lrm_resource>
<lrm_resource id="p_mysql" type="mysql-wss" class="ocf" provider="fuel">
<lrm_rsc_op id="p_mysql_last_0" operation_key="p_mysql_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="76:617:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;76:617:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="337" rc-code="0" op-status="0" interval="0" last-run="1415184199" last-rc-change="1415184199" exec-time="12137" queue-time="0" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
<lrm_rsc_op id="p_mysql_last_failure_0" operation_key="p_mysql_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="14:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;14:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="221" rc-code="0" op-status="0" interval="0" last-run="1415123879" last-rc-change="1415123879" exec-time="70" queue-time="0" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
<lrm_rsc_op id="p_mysql_monitor_120000" operation_key="p_mysql_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="78:618:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;78:618:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="340" rc-code="0" op-status="0" interval="120000" last-rc-change="1415184211" exec-time="57" queue-time="0" op-digest="a8b28e947b0d74f7953d47c509353648"/>
</lrm_resource>
<lrm_resource id="p_neutron-dhcp-agent" type="neutron-agent-dhcp" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-dhcp-agent_last_0" operation_key="p_neutron-dhcp-agent_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="59:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;59:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="414" rc-code="0" op-status="0" interval="0" last-run="1415184400" last-rc-change="1415184400" exec-time="4640" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
<lrm_rsc_op id="p_neutron-dhcp-agent_last_failure_0" operation_key="p_neutron-dhcp-agent_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="58:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="2:1;58:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="385" rc-code="1" op-status="2" interval="0" last-run="1415184334" last-rc-change="1415184334" exec-time="60002" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
<lrm_rsc_op id="p_neutron-dhcp-agent_monitor_20000" operation_key="p_neutron-dhcp-agent_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="60:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;60:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="424" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184405" exec-time="35" queue-time="0" op-digest="1544ce43fe39b90e744ce887e1e691d6"/>
</lrm_resource>
<lrm_resource id="vip__public" type="ns_IPaddr2" class="ocf" provider="fuel">
<lrm_rsc_op id="vip__public_last_0" operation_key="vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="7:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;7:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="144" rc-code="0" op-status="0" interval="0" last-run="1415123419" last-rc-change="1415123419" exec-time="1196" queue-time="0" op-digest="c59062bf796f251f178e8646ea654950"/>
<lrm_rsc_op id="vip__public_monitor_3000" operation_key="vip__public_monitor_3000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="8:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;8:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="155" rc-code="0" op-status="0" interval="3000" last-rc-change="1415123420" exec-time="2085" queue-time="0" op-digest="11d7dbf9a56a20ff8834890f633710aa"/>
</lrm_resource>
<lrm_resource id="p_haproxy" type="ns_haproxy" class="ocf" provider="fuel">
<lrm_rsc_op id="p_haproxy_last_0" operation_key="p_haproxy_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="84:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;84:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="445" rc-code="0" op-status="0" interval="0" last-run="1415184446" last-rc-change="1415184446" exec-time="144" queue-time="0" op-digest="2a23892614b6b1d0f70ca66b073b5bc0"/>
<lrm_rsc_op id="p_haproxy_monitor_20000" operation_key="p_haproxy_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="10:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;10:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="448" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184446" exec-time="53" queue-time="0" op-digest="3513c6578b2be63b3c075d885eb6ac8d"/>
</lrm_resource>
<lrm_resource id="p_rabbitmq-server" type="rabbitmq-server" class="ocf" provider="fuel">
<lrm_rsc_op id="p_rabbitmq-server_last_0" operation_key="p_rabbitmq-server_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="28:629:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;28:629:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="404" rc-code="0" op-status="0" interval="0" last-run="1415184394" last-rc-change="1415184394" exec-time="3654" queue-time="0" op-digest="6f1cd990340e90d62b7efecdec17ba24"/>
<lrm_rsc_op id="p_rabbitmq-server_monitor_27000" operation_key="p_rabbitmq-server_monitor_27000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="28:630:8:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:8;28:630:8:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="412" rc-code="8" op-status="0" interval="27000" last-rc-change="1415184401" exec-time="1103" queue-time="0" op-digest="f81fdd633d61ff45f1dfcce00be7c955"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="2" uname="node-2" crmd="online" crm-debug-origin="do_update_resource" in_ccm="true" join="member" expected="member">
<transient_attributes id="node-2">
<instance_attributes id="status-node-2">
<nvpair id="status-node-2-probe_complete" name="probe_complete" value="true"/>
<nvpair id="status-node-2-master-p_rabbitmq-server" name="master-p_rabbitmq-server" value="1"/>
<nvpair id="status-node-2-pingd" name="pingd" value="1000"/>
<nvpair id="status-node-2-rabbit-start-time" name="rabbit-start-time" value="1415184812"/>
</instance_attributes>
</transient_attributes>
<lrm id="2">
<lrm_resources>
<lrm_resource id="p_neutron-plugin-openvswitch-agent" type="neutron-agent-ovs" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_0" operation_key="p_neutron-plugin-openvswitch-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;16:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="141" rc-code="7" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="29" queue-time="0" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-alarm-evaluator" type="ceilometer-alarm-evaluator" class="ocf" provider="fuel">
<lrm_rsc_op id="p_ceilometer-alarm-evaluator_last_failure_0" operation_key="p_ceilometer-alarm-evaluator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="26:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;26:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="51" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="1" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="p_heat-engine" type="heat-engine" class="ocf" provider="fuel">
<lrm_rsc_op id="p_heat-engine_last_0" operation_key="p_heat-engine_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;16:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="174" rc-code="7" op-status="0" interval="0" last-run="1415124915" last-rc-change="1415124915" exec-time="11" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-agent-central" type="ceilometer-agent-central" class="ocf" provider="fuel">
<lrm_rsc_op id="p_ceilometer-agent-central_last_failure_0" operation_key="p_ceilometer-agent-central_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="27:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;27:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="55" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="0" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="vip__management" type="ns_IPaddr2" class="ocf" provider="fuel">
<lrm_rsc_op id="vip__management_last_0" operation_key="vip__management_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="9" queue-time="0" op-digest="88dd3ef5610eee85fdb12cf6731c0720"/>
</lrm_resource>
<lrm_resource id="ping_vip__public" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_vip__public_last_0" operation_key="ping_vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="24:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;24:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="177" rc-code="0" op-status="0" interval="0" last-run="1415184705" last-rc-change="1415184705" exec-time="2012" queue-time="0" op-digest="606e53800773938f91e1c261bb4c725c"/>
<lrm_rsc_op id="ping_vip__public_monitor_20000" operation_key="ping_vip__public_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="25:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;25:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="180" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184707" exec-time="2010" queue-time="0" op-digest="873bd0812a107843932917e76b49de81"/>
</lrm_resource>
<lrm_resource id="p_neutron-l3-agent" type="neutron-agent-l3" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-l3-agent_last_0" operation_key="p_neutron-l3-agent_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="73:99:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;73:99:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="159" rc-code="0" op-status="0" interval="0" last-run="1415124901" last-rc-change="1415124901" exec-time="4289" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
<lrm_rsc_op id="p_neutron-l3-agent_last_failure_0" operation_key="p_neutron-l3-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;16:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="156" rc-code="0" op-status="0" interval="0" last-run="1415124901" last-rc-change="1415124901" exec-time="63" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
</lrm_resource>
<lrm_resource id="p_neutron-metadata-agent" type="neutron-agent-metadata" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-metadata-agent_last_0" operation_key="p_neutron-metadata-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="15:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;15:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="167" rc-code="7" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="30" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_mysql" type="mysql-wss" class="ocf" provider="fuel">
<lrm_rsc_op id="p_mysql_last_0" operation_key="p_mysql_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="85:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;85:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="183" rc-code="0" op-status="0" interval="0" last-run="1415184750" last-rc-change="1415184750" exec-time="34103" queue-time="1" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
<lrm_rsc_op id="p_mysql_monitor_120000" operation_key="p_mysql_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="86:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;86:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="186" rc-code="0" op-status="0" interval="120000" last-rc-change="1415184784" exec-time="77" queue-time="0" op-digest="a8b28e947b0d74f7953d47c509353648"/>
</lrm_resource>
<lrm_resource id="p_neutron-dhcp-agent" type="neutron-agent-dhcp" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-dhcp-agent_last_0" operation_key="p_neutron-dhcp-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="15:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;15:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="148" rc-code="7" op-status="0" interval="0" last-run="1415124893" last-rc-change="1415124893" exec-time="20" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
</lrm_resource>
<lrm_resource id="vip__public" type="ns_IPaddr2" class="ocf" provider="fuel">
<lrm_rsc_op id="vip__public_last_0" operation_key="vip__public_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;16:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="14" queue-time="0" op-digest="c59062bf796f251f178e8646ea654950"/>
</lrm_resource>
<lrm_resource id="p_haproxy" type="ns_haproxy" class="ocf" provider="fuel">
<lrm_rsc_op id="p_haproxy_last_0" operation_key="p_haproxy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.7" transition-key="97:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;97:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="198" rc-code="0" op-status="0" interval="0" last-run="1415184860" last-rc-change="1415184860" exec-time="236" queue-time="0" op-digest="2a23892614b6b1d0f70ca66b073b5bc0"/>
<lrm_rsc_op id="p_haproxy_monitor_20000" operation_key="p_haproxy_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.7" transition-key="98:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;98:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="201" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184861" exec-time="71" queue-time="0" op-digest="3513c6578b2be63b3c075d885eb6ac8d"/>
</lrm_resource>
<lrm_resource id="p_rabbitmq-server" type="rabbitmq-server" class="ocf" provider="fuel">
<lrm_rsc_op id="p_rabbitmq-server_last_0" operation_key="p_rabbitmq-server_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="38:661:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;38:661:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="189" rc-code="0" op-status="0" interval="0" last-run="1415184803" last-rc-change="1415184803" exec-time="5579" queue-time="0" op-digest="6f1cd990340e90d62b7efecdec17ba24"/>
<lrm_rsc_op id="p_rabbitmq-server_monitor_30000" operation_key="p_rabbitmq-server_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="40:662:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;40:662:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="195" rc-code="0" op-status="0" interval="30000" last-rc-change="1415184812" exec-time="459" queue-time="0" op-digest="f81fdd633d61ff45f1dfcce00be7c955"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="3" uname="node-3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
<transient_attributes id="node-3">
<instance_attributes id="status-node-3">
<nvpair id="status-node-3-probe_complete" name="probe_complete" value="true"/>
<nvpair id="status-node-3-master-p_rabbitmq-server" name="master-p_rabbitmq-server" value="0"/>
<nvpair id="status-node-3-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
<lrm id="3">
<lrm_resources>
<lrm_resource id="p_neutron-plugin-openvswitch-agent" type="neutron-agent-ovs" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_0" operation_key="p_neutron-plugin-openvswitch-agent_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="57:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;57:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="144" rc-code="0" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="34" queue-time="0" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_failure_0" operation_key="p_neutron-plugin-openvswitch-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;18:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="141" rc-code="0" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="20" queue-time="1" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-alarm-evaluator" type="ceilometer-alarm-evaluator" class="ocf" provider="fuel">
<lrm_rsc_op id="p_ceilometer-alarm-evaluator_last_failure_0" operation_key="p_ceilometer-alarm-evaluator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="40:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;40:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="51" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="1" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="p_heat-engine" type="heat-engine" class="ocf" provider="fuel">
<lrm_rsc_op id="p_heat-engine_last_0" operation_key="p_heat-engine_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="174" rc-code="7" op-status="0" interval="0" last-run="1415124915" last-rc-change="1415124915" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-agent-central" type="ceilometer-agent-central" class="ocf" provider="fuel">
<lrm_rsc_op id="p_ceilometer-agent-central_last_failure_0" operation_key="p_ceilometer-agent-central_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="41:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;41:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="55" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="0" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="vip__management" type="ns_IPaddr2" class="ocf" provider="fuel">
<lrm_rsc_op id="vip__management_last_0" operation_key="vip__management_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="32:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;32:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="11" queue-time="0" op-digest="88dd3ef5610eee85fdb12cf6731c0720"/>
</lrm_resource>
<lrm_resource id="ping_vip__public" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_vip__public_last_0" operation_key="ping_vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="21:653:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;21:653:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="177" rc-code="0" op-status="0" interval="0" last-run="1415184703" last-rc-change="1415184703" exec-time="2011" queue-time="0" op-digest="606e53800773938f91e1c261bb4c725c"/>
<lrm_rsc_op id="ping_vip__public_monitor_20000" operation_key="ping_vip__public_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="23:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;23:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="180" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184705" exec-time="2008" queue-time="0" op-digest="873bd0812a107843932917e76b49de81"/>
</lrm_resource>
<lrm_resource id="p_neutron-l3-agent" type="neutron-agent-l3" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-l3-agent_last_0" operation_key="p_neutron-l3-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="159" rc-code="7" op-status="0" interval="0" last-run="1415124900" last-rc-change="1415124900" exec-time="22" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
</lrm_resource>
<lrm_resource id="p_neutron-metadata-agent" type="neutron-agent-metadata" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-metadata-agent_last_0" operation_key="p_neutron-metadata-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="17:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;17:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="167" rc-code="7" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_mysql" type="mysql-wss" class="ocf" provider="fuel">
<lrm_rsc_op id="p_mysql_last_0" operation_key="p_mysql_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="97" rc-code="7" op-status="0" interval="0" last-run="1415123879" last-rc-change="1415123879" exec-time="20039" queue-time="0" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
</lrm_resource>
<lrm_resource id="p_neutron-dhcp-agent" type="neutron-agent-dhcp" class="ocf" provider="fuel">
<lrm_rsc_op id="p_neutron-dhcp-agent_last_0" operation_key="p_neutron-dhcp-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="17:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;17:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="151" rc-code="7" op-status="0" interval="0" last-run="1415124893" last-rc-change="1415124893" exec-time="32" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
</lrm_resource>
<lrm_resource id="vip__public" type="ns_IPaddr2" class="ocf" provider="fuel">
<lrm_rsc_op id="vip__public_last_0" operation_key="vip__public_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="30:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;30:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="9" queue-time="0" op-digest="c59062bf796f251f178e8646ea654950"/>
</lrm_resource>
<lrm_resource id="p_haproxy" type="ns_haproxy" class="ocf" provider="fuel">
<lrm_rsc_op id="p_haproxy_last_0" operation_key="p_haproxy_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:91:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:91:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="133" rc-code="7" op-status="0" interval="0" last-run="1415124881" last-rc-change="1415124881" exec-time="80" queue-time="0" op-digest="2a23892614b6b1d0f70ca66b073b5bc0"/>
</lrm_resource>
<lrm_resource id="p_rabbitmq-server" type="rabbitmq-server" class="ocf" provider="fuel">
<lrm_rsc_op id="p_rabbitmq-server_last_0" operation_key="p_rabbitmq-server_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:83:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:83:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="114" rc-code="7" op-status="0" interval="0" last-run="1415124527" last-rc-change="1415124527" exec-time="132" queue-time="0" op-digest="6f1cd990340e90d62b7efecdec17ba24"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>

View File

@ -1,105 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_resource).provider(:crm) do
let(:resource) { Puppet::Type.type(:cs_resource).new(:name => 'myresource', :provider=> :crm ) }
let(:provider) { resource.provider }
describe "#create" do
before(:each) do
provider.class.stubs(:exec_withenv).returns(0)
end
it "should create resource with corresponding members" do
pending("Fix crm_shadow invocation")
provider.class.stubs(:prefetch)
resource[:primitive_type] = "Dummy"
resource[:provided_by] = "pacemaker"
resource[:primitive_class] = "ocf"
resource[:operations] = {"monitor"=>{"interval"=>"20"}}
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("primitive myresource ocf:pacemaker:Dummy op monitor interval=20 ")
provider.class.prefetch({})
provider.create
provider.flush
end
it "should stop and rename resource when only msname changes" do
pending("fix renaming test")
provider.instance_eval{
@property_hash = {
:name => :myresource,
:provided_by=>"pacemaker",
:ensure=>:present,
:parameters=>{},
:primitive_class=>"ocf",
:primitive_type=>"Dummy",
:metadata=>{},
:ms_metadata=>{}
}
}
resource[:cib] = "shadow"
resource[:primitive_type] = "Dummy"
resource[:provided_by] = "pacemaker"
resource[:primitive_class] = "ocf"
resource[:operations] = {"monitor"=>{"interval"=>"20"}}
provider.expects(:pcs).with('resource', 'disable', 'master_myresource')
provider.expects(:try_command).with('rename','master_myresource', 'SupER_Master')
provider.expects(:try_command).with('rename','master_myresource', 'SupER_Master', 'shadow')
end
it "should stop and delete resource when mstype changes" do
pending("fix mstype change test")
provider.instance_eval{
@property_hash = {
:name => :myresource,
:provided_by=>"pacemaker",
:ensure=>:present,
:parameters=>{},
:primitive_class=>"ocf",
:primitive_type=>"Dummy",
:metadata=>{},
:ms_metadata=>{}
}
}
resource[:cib] = "shadow"
resource[:primitive_type] = "Dummy"
resource[:provided_by] = "pacemaker"
resource[:primitive_class] = "ocf"
resource[:operations] = {"monitor"=>{"interval"=>"20"}}
provider.expects(:pcs).with('resource', 'stop', 'master_myresource')
provider.expects(:try_command).with('delete','master_myresource')
provider.expects(:try_command).with('delete','master_myresource', nil,'shadow')
end
end
describe "#destroy" do
it "should destroy resource with corresponding name" do
provider.expects(:pcs).with('resource', 'disable', 'myresource')
provider.expects(:pcs).with('resource', 'cleanup', 'myresource')
provider.expects(:pcs).with('resource', 'delete', 'myresource')
provider.destroy
end
end
describe "#instances" do
it "should find instances" do
provider.class.stubs(:block_until_ready).returns(true)
out=File.open(File.dirname(__FILE__) + '/../../../../fixtures/cib/cib.xml')
provider.class.stubs(:dump_cib).returns(out,nil)
resources = []
provider.class.instances.each do |instance|
resources << instance.instance_eval{@property_hash}
end
resources[0].should eql(
{:name=>"bar",:provided_by=>"pacemaker",:ensure=>:present,:parameters=>{},:primitive_class=>"ocf",:primitive_type=>"Dummy",:operations=>{"monitor"=>{"interval"=>"20"}},:metadata=>{},:ms_metadata=>{}}
)
end
end
end
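For reference, the crm configure line asserted in the first example can be built from the resource attributes with a couple of joins. A minimal Ruby sketch of that mapping (the helper name is hypothetical and not part of the provider; the real provider also appends a trailing space):
def crm_primitive_line(name, primitive_class, provided_by, primitive_type, operations)
  # one "op <name> key=value ..." clause per operation
  ops = operations.map do |op, attrs|
    "op #{op} " + attrs.map { |k, v| "#{k}=#{v}" }.join(' ')
  end
  "primitive #{name} #{primitive_class}:#{provided_by}:#{primitive_type} #{ops.join(' ')}"
end
crm_primitive_line('myresource', 'ocf', 'pacemaker', 'Dummy', 'monitor' => {'interval' => '20'})
# => "primitive myresource ocf:pacemaker:Dummy op monitor interval=20"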

View File

@ -1,40 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_rsc_colocation).provider(:crm) do
let(:resource) { Puppet::Type.type(:cs_rsc_colocation).new(:name => 'mycolocation', :provider=> :crm ) }
let(:provider) { resource.provider }
describe "#create" do
it "should create colocation with corresponding members" do
resource[:primitives] = ["p_1", "p_2"]
resource[:score] = "inf"
provider.class.stubs(:exec_withenv).returns(0)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("colocation mycolocation inf: p_1 p_2")
provider.create
provider.flush
end
end
describe "#destroy" do
it "should destroy colocation with corresponding name" do
provider.expects(:crm).with('configure', 'delete', "mycolocation")
provider.destroy
provider.flush
end
end
describe "#instances" do
it "should find instances" do
provider.class.stubs(:block_until_ready).returns(true)
out=File.open(File.dirname(__FILE__) + '/../../../../fixtures/cib/cib.xml')
provider.class.stubs(:dump_cib).returns(out,nil)
instances = provider.class.instances
instances[0].instance_eval{@property_hash}.should eql({:name=>"foo-with-bar",:score=>"INFINITY", :primitives=> ['foo','bar'], :ensure=>:present, :provider=>:crm})
end
end
end

View File

@ -1,126 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_rsc_location).provider(:crm) do
let(:resource) { Puppet::Type.type(:cs_rsc_location).new(:name => 'mylocation', :provider=> :crm ) }
let(:provider) { resource.provider }
describe "#create" do
before(:each) do
provider.class.stubs(:exec_withenv).returns(0)
end
it "should create location with corresponding members" do
pending("fix shadow invocation")
resource[:primitive] = "p_1"
resource[:rules] = [
{:score=> "inf",:expressions => [{:attribute=>"pingd",:operation=>"defined"}]}
]
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("location mylocation p_1 rule inf: pingd defined")
provider.create
provider.flush
end
it "should create location with date_spec" do
pending("fix shadow invocation")
resource[:primitive] = "p_1"
resource[:rules] = [
{:score=> "inf",:date_expressions => [{:date_spec=>{:hours=>"10", :weeks=>"5"}, :operation=>"date_spec", :start=>"", :end=>""}]}
]
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("location mylocation p_1 rule inf: date date_spec hours=10 weeks=5")
provider.create
provider.flush
end
it "should create location with lt" do
pending("fix shadow invocation")
resource[:primitive] = "p_1"
resource[:rules] = [
{:score=> "inf",:date_expressions => [{:operation=>"lt", :end=>"20131212",:start=>""}]}
]
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("location mylocation p_1 rule inf: date lt 20131212")
provider.create
provider.flush
end
it "should create location with gt" do
pending("fix shadow invocation")
resource[:primitive] = "p_1"
resource[:rules] = [
{:score=> "inf",:date_expressions => [{:operation=>"gt", :end=>"",:start=>"20121212"}]}
]
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("location mylocation p_1 rule inf: date gt 20121212")
provider.create
provider.flush
end
it "should create location with duration" do
pending("fix shadow invocation")
resource[:primitive] = "p_1"
resource[:rules] = [
{:score=> "inf",:date_expressions => [{:operation=>"in_range", :end=>"",:start=>"20121212", :duration=>{:weeks=>"5"}}]}
]
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("location mylocation p_1 rule inf: date in_range start=20121212 weeks=5")
provider.create
provider.flush
end
end
describe "#destroy" do
it "should destroy location with corresponding name" do
provider.expects(:crm).with('configure', 'delete', "mylocation")
provider.destroy
provider.flush
end
end
describe "#instances" do
it "should find instances" do
provider.class.stubs(:block_until_ready).returns(true)
out=File.open(File.dirname(__FILE__) + '/../../../../fixtures/cib/cib.xml')
provider.class.stubs(:dump_cib).returns(out,nil)
instances = provider.class.instances
instances[0].instance_eval{@property_hash}.should eql(
{:name=>"l_11",:rules=>[
{:score=>"INFINITY",:boolean=>'',
:expressions=>[
{:attribute=>"#uname",:operation=>'ne',:value=>'ubuntu-1'}
],
:date_expressions => [
{:date_spec=>{:hours=>"10", :weeks=>"5"}, :operation=>"date_spec", :start=>"", :end=>""},
{:operation=>"in_range", :start=>"20121212", :end=>"20131212"},
{:operation=>"gt", :start=>"20121212",:end=>""},
{:operation=>"lt", :end=>"20131212",:start=>""},
{:operation=>"in_range", :start=>"20121212", :end=>"",:duration=>{:years=>"10"}}
]
}
],
:primitive=> 'master_bar', :node_score=>nil,:node_name=>nil, :ensure=>:present, :provider=>:crm})
instances[1].instance_eval{@property_hash}.should eql(:name=>"l_12",:node_score=>"INFINITY",:node_name=>"ubuntu-1",:primitive=>"master_bar",:ensure=>:present,:provider=>:crm,:rules=>[])
end
end
end

View File

@ -1,41 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_rsc_order).provider(:crm) do
let(:resource) { Puppet::Type.type(:cs_rsc_order).new(:name => 'myorder', :provider=> :crm ) }
let(:provider) { resource.provider }
describe "#create" do
it "should create order with corresponding members" do
resource[:first] = "p_1"
resource[:second] = "p_2"
resource[:score] = "inf"
provider.class.stubs(:exec_withenv).returns(0)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
tmpfile.expects(:write).with("order myorder inf: p_1 p_2")
provider.create
provider.flush
end
end
describe "#destroy" do
it "should destroy order with corresponding name" do
provider.expects(:crm).with('configure', 'delete', "myorder")
provider.destroy
provider.flush
end
end
describe "#instances" do
it "should find instances" do
provider.class.stubs(:block_until_ready).returns(true)
out=File.open(File.dirname(__FILE__) + '/../../../../fixtures/cib/cib.xml')
provider.class.stubs(:dump_cib).returns(out,nil)
instances = provider.class.instances
instances[0].instance_eval{@property_hash}.should eql({:name=>"foo-before-bar",:score=>"INFINITY", :first=> 'foo',:second=>'bar', :ensure=>:present, :provider=>:crm})
end
end
end

View File

@ -1,242 +0,0 @@
require 'spec_helper'
require File.expand_path(File.join(File.dirname(__FILE__), '../../../../lib/puppet/provider/pacemaker_common.rb'))
describe Puppet::Provider::Pacemaker_common do
cib_xml_file = File.join File.dirname(__FILE__), 'cib.xml'
let(:raw_cib) do
File.read cib_xml_file
end
let(:resources_regexp) do
%r{nova|cinder|glance|keystone|neutron|sahara|murano|ceilometer|heat|swift}
end
###########################
#-> Cloned primitive 'clone_p_neutron-plugin-openvswitch-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Cloned primitive 'clone_ping_vip__public' global status: start
#node-1: start | node-2: start | node-3: start
#-> Cloned primitive 'clone_p_neutron-metadata-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'vip__management' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Cloned primitive 'clone_p_mysql' global status: start
#node-1: start | node-2: start | node-3: stop
#-> Multistate primitive 'master_p_rabbitmq-server' global status: master
#node-1: master | node-2: start | node-3: stop
#-> Cloned primitive 'clone_p_haproxy' global status: start
#node-1: start | node-2: start | node-3: stop
#-> Simple primitive 'p_ceilometer-alarm-evaluator' global status: stop
#node-1: stop | node-2: stop (FAIL) | node-3: stop (FAIL)
#-> Simple primitive 'p_ceilometer-agent-central' global status: stop
#node-1: stop | node-2: stop (FAIL) | node-3: stop (FAIL)
#-> Cloned primitive 'clone_p_neutron-l3-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'p_neutron-dhcp-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'vip__public' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'p_heat-engine' global status: start
#node-1: start | node-2: stop | node-3: stop
before(:each) do
@class = subject
@class.stubs(:raw_cib).returns raw_cib
@class.stubs(:pcs).returns true
end
context 'configuration parser' do
it 'can obtain a CIB XML object' do
expect(@class.cib.to_s).to include '<configuration>'
expect(@class.cib.to_s).to include '<nodes>'
expect(@class.cib.to_s).to include '<resources>'
expect(@class.cib.to_s).to include '<status>'
expect(@class.cib.to_s).to include '<operations>'
end
it 'can get primitives section of CIB XML' do
expect(@class.cib_section_primitives).to be_a(Array)
expect(@class.cib_section_primitives.first.to_s).to start_with '<primitive'
expect(@class.cib_section_primitives.first.to_s).to end_with '</primitive>'
end
it 'can get primitives configuration' do
expect(@class.primitives).to be_a Hash
expect(@class.primitives['vip__public']).to be_a Hash
expect(@class.primitives['vip__public']['meta_attributes']).to be_a Hash
expect(@class.primitives['vip__public']['instance_attributes']).to be_a Hash
expect(@class.primitives['vip__public']['instance_attributes']['ip']).to be_a Hash
expect(@class.primitives['vip__public']['operations']).to be_a Hash
expect(@class.primitives['vip__public']['meta_attributes']['resource-stickiness']).to be_a Hash
expect(@class.primitives['vip__public']['operations']['vip__public-start-0']).to be_a Hash
end
it 'can determine if a primitive is simple or complex' do
expect(@class.primitive_is_complex? 'p_haproxy').to eq true
expect(@class.primitive_is_complex? 'vip__management').to eq false
end
end
context 'node status parser' do
it 'can produce nodes structure' do
expect(@class.nodes).to be_a Hash
expect(@class.nodes['node-1']['primitives']['p_heat-engine']['status']).to eq('start')
#puts @class.nodes.inspect
#puts @class.get_cluster_debug_report
end
it 'can determine a global primitive status' do
expect(@class.primitive_status 'p_heat-engine').to eq('start')
expect(@class.primitive_is_running? 'p_heat-engine').to eq true
expect(@class.primitive_status 'p_ceilometer-agent-central').to eq('stop')
expect(@class.primitive_is_running? 'p_ceilometer-agent-central').to eq false
expect(@class.primitive_is_running? 'UNKNOWN').to eq nil
expect(@class.primitive_status 'UNKNOWN').to eq nil
end
it 'can determine a local primitive status on a node' do
expect(@class.primitive_status 'p_heat-engine', 'node-1').to eq('start')
expect(@class.primitive_is_running? 'p_heat-engine', 'node-1').to eq true
expect(@class.primitive_status 'p_heat-engine', 'node-2').to eq('stop')
expect(@class.primitive_is_running? 'p_heat-engine', 'node-2').to eq false
expect(@class.primitive_is_running? 'UNKNOWN', 'node-1').to eq nil
expect(@class.primitive_status 'UNKNOWN', 'node-1').to eq nil
end
it 'can determine if primitive is managed or not' do
expect(@class.primitive_is_managed? 'p_heat-engine').to eq true
expect(@class.primitive_is_managed? 'p_haproxy').to eq true
expect(@class.primitive_is_managed? 'UNKNOWN').to eq nil
end
it 'can determine if primitive is started or not' do
expect(@class.primitive_is_started? 'p_heat-engine').to eq true
expect(@class.primitive_is_started? 'p_haproxy').to eq true
expect(@class.primitive_is_started? 'UNKNOWN').to eq nil
end
it 'can determine whether a primitive has failed globally' do
expect(@class.primitive_has_failures? 'p_ceilometer-agent-central').to eq true
expect(@class.primitive_has_failures? 'p_heat-engine').to eq false
expect(@class.primitive_has_failures? 'UNKNOWN').to eq nil
end
it 'can determine whether a primitive has failed locally' do
expect(@class.primitive_has_failures? 'p_ceilometer-agent-central', 'node-1').to eq false
expect(@class.primitive_has_failures? 'p_ceilometer-agent-central', 'node-2').to eq true
expect(@class.primitive_has_failures? 'p_heat-engine', 'node-1').to eq false
expect(@class.primitive_has_failures? 'p_heat-engine', 'node-2').to eq false
expect(@class.primitive_has_failures? 'UNKNOWN', 'node-1').to eq nil
end
it 'can determine that primitive is complex' do
expect(@class.primitive_is_complex? 'p_haproxy').to eq true
expect(@class.primitive_is_complex? 'p_heat-engine').to eq false
expect(@class.primitive_is_complex? 'p_rabbitmq-server').to eq true
expect(@class.primitive_is_complex? 'UNKNOWN').to eq nil
end
it 'can determine that primitive is multistate' do
expect(@class.primitive_is_multistate? 'p_haproxy').to eq false
expect(@class.primitive_is_multistate? 'p_heat-engine').to eq false
expect(@class.primitive_is_multistate? 'p_rabbitmq-server').to eq true
expect(@class.primitive_is_multistate? 'UNKNOWN').to eq nil
end
it 'can determine that primitive has master running' do
expect(@class.primitive_has_master_running? 'p_rabbitmq-server').to eq true
expect(@class.primitive_has_master_running? 'p_heat-engine').to eq false
expect(@class.primitive_has_master_running? 'UNKNOWN').to eq nil
end
it 'can determine that primitive is clone' do
expect(@class.primitive_is_clone? 'p_haproxy').to eq true
expect(@class.primitive_is_clone? 'p_heat-engine').to eq false
expect(@class.primitive_is_clone? 'p_rabbitmq-server').to eq false
expect(@class.primitive_is_clone? 'UNKNOWN').to eq nil
end
end
context 'cluster control' do
it 'can enable maintenance mode' do
@class.expects(:pcs).with 'property', 'set', 'maintenance-mode=true'
@class.maintenance_mode 'true'
end
it 'can disable maintenance mode' do
@class.expects(:pcs).with 'property', 'set', 'maintenance-mode=false'
@class.maintenance_mode 'false'
end
it 'can set no-quorum policy' do
@class.expects(:pcs).with 'property', 'set', 'no-quorum-policy=ignore'
@class.no_quorum_policy 'ignore'
end
end
context 'constraints control' do
it 'can add location constraint' do
@class.expects(:cibadmin).returns(true)
@class.constraint_location_add 'myprimitive', 'mynode', '200'
end
it 'can remove location constraint' do
@class.expects(:pcs).with 'constraint', 'location', 'remove', 'myprimitive-on-mynode'
@class.constraint_location_remove 'myprimitive', 'mynode'
end
it 'can get the location structure from the CIB XML' do
expect(@class.constraint_locations).to be_a(Hash)
expect(@class.constraint_locations['vip__management_on_node-1']).to be_a(Hash)
expect(@class.constraint_locations['vip__management_on_node-1']['rsc']).to be_a String
end
end
context 'wait functions' do
it 'retries block until it becomes true' do
@class.retry_block_until_true { true }
end
it 'waits for Pacemaker to become ready' do
@class.stubs(:is_online?).returns true
@class.wait_for_online
end
it 'waits for status to become known' do
@class.stubs(:cib_reset).returns true
@class.stubs(:primitive_status).returns 'stopped'
@class.wait_for_status 'myprimitive'
end
it 'waits for the service to start' do
@class.stubs(:cib_reset).returns true
@class.stubs(:primitive_is_running?).with('myprimitive', nil).returns true
@class.wait_for_start 'myprimitive'
end
it 'waits for the service to stop' do
@class.stubs(:cib_reset).returns true
@class.stubs(:primitive_is_running?).with('myprimitive', nil).returns false
@class.wait_for_stop 'myprimitive'
end
end
context 'node id functions' do
let(:node_ids) do
{
"node-1" => {"id"=>"1", "uname"=>"node-1"},
"node-2" => {"id"=>"2", "uname"=>"node-2"},
"node-3" => {"id"=>"3", "uname"=>"node-3"},
}
end
it 'can get the node ids structure' do
expect(@class.node_ids).to eq node_ids
end
end
end
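The examples above double as usage documentation for the common library. A condensed sketch of a typical query session, assuming `common` is any provider object that inherits Pacemaker_common with this fixture CIB loaded (the return values follow the fixture):
common.primitive_status 'p_heat-engine'                   # => 'start' (global)
common.primitive_status 'p_heat-engine', 'node-2'         # => 'stop'  (on one node)
common.primitive_is_complex? 'p_haproxy'                  # => true
common.primitive_has_master_running? 'p_rabbitmq-server'  # => true
common.maintenance_mode 'true'  # shells out to: pcs property set maintenance-mode=true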

View File

@ -1,262 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:pcmk_nodes).provider(:ruby) do
let(:resource) do
Puppet::Type.type(:pcmk_nodes).new(
:name => 'pacemaker',
:provider => :ruby,
:nodes => nodes_data,
)
end
let(:provider) do
provider = resource.provider
if ENV['SPEC_PUPPET_DEBUG']
class << provider
def debug(str)
puts str
end
end
end
provider
end
# output of corosync_cmapctl -b nodelist
let(:cmap_nodelist) do
<<-eos
nodelist.node.0.nodeid (u32) = 1
nodelist.node.0.ring0_addr (str) = 192.168.0.1
nodelist.node.1.nodeid (u32) = 2
nodelist.node.1.ring0_addr (str) = 192.168.0.2
nodelist.node.2.nodeid (u32) = 3
nodelist.node.2.ring0_addr (str) = 192.168.0.3
eos
end
# comes from 'nodes' library method excluding unrelated data
let(:nodes_input) do
{
"node-1" => {'id' => "1", 'uname' => "node-1"},
"node-2" => {'id' => "2", 'uname' => "node-2"},
"node-3" => {'id' => "3", 'uname' => "node-3"},
}
end
# comes from 'node_ids' library method
let(:node_ids_input) do
{
"node-1" => {'id' => "1", 'uname' => "node-1"},
"node-2" => {'id' => "2", 'uname' => "node-2"},
"node-3" => {'id' => "3", 'uname' => "node-3"},
}
end
# retrieved corosync nodes state
let(:corosync_nodes_state) do
{
"0"=>{ "id" => "1", "number" => "0", "ip" => "192.168.0.1" },
"1"=>{ "id" => "2", "number" => "1", "ip" => "192.168.0.2" },
"2"=>{ "id" => "3", "number" => "2", "ip" => "192.168.0.3" },
}
end
# generated existing pacemaker nodes structure
let(:pacemaker_nodes_structure) do
{
"node-1" => "1",
"node-2" => "2",
"node-3" => "3",
}
end
# generated existing corosync nodes structure
let(:corosync_nodes_structure) do
{
"1" => "192.168.0.1",
"2" => "192.168.0.2",
"3" => "192.168.0.3",
}
end
# comes from 'constraint_locations' library method
let(:constraint_locations_input) do
{
"p_neutron-dhcp-agent_on_node-1" => {"id" => "p_neutron-dhcp-agent_on_node-1", "node" => "node-1", "rsc" => "p_neutron-dhcp-agent", "score" => "100"},
"p_neutron-dhcp-agent_on_node-2" => {"id" => "p_neutron-dhcp-agent_on_node-2", "node" => "node-2", "rsc" => "p_neutron-dhcp-agent", "score" => "100"},
"p_neutron-dhcp-agent_on_node-3" => {"id" => "p_neutron-dhcp-agent_on_node-3", "node" => "node-3", "rsc" => "p_neutron-dhcp-agent", "score" => "100"},
"clone_p_haproxy_on_node-1" => {"id" => "clone_p_haproxy_on_node-1", "node" => "node-1", "rsc" => "clone_p_haproxy", "score" => "100"},
"clone_p_haproxy_on_node-2" => {"id" => "clone_p_haproxy_on_node-2", "node" => "node-2", "rsc" => "clone_p_haproxy", "score" => "100"},
"clone_p_haproxy_on_node-3" => {"id" => "clone_p_haproxy_on_node-3", "node" => "node-3", "rsc" => "clone_p_haproxy", "score" => "100"},
}
end
# 'nodes' parameter when nodes should be added and removed
let(:nodes_data) do
{
'node-2' => { "ip" => "192.168.0.2", "id" => "2" },
'node-3' => { "ip" => "192.168.0.3", "id" => "3" },
'node-4' => { "ip" => "192.168.0.4", "id" => "4" },
}
end
# 'nodes' parameter when fqdn should be switched to name
let(:fqdn_nodes_data) do
data = {}
nodes_data.each do |name, node|
name = "#{name}.example.com"
data[name] = node
end
data
end
before(:each) do
provider.stubs(:cmapctl_nodelist).returns cmap_nodelist
provider.stubs(:node_ids).returns node_ids_input
provider.stubs(:nodes).returns nodes_input
provider.stubs(:constraint_locations).returns constraint_locations_input
provider.stubs(:node_name).returns 'node-2'
provider.stubs(:wait_for_online).returns true
end
context 'data structures' do
it 'corosync_nodes_state' do
expect(provider.corosync_nodes_state).to eq(corosync_nodes_state)
end
it 'corosync_nodes_structure' do
expect(provider.corosync_nodes_structure).to eq(corosync_nodes_structure)
end
it 'pacemaker_nodes_structure' do
expect(provider.pacemaker_nodes_structure).to eq(pacemaker_nodes_structure)
end
end
context 'main actions' do
before(:each) do
provider.stubs(:add_corosync_node)
provider.stubs(:remove_corosync_node)
provider.stubs(:add_pacemaker_node)
provider.stubs(:remove_pacemaker_node)
end
it 'can get corosync_nodes' do
expect(provider.corosync_nodes).to eq corosync_nodes_structure
end
it 'can get pacemaker_nodes' do
expect(provider.pacemaker_nodes).to eq pacemaker_nodes_structure
end
it 'removes unexpected corosync_nodes' do
provider.expects(:remove_corosync_node).with('1')
provider.corosync_nodes = resource[:corosync_nodes]
end
it 'adds missing corosync_nodes' do
provider.expects(:add_corosync_node).with('4')
provider.corosync_nodes = resource[:corosync_nodes]
end
it 'removes unexpected pacemaker_nodes' do
provider.expects(:remove_pacemaker_node).with('node-1')
provider.pacemaker_nodes = resource[:pacemaker_nodes]
end
it 'adds missing pacemaker_nodes' do
provider.expects(:add_pacemaker_node).with('node-4')
provider.pacemaker_nodes = resource[:pacemaker_nodes]
end
end
context 'when adding a new pacemaker_node' do
it 'adds a node record' do
provider.expects(:add_pacemaker_node_record).with('node-4', '4')
provider.stubs(:add_pacemaker_node_state)
provider.add_pacemaker_node 'node-4'
end
it 'adds a node_state record' do
provider.stubs(:add_pacemaker_node_record)
provider.expects(:add_pacemaker_node_state).with('node-4', '4')
provider.add_pacemaker_node 'node-4'
end
end
context 'when removing a pacemaker_node' do
before(:each) do
provider.stubs(:remove_pacemaker_node_state)
provider.stubs(:remove_pacemaker_node_record)
provider.stubs(:remove_pacemaker_crm_node)
provider.stubs(:remove_location_constraint)
end
it 'removes the crm_node record' do
provider.expects(:remove_pacemaker_crm_node).with 'node-1'
provider.remove_pacemaker_node 'node-1'
end
it 'cleans out node record' do
provider.expects(:remove_pacemaker_node_record).with 'node-1'
provider.remove_pacemaker_node 'node-1'
end
it 'cleans out node states' do
provider.expects(:remove_pacemaker_node_state).with 'node-1'
provider.remove_pacemaker_node 'node-1'
end
it 'cleans out node-based locations' do
provider.expects(:remove_location_constraint).with 'p_neutron-dhcp-agent_on_node-1'
provider.expects(:remove_location_constraint).with 'clone_p_haproxy_on_node-1'
provider.remove_pacemaker_node 'node-1'
end
end
context 'when adding a new corosync_node' do
it 'can get a new free corosync node number' do
expect(provider.next_corosync_node_number).to eq '3'
end
it 'adds a new corosync node with the correct parameters' do
provider.expects(:add_corosync_node_record).with '3', '192.168.0.4', '4'
provider.add_corosync_node '4'
end
end
context 'when removing a corosync_node' do
it 'removes a node with the correct number' do
provider.expects(:remove_corosync_node_record).with '0'
provider.remove_corosync_node '1'
end
end
context 'FQDN and Hostname compatibility' do
let(:resource) do
Puppet::Type.type(:pcmk_nodes).new(
:name => 'pacemaker',
:provider => :ruby,
:nodes => fqdn_nodes_data,
)
end
it 'can determine when the switch is needed' do
expect(provider.change_fqdn_to_name?).to eq true
end
it 'can rewrite fqdns in the node input data to the hostnames' do
provider.change_fqdn_to_name
expect(provider.resource[:nodes]).to eq nodes_data
expect(provider.corosync_nodes_structure).to eq(corosync_nodes_structure)
end
end
end
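The add/remove expectations above reduce to a set difference between the declared nodes and the ones found in the runtime state. A minimal sketch of that reconciliation, using the corosync fixture data from this file:
expected = { '2' => '192.168.0.2', '3' => '192.168.0.3', '4' => '192.168.0.4' }
existing = { '1' => '192.168.0.1', '2' => '192.168.0.2', '3' => '192.168.0.3' }
to_add    = expected.keys - existing.keys  # => ["4"], triggers add_corosync_node('4')
to_remove = existing.keys - expected.keys  # => ["1"], triggers remove_corosync_node('1')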

View File

@ -1,234 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:service).provider(:pacemaker) do
let(:resource) { Puppet::Type.type(:service).new(:name => title, :provider=> :pacemaker) }
let(:provider) { resource.provider }
let(:title) { 'myservice' }
let(:full_name) { 'clone-p_myservice' }
let(:name) { 'p_myservice' }
let(:hostname) { 'mynode' }
before :each do
@class = provider
@class.stubs(:title).returns(title)
@class.stubs(:hostname).returns(hostname)
@class.stubs(:name).returns(name)
@class.stubs(:full_name).returns(full_name)
@class.stubs(:basic_service_name).returns(title)
@class.stubs(:cib_reset).returns(true)
@class.stubs(:wait_for_online).returns(true)
@class.stubs(:wait_for_status).returns(true)
@class.stubs(:wait_for_start).returns(true)
@class.stubs(:wait_for_stop).returns(true)
@class.stubs(:disable_basic_service).returns(true)
@class.stubs(:get_primitive_puppet_status).returns(:running)
@class.stubs(:get_primitive_puppet_enable).returns(:true)
@class.stubs(:constraint_location_exists?).returns(true)
@class.stubs(:primitive_is_managed?).returns(true)
@class.stubs(:primitive_is_running?).returns(true)
@class.stubs(:primitive_has_failures?).returns(false)
@class.stubs(:primitive_is_complex?).returns(false)
@class.stubs(:primitive_is_multistate?).returns(false)
@class.stubs(:primitive_is_clone?).returns(false)
@class.stubs(:unban_primitive).returns(true)
@class.stubs(:ban_primitive).returns(true)
@class.stubs(:start_primitive).returns(true)
@class.stubs(:stop_primitive).returns(true)
@class.stubs(:cleanup_primitive).returns(true)
@class.stubs(:enable).returns(true)
@class.stubs(:disable).returns(true)
@class.stubs(:constraint_location_add).returns(true)
@class.stubs(:constraint_location_remove).returns(true)
@class.stubs(:get_cluster_debug_report).returns(true)
end
context 'service name mangling' do
it 'uses title as the service name if it is found in CIB' do
@class.unstub(:name)
@class.stubs(:primitive_exists?).with(title).returns(true)
expect(@class.name).to eq(title)
end
it 'uses "p_" prefix with name if found name with prefix' do
@class.unstub(:name)
@class.stubs(:primitive_exists?).with(title).returns(false)
@class.stubs(:primitive_exists?).with(name).returns(true)
expect(@class.name).to eq(name)
end
it 'uses the name without the "p_" prefix to disable the basic service' do
@class.stubs(:name).returns(name)
expect(@class.basic_service_name).to eq(title)
end
end
context '#status' do
it 'should wait for pacemaker to become online' do
@class.expects(:wait_for_online)
@class.status
end
it 'should reset cib memoization on every call' do
@class.expects(:cib_reset)
@class.status
end
it 'gets service status locally' do
@class.expects(:get_primitive_puppet_status).with name, hostname
@class.status
end
it 'counts a service as stopped if location constraint is missing' do
@class.stubs(:get_primitive_puppet_status).returns(:running)
@class.stubs(:constraint_location_exists?).returns(false)
expect(@class.status).to eq :stopped
@class.stubs(:constraint_location_exists?).returns(true)
expect(@class.status).to eq :running
end
end
context '#start' do
it 'enables the service if it is not managed before working with it' do
@class.stubs(:primitive_is_managed?).returns(false)
@class.expects(:enable).once
@class.start
@class.stubs(:primitive_is_managed?).returns(true)
@class.unstub(:enable)
@class.expects(:enable).never
@class.start
end
it 'tries to disable a basic service with the same name' do
@class.expects(:disable_basic_service)
@class.start
end
it 'should clean up a primitive' do
@class.stubs(:primitive_has_failures?).returns(true)
@class.expects(:cleanup_primitive).with(full_name, hostname).once
@class.start
end
it 'tries to unban the service on the node by the name' do
@class.expects(:unban_primitive).with(name, hostname)
@class.start
end
it 'tries to start the service by its full name' do
@class.expects(:start_primitive).with(full_name)
@class.start
end
it 'adds a location constraint for the service by its full_name' do
@class.expects(:constraint_location_add).with(full_name, hostname)
@class.start
end
it 'waits for the service to start locally if primitive is clone' do
@class.stubs(:primitive_is_clone?).returns(true)
@class.stubs(:primitive_is_multistate?).returns(false)
@class.stubs(:primitive_is_complex?).returns(true)
@class.expects(:wait_for_start).with name
@class.start
end
it 'waits for the service to start master anywhere if primitive is multistate' do
@class.stubs(:primitive_is_clone?).returns(false)
@class.stubs(:primitive_is_multistate?).returns(true)
@class.stubs(:primitive_is_complex?).returns(true)
@class.expects(:wait_for_master).with name
@class.start
end
it 'waits for the service to start anywhere if primitive is simple' do
@class.stubs(:primitive_is_clone?).returns(false)
@class.stubs(:primitive_is_multistate?).returns(false)
@class.stubs(:primitive_is_complex?).returns(false)
@class.expects(:wait_for_start).with name
@class.start
end
end
context '#stop' do
it 'enables the service if it is not managed before working with it' do
@class.stubs(:primitive_is_managed?).returns(false)
@class.expects(:enable).once
@class.stop
@class.stubs(:primitive_is_managed?).returns(true)
@class.unstub(:enable)
@class.expects(:enable).never
@class.stop
end
it 'should clean up a primitive on stop' do
@class.expects(:cleanup_primitive).with(full_name, hostname).once
@class.stop
end
it 'uses Ban to stop the service and waits for it to stop locally if service is complex' do
@class.stubs(:primitive_is_complex?).returns(true)
@class.expects(:wait_for_stop).with name, hostname
@class.expects(:ban_primitive).with name, hostname
@class.stop
end
it 'uses Stop to stop the service and waits for it to stop globally if service is simple' do
@class.stubs(:primitive_is_complex?).returns(false)
@class.expects(:wait_for_stop).with name
@class.expects(:stop_primitive).with name
@class.stop
end
end
context '#restart' do
it 'does not stop or start the service if it is not locally running' do
@class.stubs(:primitive_is_running?).with(name, hostname).returns(false)
@class.unstub(:stop)
@class.unstub(:start)
@class.expects(:stop).never
@class.expects(:start).never
@class.restart
end
it 'stops and starts the service if it is locally running' do
@class.stubs(:primitive_is_running?).with(name, hostname).returns(true)
restart_sequence = sequence('restart')
@class.expects(:stop).in_sequence(restart_sequence)
@class.expects(:start).in_sequence(restart_sequence)
@class.restart
end
end
context 'basic service handling' do
before :each do
@class.unstub(:disable_basic_service)
@class.extra_provider.stubs(:enableable?).returns true
@class.extra_provider.stubs(:enabled?).returns :true
@class.extra_provider.stubs(:disable).returns true
@class.extra_provider.stubs(:stop).returns true
@class.extra_provider.stubs(:status).returns :running
end
it 'tries to disable the basic service if it is enabled' do
@class.extra_provider.expects(:disable)
@class.disable_basic_service
end
it 'tries to stop the service if it is running' do
@class.extra_provider.expects(:stop)
@class.disable_basic_service
end
end
end
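One behaviour worth calling out is the status override: a primitive that Pacemaker reports as running still counts as stopped when its location constraint is gone. A minimal sketch of that decision, with hypothetical inputs:
def effective_status(primitive_status, location_exists)
  return :stopped unless location_exists
  primitive_status
end
effective_status(:running, false)  # => :stopped
effective_status(:running, true)   # => :running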

View File

@ -1,143 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_resource) do
subject do
Puppet::Type.type(:cs_resource)
end
it "should have a 'name' parameter" do
type = subject.new(:name => "mock_resource")
expect(type[:name]).to eq("mock_resource")
end
describe "basic structure" do
it "should be able to create an instance" do
provider_class = Puppet::Type::Cs_resource.provider(Puppet::Type::Cs_resource.providers[0])
Puppet::Type::Cs_resource.expects(:defaultprovider).returns(provider_class)
expect(subject.new(:name => "mock_resource")).to_not be_nil
end
[:name, :primitive_class, :primitive_type, :provided_by, :cib].each do |param|
it "should have a #{param} parameter" do
expect(subject.validparameter?(param)).to be_truthy
end
it "should have documentation for its #{param} parameter" do
expect(subject.paramclass(param).doc).to be_instance_of(String)
end
end
[:parameters, :operations, :ms_metadata, :complex_type].each do |property|
it "should have a #{property} property" do
expect(subject.validproperty?(property)).to be_truthy
end
it "should have documentation for its #{property} property" do
expect(subject.propertybyname(property).doc).to be_instance_of(String)
end
end
end
describe "when validating attributes" do
[:parameters, :operations, :metadata, :ms_metadata].each do |attribute|
it "should validate that the #{attribute} attribute defaults to a hash" do
expect(subject.new(:name => "mock_resource")[attribute]).to eq({})
end
it "should validate that the #{attribute} attribute must be a hash" do
expect { subject.new(
:name => "mock_resource",
:parameters => "fail"
) }.to raise_error(Puppet::Error, /hash/)
end
end
it "should validate that the complex_type type attribute cannot be other values" do
["fail", 42].each do |value|
expect { subject.new(
:name => "mock_resource",
:complex_type => value,
) }.to raise_error(Puppet::Error, /(master|clone|\'\')/)
end
end
end
describe "when autorequiring resources" do
before :each do
pending("fix cs_shadow invocation")
@shadow = Puppet::Type.type(:cs_shadow).new(:name => 'baz', :cib=> 'baz')
@catalog = Puppet::Resource::Catalog.new
@catalog.add_resource @shadow
end
it "should autorequire the corresponding resources" do
pending("fix this test")
@resource = described_class.new(:name => 'dummy', :cib=>"baz")
@catalog.add_resource @resource
req = @resource.autorequire
expect(req.size).to eq(1)
# rebind `should` on these objects to the generic Object#should so the matchers below work
[req[0].target,req[0].source].each do |instance|
class << instance
def should(*args)
Object.instance_method(:should).bind(self).call(*args)
end
end
end
expect(req[0].target).to eql(@resource)
expect(req[0].source).to eql(@shadow)
end
end
describe 'special insync conditions' do
before :each do
@type = subject.new(
{
:name => 'my_resource',
:ms_metadata => {
'a' => 1,
'is-managed' => 'true',
},
:metadata => {
'a' => 2,
'is-managed' => 'true',
},
:complex_type => 'master',
}
)
end
it 'should ignore status metadata from ms_metadata hash comparison' do
ms_metadata = @type.property(:ms_metadata)
expect(ms_metadata.insync?({"a" => "1", "is-managed" => "false"})).to be_truthy
end
it 'should ignore status metadata from metadata hash comparison' do
metadata = @type.property(:metadata)
expect(metadata.insync?({"a" => "2", "is-managed" => "false"})).to be_truthy
end
it 'should compare non-status ms_metadata' do
ms_metadata = @type.property(:ms_metadata)
expect(ms_metadata.insync?({'a' => 2})).to be_falsey
end
it 'should compare non-status metadata' do
metadata = @type.property(:metadata)
expect(metadata.insync?({'a' => 1})).to be_falsey
end
end
describe 'munging of input data' do
it 'should convert hash keys and values to strings' do
@type = subject.new({:name => 'myresource'})
@type[:ms_metadata] = { :a => 1, 'b' => true, 'c' => { :a => true, 'b' => :s, 4 => 'd' } }
expect(@type[:ms_metadata]).to eq({"a"=>"1", "b"=>"true", "c"=>{"a"=>"true", "b"=>"s", "4"=>"d"}})
end
end
end
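The munging example at the end corresponds to a simple recursive stringification of keys and values. A sketch of the transformation (the helper name is hypothetical):
def stringify(hash)
  hash.each_with_object({}) do |(key, value), out|
    out[key.to_s] = value.is_a?(Hash) ? stringify(value) : value.to_s
  end
end
stringify(:a => 1, 'b' => true, 'c' => { :a => true, 'b' => :s, 4 => 'd' })
# => {"a"=>"1", "b"=>"true", "c"=>{"a"=>"true", "b"=>"s", "4"=>"d"}}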

View File

@ -1,89 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_rsc_colocation) do
subject do
Puppet::Type.type(:cs_rsc_colocation)
end
it "should have a 'name' parameter" do
subject.new(:name => "mock_resource")[:name].should == "mock_resource"
end
describe "basic structure" do
it "should be able to create an instance" do
provider_class = Puppet::Type::Cs_rsc_colocation.provider(Puppet::Type::Cs_rsc_colocation.providers[0])
Puppet::Type::Cs_rsc_colocation.expects(:defaultprovider).returns(provider_class)
subject.new(:name => "mock_resource").should_not be_nil
end
[:cib, :name ].each do |param|
it "should have a #{param} parameter" do
subject.validparameter?(param).should be_true
end
it "should have documentation for its #{param} parameter" do
subject.paramclass(param).doc.should be_instance_of(String)
end
end
[:primitives,:score].each do |property|
it "should have a #{property} property" do
subject.validproperty?(property).should be_true
end
it "should have documentation for its #{property} property" do
subject.propertybyname(property).doc.should be_instance_of(String)
end
end
it "should validate the score values" do
['fadsfasdf', nil].each do |value|
expect { subject.new(
:name => "mock_colocation",
:primitives => ['foo','bar'],
:score => value
) }.to raise_error(Puppet::Error)
end
end
it "should validate that the primitives must be a two_value array" do
["1", ["1",],["1","2","3"]].each do |value|
expect { subject.new(
:name => "mock_colocation",
:primitives => value
) }.to raise_error(Puppet::Error, /array/)
end
end
end
describe "when autorequiring resources" do
before :each do
@csresource_foo = Puppet::Type.type(:cs_resource).new(:name => 'foo', :ensure => :present)
@csresource_bar = Puppet::Type.type(:cs_resource).new(:name => 'bar', :ensure => :present)
@shadow = Puppet::Type.type(:cs_shadow).new(:name => 'baz',:cib=>"baz")
@catalog = Puppet::Resource::Catalog.new
@catalog.add_resource @shadow, @csresource_bar, @csresource_foo
end
it "should autorequire the corresponding resources" do
@resource = described_class.new(:name => 'dummy', :primitives => ['foo','bar'], :cib=>"baz", :score=>"inf")
@catalog.add_resource @resource
req = @resource.autorequire
req.size.should == 3
req.each do |e|
# rebind `should` on these objects to the generic Object#should so the matchers below work
class << e.target
def should(*args)
Object.instance_method(:should).bind(self).call(*args)
end
end
e.target.should eql(@resource)
[@csresource_bar,@csresource_foo,@shadow].should include(e.source)
end
end
end
end

View File

@ -1,71 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_rsc_location) do
subject do
Puppet::Type.type(:cs_rsc_location)
end
it "should have a 'name' parameter" do
subject.new(:name => "mock_resource")[:name].should == "mock_resource"
end
describe "basic structure" do
it "should be able to create an instance" do
provider_class = Puppet::Type::Cs_rsc_location.provider(Puppet::Type::Cs_rsc_location.providers[0])
Puppet::Type::Cs_rsc_location.expects(:defaultprovider).returns(provider_class)
subject.new(:name => "mock_resource").should_not be_nil
end
[:cib, :name ].each do |param|
it "should have a #{param} parameter" do
subject.validparameter?(param).should be_true
end
it "should have documentation for its #{param} parameter" do
subject.paramclass(param).doc.should be_instance_of(String)
end
end
[:primitive,:node_score,:rules,:node_name].each do |property|
it "should have a #{property} property" do
subject.validproperty?(property).should be_true
end
it "should have documentation for its #{property} property" do
subject.propertybyname(property).doc.should be_instance_of(String)
end
end
end
describe "when autorequiring resources" do
before :each do
pending("fix shadow invocation")
@csresource_foo = Puppet::Type.type(:cs_resource).new(:name => 'foo', :ensure => :present)
@shadow = Puppet::Type.type(:cs_shadow).new(:name => 'baz',:cib=>"baz")
@catalog = Puppet::Resource::Catalog.new
@catalog.add_resource @shadow, @csresource_foo
end
it "should autorequire the corresponding resources" do
@resource = described_class.new(:name => 'dummy', :primitive => 'foo', :cib=>"baz")
@catalog.add_resource @resource
req = @resource.autorequire
req.size.should == 2
req.each do |e|
# rebind `should` on these objects to the generic Object#should so the matchers below work
class << e.target
def should(*args)
Object.instance_method(:should).bind(self).call(*args)
end
end
e.target.should eql(@resource)
[@csresource_foo,@shadow].should include(e.source)
end
end
end
end

View File

@ -1,81 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:cs_rsc_order) do
subject do
Puppet::Type.type(:cs_rsc_order)
end
it "should have a 'name' parameter" do
subject.new(:name => "mock_resource")[:name].should == "mock_resource"
end
describe "basic structure" do
it "should be able to create an instance" do
provider_class = Puppet::Type::Cs_rsc_order.provider(Puppet::Type::Cs_rsc_order.providers[0])
Puppet::Type::Cs_rsc_order.expects(:defaultprovider).returns(provider_class)
subject.new(:name => "mock_resource").should_not be_nil
end
[:cib, :name ].each do |param|
it "should have a #{param} parameter" do
subject.validparameter?(param).should be_true
end
it "should have documentation for its #{param} parameter" do
subject.paramclass(param).doc.should be_instance_of(String)
end
end
[:first,:second,:score].each do |property|
it "should have a #{property} property" do
subject.validproperty?(property).should be_true
end
it "should have documentation for its #{property} property" do
subject.propertybyname(property).doc.should be_instance_of(String)
end
end
it "should validate the score values" do
['fadsfasdf', '10a', nil].each do |value|
expect { subject.new(
:name => "mock_colocation",
:primitives => ['foo','bar'],
:score => value
) }.to raise_error(Puppet::Error)
end
end
end
describe "when autorequiring resources" do
before :each do
@csresource_foo = Puppet::Type.type(:cs_resource).new(:name => 'foo', :ensure => :present)
@csresource_bar = Puppet::Type.type(:cs_resource).new(:name => 'bar', :ensure => :present)
@shadow = Puppet::Type.type(:cs_shadow).new(:name => 'baz',:cib=>"baz")
@catalog = Puppet::Resource::Catalog.new
@catalog.add_resource @shadow, @csresource_bar, @csresource_foo
end
it "should autorequire the corresponding resources" do
@resource = described_class.new(:name => 'dummy', :first => 'foo',:second=>'bar', :cib=>"baz", :score=>"inf")
@catalog.add_resource @resource
req = @resource.autorequire
req.size.should == 3
req.each do |e|
# rebind `should` on these objects to the generic Object#should so the matchers below work
class << e.target
def should(*args)
Object.instance_method(:should).bind(self).call(*args)
end
end
e.target.should eql(@resource)
[@csresource_bar,@csresource_foo,@shadow].should include(e.source)
end
end
end
end

View File

@ -1,83 +0,0 @@
require 'spec_helper'
describe Puppet::Type.type(:pcmk_nodes) do
subject do
Puppet::Type.type(:pcmk_nodes).new(:name => 'pacemaker', :nodes => nodes_data)
end
let(:nodes_data) do
{
'node-1' => { "ip" => "192.168.0.1", "id" => "1" },
'node-2' => { "ip" => "192.168.0.2", "id" => "2" },
'node-3' => { "ip" => "192.168.0.3", "id" => "3" },
'node-4' => { "ip" => "192.168.0.4", "id" => "4" },
}
end
let(:corosync_nodes_data) do
{
'1' => "192.168.0.1",
'2' => "192.168.0.2",
'3' => "192.168.0.3",
'4' => "192.168.0.4",
}
end
let(:pacemaker_nodes_data) do
{
'node-1' => "1",
'node-2' => "2",
'node-3' => "3",
'node-4' => "4",
}
end
it "should have a 'name' parameter" do
expect(subject[:name]).to eq 'pacemaker'
end
it "should have a 'nodes' parameter" do
expect(subject[:nodes]).to eq nodes_data
end
it "should have a 'corosync_nodes' property that defaults to 'nodes' parameter" do
expect(subject[:corosync_nodes]).to eq corosync_nodes_data
end
it "should have a 'pacemaker_nodes' property that defaults to 'nodes' parameter" do
expect(subject[:pacemaker_nodes]).to eq pacemaker_nodes_data
end
it "should fail if nodes data is not provided or incorrect" do
expect {
subject[:nodes] = nil
}.to raise_error
expect {
subject[:nodes] = 'node-1'
}.to raise_error
end
it "should fail if there is no corosync_nodes" do
expect {
subject[:corosync_nodes] = nil
subject.validate
}.to raise_error
expect {
subject[:corosync_nodes] = {}
subject.validate
}.to raise_error
end
it "should fail if there is no pacemaker_nodes" do
expect {
subject[:pacemaker_nodes] = nil
subject.validate
}.to raise_error
expect {
subject[:pacemaker_nodes] = {}
subject.validate
}.to raise_error
end
end
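The two derived defaults asserted above are plain projections of the `nodes` hash. A sketch of the derivation with the same input shape (the type's actual munging may differ in detail):
nodes = {
  'node-1' => { 'ip' => '192.168.0.1', 'id' => '1' },
  'node-2' => { 'ip' => '192.168.0.2', 'id' => '2' },
}
corosync_nodes  = Hash[nodes.values.map { |n| [n['id'], n['ip']] }]  # {"1"=>"192.168.0.1", ...}
pacemaker_nodes = Hash[nodes.map { |name, n| [name, n['id']] }]      # {"node-1"=>"1", ...}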

View File

@ -1,97 +0,0 @@
define pacemaker_wrappers::service (
$ensure = 'present',
$ocf_root_path = '/usr/lib/ocf',
$primitive_class = 'ocf',
$primitive_provider = 'fuel',
$primitive_type = undef,
$prefix = true,
$parameters = undef,
$operations = undef,
$metadata = undef,
$ms_metadata = undef,
$complex_type = undef,
$use_handler = true,
$handler_root_path = '/usr/local/bin',
$ocf_script_template = undef,
$ocf_script_file = undef,
$create_primitive = true,
$cib = undef,
) {
$service_name = $title
if $prefix {
$primitive_name = "p_${service_name}"
} else {
$primitive_name = $service_name
}
$ocf_script_name = "${service_name}-ocf-file"
$ocf_handler_name = "ocf_handler_${service_name}"
$ocf_dir_path = "${ocf_root_path}/resource.d"
$ocf_script_path = "${ocf_dir_path}/${primitive_provider}/${$primitive_type}"
$ocf_handler_path = "${handler_root_path}/${ocf_handler_name}"
Service<| title == $service_name |> {
provider => 'pacemaker',
}
Service<| name == $service_name |> {
provider => 'pacemaker',
}
if $create_primitive {
cs_resource { $primitive_name :
ensure => $ensure,
primitive_class => $primitive_class,
primitive_type => $primitive_type,
provided_by => $primitive_provider,
parameters => $parameters,
operations => $operations,
metadata => $metadata,
ms_metadata => $ms_metadata,
complex_type => $complex_type,
}
}
if $ocf_script_template or $ocf_script_file {
file { $ocf_script_name :
ensure => $ensure,
path => $ocf_script_path,
mode => '0755',
owner => 'root',
group => 'root',
}
if $ocf_script_template {
File[$ocf_script_name] {
content => template($ocf_script_template),
}
} elsif $ocf_script_file {
File[$ocf_script_name] {
source => "puppet:///modules/${ocf_script_file}",
}
}
}
if ($primitive_class == 'ocf') and ($use_handler) {
file { $ocf_handler_name :
ensure => present,
path => $ocf_handler_path,
owner => 'root',
group => 'root',
mode => '0700',
content => template('pacemaker_wrappers/ocf_handler.erb'),
}
}
File<| title == $ocf_script_name |> -> Cs_resource<| title == $primitive_name |>
File<| title == $ocf_script_name |> ~> Service[$service_name]
Cs_resource<| title == $primitive_name |> -> Service[$service_name]
File<| title == $ocf_handler_name |> -> Service[$service_name]
}
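
For comparison with the removed define above, this is roughly how the same wrapper is invoked through pacemaker::service now; a sketch only, limited to the parameter names visible at the call sites in this commit (the service title, OCF agent type and parameter values are placeholders):

pacemaker::service { 'my-service' :
  primitive_type     => 'my-ocf-agent',  # placeholder OCF agent name
  primitive_provider => 'fuel',
  metadata           => { 'resource-stickiness' => '1' },
  parameters         => { 'user' => 'nobody' },  # placeholder OCF parameter
  operations         => {
    'monitor' => { 'interval' => '30', 'timeout' => '20' },
  },
}

service { 'my-service' :
  ensure => 'running',
  enable => true,
}

Note that the call sites below declare the companion service resource without provider => 'pacemaker', which suggests the new wrapper reparents the service to the pacemaker provider the same way the removed define did.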

View File

@ -1,129 +0,0 @@
#!/bin/bash
export PATH='/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin'
export OCF_ROOT='<%= @ocf_root_path %>'
export OCF_RA_VERSION_MAJOR='1'
export OCF_RA_VERSION_MINOR='0'
export OCF_RESOURCE_INSTANCE='<%= @primitive_name %>'
# OCF Parameters
<% if @parameters.is_a? Hash -%>
<% @parameters.each do |k,v| -%>
<% v = v.to_s -%>
<% v = v + "'" unless v.end_with? "'" -%>
<% v = "'" + v unless v.start_with? "'" -%>
<%= "export OCF_RESKEY_#{k}=#{v}" %>
<% end -%>
<% end -%>
help() {
cat<<EOF
OCF wrapper for <%= @service_name %> Pacemaker primitive
Usage: <%= @ocf_handler_name %> [-dh] (action)
Options:
-d - Use set -x to debug the shell script
-h - Show this help
Main actions:
* start
* stop
* monitor
* meta-data
* validate-all
Multistate:
* promote
* demote
* notify
Migration:
* migrate_to
* migrate_from
Optional and unused:
* usage
* help
* status
* reload
* restart
* recover
EOF
}
red() {
echo -e "\033[31m${1}\033[0m"
}
green() {
echo -e "\033[32m${1}\033[0m"
}
blue() {
echo -e "\033[34m${1}\033[0m"
}
ec2error() {
case "${1}" in
0) green 'Success' ;;
1) red 'Error: Generic' ;;
2) red 'Error: Arguments' ;;
3) red 'Error: Unimplemented' ;;
4) red 'Error: Permissions' ;;
5) red 'Error: Installation' ;;
6) red 'Error: Configuration' ;;
7) blue 'Not Running' ;;
8) green 'Master Running' ;;
9) red 'Master Failed' ;;
*) red "Unknown" ;;
esac
}
DEBUG='0'
while getopts ':dh' opt; do
case $opt in
d)
DEBUG='1'
;;
h)
help
exit 0
;;
\?)
echo "Invalid option: -${OPTARG}" >&2
help
exit 1
;;
esac
done
shift "$((OPTIND - 1))"
ACTION="${1}"
# set default action to monitor
if [ "${ACTION}" = '' ]; then
ACTION='monitor'
fi
# alias status to monitor
if [ "${ACTION}" = 'status' ]; then
ACTION='monitor'
fi
# view defined OCF parameters
if [ "${ACTION}" = 'params' ]; then
env | grep 'OCF_'
exit 0
fi
if [ "${DEBUG}" = '1' ]; then
bash -x <%= @ocf_script_path %> "${ACTION}"
else
<%= @ocf_script_path %> "${ACTION}"
fi
ec="${?}"
message="$(ec2error ${ec})"
echo "Exit status: ${message} (${ec})"
exit "${ec}"

View File

@ -33,40 +33,54 @@ define vmware::ceilometer::ha (
}
}
cs_resource { "p_ceilometer_agent_compute_vmware_${availability_zone_name}_${service_name}":
ensure => present,
primitive_class => 'ocf',
provided_by => 'fuel',
primitive_type => 'ceilometer-agent-compute',
metadata => {
'target-role' => 'stopped',
'resource-stickiness' => '1'
$primitive_name = "p_ceilometer_agent_compute_vmware_${availability_zone_name}_${service_name}"
$primitive_class = 'ocf'
$primitive_provider = 'fuel'
$primitive_type = 'ceilometer-agent-compute'
$metadata = {
'target-role' => 'stopped',
'resource-stickiness' => '1'
}
$parameters = {
'amqp_server_port' => $amqp_port,
'config' => $ceilometer_config,
'pid' => "/var/run/ceilometer/ceilometer-agent-compute-${availability_zone_name}_${service_name}.pid",
'user' => "ceilometer",
'additional_parameters' => "--config-file=${ceilometer_compute_conf}",
}
$operations = {
'monitor' => {
'timeout' => '20',
'interval' => '30',
},
parameters => {
amqp_server_port => $amqp_port,
config => $ceilometer_config,
pid => "/var/run/ceilometer/ceilometer-agent-compute-${availability_zone_name}_${service_name}.pid",
user => "ceilometer",
additional_parameters => "--config-file=${ceilometer_compute_conf}",
'start' => {
'timeout' => '360',
},
operations => {
monitor => { timeout => '20', interval => '30' },
start => { timeout => '360' },
stop => { timeout => '360' }
'stop' => {
'timeout' => '360',
}
}
service { "p_ceilometer_agent_compute_vmware_${availability_zone_name}_${service_name}":
ensure => running,
enable => true,
hasstatus => true,
hasrestart => true,
provider => 'pacemaker',
pacemaker::service { $primitive_name :
prefix => false,
primitive_class => $primitive_class,
primitive_provider => $primitive_provider,
primitive_type => $primitive_type,
metadata => $metadata,
parameters => $parameters,
operations => $operations,
}
service { $primitive_name :
ensure => 'running',
enable => true,
}
File["${ceilometer_conf_dir}"]->
File["${ceilometer_compute_conf}"]->
Cs_resource["p_ceilometer_agent_compute_vmware_${availability_zone_name}_${service_name}"]->
Service["p_ceilometer_agent_compute_vmware_${availability_zone_name}_${service_name}"]
Pcmk_resource[$primitive_name]->
Service[$primitive_name]
}
}
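
The ordering chain at the end is the essence of this refactor: pacemaker::service declares the primitive as a Pcmk_resource, so dependency arrows that used to target Cs_resource now target Pcmk_resource. The same shape recurs in the nova-compute and nova-network wrappers below; stripped to a skeleton (resource titles are placeholders):

# Configuration must exist before the primitive is created, and the
# primitive must exist before the service is started on top of it.
File['my-agent-config'] ->
Pcmk_resource['p_my_agent'] ->
Service['p_my_agent']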

View File

@ -26,15 +26,14 @@ define vmware::compute::ha(
$target_node,
$datastore_regex = undef,
$amqp_port = '5673',
$api_retry_count = '5',
$maximum_objects = '100',
$nova_conf = '/etc/nova/nova.conf',
$nova_conf_dir = '/etc/nova/nova-compute.d',
$task_poll_interval = '5.0',
$use_linked_clone = true,
$wsdl_location = undef
) {
# We deploy nova-compute on controller node only if
# $target_node contains 'controllers' otherwise
# service will be deployed on separate node
@ -43,56 +42,71 @@ define vmware::compute::ha(
if ! defined(File[$nova_conf_dir]) {
file { $nova_conf_dir:
    ensure => 'directory',
    owner  => 'nova',
    group  => 'nova',
    mode   => '0750'
  }
}

if ! defined(File[$nova_compute_conf]) {
  # $cluster is used inside template
  $cluster = $name
  file { $nova_compute_conf:
    ensure  => 'present',
    content => template('vmware/nova-compute.conf.erb'),
    mode    => '0600',
    owner   => 'nova',
    group   => 'nova',
  }
}

$primitive_name = "p_nova_compute_vmware_${availability_zone_name}-${service_name}"
$primitive_class = 'ocf'
$primitive_provider = 'fuel'
$primitive_type = 'nova-compute'
$metadata = {
  'resource-stickiness' => '1'
}
$parameters = {
  'amqp_server_port'      => $amqp_port,
  'config'                => $nova_conf,
  'pid'                   => "/var/run/nova/nova-compute-${availability_zone_name}-${service_name}.pid",
  'additional_parameters' => "--config-file=${nova_compute_conf}",
}
$operations = {
  'monitor' => {
    'timeout'  => '10',
    'interval' => '20',
  },
  'start' => {
    'timeout' => '30',
  },
  'stop' => {
    'timeout' => '30',
  }
}

pacemaker::service { $primitive_name :
  prefix             => false,
  primitive_class    => $primitive_class,
  primitive_provider => $primitive_provider,
  primitive_type     => $primitive_type,
  metadata           => $metadata,
  parameters         => $parameters,
  operations         => $operations,
}

service { $primitive_name :
  ensure => 'running',
  enable => true,
}

File["${nova_conf_dir}"]->
File["${nova_compute_conf}"]->
Pcmk_resource[$primitive_name]->
Service[$primitive_name]
}
}

View File

@ -19,32 +19,30 @@ class vmware::network::nova (
$amqp_port = '5673',
$nova_network_config = '/etc/nova/nova.conf',
$nova_network_config_dir = '/etc/nova/nova-network.d'
) {
include nova::params
$nova_network_config_ha = "${nova_network_config_dir}/nova-network-ha.conf"
if ! defined(File[$nova_network_config_dir]) {
file { $nova_network_config_dir:
ensure => 'directory',
owner  => 'nova',
group  => 'nova',
mode => '0750'
}
}
if ! defined(File[$nova_network_config_ha]) {
file { $nova_network_config_ha:
ensure  => 'present',
content => template('vmware/nova-network-ha.conf.erb'),
mode => '0600',
owner   => 'nova',
group   => 'nova',
}
}
$nova_user = 'nova'
$nova_hash = hiera('nova')
$nova_password = $nova_hash['user_password']
@ -52,52 +50,59 @@ class vmware::network::nova (
$auth_url = "http://${management_vip}:5000/v2.0"
$region = hiera('region', 'RegionOne')
$service_name = 'p_vcenter_nova_network'
$primitive_class = 'ocf'
$primitive_provider = 'fuel'
$primitive_type = 'nova-network'
$metadata = {
  'resource-stickiness' => '1'
}
$parameters = {
  'amqp_server_port'      => $amqp_port,
  'user'                  => $nova_user,
  'password'              => $nova_password,
  'auth_url'              => $auth_url,
  'region'                => $region,
  'config'                => $nova_network_config,
  'additional_parameters' => "--config-file=${nova_network_config_ha}",
}
$operations = {
  'monitor' => {
    'interval' => '20',
    'timeout'  => '30',
  },
  'start' => {
    'timeout' => '20',
  },
  'stop' => {
    'timeout' => '20',
  }
}

pacemaker::service { $service_name :
  prefix             => false,
  primitive_class    => $primitive_class,
  primitive_provider => $primitive_provider,
  primitive_type     => $primitive_type,
  metadata           => $metadata,
  parameters         => $parameters,
  operations         => $operations,
}
if ($::operatingsystem == 'Ubuntu') {
tweaks::ubuntu_service_override { 'nova-network':
package_name => 'nova-network'
}
}
service { $service_name :
ensure => 'running',
enable => true,
}
package { 'nova-network':
name   => $::nova::params::network_package_name,
ensure => present
}
service { 'nova-network':
@ -112,9 +117,9 @@ class vmware::network::nova (
Anchor['vcenter-nova-network-start']->
Package['nova-network']->
Service['nova-network']->
File["${nova_network_config_dir}"]->
File["${nova_network_config_ha}"]->
Cs_resource['p_vcenter_nova_network']->
Service['p_vcenter_nova_network']->
File[$nova_network_config_dir]->
File[$nova_network_config_ha]->
Pcmk_resource[$service_name]->
Service[$service_name]->
Anchor['vcenter-nova-network-end']
}

View File

@ -20,9 +20,9 @@ describe 'vmware::ceilometer::ha' do
end
it 'should create service p_ceilometer_agent_compute_vmware_vCenter_prod' do
should contain_pcmk_resource('p_ceilometer_agent_compute_vmware_vCenter_prod').with({
  'primitive_class'    => 'ocf',
  'primitive_provider' => 'fuel',
})
end
@ -31,6 +31,6 @@ describe 'vmware::ceilometer::ha' do
end
it 'should apply configuration file before corosync resource' do
should contain_file('/etc/ceilometer/ceilometer-compute.d/vmware-vCenter_prod.conf').that_comes_before('Pcmk_resource[p_ceilometer_agent_compute_vmware_vCenter_prod]')
end
end

View File

@ -22,9 +22,9 @@ describe 'vmware::compute::ha' do
end
it 'should create service p_nova_compute_vmware_vCenter-prod' do
should contain_pcmk_resource('p_nova_compute_vmware_vCenter-prod').with({
  'primitive_class'    => 'ocf',
  'primitive_provider' => 'fuel',
})
end
@ -33,6 +33,6 @@ describe 'vmware::compute::ha' do
end
it 'should apply configuration file before corosync resource' do
should contain_file('/etc/nova/nova-compute.d/vmware-vCenter_prod.conf').that_comes_before('Pcmk_resource[p_nova_compute_vmware_vCenter-prod]')
end
end

View File

@ -73,12 +73,12 @@ describe manifest do
end
it 'nova-compute should be managed by pacemaker, and should be disabled as a system service' do
expect(subject).to contain_pcmk_resource('p_nova_compute_ironic').with(
:name => "p_nova_compute_ironic",
:ensure => "present",
:primitive_class => "ocf",
:primitive_provider => "pacemaker",
:primitive_type => "nova-compute",
:metadata => {"resource-stickiness" => "1"},
:parameters => {"config" => "/etc/nova/nova.conf",
"pid" => "/var/run/nova/nova-compute-ironic.pid",

View File

@ -143,7 +143,7 @@ describe manifest do
it 'should configure pacemaker RA' do
if use_pacemaker
should contain_class('cluster::rabbitmq_ocf').with(
:command_timeout => $command_timeout,
:debug => debug,
:erlang_cookie => erlang_cookie,

View File

@ -4,53 +4,56 @@ manifest = 'virtual_ips/public_vip_ping.pp'
describe manifest do
shared_examples 'catalog' do
run_ping_checker = Noop.hiera 'run_ping_checker', true
primary_controller = Noop.hiera 'primary_controller'
context 'if pinger is enabled on the primary controller', :if => (run_ping_checker and primary_controller) do
let (:ping_host) {
ping_host = Noop.hiera_structure('network_scheme/endpoints/br-ex/gateway')
raise 'Could not get the ping host!' unless ping_host
ping_host
}
it do
expect(subject).to contain_cluster__virtual_ip_ping('vip__public').with(
:name => "vip__public",
:host_list => ping_host,
)
end
it do
expect(subject).to contain_pcmk_resource('ping_vip__public').with(
:name => "ping_vip__public",
:ensure => "present",
:primitive_class => "ocf",
:primitive_provider => "pacemaker",
:primitive_type => "ping",
:parameters => {"host_list" => ping_host, "multiplier" => "1000", "dampen" => "30s", "timeout" => "3s"},
:operations => {"monitor" => {"interval" => "20", "timeout" => "30"}},
:complex_type => "clone",
:before => ["Pcmk_location[loc_ping_vip__public]", "Service[ping_vip__public]"],
)
end
it do
expect(subject).to contain_service('ping_vip__public').with(
:name => "ping_vip__public",
:ensure => "running",
:enable => true,
:provider => "pacemaker",
)
end
it do
expect(subject).to contain_pcmk_location('loc_ping_vip__public').with(
:name => "loc_ping_vip__public",
:primitive => "vip__public",
:before => "Service[ping_vip__public]",
)
end
end
end
test_ubuntu_and_centos manifest

View File

@ -1,3 +1,6 @@
# List of modules with disabled 'rake lint' check.
# Such modules will be checked with 'puppet-lint' command.
# No need to include here modules defined in the fuel-library Puppetfile.
# TODO(bkupidura): this is needed while pacemaker_wrappers is being removed;
# it should be dropped in a follow-up commit
pacemaker_wrappers

View File

@ -4,6 +4,5 @@ docker
mellanox_openstack
mysql
pacemaker
pacemaker_wrappers
rsyslog
vmware