Remove deprecated pacemaker resource management

Change-Id: I7910dfdde21fb5c01ca62445a5b51c1a582e53df
This commit is contained in:
Takashi Kajinami 2024-04-19 01:36:56 +09:00
parent 6d6887f2e5
commit 1ced44160b
11 changed files with 6 additions and 2306 deletions

View File

@ -59,62 +59,6 @@ Implementation
openstack_extras is a combination of Puppet manifests and Ruby code to deliver
configuration and extra functionality through types and providers.
**HA configuration for Openstack services**
This module allows you to configure OpenStack services in HA. Please refer to the [ha-guide](https://docs.openstack.org/ha-guide/) for details.
If you have a Corosync with Pacemaker cluster with several nodes joined, you may want to use an HA service provider which allows you to create the pacemaker resources for Openstack services and run them in HA mode.
The example HA service configuration for keystone service:
```puppet
openstack_extras::pacemaker::service { 'openstack-keystone' :
ensure => present,
metadata => {},
ms_metadata => {},
operations => {},
parameters => {},
primitive_class => 'systemd',
primitive_provider => false,
primitive_type => 'openstack-keystone',
use_handler => false,
clone => true,
require => Package['openstack-keystone']
}
```
This example will create a pacemaker clone resource named `p_openstack-keystone-clone` and will start it with the help of systemd.
And this example will create a resource `p_cinder-api-clone` for Cinder API service with the given OCF script template from some `cluster` module:
```puppet
$metadata = {
'resource-stickiness' => '1'
}
$operations = {
'monitor' => {
'interval' => '20',
'timeout' => '30',
},
'start' => {
'timeout' => '60',
},
'stop' => {
'timeout' => '60',
},
}
$ms_metadata = {
'interleave' => true,
}
openstack_extras::pacemaker::service { 'cinder-api' :
primitive_type => 'cinder-api',
metadata => $metadata,
ms_metadata => $ms_metadata,
operations => $operations,
clone => true,
ocf_script_template => 'cluster/cinder_api.ocf.erb',
}
```
Limitations
-----------

View File

@ -1,743 +0,0 @@
begin
require 'rexml/document'
rescue LoadError
end
# Common base class for Pacemaker providers: dumps the cluster CIB
# (Cluster Information Base) XML with cibadmin, parses it with REXML
# and memoizes the decoded structures until cib_reset is called.
class Puppet::Provider::Pacemaker_common < Puppet::Provider
# memoized CIB data (reset by cib_reset)
@raw_cib = nil
@cib = nil
@primitives = nil
@primitives_structure = nil
# how many attempts and how long to sleep (seconds) between them
# when retrying cluster commands and status checks
RETRY_COUNT = 100
RETRY_STEP = 6
# dump the raw CIB XML using 'cibadmin -Q'
# @return [String] cib xml
# @raise [Puppet::Error] if cibadmin produced no output
def raw_cib
  @raw_cib = cibadmin '-Q'
  if @raw_cib == '' or not @raw_cib
    fail 'Could not dump CIB XML using "cibadmin -Q" command!'
  end
  @raw_cib
end
# parse the raw CIB into a REXML document (memoized)
# @return [REXML::Document] document rooted at '/'
def cib
  return @cib if @cib
  @cib = REXML::Document.new(raw_cib)
end
# reset all memoized CIB data so the next access fetches fresh state
def cib_reset
  @raw_cib = nil
  @cib = nil
  @primitives = nil
  @primitives_structure = nil
  @nodes_structure = nil
end
# get the lrm_rsc_op elements of a lrm_resource CIB element
# @param lrm_resource [REXML::Element]
#   at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource[@id="resource-name"]
# @return [Array<REXML::Element>] matched lrm_rsc_op elements,
#   or nil when the argument is not a REXML::Element
def cib_section_lrm_rsc_ops(lrm_resource)
  return unless lrm_resource.is_a? REXML::Element
  REXML::XPath.match lrm_resource, 'lrm_rsc_op'
end
# get all node_state CIB sections
# @return [Array<REXML::Element>] at /cib/status/node_state
def cib_section_nodes_state
  REXML::XPath.match cib, '//node_state'
end
# get all primitive CIB sections
# @return [Array<REXML::Element>] at /cib/configuration/resources/primitive
def cib_section_primitives
  REXML::XPath.match cib, '//primitive'
end
# get the lrm_resource elements of a node's lrm CIB element
# @param lrm [REXML::Element]
#   at /cib/status/node_state/lrm[@id="node-name"]
# @return [Array<REXML::Element>] matched lrm_resource elements,
#   or nil when the argument is not a REXML::Element
def cib_section_lrm_resources(lrm)
  return unless lrm.is_a? REXML::Element
  REXML::XPath.match lrm, 'lrm_resources/lrm_resource'
end
# determine the status of a single operation
# @param op [Hash<String => String>] attributes of one lrm_rsc_op
# @return ['start','stop','master',nil] nil for pending or irrelevant ops
def operation_status(op)
  # pending operations have no known status yet
  return nil if op['op-status'] == '-1'
  action = op['operation']
  rc = op['rc-code']
  case action
  when 'monitor'
    # monitor status comes from the return code:
    # 0 - running, 7 - stopped, 8 - master;
    # a failed monitor is (not entirely correctly) counted as 'stop'
    { '0' => 'start', '7' => 'stop', '8' => 'master' }.fetch rc, 'stop'
  when 'start', 'stop', 'promote'
    # these operations only yield a status when they succeeded;
    # a successful promote is reported as 'master'
    return nil unless %w(0 7 8).include? rc
    action == 'promote' ? 'master' : action
  end
end
# determine resource status from its operation history:
# the status reported by the latest relevant operation wins
# @param ops [Array<Hash>] operations sorted by call-id
# @return ['start','stop','master',nil] nil means the status is unknown
def determine_primitive_status(ops)
  ops.reduce(nil) do |last_known, op|
    operation_status(op) || last_known
  end
end
# check if the operation history contains failed operations
# that should be cleaned up later
# @param ops [Array<Hash>]
# @return [TrueClass,FalseClass]
def failed_operations_found?(ops)
  ops.any? do |op|
    # only completed operations are meaningful
    next false unless op['op-status'] == '0'
    case op['operation']
    when 'start', 'stop', 'promote'
      # any non-zero return code is a failure for these actions
      op['rc-code'] != '0'
    when 'monitor'
      # running (0), stopped (7) and master (8) are fine; anything else failed
      !%w(0 7 8).include?(op['rc-code'])
    else
      # other operations are irrelevant
      false
    end
  end
end
# convert an element's XML attributes to a plain string hash
# @param element [REXML::Element]
# @return [Hash<String => String>]
def attributes_to_hash(element)
  element.attributes.each_with_object({}) do |(name, value), hash|
    hash[name.to_s] = value.to_s
  end
end
# convert an element's children (matching the given tag) to a hash
# of their attribute hashes, keyed by the named attribute;
# children missing the key attribute are skipped
# @param element [REXML::Element]
# @param key [String] attribute used as the hash key
# @param tag [String,nil] child tag to collect
# @return [Hash<String => Hash>]
def elements_to_hash(element, key, tag = nil)
  children = element.get_elements tag
  return {} unless children
  children.each_with_object({}) do |child, collected|
    child_attributes = attributes_to_hash child
    child_key = child_attributes[key]
    collected[child_key] = child_attributes if child_key
  end
end
# decode the lrm_resources section of the CIB into a hash of resources
# keyed by resource id; each resource carries its decoded operation
# history ('ops') plus the derived 'status' and 'failed' fields
# @param lrm_resources [Array<REXML::Element>]
# @return [Hash<String => Hash>]
def decode_lrm_resources(lrm_resources)
  resources = {}
  lrm_resources.each do |lrm_resource|
    resource = attributes_to_hash lrm_resource
    id = resource['id']
    next unless id
    lrm_rsc_ops = cib_section_lrm_rsc_ops lrm_resource
    next unless lrm_rsc_ops
    ops = decode_lrm_rsc_ops lrm_rsc_ops
    resource.store 'ops', ops
    resource.store 'status', determine_primitive_status(ops)
    resource.store 'failed', failed_operations_found?(ops)
    resources.store id, resource
  end
  resources
end
# decode the lrm_rsc_ops section of a resource's CIB into an array of
# operation attribute hashes sorted by call-id (execution order)
# @param lrm_rsc_ops [Array<REXML::Element>]
# @return [Array<Hash>]
def decode_lrm_rsc_ops(lrm_rsc_ops)
  ops = []
  lrm_rsc_ops.each do |lrm_rsc_op|
    op = attributes_to_hash lrm_rsc_op
    # operations without a call-id cannot be ordered
    next unless op['call-id']
    ops << op
  end
  ops.sort { |a,b| a['call-id'].to_i <=> b['call-id'].to_i }
end
# get the nodes structure with their resources and statuses,
# keyed by node name (uname); memoized until cib_reset
# @return [Hash<String => Hash>]
def nodes
  return @nodes_structure if @nodes_structure
  @nodes_structure = {}
  cib_section_nodes_state.each do |node_state|
    node = attributes_to_hash node_state
    node_name = node['uname']
    next unless node_name
    lrm = node_state.elements['lrm']
    next unless lrm
    lrm_resources = cib_section_lrm_resources lrm
    next unless lrm_resources
    resources = decode_lrm_resources lrm_resources
    node.store 'primitives', resources
    @nodes_structure.store node_name, node
  end
  @nodes_structure
end
# get the configured primitives with their attributes, keyed by
# primitive id; memoized until cib_reset
# @return [Hash<String => Hash>]
def primitives
  return @primitives_structure if @primitives_structure
  @primitives_structure = {}
  cib_section_primitives.each do |primitive|
    primitive_structure = {}
    id = primitive.attributes['id']
    next unless id
    primitive_structure.store 'name', id
    primitive.attributes.each do |k, v|
      primitive_structure.store k.to_s, v
    end
    # for clone/master resources record the enclosing element as
    # 'parent' and expose the parent's id as the primitive's name
    if primitive.parent.name and primitive.parent.attributes['id']
      parent_structure = {
        'id' => primitive.parent.attributes['id'],
        'type' => primitive.parent.name
      }
      primitive_structure.store 'name', parent_structure['id']
      primitive_structure.store 'parent', parent_structure
    end
    instance_attributes = primitive.elements['instance_attributes']
    if instance_attributes
      instance_attributes_structure = elements_to_hash instance_attributes, 'name', 'nvpair'
      primitive_structure.store 'instance_attributes', instance_attributes_structure
    end
    meta_attributes = primitive.elements['meta_attributes']
    if meta_attributes
      meta_attributes_structure = elements_to_hash meta_attributes, 'name', 'nvpair'
      primitive_structure.store 'meta_attributes', meta_attributes_structure
    end
    operations = primitive.elements['operations']
    if operations
      operations_structure = elements_to_hash operations, 'id', 'op'
      primitive_structure.store 'operations', operations_structure
    end
    @primitives_structure.store id, primitive_structure
  end
  @primitives_structure
end
# check if a primitive is a complex (cloned or multistate) resource
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass,nil] nil if the primitive does not exist
def primitive_is_complex?(primitive)
  return unless primitive_exists? primitive
  primitives[primitive].key? 'parent'
end
# check if a primitive is a clone
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass,nil]
def primitive_is_clone?(primitive)
  is_complex = primitive_is_complex? primitive
  return is_complex unless is_complex
  primitives[primitive]['parent']['type'] == 'clone'
end
# check if a primitive is multistate (master/slave)
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass,nil]
def primitive_is_multistate?(primitive)
  is_complex = primitive_is_complex? primitive
  return is_complex unless is_complex
  primitives[primitive]['parent']['type'] == 'master'
end
# return the primitive's resource class (e.g. ocf, systemd, lsb)
# @param primitive [String] primitive id
# @return [String,nil]
def primitive_class(primitive)
  return unless primitive_exists? primitive
  primitives[primitive]['class']
end
# disable (stop) this primitive
# @param primitive [String]
def disable_primitive(primitive)
  retry_command {
    pcs 'resource', 'disable', primitive
  }
end
alias :stop_primitive :disable_primitive
# enable (start) this primitive
# @param primitive [String]
def enable_primitive(primitive)
  retry_command {
    pcs 'resource', 'enable', primitive
  }
end
alias :start_primitive :enable_primitive
# ban this primitive
# @param primitive [String]
# @param node [String] optional node name
def ban_primitive(primitive, node = '')
  retry_command {
    pcs 'resource', 'ban', primitive, node
  }
end
# move this primitive
# @param primitive [String]
# @param node [String] optional node name
def move_primitive(primitive, node = '')
  retry_command {
    pcs 'resource', 'move', primitive, node
  }
end
# unban/unmove this primitive
# @param primitive [String]
# @param node [String] optional node name
def unban_primitive(primitive, node = '')
  retry_command {
    pcs 'resource', 'clear', primitive, node
  }
end
alias :clear_primitive :unban_primitive
alias :unmove_primitive :unban_primitive
# cleanup this primitive's state, on one node if given
# @param primitive [String]
# @param node [String,nil]
def cleanup_primitive(primitive, node = nil)
  opts = ['--cleanup', "--resource=#{primitive}"]
  opts << "--node=#{node}" if node
  retry_command {
    crm_resource opts
  }
end
# put this primitive under Pacemaker's management
# @param primitive [String]
def manage_primitive(primitive)
  retry_command {
    pcs 'resource', 'manage', primitive
  }
end
# remove this primitive from Pacemaker's management
# @param primitive [String]
def unmanage_primitive(primitive)
  retry_command {
    pcs 'resource', 'unmanage', primitive
  }
end
# set the no-quorum-policy property of the cluster
# @param primitive [String] the policy value
def no_quorum_policy(primitive)
  retry_command {
    pcs 'property', 'set', "no-quorum-policy=#{primitive}"
  }
end
# set the maintenance-mode property of the cluster
# @param primitive [TrueClass,FalseClass] the mode value
def maintenance_mode(primitive)
  retry_command {
    pcs 'property', 'set', "maintenance-mode=#{primitive}"
  }
end
# add a location constraint pinning a primitive to a node
# by patching the constraint XML directly into the CIB;
# the constraint id has the form "<primitive>-on-<node>"
# @param primitive [String] the primitive's name
# @param node [String] the node's name
# @param score [Numeric,String] score value
def constraint_location_add(primitive, node, score = 100)
  id = "#{primitive}-on-#{node}"
  xml = <<-EOF
<diff>
<diff-added>
<cib>
<configuration>
<constraints>
<rsc_location id="#{id}" node="#{node}" rsc="#{primitive}" score="#{score}" __crm_diff_marker__="added:top"/>
</constraints>
</configuration>
</cib>
</diff-added>
</diff>
EOF
  retry_command {
    cibadmin '--patch', '--sync-call', '--xml-text', xml
  }
end
# remove a location constraint added by constraint_location_add
# @param primitive [String] the primitive's name
# @param node [String] the node's name
def constraint_location_remove(primitive, node)
  # BUG FIX: constraint_location_add creates the constraint with the id
  # "<primitive>-on-<node>" (dashes), but this method used to build
  # "<primitive>_on_<node>" (underscores), so the constraint it tried
  # to remove never matched. Use the same id format as the add method.
  id = "#{primitive}-on-#{node}"
  retry_command {
    pcs 'constraint', 'location', 'remove', id
  }
end
# get the status of a primitive on the entire cluster
# or on one node if a node name is given
# @param primitive [String]
# @param node [String,nil]
# @return [String,nil] 'start', 'stop' or 'master'; nil when unknown
def primitive_status(primitive, node = nil)
  if node
    found_node = nil
    nodes.each do |k, v|
      if v.fetch("uname", {}).eql? node
        found_node = v
      end
    end
    return unless found_node
    found_node.
        fetch('primitives',{}).
        fetch(primitive, {}).
        fetch('status', nil)
  else
    # cluster-wide status: the "highest" status reported by any node
    # wins (master > start > stop)
    statuses = []
    nodes.each do |k,v|
      status = v.fetch('primitives',{}).
          fetch(primitive, {}).
          fetch('status', nil)
      statuses << status
    end
    status_values = {
      'stop' => 0,
      'start' => 1,
      'master' => 2,
    }
    # NOTE(review): this `return` exits the whole method, so a single
    # node with an unknown (nil) status makes the cluster-wide status
    # nil as well — presumably intentional, confirm before changing
    statuses.max_by do |status|
      return unless status
      status_values[status]
    end
  end
end
# generate a report of primitive statuses grouped by primitive
# and then by node; mostly for debugging
# @return [Hash,nil] nil when node data is unavailable
def primitives_status_by_node
  return unless nodes.is_a? Hash
  report = {}
  nodes.each do |node_name, node_data|
    node_primitives = node_data['primitives']
    next unless node_primitives.is_a? Hash
    node_primitives.each do |primitive_name, primitive_data|
      (report[primitive_name] ||= {})[node_name] = primitive_data['status']
    end
  end
  report
end
# form a human-readable cluster status report for debugging:
# one line per primitive with its type and global status, followed
# by the per-node status breakdown
# @return [String]
def get_cluster_debug_report
  report = "\n"
  primitives_status_by_node.each do |primitive, data|
    # prefer the primitive's full (parent) name when available
    primitive_name = primitive
    primitive_name = primitives[primitive]['name'] if primitives[primitive]['name']
    primitive_type = 'Simple'
    primitive_type = 'Cloned' if primitive_is_clone? primitive
    primitive_type = 'Multistate' if primitive_is_multistate? primitive
    primitive_status = primitive_status primitive
    report += "-> #{primitive_type} primitive '#{primitive_name}' global status: #{primitive_status}"
    report += ' (UNMANAGE)' unless primitive_is_managed? primitive
    report += "\n"
    report += ' ' if data.any?
    nodes = []
    data.keys.sort.each do |node_name|
      node_status = data.fetch node_name
      node_block = "#{node_name}: #{node_status}"
      node_block += ' (FAIL)' if primitive_has_failures? primitive, node_name
      nodes << node_block
    end
    report += nodes.join ' | '
    report += "\n"
  end
  report
end
# does this primitive have failed operations?
# cluster-wide or on one node if a node name is given
# @param primitive [String] primitive name
# @param node [String,nil]
# @return [TrueClass,FalseClass,nil] nil if the primitive does not exist
def primitive_has_failures?(primitive, node = nil)
  return unless primitive_exists? primitive
  if node
    nodes.
        fetch(node, {}).
        fetch('primitives',{}).
        fetch(primitive, {}).
        fetch('failed', nil)
  else
    nodes.each do |k,v|
      failed = v.fetch('primitives',{}).
          fetch(primitive, {}).
          fetch('failed', nil)
      return true if failed
    end
    false
  end
end
# determine if a primitive is running on the entire cluster
# or on one node if a node name is given
# @param primitive [String] primitive id
# @param node [String,nil]
# @return [TrueClass,FalseClass,nil] nil if the primitive does not
#   exist or its status is unknown
def primitive_is_running?(primitive, node = nil)
  return unless primitive_exists? primitive
  status = primitive_status primitive, node
  return status unless status
  %w(start master).include? status
end
# check if a primitive is running as a master,
# either anywhere or on the given node
# @param primitive [String] primitive id
# @param node [String,nil]
# @return [TrueClass,FalseClass,nil] nil when the primitive is not
#   multistate or its status is unknown
def primitive_has_master_running?(primitive, node = nil)
  is_multistate = primitive_is_multistate? primitive
  return is_multistate unless is_multistate
  status = primitive_status primitive, node
  return status unless status
  status == 'master'
end
# return the service status value expected by Puppet
# (:running or :stopped)
# @param primitive [String] primitive id
# @param node [String,nil]
# @return [:running,:stopped]
def get_primitive_puppet_status(primitive, node = nil)
  if primitive_is_running? primitive, node
    :running
  else
    :stopped
  end
end
# return the service enabled status value expected by Puppet:
# :true when the primitive is managed by Pacemaker
# @param primitive [String]
# @return [:true,:false]
def get_primitive_puppet_enable(primitive)
  if primitive_is_managed? primitive
    :true
  else
    :false
  end
end
# check if a primitive exists in the configuration
# @param primitive primitive id or name
# @return [TrueClass,FalseClass]
def primitive_exists?(primitive)
  primitives.key? primitive
end
# determine if a primitive is managed by Pacemaker
# (the 'is-managed' meta attribute defaults to 'true')
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass,nil] nil if the primitive does not exist
# TODO: will not work correctly if cluster is in management mode
def primitive_is_managed?(primitive)
  return unless primitive_exists? primitive
  is_managed = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('is-managed', {}).fetch('value', 'true')
  is_managed == 'true'
end
# determine if a primitive has the target-state 'Started'
# (the 'target-role' meta attribute defaults to 'Started')
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass,nil] nil if the primitive does not exist
# TODO: will not work correctly if target state is set globally to stopped
def primitive_is_started?(primitive)
  return unless primitive_exists? primitive
  target_role = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('target-role', {}).fetch('value', 'Started')
  target_role == 'Started'
end
# check if pacemaker is online and can be worked with:
# the DC version attribute must be queryable and node states present
# @return [TrueClass,FalseClass]
def is_online?
  begin
    dc_version = crm_attribute '-q', '--type', 'crm_config', '--query', '--name', 'dc-version'
    return false unless dc_version
    return false if dc_version.empty?
    return false unless cib_section_nodes_state
    true
  rescue Puppet::ExecutionFailure
    false
  end
end
# retry the given command until it runs without errors
# (RETRY_COUNT + 1 attempts, sleeping RETRY_STEP seconds between them);
# print a cluster status report and fail on timeout
# @return [String] normal command output on success
def retry_command
  (0..RETRY_COUNT).each do
    begin
      out = yield
    rescue Puppet::ExecutionFailure => e
      Puppet.debug "Command failed: #{e.message}"
      sleep RETRY_STEP
    else
      return out
    end
  end
  Puppet.debug get_cluster_debug_report if is_online?
  fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!"
end
# retry the given block until it returns a truthy value
# (RETRY_COUNT + 1 attempts, sleeping RETRY_STEP seconds between them);
# print a cluster status report and fail on timeout
def retry_block_until_true
  (0..RETRY_COUNT).each do
    return if yield
    sleep RETRY_STEP
  end
  Puppet.debug get_cluster_debug_report if is_online?
  fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!"
end
# block until pacemaker becomes online
def wait_for_online
  Puppet.debug "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for Pacemaker to become online"
  retry_block_until_true do
    is_online?
  end
  Puppet.debug 'Pacemaker is online'
end
# block until the primitive has a known (non-nil) status
# NOTE(review): the node argument is only used in the log messages;
# the status check itself is cluster-wide — confirm this is intended
# @param primitive [String] primitive name
# @param node [String,nil]
def wait_for_status(primitive, node = nil)
  msg = "Wait for known status of '#{primitive}'"
  msg += " on node '#{node}'" if node
  Puppet.debug msg
  retry_block_until_true do
    cib_reset
    primitive_status(primitive) != nil
  end
  msg = "Primitive '#{primitive}' has status '#{primitive_status primitive}'"
  msg += " on node '#{node}'" if node
  Puppet.debug msg
end
# block until the primitive starts,
# on the given node if a node name is given
# @param primitive [String] primitive id
# @param node [String,nil]
def wait_for_start(primitive, node = nil)
  message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start"
  message += " on node '#{node}'" if node
  Puppet.debug get_cluster_debug_report
  Puppet.debug message
  retry_block_until_true do
    cib_reset
    primitive_is_running? primitive, node
  end
  Puppet.debug get_cluster_debug_report
  message = "Service '#{primitive}' have started"
  message += " on node '#{node}'" if node
  Puppet.debug message
end
# block until the primitive starts as a master,
# on the given node if a node name is given
# @param primitive [String] primitive id
# @param node [String,nil]
def wait_for_master(primitive, node = nil)
  message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start master"
  message += " on node '#{node}'" if node
  Puppet.debug get_cluster_debug_report
  Puppet.debug message
  retry_block_until_true do
    cib_reset
    primitive_has_master_running? primitive, node
  end
  Puppet.debug get_cluster_debug_report
  message = "Service '#{primitive}' have started master"
  message += " on node '#{node}'" if node
  Puppet.debug message
end
# block until the primitive stops,
# on the given node if a node name is given
# @param primitive [String] primitive id
# @param node [String,nil]
def wait_for_stop(primitive, node = nil)
  message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to stop"
  message += " on node '#{node}'" if node
  Puppet.debug get_cluster_debug_report
  Puppet.debug message
  retry_block_until_true do
    cib_reset
    # only an explicit false counts as stopped; nil means still unknown
    result = primitive_is_running? primitive, node
    result.is_a? FalseClass
  end
  Puppet.debug get_cluster_debug_report
  message = "Service '#{primitive}' was stopped"
  message += " on node '#{node}'" if node
  Puppet.debug message
end
end

View File

@ -1,212 +0,0 @@
require File.join File.dirname(__FILE__), '../pacemaker_common.rb'
# Pacemaker-backed provider for the Puppet 'service' type: services
# are controlled as Pacemaker primitives via pcs/crm_* command line
# tools, using the CIB parsing helpers from Pacemaker_common.
Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Pacemaker_common do
has_feature :enableable
has_feature :refreshable
commands :uname => 'uname'
commands :pcs => 'pcs'
commands :crm_resource => 'crm_resource'
commands :crm_attribute => 'crm_attribute'
commands :cibadmin => 'cibadmin'
# hostname of the current node (memoized)
# @return [String]
def hostname
  return @hostname if @hostname
  @hostname = (uname '-n').chomp.strip
end
# original name passed from the type
# @return [String]
def title
  @resource[:name]
end
# resolve and memoize the primitive name: the title as-is if such a
# primitive exists in the CIB, otherwise with a 'p_' prefix added
# @return [String]
# @raise [Puppet::Error] if no matching primitive is found
def name
  return @name if @name
  primitive_name = title
  if primitive_exists? primitive_name
    Puppet.debug "Primitive with title '#{primitive_name}' was found in CIB"
    @name = primitive_name
    return @name
  end
  primitive_name = "p_#{primitive_name}"
  if primitive_exists? primitive_name
    Puppet.debug "Using '#{primitive_name}' name instead of '#{title}'"
    @name = primitive_name
    return @name
  end
  fail "Primitive '#{title}' was not found in CIB!"
end
# full name of the primitive: for complex (clone/master) resources
# the enclosing parent's name is used (memoized)
# @return [String]
def full_name
  return @full_name if @full_name
  if primitive_is_complex? name
    full_name = primitives[name]['name']
    Puppet.debug "Using full name '#{full_name}' for complex primitive '#{name}'"
    @full_name = full_name
  else
    @full_name = name
  end
end
# name of the basic (non-Pacemaker) service without the 'p_' prefix,
# used to disable the basic service (memoized)
# @return [String]
def basic_service_name
  return @basic_service_name if @basic_service_name
  if name.start_with? 'p_'
    basic_service_name = name.gsub /^p_/, ''
    Puppet.debug "Using '#{basic_service_name}' as the basic service name for primitive '#{name}'"
    @basic_service_name = basic_service_name
  else
    @basic_service_name = name
  end
end
# cleanup the primitive's state and
# wait for the cleanup to finish
def cleanup
  cleanup_primitive full_name, hostname
  wait_for_status name
end
# called by Puppet to determine if the service
# is running on the local node
# @return [:running,:stopped]
def status
  wait_for_online
  Puppet.debug "Call: 'status' for Pacemaker service '#{name}' on node '#{hostname}'"
  # drop memoized CIB data so the result reflects current cluster state
  cib_reset
  out = get_primitive_puppet_status name, hostname
  Puppet.debug get_cluster_debug_report
  Puppet.debug "Return: '#{out}' (#{out.class})"
  out
end
# called by Puppet to start the service:
# ensure the primitive is managed, stop the competing basic service,
# pin the primitive to this node, clean it up and start it
def start
  Puppet.debug "Call 'start' for Pacemaker service '#{name}' on node '#{hostname}'"
  enable unless primitive_is_managed? name
  disable_basic_service
  constraint_location_add full_name, hostname
  cleanup
  unban_primitive name, hostname
  start_primitive full_name
  start_primitive name
  if primitive_is_multistate? name
    Puppet.debug "Choose master start for Pacemaker service '#{name}'"
    wait_for_master name
  else
    Puppet.debug "Choose global start for Pacemaker service '#{name}'"
    wait_for_start name
  end
end
# called by Puppet to stop the service:
# complex resources are banned only on this node,
# simple ones are stopped globally
def stop
  Puppet.debug "Call 'stop' for Pacemaker service '#{name}' on node '#{hostname}'"
  cleanup
  enable unless primitive_is_managed? name
  if primitive_is_complex? name
    Puppet.debug "Choose local stop for Pacemaker service '#{name}' on node '#{hostname}'"
    ban_primitive name, hostname
    wait_for_stop name, hostname
  else
    Puppet.debug "Choose global stop for Pacemaker service '#{name}'"
    stop_primitive name
    wait_for_stop name
  end
end
# called by Puppet to restart the service;
# does nothing unless the service is running on the local node
def restart
  Puppet.debug "Call 'restart' for Pacemaker service '#{name}' on node '#{hostname}'"
  unless primitive_is_running? name, hostname
    Puppet.info "Pacemaker service '#{name}' is not running on node '#{hostname}'. Skipping restart!"
    return
  end
  begin
    stop
  rescue
    # best effort: even if the stop failed, still try to start
    nil
  ensure
    start
  end
end
# called by Puppet to enable the service
# (put the primitive under Pacemaker's management)
def enable
  Puppet.debug "Call 'enable' for Pacemaker service '#{name}' on node '#{hostname}'"
  manage_primitive name
end
# called by Puppet to disable the service
# (remove the primitive from Pacemaker's management)
def disable
  Puppet.debug "Call 'disable' for Pacemaker service '#{name}' on node '#{hostname}'"
  unmanage_primitive name
end
alias :manual_start :disable
# called by Puppet to determine if the service is enabled
# @return [:true,:false]
def enabled?
  Puppet.debug "Call 'enabled?' for Pacemaker service '#{name}' on node '#{hostname}'"
  out = get_primitive_puppet_enable name
  Puppet.debug "Return: '#{out}' (#{out.class})"
  out
end
# create an extra provider instance to deal with the basic service;
# the provider is chosen to match the current system (memoized)
# @return [Puppet::Type::Service::Provider,nil] nil when no provider
#   could be created
def extra_provider(provider_name = nil)
  return @extra_provider if @extra_provider
  begin
    param_hash = {}
    param_hash.store :name, basic_service_name
    param_hash.store :provider, provider_name if provider_name
    type = Puppet::Type::Service.new param_hash
    @extra_provider = type.provider
  rescue => e
    Puppet.warning "Could not get extra provider for Pacemaker primitive '#{name}': #{e.message}"
    @extra_provider = nil
  end
end
# disable and stop the basic (init system) service so that it does
# not compete with the Pacemaker-managed primitive
def disable_basic_service
  return unless extra_provider
  begin
    if extra_provider.enableable? and extra_provider.enabled? == :true
      Puppet.info "Disable basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'"
      extra_provider.disable
    else
      Puppet.info "Basic service '#{extra_provider.name}' is disabled as reported by '#{extra_provider.class.name}' provider"
    end
    if extra_provider.status == :running
      if not ['lsb','systemd','upstart'].include?(primitive_class name)
        Puppet.info "Stop basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'"
        extra_provider.stop
      else
        # when the primitive's class is an init-system class, Pacemaker
        # starts the resource through the init system itself, so the
        # basic service must not be stopped here.
        # BUG FIX: the message used to interpolate the provider class
        # name (extra_provider.class.name) where it means the actual
        # primitive class; report the primitive class instead.
        Puppet.info "Not stopping basic service '#{extra_provider.name}', since its Pacemaker primitive is using primitive_class '#{primitive_class name}'"
      end
    else
      Puppet.info "Basic service '#{extra_provider.name}' is stopped as reported by '#{extra_provider.class.name}' provider"
    end
  rescue => e
    Puppet.warning "Could not disable basic service for Pacemaker primitive '#{name}' using '#{extra_provider.class.name}' provider: #{e.message}"
  end
end
end

View File

@ -1,215 +0,0 @@
# == Class: openstack_extras::pacemaker::service
#
# Configures Pacemaker resource for a specified service and
# overrides its service provider to Pacemaker.
# Assumes there is a service already exists in the Puppet catalog.
# For example, the one, such as nova-api, heat-engine, neutron-agent-l3
# and so on, created by other core Puppet modules for Openstack.
#
# === Parameters
#
# [*ensure*]
# (optional) The state of the service provided by Pacemaker
# Defaults to present
#
# [*ocf_root_path*]
# (optional) The path for OCF scripts
# Defaults to /usr/lib/ocf
#
# [*primitive_class*]
# (optional) The class of Pacemaker resource (primitive)
# Defaults to ocf
#
# [*primitive_provider*]
# (optional) The provider of OCF scripts
# Defaults to pacemaker
#
# [*primitive_type*]
# (optional) The type of the primitive (OCF file name).
# Used with the other parameters as a full path to OCF script:
# primitive_class/primitive_provider/primitive_type
# resided at ocf_root_path/resource.d
# Defaults to false
#
# [*parameters*]
# (optional) The hash of parameters for a primitive
# Defaults to false
#
# [*operations*]
# (optional) The hash of operations for a primitive
# Defaults to false
#
# [*metadata*]
# (optional) The hash of metadata for a primitive
# Defaults to false
#
# [*ms_metadata*]
# (optional) The hash of ms_metadata for a primitive
# Defaults to false
#
# [*use_handler*]
# (optional) The handler (wrapper script) for OCF script
# Could be useful for debug and informational purposes.
# It sets some default values like OCF_ROOT in order to
# simplify debugging of OCF scripts
# Defaults to true
#
# [*handler_root_path*]
# (optional) The path for a handler script
# Defaults to /usr/local/bin
#
# [*ocf_script_template*]
# (optional) ERB template for OCF script for Pacemaker
# resource
# Defaults to false
#
# [*ocf_script_file*]
# (optional) OCF file for Pacemaker resource
# Defaults to false
#
# [*create_primitive*]
# (optional) Controls Pacemaker primitive creation
# Defaults to true
#
# [*clone*]
# (optional) Create a cloned primitive
# Defaults to false
#
# === Examples
#
# Will create resource and ensure Pacemaker provider for
# 'some-api-service' with the given OCF script template and
# parameters:
#
# $metadata = {
# 'resource-stickiness' => '1'
# }
# $operations = {
# 'monitor' => {
# 'interval' => '20',
# 'timeout' => '30',
# },
# 'start' => {
# 'timeout' => '60',
# },
# 'stop' => {
# 'timeout' => '60',
# },
# }
# $ms_metadata = {
# 'interleave' => true,
# }
#
# openstack_extras::pacemaker::service { 'some-api-service' :
# primitive_type => 'some-api-service',
# metadata => $metadata,
# ms_metadata => $ms_metadata,
# operations => $operations,
# clone => true,
# ocf_script_template => 'some_module/some_api_service.ocf.erb',
# }
#
define openstack_extras::pacemaker::service (
  $ensure              = 'present',
  $ocf_root_path       = '/usr/lib/ocf',
  $primitive_class     = 'ocf',
  $primitive_provider  = 'pacemaker',
  $primitive_type      = false,
  $parameters          = false,
  $operations          = false,
  $metadata            = false,
  $ms_metadata         = false,
  $use_handler         = true,
  $handler_root_path   = '/usr/local/bin',
  $ocf_script_template = false,
  $ocf_script_file     = false,
  $create_primitive    = true,
  $clone               = false,
) {
  warning('This defined resource type has been deprecated and will be removed in a future release')

  # derived names and paths for the primitive, the OCF script
  # and the optional handler wrapper
  $service_name     = $title
  $primitive_name   = "p_${service_name}"
  $ocf_script_name  = "${service_name}-ocf-file"
  $ocf_handler_name = "ocf_handler_${service_name}"
  $ocf_dir_path     = "${ocf_root_path}/resource.d"
  $ocf_script_path  = "${ocf_dir_path}/${primitive_provider}/${$primitive_type}"
  $ocf_handler_path = "${handler_root_path}/${ocf_handler_name}"

  # override the matching service resource (whether referenced by
  # title or by name) to use the pacemaker service provider
  Service<| title == $service_name |> {
    provider => 'pacemaker',
  }
  Service<| name == $service_name |> {
    provider => 'pacemaker',
  }

  if $create_primitive {
    cs_primitive { $primitive_name :
      ensure          => $ensure,
      primitive_class => $primitive_class,
      primitive_type  => $primitive_type,
      provided_by     => $primitive_provider,
      parameters      => $parameters,
      operations      => $operations,
      metadata        => $metadata,
      ms_metadata     => $ms_metadata,
    }
    # keep the clone resource in sync with the $clone flag
    $clone_name="${primitive_name}-clone"
    if $clone {
      cs_clone { $clone_name :
        ensure    => present,
        primitive => $primitive_name,
        require   => Cs_primitive[$primitive_name]
      }
    }
    else {
      cs_clone { $clone_name :
        ensure  => absent,
        require => Cs_primitive[$primitive_name]
      }
    }
  }

  # install the OCF script from an ERB template or a file source
  if $ocf_script_template or $ocf_script_file {
    file { $ocf_script_name :
      ensure => $ensure,
      path   => $ocf_script_path,
      mode   => '0755',
      owner  => 'root',
      group  => 'root',
    }
    if $ocf_script_template {
      File[$ocf_script_name] {
        content => template($ocf_script_template),
      }
    } elsif $ocf_script_file {
      File[$ocf_script_name] {
        source => "puppet:///modules/${ocf_script_file}",
      }
    }
  }

  # optional handler wrapper script for debugging OCF scripts
  if ($primitive_class == 'ocf') and ($use_handler) {
    file { $ocf_handler_name :
      ensure  => present,
      path    => $ocf_handler_path,
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
      content => template('openstack_extras/ocf_handler.erb'),
    }
  }

  # ordering: OCF script and handler files must exist before the
  # primitive is created and before the service is managed
  File<| title == $ocf_script_name |>
  -> Cs_primitive<| title == $primitive_name |>
  File<| title == $ocf_script_name |> ~> Service[$service_name]
  Cs_primitive<| title == $primitive_name |> -> Service[$service_name]
  File<| title == $ocf_handler_name |> -> Service[$service_name]
}

View File

@ -5,10 +5,6 @@
"name": "puppetlabs/apt",
"version_requirement": ">=1.8.0 <10.0.0"
},
{
"name": "puppet/corosync",
"version_requirement": ">=5.0.0 <9.0.0"
},
{
"name": "puppetlabs/stdlib",
"version_requirement": ">=5.0.0 <10.0.0"

View File

@ -1,5 +1,5 @@
---
deprecations:
- |
The ``openstack_extras::pacemaker::serices`` defined resource type has been
The ``openstack_extras::pacemaker::service`` defined resource type has been
deprecated and will be removed in a future release.

View File

@ -0,0 +1,5 @@
---
upgrade:
- |
The ``openstack_extras::pacemaker::service`` defined resource type has
been removed.

View File

@ -1,141 +0,0 @@
require 'spec_helper'
# rspec-puppet unit tests for the (deprecated) defined resource type
# openstack_extras::pacemaker::service, using 'glance-api' as a
# representative service title.
describe 'openstack_extras::pacemaker::service', :type => :define do
shared_examples 'openstack_extras::pacemaker::service' do
# Pre-compile the glance::api class so the Service resource that the
# define overrides (provider => 'pacemaker') exists in the catalog.
let :pre_condition do
[
"class { 'glance::api::authtoken': password => 'password', }",
"include glance::api",
]
end
let (:title) { 'glance-api' }
# Mirrors the defaults declared by the defined resource type itself.
let :default_params do
{
:ensure => 'present',
:ocf_root_path => '/usr/lib/ocf',
:primitive_class => 'ocf',
:primitive_provider => 'pacemaker',
:primitive_type => false,
:parameters => false,
:operations => false,
:metadata => false,
:ms_metadata => false,
:use_handler => true,
:handler_root_path => '/usr/local/bin',
:ocf_script_template => false,
:ocf_script_file => false,
:create_primitive => true,
:clone => false
}
end
# With defaults: service handed to pacemaker, primitive created, and the
# clone explicitly ensured absent (clone => false).
context 'with defaults' do
it { should contain_openstack_extras__pacemaker__service(title).with(default_params) }
it { should contain_service('glance-api').with_provider('pacemaker') }
it { should contain_cs_primitive('p_glance-api').with(
:ensure => default_params[:ensure],
:primitive_class => default_params[:primitive_class],
:primitive_type => default_params[:primitive_type],
:provided_by => default_params[:primitive_provider],
:parameters => default_params[:parameters],
:operations => default_params[:operations],
:metadata => default_params[:metadata],
:ms_metadata => default_params[:ms_metadata],
)}
it { should contain_cs_clone('p_glance-api-clone').with_ensure('absent') }
end
# ocf_script_file should produce a file resource sourced from the module,
# plus the OCF handler wrapper (use_handler defaults to true).
context 'with custom OCF file' do
let :params do
default_params.merge( :ocf_script_file => 'foo/scripts/foo.ocf' )
end
# Recompute the expected install paths the same way the define does.
let (:ocf_dir_path) { "#{params[:ocf_root_path]}/resource.d" }
let (:ocf_script_path) { "#{ocf_dir_path}/#{params[:primitive_provider]}/#{params[:primitive_type]}" }
let (:ocf_handler_name) { "ocf_handler_#{title}" }
let (:ocf_handler_path) { "#{params[:handler_root_path]}/#{ocf_handler_name}" }
it { should contain_file("#{title}-ocf-file").with(
:ensure => 'present',
:path => ocf_script_path,
:mode => '0755',
:owner => 'root',
:group => 'root',
:source => "puppet:///modules/#{params[:ocf_script_file]}"
)}
it { should contain_file("#{ocf_handler_name}").with(
:ensure => 'present',
:path => ocf_handler_path,
:owner => 'root',
:group => 'root',
:mode => '0700',
:content => /OCF_ROOT/
)}
end
# ocf_script_template renders the script via ERB; use_handler => false
# must suppress the handler wrapper file.
context 'with custom OCF path, provider, erb and w/o a wrapper' do
let(:params) do
default_params.merge( :ocf_script_template => 'openstack_extras/ocf_handler.erb',
:use_handler => false,
:primitive_provider => 'some_provider',
:ocf_root_path => '/usr/lib/some_path' )
end
let (:ocf_dir_path) { "#{params[:ocf_root_path]}/resource.d" }
let (:ocf_script_path) {
"#{ocf_dir_path}/#{params[:primitive_provider]}/#{params[:primitive_type]}"
}
it { should contain_file("#{title}-ocf-file").with(
:path => ocf_script_path,
:mode => '0755',
:owner => 'root',
:group => 'root',
:content => /monitor/
)}
it { should_not contain_file('ocf_handler_glance_api') }
it { should contain_cs_primitive('p_glance-api').with(
:ensure => params[:ensure],
:primitive_class => params[:primitive_class],
:primitive_type => params[:primitive_type],
:provided_by => params[:primitive_provider],
:parameters => params[:parameters],
:operations => params[:operations],
:metadata => params[:metadata],
:ms_metadata => params[:ms_metadata],
)}
end
# clone => true should flip the cs_clone resource to ensure => present.
context 'with cloned resources' do
let (:params) do
default_params.merge( :clone => true )
end
it { should contain_cs_clone('p_glance-api-clone').with(
:ensure => 'present',
:primitive => 'p_glance-api',
)}
end
end
on_supported_os({
:supported_os => OSDefaults.get_supported_os
}).each do |os,facts|
context "on #{os}" do
let (:facts) do
facts.merge!(OSDefaults.get_facts())
end
# NOTE(review): examples are deliberately restricted to Debian here;
# presumably the pacemaker/corosync deps were only validated there.
if facts[:os]['name'] == 'Debian'
it_behaves_like 'openstack_extras::pacemaker::service'
end
end
end
end

View File

@ -1,483 +0,0 @@
<cib epoch="622" num_updates="11" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.7" have-quorum="1" dc-uuid="node-1" cib-last-written="Wed Nov 5 10:54:20 2014" update-origin="node-2" update-client="cibadmin">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-42f2063"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="classic openais (with plugin)"/>
<nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="3"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
<nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1415124915"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node-1" uname="node-1">
<instance_attributes id="nodes-node-1">
<nvpair id="nodes-node-1-gtid" name="gtid" value="b65eb4b3-644a-11e4-afd3-9335a5b6ec3f:80680"/>
</instance_attributes>
</node>
<node id="node-2" uname="node-2">
<instance_attributes id="nodes-node-2">
<nvpair id="nodes-node-2-gtid" name="gtid" value="b65eb4b3-644a-11e4-afd3-9335a5b6ec3f:80645"/>
</instance_attributes>
</node>
<node id="node-3" uname="node-3"/>
</nodes>
<resources>
<primitive class="ocf" id="vip__public" provider="pacemaker" type="ns_IPaddr2">
<instance_attributes id="vip__public-instance_attributes">
<nvpair id="vip__public-instance_attributes-nic" name="nic" value="br-ex"/>
<nvpair id="vip__public-instance_attributes-iflabel" name="iflabel" value="ka"/>
<nvpair id="vip__public-instance_attributes-iptables_comment" name="iptables_comment" value="masquerade-for-public-net"/>
<nvpair id="vip__public-instance_attributes-ns_veth" name="ns_veth" value="hapr-p"/>
<nvpair id="vip__public-instance_attributes-base_veth" name="base_veth" value="br-ex-hapr"/>
<nvpair id="vip__public-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
<nvpair id="vip__public-instance_attributes-gateway" name="gateway" value="link"/>
<nvpair id="vip__public-instance_attributes-iptables_stop_rules" name="iptables_stop_rules" value="iptables -t mangle -D PREROUTING -i br-ex-hapr -j MARK --set-mark 0x2a ; iptables -t nat -D POSTROUTING -m mark --mark 0x2a ! -o br-ex -j MASQUERADE"/>
<nvpair id="vip__public-instance_attributes-ns" name="ns" value="haproxy"/>
<nvpair id="vip__public-instance_attributes-iptables_start_rules" name="iptables_start_rules" value="iptables -t mangle -I PREROUTING -i br-ex-hapr -j MARK --set-mark 0x2a ; iptables -t nat -I POSTROUTING -m mark --mark 0x2a ! -o br-ex -j MASQUERADE"/>
<nvpair id="vip__public-instance_attributes-ip" name="ip" value="10.108.1.2"/>
<nvpair id="vip__public-instance_attributes-gateway_metric" name="gateway_metric" value="10"/>
</instance_attributes>
<meta_attributes id="vip__public-meta_attributes">
<nvpair id="vip__public-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="vip__public-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
<nvpair id="vip__public-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="vip__public-monitor-3" interval="3" name="monitor" timeout="30"/>
<op id="vip__public-start-0" interval="0" name="start" timeout="30"/>
<op id="vip__public-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
<clone id="clone_ping_vip__public">
<primitive class="ocf" id="ping_vip__public" provider="pacemaker" type="ping">
<instance_attributes id="ping_vip__public-instance_attributes">
<nvpair id="ping_vip__public-instance_attributes-dampen" name="dampen" value="30s"/>
<nvpair id="ping_vip__public-instance_attributes-timeout" name="timeout" value="3s"/>
<nvpair id="ping_vip__public-instance_attributes-multiplier" name="multiplier" value="1000"/>
<nvpair id="ping_vip__public-instance_attributes-host_list" name="host_list" value="10.108.1.1"/>
</instance_attributes>
<operations>
<op id="ping_vip__public-monitor-20" interval="20" name="monitor" timeout="30"/>
</operations>
</primitive>
</clone>
<primitive class="ocf" id="vip__management" provider="pacemaker" type="ns_IPaddr2">
<instance_attributes id="vip__management-instance_attributes">
<nvpair id="vip__management-instance_attributes-nic" name="nic" value="br-mgmt"/>
<nvpair id="vip__management-instance_attributes-iflabel" name="iflabel" value="ka"/>
<nvpair id="vip__management-instance_attributes-iptables_comment" name="iptables_comment" value="masquerade-for-management-net"/>
<nvpair id="vip__management-instance_attributes-ns_veth" name="ns_veth" value="hapr-m"/>
<nvpair id="vip__management-instance_attributes-base_veth" name="base_veth" value="br-mgmt-hapr"/>
<nvpair id="vip__management-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
<nvpair id="vip__management-instance_attributes-gateway" name="gateway" value="link"/>
<nvpair id="vip__management-instance_attributes-iptables_stop_rules" name="iptables_stop_rules" value="iptables -t mangle -D PREROUTING -i br-mgmt-hapr -j MARK --set-mark 0x2b ; iptables -t nat -D POSTROUTING -m mark --mark 0x2b ! -o br-mgmt -j MASQUERADE"/>
<nvpair id="vip__management-instance_attributes-ns" name="ns" value="haproxy"/>
<nvpair id="vip__management-instance_attributes-iptables_start_rules" name="iptables_start_rules" value="iptables -t mangle -I PREROUTING -i br-mgmt-hapr -j MARK --set-mark 0x2b ; iptables -t nat -I POSTROUTING -m mark --mark 0x2b ! -o br-mgmt -j MASQUERADE"/>
<nvpair id="vip__management-instance_attributes-ip" name="ip" value="10.108.2.2"/>
<nvpair id="vip__management-instance_attributes-gateway_metric" name="gateway_metric" value="20"/>
</instance_attributes>
<meta_attributes id="vip__management-meta_attributes">
<nvpair id="vip__management-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="vip__management-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
<nvpair id="vip__management-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="vip__management-monitor-3" interval="3" name="monitor" timeout="30"/>
<op id="vip__management-start-0" interval="0" name="start" timeout="30"/>
<op id="vip__management-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
<master id="master_p_rabbitmq-server">
<meta_attributes id="master_p_rabbitmq-server-meta_attributes">
<nvpair id="master_p_rabbitmq-server-meta_attributes-notify" name="notify" value="true"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-master-node-max" name="master-node-max" value="1"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-ordered" name="ordered" value="false"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-target-role" name="target-role" value="Master"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-master-max" name="master-max" value="1"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_rabbitmq-server" provider="pacemaker" type="rabbitmq-server">
<instance_attributes id="p_rabbitmq-server-instance_attributes">
<nvpair id="p_rabbitmq-server-instance_attributes-node_port" name="node_port" value="5673"/>
</instance_attributes>
<meta_attributes id="p_rabbitmq-server-meta_attributes">
<nvpair id="p_rabbitmq-server-meta_attributes-migration-threshold" name="migration-threshold" value="INFINITY"/>
<nvpair id="p_rabbitmq-server-meta_attributes-failure-timeout" name="failure-timeout" value="60s"/>
</meta_attributes>
<operations>
<op id="p_rabbitmq-server-promote-0" interval="0" name="promote" timeout="120"/>
<op id="p_rabbitmq-server-monitor-30" interval="30" name="monitor" timeout="60"/>
<op id="p_rabbitmq-server-start-0" interval="0" name="start" timeout="120"/>
<op id="p_rabbitmq-server-monitor-27" interval="27" name="monitor" role="Master" timeout="60"/>
<op id="p_rabbitmq-server-stop-0" interval="0" name="stop" timeout="60"/>
<op id="p_rabbitmq-server-notify-0" interval="0" name="notify" timeout="60"/>
<op id="p_rabbitmq-server-demote-0" interval="0" name="demote" timeout="60"/>
</operations>
</primitive>
</master>
<clone id="clone_p_neutron-plugin-openvswitch-agent">
<meta_attributes id="clone_p_neutron-plugin-openvswitch-agent-meta_attributes">
<nvpair id="clone_p_neutron-plugin-openvswitch-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-plugin-openvswitch-agent" provider="pacemaker" type="neutron-agent-ovs">
<instance_attributes id="p_neutron-plugin-openvswitch-agent-instance_attributes">
<nvpair id="p_neutron-plugin-openvswitch-agent-instance_attributes-plugin_config" name="plugin_config" value="/etc/neutron/plugin.ini"/>
</instance_attributes>
<operations>
<op id="p_neutron-plugin-openvswitch-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-plugin-openvswitch-agent-start-0" interval="0" name="start" timeout="80"/>
<op id="p_neutron-plugin-openvswitch-agent-stop-0" interval="0" name="stop" timeout="80"/>
</operations>
</primitive>
</clone>
<primitive class="ocf" id="p_neutron-dhcp-agent" provider="pacemaker" type="neutron-agent-dhcp">
<instance_attributes id="p_neutron-dhcp-agent-instance_attributes">
<nvpair id="p_neutron-dhcp-agent-instance_attributes-os_auth_url" name="os_auth_url" value="http://10.108.2.2:5000/v2.0"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-amqp_server_port" name="amqp_server_port" value="5673"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-multiple_agents" name="multiple_agents" value="false"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-password" name="password" value="7BqMhboS"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-tenant" name="tenant" value="services"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-username" name="username" value="undef"/>
</instance_attributes>
<meta_attributes id="p_neutron-dhcp-agent-meta_attributes">
<nvpair id="p_neutron-dhcp-agent-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_neutron-dhcp-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-dhcp-agent-start-0" interval="0" name="start" timeout="60"/>
<op id="p_neutron-dhcp-agent-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
<primitive id="p_heat-engine" class="ocf" provider="pacemaker" type="heat-engine">
<meta_attributes id="p_heat-engine-meta_attributes">
<nvpair id="p_heat-engine-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_heat-engine-monitor-20" interval="20" name="monitor" timeout="30"/>
<op id="p_heat-engine-start-0" interval="0" name="start" timeout="60"/>
<op id="p_heat-engine-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
<clone id="clone_p_neutron-metadata-agent">
<meta_attributes id="clone_p_neutron-metadata-agent-meta_attributes">
<nvpair id="clone_p_neutron-metadata-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-metadata-agent" provider="pacemaker" type="neutron-agent-metadata">
<operations>
<op id="p_neutron-metadata-agent-monitor-60" interval="60" name="monitor" timeout="10"/>
<op id="p_neutron-metadata-agent-start-0" interval="0" name="start" timeout="30"/>
<op id="p_neutron-metadata-agent-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
</clone>
<clone id="clone_p_neutron-l3-agent">
<meta_attributes id="clone_p_neutron-l3-agent-meta_attributes">
<nvpair id="clone_p_neutron-l3-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-l3-agent" provider="pacemaker" type="neutron-agent-l3">
<instance_attributes id="p_neutron-l3-agent-instance_attributes">
<nvpair id="p_neutron-l3-agent-instance_attributes-os_auth_url" name="os_auth_url" value="http://10.108.2.2:5000/v2.0"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-multiple_agents" name="multiple_agents" value="true"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-syslog" name="syslog" value="true"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-password" name="password" value="7BqMhboS"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-plugin_config" name="plugin_config" value="/etc/neutron/l3_agent.ini"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-debug" name="debug" value="true"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-tenant" name="tenant" value="services"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-username" name="username" value="undef"/>
</instance_attributes>
<operations>
<op id="p_neutron-l3-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-l3-agent-start-0" interval="0" name="start" timeout="60"/>
<op id="p_neutron-l3-agent-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
</clone>
<clone id="clone_p_mysql">
<primitive class="ocf" id="p_mysql" provider="pacemaker" type="mysql-wss">
<instance_attributes id="p_mysql-instance_attributes">
<nvpair id="p_mysql-instance_attributes-socket" name="socket" value="/var/run/mysqld/mysqld.sock"/>
<nvpair id="p_mysql-instance_attributes-test_passwd" name="test_passwd" value="password"/>
<nvpair id="p_mysql-instance_attributes-test_user" name="test_user" value="wsrep_sst"/>
</instance_attributes>
<operations>
<op id="p_mysql-monitor-120" interval="120" name="monitor" timeout="115"/>
<op id="p_mysql-start-0" interval="0" name="start" timeout="475"/>
<op id="p_mysql-stop-0" interval="0" name="stop" timeout="175"/>
</operations>
</primitive>
<meta_attributes id="clone_p_mysql-meta_attributes">
<nvpair id="clone_p_mysql-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</clone>
<primitive class="ocf" id="p_ceilometer-alarm-evaluator" provider="pacemaker" type="ceilometer-alarm-evaluator">
<instance_attributes id="p_ceilometer-alarm-evaluator-instance_attributes">
<nvpair id="p_ceilometer-alarm-evaluator-instance_attributes-user" name="user" value="ceilometer"/>
</instance_attributes>
<meta_attributes id="p_ceilometer-alarm-evaluator-meta_attributes">
<nvpair id="p_ceilometer-alarm-evaluator-meta_attributes-target-role" name="target-role" value="stopped"/>
</meta_attributes>
<operations>
<op id="p_ceilometer-alarm-evaluator-monitor-20" interval="20" name="monitor" timeout="30"/>
<op id="p_ceilometer-alarm-evaluator-start-0" interval="0" name="start" timeout="360"/>
<op id="p_ceilometer-alarm-evaluator-stop-0" interval="0" name="stop" timeout="360"/>
</operations>
</primitive>
<primitive class="ocf" id="p_ceilometer-agent-central" provider="pacemaker" type="ceilometer-agent-central">
<instance_attributes id="p_ceilometer-agent-central-instance_attributes">
<nvpair id="p_ceilometer-agent-central-instance_attributes-user" name="user" value="ceilometer"/>
</instance_attributes>
<meta_attributes id="p_ceilometer-agent-central-meta_attributes">
<nvpair id="p_ceilometer-agent-central-meta_attributes-target-role" name="target-role" value="stopped"/>
<nvpair id="p_ceilometer-agent-central-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_ceilometer-agent-central-monitor-20" interval="20" name="monitor" timeout="30"/>
<op id="p_ceilometer-agent-central-start-0" interval="0" name="start" timeout="360"/>
<op id="p_ceilometer-agent-central-stop-0" interval="0" name="stop" timeout="360"/>
</operations>
</primitive>
<clone id="clone_p_haproxy">
<meta_attributes id="clone_p_haproxy-meta_attributes">
<nvpair id="clone_p_haproxy-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_haproxy" provider="pacemaker" type="ns_haproxy">
<instance_attributes id="p_haproxy-instance_attributes">
<nvpair id="p_haproxy-instance_attributes-ns" name="ns" value="haproxy"/>
</instance_attributes>
<meta_attributes id="p_haproxy-meta_attributes">
<nvpair id="p_haproxy-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="p_haproxy-meta_attributes-failure-timeout" name="failure-timeout" value="120"/>
</meta_attributes>
<operations>
<op id="p_haproxy-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_haproxy-start-0" interval="0" name="start" timeout="30"/>
<op id="p_haproxy-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
</clone>
</resources>
<constraints>
<rsc_location id="loc_ping_vip__public" rsc="vip__public">
<rule boolean-op="or" id="loc_ping_vip__public-rule" score="-INFINITY">
<expression attribute="pingd" id="loc_ping_vip__public-expression" operation="not_defined"/>
<expression attribute="pingd" id="loc_ping_vip__public-expression-0" operation="lte" value="0"/>
</rule>
</rsc_location>
<rsc_location id="clone_ping_vip__public_on_node-1" node="node-1" rsc="clone_ping_vip__public" score="100"/>
<rsc_location id="vip__management_on_node-1" node="node-1" rsc="vip__management" score="100"/>
<rsc_colocation id="p_neutron-dhcp-agent-with-clone_p_neutron-plugin-openvswitch-agent" rsc="p_neutron-dhcp-agent" score="INFINITY" with-rsc="clone_p_neutron-plugin-openvswitch-agent"/>
<rsc_order first="clone_p_neutron-plugin-openvswitch-agent" id="p_neutron-dhcp-agent-after-clone_p_neutron-plugin-openvswitch-agent" score="INFINITY" then="p_neutron-dhcp-agent"/>
<rsc_location id="master_p_rabbitmq-server_on_node-1" node="node-1" rsc="master_p_rabbitmq-server" score="100"/>
<rsc_colocation id="vip_management-with-haproxy" rsc="vip__management" score="INFINITY" with-rsc="clone_p_haproxy"/>
<rsc_colocation id="vip_public-with-haproxy" rsc="vip__public" score="INFINITY" with-rsc="clone_p_haproxy"/>
<rsc_location id="p_neutron-dhcp-agent_on_node-1" node="node-1" rsc="p_neutron-dhcp-agent" score="100"/>
<rsc_location id="clone_p_neutron-l3-agent_on_node-1" node="node-1" rsc="clone_p_neutron-l3-agent" score="100"/>
<rsc_location id="clone_p_neutron-metadata-agent_on_node-1" node="node-1" rsc="clone_p_neutron-metadata-agent" score="100"/>
<rsc_location id="vip__public_on_node-1" node="node-1" rsc="vip__public" score="100"/>
<rsc_location id="clone_p_mysql_on_node-1" node="node-1" rsc="clone_p_mysql" score="100"/>
<rsc_location id="clone_p_haproxy_on_node-1" node="node-1" rsc="clone_p_haproxy" score="100"/>
<rsc_location id="clone_p_neutron-plugin-openvswitch-agent_on_node-1" node="node-1" rsc="clone_p_neutron-plugin-openvswitch-agent" score="100"/>
<rsc_location id="p_heat-engine_on_node-1" node="node-1" rsc="p_heat-engine" score="100"/>
<rsc_location id="vip__public_on_node-3" node="node-3" rsc="vip__public" score="100"/>
<rsc_location id="vip__public_on_node-2" node="node-2" rsc="vip__public" score="100"/>
<rsc_location id="clone_ping_vip__public_on_node-3" node="node-3" rsc="clone_ping_vip__public" score="100"/>
<rsc_location id="clone_ping_vip__public_on_node-2" node="node-2" rsc="clone_ping_vip__public" score="100"/>
<rsc_location id="vip__management_on_node-3" node="node-3" rsc="vip__management" score="100"/>
<rsc_location id="vip__management_on_node-2" node="node-2" rsc="vip__management" score="100"/>
<rsc_location id="clone_p_mysql_on_node-2" node="node-2" rsc="clone_p_mysql" score="100"/>
<rsc_location id="master_p_rabbitmq-server_on_node-2" node="node-2" rsc="master_p_rabbitmq-server" score="100"/>
<rsc_location id="clone_p_haproxy_on_node-2" node="node-2" rsc="clone_p_haproxy" score="100"/>
</constraints>
</configuration>
<status>
<node_state id="node-1" uname="node-1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="do_state_transition">
<transient_attributes id="node-1">
<instance_attributes id="status-node-1">
<nvpair id="status-node-1-probe_complete" name="probe_complete" value="true"/>
<nvpair id="status-node-1-pingd" name="pingd" value="1000"/>
<nvpair id="status-node-1-master-p_rabbitmq-server" name="master-p_rabbitmq-server" value="1000"/>
<nvpair id="status-node-1-fail-count-p_neutron-dhcp-agent" name="fail-count-p_neutron-dhcp-agent" value="5"/>
<nvpair id="status-node-1-last-failure-p_neutron-dhcp-agent" name="last-failure-p_neutron-dhcp-agent" value="1415184394"/>
<nvpair id="status-node-1-fail-count-p_heat-engine" name="fail-count-p_heat-engine" value="5"/>
<nvpair id="status-node-1-last-failure-p_heat-engine" name="last-failure-p_heat-engine" value="1415184394"/>
<nvpair id="status-node-1-rabbit-master" name="rabbit-master" value="true"/>
<nvpair id="status-node-1-rabbit-start-time" name="rabbit-start-time" value="1415184397"/>
</instance_attributes>
</transient_attributes>
<lrm id="node-1">
<lrm_resources>
<lrm_resource id="p_neutron-plugin-openvswitch-agent" type="neutron-agent-ovs" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_failure_0" operation_key="p_neutron-plugin-openvswitch-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="14:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;14:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="269" rc-code="0" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="17" queue-time="0" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_monitor_20000" operation_key="p_neutron-plugin-openvswitch-agent_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="56:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;56:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="272" rc-code="0" op-status="0" interval="20000" last-rc-change="1415124890" exec-time="18" queue-time="0" op-digest="cb8604dd2f221b4ea8d2b0670feb8819"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-alarm-evaluator" type="ceilometer-alarm-evaluator" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_ceilometer-alarm-evaluator_last_0" operation_key="p_ceilometer-alarm-evaluator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="8:32:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;8:32:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="102" rc-code="7" op-status="0" interval="0" last-run="1415123313" last-rc-change="1415123313" exec-time="9" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="p_heat-engine" type="heat-engine" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_heat-engine_last_0" operation_key="p_heat-engine_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="62:648:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;62:648:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="456" rc-code="0" op-status="0" interval="0" last-run="1415184593" last-rc-change="1415184593" exec-time="8162" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="p_heat-engine_last_failure_0" operation_key="p_heat-engine_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="60:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="2:1;60:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="387" rc-code="1" op-status="2" interval="0" last-run="1415184334" last-rc-change="1415184334" exec-time="60001" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="p_heat-engine_monitor_20000" operation_key="p_heat-engine_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="64:649:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;64:649:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="459" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184601" exec-time="20" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-agent-central" type="ceilometer-agent-central" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_ceilometer-agent-central_last_0" operation_key="p_ceilometer-agent-central_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="8:33:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;8:33:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="107" rc-code="7" op-status="0" interval="0" last-run="1415123321" last-rc-change="1415123321" exec-time="9" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="vip__management" type="ns_IPaddr2" class="ocf" provider="pacemaker">
<lrm_rsc_op id="vip__management_last_0" operation_key="vip__management_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="15:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;15:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="146" rc-code="0" op-status="0" interval="0" last-run="1415123419" last-rc-change="1415123419" exec-time="1225" queue-time="0" op-digest="88dd3ef5610eee85fdb12cf6731c0720"/>
<lrm_rsc_op id="vip__management_monitor_3000" operation_key="vip__management_monitor_3000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;16:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="158" rc-code="0" op-status="0" interval="3000" last-rc-change="1415123420" exec-time="2077" queue-time="0" op-digest="7e62e225befb35aa4e6bf834801d9954"/>
</lrm_resource>
<lrm_resource id="ping_vip__public" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_vip__public_last_0" operation_key="ping_vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="4:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;4:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="25" rc-code="0" op-status="0" interval="0" last-run="1415123088" last-rc-change="1415123088" exec-time="2009" queue-time="0" op-digest="606e53800773938f91e1c261bb4c725c"/>
<lrm_rsc_op id="ping_vip__public_monitor_20000" operation_key="ping_vip__public_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="5:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;5:11:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="28" rc-code="0" op-status="0" interval="20000" last-rc-change="1415123090" exec-time="2009" queue-time="0" op-digest="873bd0812a107843932917e76b49de81"/>
</lrm_resource>
<lrm_resource id="p_neutron-l3-agent" type="neutron-agent-l3" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-l3-agent_last_0" operation_key="p_neutron-l3-agent_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="72:101:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;72:101:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="311" rc-code="0" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="1303" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
<lrm_rsc_op id="p_neutron-l3-agent_last_failure_0" operation_key="p_neutron-l3-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="14:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;14:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="292" rc-code="0" op-status="0" interval="0" last-run="1415124901" last-rc-change="1415124901" exec-time="25" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
<lrm_rsc_op id="p_neutron-l3-agent_monitor_20000" operation_key="p_neutron-l3-agent_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="72:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;72:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="317" rc-code="0" op-status="0" interval="20000" last-rc-change="1415124913" exec-time="49" queue-time="0" op-digest="c5692788618f2567fcca9a9865023acd"/>
</lrm_resource>
<lrm_resource id="p_neutron-metadata-agent" type="neutron-agent-metadata" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-metadata-agent_last_failure_0" operation_key="p_neutron-metadata-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="13:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;13:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="309" rc-code="0" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="18" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="p_neutron-metadata-agent_monitor_60000" operation_key="p_neutron-metadata-agent_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="65:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;65:102:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="315" rc-code="0" op-status="0" interval="60000" last-rc-change="1415124913" exec-time="42" queue-time="0" op-digest="19240b0a103493c96459e91c1a816b50"/>
</lrm_resource>
<lrm_resource id="p_mysql" type="mysql-wss" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_mysql_last_0" operation_key="p_mysql_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="76:617:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;76:617:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="337" rc-code="0" op-status="0" interval="0" last-run="1415184199" last-rc-change="1415184199" exec-time="12137" queue-time="0" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
<lrm_rsc_op id="p_mysql_last_failure_0" operation_key="p_mysql_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="14:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;14:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="221" rc-code="0" op-status="0" interval="0" last-run="1415123879" last-rc-change="1415123879" exec-time="70" queue-time="0" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
<lrm_rsc_op id="p_mysql_monitor_120000" operation_key="p_mysql_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="78:618:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;78:618:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="340" rc-code="0" op-status="0" interval="120000" last-rc-change="1415184211" exec-time="57" queue-time="0" op-digest="a8b28e947b0d74f7953d47c509353648"/>
</lrm_resource>
<lrm_resource id="p_neutron-dhcp-agent" type="neutron-agent-dhcp" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-dhcp-agent_last_0" operation_key="p_neutron-dhcp-agent_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="59:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;59:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="414" rc-code="0" op-status="0" interval="0" last-run="1415184400" last-rc-change="1415184400" exec-time="4640" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
<lrm_rsc_op id="p_neutron-dhcp-agent_last_failure_0" operation_key="p_neutron-dhcp-agent_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="58:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="2:1;58:626:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="385" rc-code="1" op-status="2" interval="0" last-run="1415184334" last-rc-change="1415184334" exec-time="60002" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
<lrm_rsc_op id="p_neutron-dhcp-agent_monitor_20000" operation_key="p_neutron-dhcp-agent_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="60:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;60:630:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="424" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184405" exec-time="35" queue-time="0" op-digest="1544ce43fe39b90e744ce887e1e691d6"/>
</lrm_resource>
<lrm_resource id="vip__public" type="ns_IPaddr2" class="ocf" provider="pacemaker">
<lrm_rsc_op id="vip__public_last_0" operation_key="vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="7:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;7:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="144" rc-code="0" op-status="0" interval="0" last-run="1415123419" last-rc-change="1415123419" exec-time="1196" queue-time="0" op-digest="c59062bf796f251f178e8646ea654950"/>
<lrm_rsc_op id="vip__public_monitor_3000" operation_key="vip__public_monitor_3000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="8:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;8:42:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="155" rc-code="0" op-status="0" interval="3000" last-rc-change="1415123420" exec-time="2085" queue-time="0" op-digest="11d7dbf9a56a20ff8834890f633710aa"/>
</lrm_resource>
<lrm_resource id="p_haproxy" type="ns_haproxy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_haproxy_last_0" operation_key="p_haproxy_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="84:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;84:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="445" rc-code="0" op-status="0" interval="0" last-run="1415184446" last-rc-change="1415184446" exec-time="144" queue-time="0" op-digest="2a23892614b6b1d0f70ca66b073b5bc0"/>
<lrm_rsc_op id="p_haproxy_monitor_20000" operation_key="p_haproxy_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="10:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;10:636:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="448" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184446" exec-time="53" queue-time="0" op-digest="3513c6578b2be63b3c075d885eb6ac8d"/>
</lrm_resource>
<lrm_resource id="p_rabbitmq-server" type="rabbitmq-server" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_rabbitmq-server_last_0" operation_key="p_rabbitmq-server_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="28:629:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;28:629:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="404" rc-code="0" op-status="0" interval="0" last-run="1415184394" last-rc-change="1415184394" exec-time="3654" queue-time="0" op-digest="6f1cd990340e90d62b7efecdec17ba24"/>
<lrm_rsc_op id="p_rabbitmq-server_monitor_27000" operation_key="p_rabbitmq-server_monitor_27000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="28:630:8:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:8;28:630:8:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="412" rc-code="8" op-status="0" interval="27000" last-rc-change="1415184401" exec-time="1103" queue-time="0" op-digest="f81fdd633d61ff45f1dfcce00be7c955"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node-2" uname="node-2" crmd="online" crm-debug-origin="do_update_resource" in_ccm="true" join="member" expected="member">
<transient_attributes id="node-2">
<instance_attributes id="status-node-2">
<nvpair id="status-node-2-probe_complete" name="probe_complete" value="true"/>
<nvpair id="status-node-2-master-p_rabbitmq-server" name="master-p_rabbitmq-server" value="1"/>
<nvpair id="status-node-2-pingd" name="pingd" value="1000"/>
<nvpair id="status-node-2-rabbit-start-time" name="rabbit-start-time" value="1415184812"/>
</instance_attributes>
</transient_attributes>
<lrm id="node-2">
<lrm_resources>
<lrm_resource id="p_neutron-plugin-openvswitch-agent" type="neutron-agent-ovs" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_0" operation_key="p_neutron-plugin-openvswitch-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;16:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="141" rc-code="7" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="29" queue-time="0" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-alarm-evaluator" type="ceilometer-alarm-evaluator" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_ceilometer-alarm-evaluator_last_failure_0" operation_key="p_ceilometer-alarm-evaluator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="26:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;26:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="51" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="1" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="p_heat-engine" type="heat-engine" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_heat-engine_last_0" operation_key="p_heat-engine_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;16:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="174" rc-code="7" op-status="0" interval="0" last-run="1415124915" last-rc-change="1415124915" exec-time="11" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-agent-central" type="ceilometer-agent-central" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_ceilometer-agent-central_last_failure_0" operation_key="p_ceilometer-agent-central_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="27:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;27:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="55" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="0" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="vip__management" type="ns_IPaddr2" class="ocf" provider="pacemaker">
<lrm_rsc_op id="vip__management_last_0" operation_key="vip__management_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="9" queue-time="0" op-digest="88dd3ef5610eee85fdb12cf6731c0720"/>
</lrm_resource>
<lrm_resource id="ping_vip__public" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_vip__public_last_0" operation_key="ping_vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="24:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;24:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="177" rc-code="0" op-status="0" interval="0" last-run="1415184705" last-rc-change="1415184705" exec-time="2012" queue-time="0" op-digest="606e53800773938f91e1c261bb4c725c"/>
<lrm_rsc_op id="ping_vip__public_monitor_20000" operation_key="ping_vip__public_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="25:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;25:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="180" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184707" exec-time="2010" queue-time="0" op-digest="873bd0812a107843932917e76b49de81"/>
</lrm_resource>
<lrm_resource id="p_neutron-l3-agent" type="neutron-agent-l3" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-l3-agent_last_0" operation_key="p_neutron-l3-agent_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="73:99:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;73:99:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="159" rc-code="0" op-status="0" interval="0" last-run="1415124901" last-rc-change="1415124901" exec-time="4289" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
<lrm_rsc_op id="p_neutron-l3-agent_last_failure_0" operation_key="p_neutron-l3-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;16:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="156" rc-code="0" op-status="0" interval="0" last-run="1415124901" last-rc-change="1415124901" exec-time="63" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
</lrm_resource>
<lrm_resource id="p_neutron-metadata-agent" type="neutron-agent-metadata" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-metadata-agent_last_0" operation_key="p_neutron-metadata-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="15:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;15:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="167" rc-code="7" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="30" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_mysql" type="mysql-wss" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_mysql_last_0" operation_key="p_mysql_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="85:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;85:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="183" rc-code="0" op-status="0" interval="0" last-run="1415184750" last-rc-change="1415184750" exec-time="34103" queue-time="1" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
<lrm_rsc_op id="p_mysql_monitor_120000" operation_key="p_mysql_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="86:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;86:659:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="186" rc-code="0" op-status="0" interval="120000" last-rc-change="1415184784" exec-time="77" queue-time="0" op-digest="a8b28e947b0d74f7953d47c509353648"/>
</lrm_resource>
<lrm_resource id="p_neutron-dhcp-agent" type="neutron-agent-dhcp" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-dhcp-agent_last_0" operation_key="p_neutron-dhcp-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="15:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;15:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="148" rc-code="7" op-status="0" interval="0" last-run="1415124893" last-rc-change="1415124893" exec-time="20" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
</lrm_resource>
<lrm_resource id="vip__public" type="ns_IPaddr2" class="ocf" provider="pacemaker">
<lrm_rsc_op id="vip__public_last_0" operation_key="vip__public_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="16:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;16:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="14" queue-time="0" op-digest="c59062bf796f251f178e8646ea654950"/>
</lrm_resource>
<lrm_resource id="p_haproxy" type="ns_haproxy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_haproxy_last_0" operation_key="p_haproxy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.7" transition-key="97:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;97:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="198" rc-code="0" op-status="0" interval="0" last-run="1415184860" last-rc-change="1415184860" exec-time="236" queue-time="0" op-digest="2a23892614b6b1d0f70ca66b073b5bc0"/>
<lrm_rsc_op id="p_haproxy_monitor_20000" operation_key="p_haproxy_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.7" transition-key="98:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;98:664:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="201" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184861" exec-time="71" queue-time="0" op-digest="3513c6578b2be63b3c075d885eb6ac8d"/>
</lrm_resource>
<lrm_resource id="p_rabbitmq-server" type="rabbitmq-server" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_rabbitmq-server_last_0" operation_key="p_rabbitmq-server_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="38:661:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;38:661:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="189" rc-code="0" op-status="0" interval="0" last-run="1415184803" last-rc-change="1415184803" exec-time="5579" queue-time="0" op-digest="6f1cd990340e90d62b7efecdec17ba24"/>
<lrm_rsc_op id="p_rabbitmq-server_monitor_30000" operation_key="p_rabbitmq-server_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="40:662:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;40:662:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="195" rc-code="0" op-status="0" interval="30000" last-rc-change="1415184812" exec-time="459" queue-time="0" op-digest="f81fdd633d61ff45f1dfcce00be7c955"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node-3" uname="node-3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
<transient_attributes id="node-3">
<instance_attributes id="status-node-3">
<nvpair id="status-node-3-probe_complete" name="probe_complete" value="true"/>
<nvpair id="status-node-3-master-p_rabbitmq-server" name="master-p_rabbitmq-server" value="0"/>
<nvpair id="status-node-3-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
<lrm id="node-3">
<lrm_resources>
<lrm_resource id="p_neutron-plugin-openvswitch-agent" type="neutron-agent-ovs" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_0" operation_key="p_neutron-plugin-openvswitch-agent_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="57:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;57:95:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="144" rc-code="0" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="34" queue-time="0" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
<lrm_rsc_op id="p_neutron-plugin-openvswitch-agent_last_failure_0" operation_key="p_neutron-plugin-openvswitch-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;18:94:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="141" rc-code="0" op-status="0" interval="0" last-run="1415124890" last-rc-change="1415124890" exec-time="20" queue-time="1" op-digest="424153f43b0852bb5bccdf71f34784d4"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-alarm-evaluator" type="ceilometer-alarm-evaluator" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_ceilometer-alarm-evaluator_last_failure_0" operation_key="p_ceilometer-alarm-evaluator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="40:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;40:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="51" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="1" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="p_heat-engine" type="heat-engine" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_heat-engine_last_0" operation_key="p_heat-engine_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:103:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="174" rc-code="7" op-status="0" interval="0" last-run="1415124915" last-rc-change="1415124915" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_ceilometer-agent-central" type="ceilometer-agent-central" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_ceilometer-agent-central_last_failure_0" operation_key="p_ceilometer-agent-central_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="41:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:5;41:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="55" rc-code="5" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="0" queue-time="0" op-digest="0af7dfa5435ca11d7d668ed679515e13"/>
</lrm_resource>
<lrm_resource id="vip__management" type="ns_IPaddr2" class="ocf" provider="pacemaker">
<lrm_rsc_op id="vip__management_last_0" operation_key="vip__management_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="32:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;32:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="11" queue-time="0" op-digest="88dd3ef5610eee85fdb12cf6731c0720"/>
</lrm_resource>
<lrm_resource id="ping_vip__public" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_vip__public_last_0" operation_key="ping_vip__public_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="21:653:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;21:653:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="177" rc-code="0" op-status="0" interval="0" last-run="1415184703" last-rc-change="1415184703" exec-time="2011" queue-time="0" op-digest="606e53800773938f91e1c261bb4c725c"/>
<lrm_rsc_op id="ping_vip__public_monitor_20000" operation_key="ping_vip__public_monitor_20000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="23:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:0;23:654:0:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="180" rc-code="0" op-status="0" interval="20000" last-rc-change="1415184705" exec-time="2008" queue-time="0" op-digest="873bd0812a107843932917e76b49de81"/>
</lrm_resource>
<lrm_resource id="p_neutron-l3-agent" type="neutron-agent-l3" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-l3-agent_last_0" operation_key="p_neutron-l3-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:98:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="159" rc-code="7" op-status="0" interval="0" last-run="1415124900" last-rc-change="1415124900" exec-time="22" queue-time="0" op-digest="8c1166c16a3eccc28a06071394828564"/>
</lrm_resource>
<lrm_resource id="p_neutron-metadata-agent" type="neutron-agent-metadata" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-metadata-agent_last_0" operation_key="p_neutron-metadata-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="17:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;17:101:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="167" rc-code="7" op-status="0" interval="0" last-run="1415124912" last-rc-change="1415124912" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="p_mysql" type="mysql-wss" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_mysql_last_0" operation_key="p_mysql_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:70:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="97" rc-code="7" op-status="0" interval="0" last-run="1415123879" last-rc-change="1415123879" exec-time="20039" queue-time="0" op-digest="af83dba8adcaf0e62865958e2b4993b7"/>
</lrm_resource>
<lrm_resource id="p_neutron-dhcp-agent" type="neutron-agent-dhcp" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_neutron-dhcp-agent_last_0" operation_key="p_neutron-dhcp-agent_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="17:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;17:97:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="151" rc-code="7" op-status="0" interval="0" last-run="1415124893" last-rc-change="1415124893" exec-time="32" queue-time="0" op-digest="1b732a7fb883e1f6a419959b2aa29f92"/>
</lrm_resource>
<lrm_resource id="vip__public" type="ns_IPaddr2" class="ocf" provider="pacemaker">
<lrm_rsc_op id="vip__public_last_0" operation_key="vip__public_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="30:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;30:59:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1415123701" last-rc-change="1415123701" exec-time="9" queue-time="0" op-digest="c59062bf796f251f178e8646ea654950"/>
</lrm_resource>
<lrm_resource id="p_haproxy" type="ns_haproxy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_haproxy_last_0" operation_key="p_haproxy_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:91:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:91:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="133" rc-code="7" op-status="0" interval="0" last-run="1415124881" last-rc-change="1415124881" exec-time="80" queue-time="0" op-digest="2a23892614b6b1d0f70ca66b073b5bc0"/>
</lrm_resource>
<lrm_resource id="p_rabbitmq-server" type="rabbitmq-server" class="ocf" provider="pacemaker">
<lrm_rsc_op id="p_rabbitmq-server_last_0" operation_key="p_rabbitmq-server_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.7" transition-key="18:83:7:3bec3a94-918a-4c0c-9818-e588e618bdab" transition-magic="0:7;18:83:7:3bec3a94-918a-4c0c-9818-e588e618bdab" call-id="114" rc-code="7" op-status="0" interval="0" last-run="1415124527" last-rc-change="1415124527" exec-time="132" queue-time="0" op-digest="6f1cd990340e90d62b7efecdec17ba24"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>

View File

@ -1,221 +0,0 @@
require 'spec_helper'
require File.expand_path(File.join(File.dirname(__FILE__), '../../../../lib/puppet/provider/pacemaker_common.rb'))
describe Puppet::Provider::Pacemaker_common do
# Path to the CIB XML fixture that lives next to this spec file.
cib_xml_file = File.join File.dirname(__FILE__), 'cib.xml'
# Raw text of the CIB XML fixture; stubbed in below as the provider's CIB.
let(:raw_cib) { File.read(cib_xml_file) }
# Names of the OpenStack services these examples care about.
let(:resources_regexp) do
  /nova|cinder|glance|keystone|neutron|sahara|murano|ceilometer|heat|swift/
end
###########################
#-> Cloned primitive 'clone_p_neutron-plugin-openvswitch-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Cloned primitive 'clone_ping_vip__public' global status: start
#node-1: start | node-2: start | node-3: start
#-> Cloned primitive 'clone_p_neutron-metadata-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'vip__management' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Cloned primitive 'clone_p_mysql' global status: start
#node-1: start | node-2: start | node-3: stop
#-> Multistate primitive 'master_p_rabbitmq-server' global status: master
#node-1: master | node-2: start | node-3: stop
#-> Cloned primitive 'clone_p_haproxy' global status: start
#node-1: start | node-2: start | node-3: stop
#-> Simple primitive 'p_ceilometer-alarm-evaluator' global status: stop
#node-1: stop | node-2: stop (FAIL) | node-3: stop (FAIL)
#-> Simple primitive 'p_ceilometer-agent-central' global status: stop
#node-1: stop | node-2: stop (FAIL) | node-3: stop (FAIL)
#-> Cloned primitive 'clone_p_neutron-l3-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'p_neutron-dhcp-agent' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'vip__public' global status: start
#node-1: start | node-2: stop | node-3: stop
#-> Simple primitive 'p_heat-engine' global status: start
#node-1: start | node-2: stop | node-3: stop
# Isolate the provider from a live cluster: serve the XML fixture instead
# of the real CIB and turn `pcs` invocations into no-ops.
before do
  @class = subject
  allow(@class).to receive(:raw_cib).and_return(raw_cib)
  allow(@class).to receive(:pcs).and_return(true)
end
# Parsing of the CIB configuration section: raw XML access, the primitives
# subsection, and the per-primitive structures derived from it.
context 'configuration parser' do
  it 'can obtain a CIB XML object' do
    expect(@class.cib.to_s).to include '<configuration>'
    expect(@class.cib.to_s).to include '<nodes>'
    expect(@class.cib.to_s).to include '<resources>'
    expect(@class.cib.to_s).to include '<status>'
    expect(@class.cib.to_s).to include '<operations>'
  end

  it 'can get primitives section of CIB XML' do
    expect(@class.cib_section_primitives).to be_a(Array)
    expect(@class.cib_section_primitives.first.to_s).to start_with '<primitive'
    expect(@class.cib_section_primitives.first.to_s).to end_with '</primitive>'
  end

  # Every nested section of a primitive (meta attributes, instance
  # attributes, operations) is exposed as a Hash keyed by id/name.
  it 'can get primitives configuration' do
    expect(@class.primitives).to be_a Hash
    expect(@class.primitives['vip__public']).to be_a Hash
    expect(@class.primitives['vip__public']['meta_attributes']).to be_a Hash
    expect(@class.primitives['vip__public']['instance_attributes']).to be_a Hash
    expect(@class.primitives['vip__public']['instance_attributes']['ip']).to be_a Hash
    expect(@class.primitives['vip__public']['operations']).to be_a Hash
    expect(@class.primitives['vip__public']['meta_attributes']['resource-stickiness']).to be_a Hash
    expect(@class.primitives['vip__public']['operations']['vip__public-start-0']).to be_a Hash
  end

  # NOTE: example description typo fixed ("is primitive is" -> "if primitive is").
  it 'can determine if primitive is simple or complex' do
    expect(@class.primitive_is_complex? 'p_haproxy').to eq true
    expect(@class.primitive_is_complex? 'vip__management').to eq false
  end
end
context 'node status parser' do
# The status section of the fixture CIB is turned into a per-node Hash
# that records each primitive's state on that node.
it 'can produce nodes structure' do
  nodes = @class.nodes
  expect(nodes).to be_a Hash
  expect(nodes['node-1']['primitives']['p_heat-engine']['status']).to eq('start')
end
it 'can determite a global primitive status' do
expect(@class.primitive_status 'p_heat-engine').to eq('start')
expect(@class.primitive_is_running? 'p_heat-engine').to eq true
expect(@class.primitive_status 'p_ceilometer-agent-central').to eq('stop')
expect(@class.primitive_is_running? 'p_ceilometer-agent-central').to eq false
expect(@class.primitive_is_running? 'UNKNOWN').to eq nil
expect(@class.primitive_status 'UNKNOWN').to eq nil
end
it 'can determine a local primitive status on a node' do
expect(@class.primitive_status 'p_heat-engine', 'node-1').to eq('start')
expect(@class.primitive_is_running? 'p_heat-engine', 'node-1').to eq true
expect(@class.primitive_status 'p_heat-engine', 'node-2').to eq('stop')
expect(@class.primitive_is_running? 'p_heat-engine', 'node-2').to eq false
expect(@class.primitive_is_running? 'UNKNOWN', 'node-1').to eq nil
expect(@class.primitive_status 'UNKNOWN', 'node-1').to eq nil
end
it 'can determine if primitive is managed or not' do
expect(@class.primitive_is_managed? 'p_heat-engine').to eq true
expect(@class.primitive_is_managed? 'p_haproxy').to eq true
expect(@class.primitive_is_managed? 'UNKNOWN').to eq nil
end
it 'can determine if primitive is started or not' do
expect(@class.primitive_is_started? 'p_heat-engine').to eq true
expect(@class.primitive_is_started? 'p_haproxy').to eq true
expect(@class.primitive_is_started? 'UNKNOWN').to eq nil
end
it 'can determine if primitive is failed or not globally' do
expect(@class.primitive_has_failures? 'p_ceilometer-agent-central').to eq true
expect(@class.primitive_has_failures? 'p_heat-engine').to eq false
expect(@class.primitive_has_failures? 'UNKNOWN').to eq nil
end
it 'can determine if primitive is failed or not locally' do
expect(@class.primitive_has_failures? 'p_ceilometer-agent-central', 'node-1').to eq false
expect(@class.primitive_has_failures? 'p_ceilometer-agent-central', 'node-2').to eq true
expect(@class.primitive_has_failures? 'p_heat-engine', 'node-1').to eq false
expect(@class.primitive_has_failures? 'p_heat-engine', 'node-2').to eq false
expect(@class.primitive_has_failures? 'UNKNOWN', 'node-1').to eq nil
end
it 'can determine that primitive is complex' do
expect(@class.primitive_is_complex? 'p_haproxy').to eq true
expect(@class.primitive_is_complex? 'p_heat-engine').to eq false
expect(@class.primitive_is_complex? 'p_rabbitmq-server').to eq true
expect(@class.primitive_is_complex? 'UNKNOWN').to eq nil
end
it 'can determine that primitive is multistate' do
expect(@class.primitive_is_multistate? 'p_haproxy').to eq false
expect(@class.primitive_is_multistate? 'p_heat-engine').to eq false
expect(@class.primitive_is_multistate? 'p_rabbitmq-server').to eq true
expect(@class.primitive_is_multistate? 'UNKNOWN').to eq nil
end
it 'can determine that primitive has master running' do
expect(@class.primitive_has_master_running? 'p_rabbitmq-server').to eq true
expect(@class.primitive_has_master_running? 'p_heat-engine').to eq false
expect(@class.primitive_has_master_running? 'UNKNOWN').to eq nil
end
it 'can determine that primitive is clone' do
expect(@class.primitive_is_clone? 'p_haproxy').to eq true
expect(@class.primitive_is_clone? 'p_heat-engine').to eq false
expect(@class.primitive_is_clone? 'p_rabbitmq-server').to eq false
expect(@class.primitive_is_clone? 'UNKNOWN').to eq nil
end
end
# Cluster-wide property changes are delegated to the pcs command wrapper.
context 'cluster control' do
  # Enable/disable differ only in the value passed to maintenance_mode.
  { 'enable' => 'true', 'disable' => 'false' }.each do |action, value|
    it "can #{action} maintenance mode" do
      expect(@class).to receive(:pcs).with 'property', 'set', "maintenance-mode=#{value}"
      @class.maintenance_mode value
    end
  end
  it 'can set no-quorum policy' do
    policy = 'ignore'
    expect(@class).to receive(:pcs).with 'property', 'set', "no-quorum-policy=#{policy}"
    @class.no_quorum_policy policy
  end
end
# Location constraints: additions go through cibadmin, removals through pcs.
context 'constraints control' do
  let(:primitive) { 'myprimitive' }
  let(:node) { 'mynode' }
  it 'can add location constraint' do
    expect(@class).to receive(:cibadmin).and_return(true)
    @class.constraint_location_add primitive, node, '200'
  end
  it 'can remove location constraint' do
    # The constraint id is derived as "<primitive>_on_<node>".
    expect(@class).to receive(:pcs).with 'constraint', 'location', 'remove', "#{primitive}_on_#{node}"
    @class.constraint_location_remove primitive, node
  end
end
# The wait helpers poll cluster state until a condition holds; here the
# underlying checks are stubbed so each wait returns on the first pass.
context 'wait functions' do
  let(:service_name) { 'myprimitive' }
  it 'retries block until it becomes true' do
    @class.retry_block_until_true { true }
  end
  it 'waits for Pacemaker to become ready' do
    allow(@class).to receive(:is_online?).and_return true
    @class.wait_for_online
  end
  it 'waits for status to become known' do
    allow(@class).to receive_messages(:cib_reset => true, :primitive_status => 'stopped')
    @class.wait_for_status service_name
  end
  it 'waits for the service to start' do
    allow(@class).to receive(:cib_reset).and_return true
    allow(@class).to receive(:primitive_is_running?).with(service_name, nil).and_return true
    @class.wait_for_start service_name
  end
  it 'waits for the service to stop' do
    allow(@class).to receive(:cib_reset).and_return true
    allow(@class).to receive(:primitive_is_running?).with(service_name, nil).and_return false
    @class.wait_for_stop service_name
  end
end
end

View File

@ -1,230 +0,0 @@
require 'spec_helper'
# Unit tests for the pacemaker provider of the service type. Every cluster
# interaction (CIB queries, pcs wrappers, wait helpers, primitive actions)
# is stubbed, so only the provider's own control flow is exercised.
# Fixes: duplicated '.once.once' message-count constraint in the '#stop'
# cleanup example; 'mnemoization' -> 'memoization' typo in a description;
# hash-rocket spacing normalized in the resource let.
describe Puppet::Type.type(:service).provider(:pacemaker) do
  let(:resource) { Puppet::Type.type(:service).new(:name => title, :provider => :pacemaker) }
  let(:provider) { resource.provider }
  let(:title) { 'myservice' }
  let(:full_name) { 'clone-p_myservice' }
  let(:name) { 'p_myservice' }
  let(:hostname) { 'mynode' }
  let(:primitive_class) { 'ocf' }
  before :each do
    @class = provider
    # Identity helpers.
    allow(@class).to receive(:title).and_return(title)
    allow(@class).to receive(:hostname).and_return(hostname)
    allow(@class).to receive(:name).and_return(name)
    allow(@class).to receive(:full_name).and_return(full_name)
    allow(@class).to receive(:basic_service_name).and_return(title)
    allow(@class).to receive(:primitive_class).and_return(primitive_class)
    # Cluster state and wait helpers.
    allow(@class).to receive(:cib_reset).and_return(true)
    allow(@class).to receive(:wait_for_online).and_return(true)
    allow(@class).to receive(:wait_for_status).and_return(true)
    allow(@class).to receive(:wait_for_start).and_return(true)
    allow(@class).to receive(:wait_for_stop).and_return(true)
    allow(@class).to receive(:disable_basic_service).and_return(true)
    allow(@class).to receive(:get_primitive_puppet_status).and_return(:started)
    allow(@class).to receive(:get_primitive_puppet_enable).and_return(:true)
    # Default primitive: managed, running, healthy and simple.
    allow(@class).to receive(:primitive_is_managed?).and_return(true)
    allow(@class).to receive(:primitive_is_running?).and_return(true)
    allow(@class).to receive(:primitive_has_failures?).and_return(false)
    allow(@class).to receive(:primitive_is_complex?).and_return(false)
    allow(@class).to receive(:primitive_is_multistate?).and_return(false)
    allow(@class).to receive(:primitive_is_clone?).and_return(false)
    # Primitive actions are all no-ops.
    allow(@class).to receive(:unban_primitive).and_return(true)
    allow(@class).to receive(:ban_primitive).and_return(true)
    allow(@class).to receive(:start_primitive).and_return(true)
    allow(@class).to receive(:stop_primitive).and_return(true)
    allow(@class).to receive(:cleanup_primitive).and_return(true)
    allow(@class).to receive(:enable).and_return(true)
    allow(@class).to receive(:disable).and_return(true)
    allow(@class).to receive(:constraint_location_add).and_return(true)
    allow(@class).to receive(:constraint_location_remove).and_return(true)
    allow(@class).to receive(:get_cluster_debug_report).and_return(true)
  end
  # The provider accepts either the bare service title or the 'p_'-prefixed
  # primitive name, whichever exists in the CIB.
  context 'service name mangling' do
    it 'uses title as the service name if it is found in CIB' do
      allow(@class).to receive(:name).and_call_original
      allow(@class).to receive(:primitive_exists?).with(title).and_return(true)
      expect(@class.name).to eq(title)
    end
    it 'uses "p_" prefix with name if found name with prefix' do
      allow(@class).to receive(:name).and_call_original
      allow(@class).to receive(:primitive_exists?).with(title).and_return(false)
      allow(@class).to receive(:primitive_exists?).with(name).and_return(true)
      expect(@class.name).to eq(name)
    end
    it 'uses name without "p_" to disable basic service' do
      allow(@class).to receive(:name).and_return(name)
      expect(@class.basic_service_name).to eq(title)
    end
  end
  context '#status' do
    it 'should wait for pacemaker to become online' do
      expect(@class).to receive(:wait_for_online)
      @class.status
    end
    it 'should reset cib memoization on every call' do
      expect(@class).to receive(:cib_reset)
      @class.status
    end
    it 'gets service status locally' do
      expect(@class).to receive(:get_primitive_puppet_status).with name, hostname
      @class.status
    end
  end
  context '#start' do
    it 'tries to enable service if it is not enabled to work with it' do
      allow(@class).to receive(:primitive_is_managed?).and_return(false)
      expect(@class).to receive(:enable).once
      @class.start
      allow(@class).to receive(:primitive_is_managed?).and_return(true)
      allow(@class).to receive(:enable).and_call_original
      expect(@class).to receive(:enable).never
      @class.start
    end
    it 'tries to disable a basic service with the same name' do
      expect(@class).to receive(:disable_basic_service)
      @class.start
    end
    it 'should cleanup a primitive' do
      allow(@class).to receive(:primitive_has_failures?).and_return(true)
      expect(@class).to receive(:cleanup_primitive).with(full_name, hostname).once
      @class.start
    end
    it 'tries to unban the service on the node by the name' do
      expect(@class).to receive(:unban_primitive).with(name, hostname)
      @class.start
    end
    it 'tries to start the service by its full name' do
      expect(@class).to receive(:start_primitive).with(full_name)
      @class.start
    end
    it 'adds a location constraint for the service by its full_name' do
      expect(@class).to receive(:constraint_location_add).with(full_name, hostname)
      @class.start
    end
    it 'waits for the service to start locally if primitive is clone' do
      allow(@class).to receive(:primitive_is_clone?).and_return(true)
      allow(@class).to receive(:primitive_is_multistate?).and_return(false)
      allow(@class).to receive(:primitive_is_complex?).and_return(true)
      expect(@class).to receive(:wait_for_start).with name
      @class.start
    end
    it 'waits for the service to start master anywhere if primitive is multistate' do
      allow(@class).to receive(:primitive_is_clone?).and_return(false)
      allow(@class).to receive(:primitive_is_multistate?).and_return(true)
      allow(@class).to receive(:primitive_is_complex?).and_return(true)
      expect(@class).to receive(:wait_for_master).with name
      @class.start
    end
    it 'waits for the service to start anywhere if primitive is simple' do
      allow(@class).to receive(:primitive_is_clone?).and_return(false)
      allow(@class).to receive(:primitive_is_multistate?).and_return(false)
      allow(@class).to receive(:primitive_is_complex?).and_return(false)
      expect(@class).to receive(:wait_for_start).with name
      @class.start
    end
  end
  context '#stop' do
    # NOTE(review): the expectation below is :enable (manage the primitive),
    # even though the description says "disable" -- confirm intended wording.
    it 'tries to disable service if it is not enabled to work with it' do
      allow(@class).to receive(:primitive_is_managed?).and_return(false)
      expect(@class).to receive(:enable).once
      @class.stop
      allow(@class).to receive(:primitive_is_managed?).and_return(true)
      allow(@class).to receive(:enable).and_call_original
      expect(@class).to receive(:enable).never
      @class.stop
    end
    it 'should cleanup a primitive on stop' do
      expect(@class).to receive(:cleanup_primitive).with(full_name, hostname).once
      @class.stop
    end
    it 'uses Ban to stop the service and waits for it to stop locally if service is complex' do
      allow(@class).to receive(:primitive_is_complex?).and_return(true)
      expect(@class).to receive(:wait_for_stop).with name, hostname
      expect(@class).to receive(:ban_primitive).with name, hostname
      @class.stop
    end
    it 'uses Stop to stop the service and waits for it to stop globally if service is simple' do
      allow(@class).to receive(:primitive_is_complex?).and_return(false)
      expect(@class).to receive(:wait_for_stop).with name
      expect(@class).to receive(:stop_primitive).with name
      @class.stop
    end
  end
  context '#restart' do
    it 'does not stop or start the service if it is not locally running' do
      allow(@class).to receive(:primitive_is_running?).with(name, hostname).and_return(false)
      expect(@class).to receive(:stop).never
      expect(@class).to receive(:start).never
      @class.restart
    end
    it 'stops and start the service if it is locally running' do
      allow(@class).to receive(:primitive_is_running?).with(name, hostname).and_return(true)
      expect(@class).to receive(:stop).ordered
      expect(@class).to receive(:start).ordered
      @class.restart
    end
  end
  # disable_basic_service stops and disables the OS-level service that
  # shadows the pacemaker-managed one, via the extra_provider delegate.
  context 'basic service handling' do
    before :each do
      allow(@class).to receive(:disable_basic_service).and_call_original
      allow(@class.extra_provider).to receive(:enableable?).and_return true
      allow(@class.extra_provider).to receive(:enabled?).and_return :true
      allow(@class.extra_provider).to receive(:disable).and_return true
      allow(@class.extra_provider).to receive(:stop).and_return true
      allow(@class.extra_provider).to receive(:status).and_return :running
    end
    it 'tries to disable the basic service if it is enabled' do
      expect(@class.extra_provider).to receive(:disable)
      @class.disable_basic_service
    end
    it 'tries to stop the service if it is running' do
      expect(@class.extra_provider).to receive(:stop)
      @class.disable_basic_service
    end
    it 'does not try to stop a systemd running service' do
      allow(@class).to receive(:primitive_class).and_return('systemd')
      expect(@class.extra_provider).to receive(:stop).never
      @class.disable_basic_service
    end
  end
end