---
# how many times a command should retry if it's failing
:retry_count: 360

# how long to wait between retries (seconds)
:retry_step: 5

# how long to wait for a single command to finish running (seconds)
:retry_timeout: 60

# count false or nil block return values as failures or only exceptions?
:retry_false_is_failure: true

# raise error if no more retries left and command is still failing?
:retry_fail_on_timeout: true

# what cluster properties should be shown on the debug status output
:debug_show_properties:
  - symmetric-cluster
  - no-quorum-policy

# Show the debug messages for the resource operations status calculation
:debug_show_operations: false

# don't actually do any changes to the system
# only show what command would have been run
:debug_enabled: false

# how do we determine that the service has been started?
# :global - The service is running on any node
# :master - The service is running in the master mode on any node
# :local  - The service is running on the local node
:start_mode_master: :master
:start_mode_clone: :global
:start_mode_simple: :global

# what method should be used to stop the service?
# :global - Stop the running service by disabling it
# :local  - Stop the locally running service by banning it on this node
# Note: by default restart does not stop services
# if they are not running locally on the node
:stop_mode_master: :local
:stop_mode_clone: :local
:stop_mode_simple: :global

# what service is considered running?
# :global - The service is running on any node
# :local  - The service is running on the local node
:status_mode_master: :local
:status_mode_clone: :local
:status_mode_simple: :global

# cleanup the primitive during these actions?
:cleanup_on_start: true
:cleanup_on_stop: true

# set the primitive status to stopped if there are failures
# forcing the primitive to be started again and cleaned up
# on this node
:cleanup_on_status: true

# try to stop and disable the basic service on these provider actions
# the basic service is the service managed by the system
# init scripts or the upstart/systemd units
# in order to run the Pacemaker service the basic service
# should be stopped and disabled first so it will not mess
# with the OCF script
:disable_basic_service_on_status: false
:disable_basic_service_on_start: true
:disable_basic_service_on_stop: false

# don't try to stop basic service for these primitive classes
# because they are based on the native service manager
# and the basic service and the Pacemaker service is the same thing
:native_based_primitive_classes:
  - lsb
  - systemd
  - upstart

# add location constraint to allow the service to run on the current node
# useful for asymmetric cluster mode
:add_location_constraint: true

# restart the service only if it's running on this node
# and skip restart if it's running elsewhere
:restart_only_if_local: true

# cleanup primitive only if it has failures
:cleanup_only_if_failures: true

# use prefetch in the providers
:prefetch: false

# Use additional idempotency checks before cibadmin create and delete actions
# It may be needed if many nodes are running at the same time
:cibadmin_idempotency_checks: true

# Should the constraints auto require their primitives?
# It can cause unwanted dependency cycles.
:autorequire_primitives: false

# meta attributes that are related to the primitive's service status
# and should be excluded from the configuration
:status_meta_attributes:
  - target-role
  - is-managed