From 9bd4d6455e52833ddcb9d7fc10be40d2decc473c Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Sat, 24 Feb 2024 11:31:59 -0800
Subject: [PATCH] Retire TripleO: remove repo content

The TripleO project is retiring:
https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the content of this project repo.

Change-Id: I5b659ef9596c8b6b7edca9fbe0ef36021b9dfc33
---
 .fixtures.yml | 9 - .gitignore | 27 - .rspec | 2 - .rubocop.yml | 87 -- .travis.yml | 44 - .zuul.yaml | 19 - CONTRIBUTING.md | 96 -- Gemfile | 83 -- LICENSE | 201 ---- README.md | 946 ------------------ README.rst | 10 + Rakefile | 47 - TODO.md | 12 - agent_generator/agent_generator.rb | 250 ----- agent_generator/generate_manifests.sh | 17 - agent_generator/src_xml/fence_amt.xml | 172 ---- agent_generator/src_xml/fence_apc.xml | 142 --- agent_generator/src_xml/fence_apc_snmp.xml | 151 --- agent_generator/src_xml/fence_bladecenter.xml | 142 --- agent_generator/src_xml/fence_brocade.xml | 136 --- agent_generator/src_xml/fence_cisco_mds.xml | 150 --- agent_generator/src_xml/fence_cisco_ucs.xml | 142 --- agent_generator/src_xml/fence_compute.xml | 177 ---- agent_generator/src_xml/fence_crosslink.xml | 109 -- agent_generator/src_xml/fence_drac5.xml | 142 --- agent_generator/src_xml/fence_eaton_snmp.xml | 151 --- agent_generator/src_xml/fence_eps.xml | 123 --- agent_generator/src_xml/fence_hpblade.xml | 142 --- agent_generator/src_xml/fence_ibmblade.xml | 151 --- agent_generator/src_xml/fence_idrac.xml | 94 -- agent_generator/src_xml/fence_ifmib.xml | 152 --- agent_generator/src_xml/fence_ilo.xml | 132 --- agent_generator/src_xml/fence_ilo2.xml | 132 --- agent_generator/src_xml/fence_ilo3.xml | 94 -- agent_generator/src_xml/fence_ilo4.xml | 94 -- agent_generator/src_xml/fence_ilo_mp.xml | 127 --- agent_generator/src_xml/fence_imm.xml | 94 -- .../src_xml/fence_intelmodular.xml | 153 --- agent_generator/src_xml/fence_ipdu.xml | 151 --- agent_generator/src_xml/fence_ipmilan.xml | 99 -- agent_generator/src_xml/fence_ironic.xml | 61 -- agent_generator/src_xml/fence_kdump.xml | 51 - agent_generator/src_xml/fence_kubevirt.xml | 134 --- agent_generator/src_xml/fence_redfish.xml | 182 ---- agent_generator/src_xml/fence_rhevm.xml | 142 --- agent_generator/src_xml/fence_rsb.xml | 126 --- agent_generator/src_xml/fence_scsi.xml | 48 - agent_generator/src_xml/fence_virt.xml | 66 -- agent_generator/src_xml/fence_vmware_soap.xml | 139 --- agent_generator/src_xml/fence_watchdog.xml | 40 - agent_generator/src_xml/fence_wti.xml | 137 --- agent_generator/src_xml/fence_xvm.xml | 86 -- agent_generator/update_sources.sh | 26 - agent_generator/variables.sh | 47 - bindep.txt | 23 - doc/requirements.txt | 6 - examples/pacemaker/host.pp | 35 - examples/pacemaker/setup.pp | 29 - examples/pacemaker_colocation/create.pp | 43 - examples/pacemaker_colocation/delete.pp | 23 - examples/pacemaker_colocation/show.sh | 10 - examples/pacemaker_colocation/update.pp | 43 - examples/pacemaker_location/create.pp | 51 - examples/pacemaker_location/delete.pp | 19 - examples/pacemaker_location/show.sh | 10 - examples/pacemaker_location/update.pp | 51 - .../pacemaker_operation_default/create.pp | 4 - .../pacemaker_operation_default/delete.pp | 3 - examples/pacemaker_operation_default/show.sh | 9 - .../pacemaker_operation_default/update.pp | 4 - examples/pacemaker_order/create.pp | 41 - examples/pacemaker_order/delete.pp | 21 - examples/pacemaker_order/show.sh | 10 - examples/pacemaker_order/update.pp | 41 - examples/pacemaker_property/create.pp | 9 -
examples/pacemaker_property/delete.pp | 7 - examples/pacemaker_property/show.sh | 10 - examples/pacemaker_property/update.pp | 9 - examples/pacemaker_resource/create.pp | 108 -- examples/pacemaker_resource/delete.pp | 22 - examples/pacemaker_resource/show.sh | 26 - examples/pacemaker_resource/update.pp | 108 -- examples/pacemaker_resource_default/create.pp | 4 - examples/pacemaker_resource_default/delete.pp | 3 - examples/pacemaker_resource_default/show.sh | 9 - examples/pacemaker_resource_default/update.pp | 4 - examples/service/clean.pp | 7 - examples/service/start.pp | 52 - examples/service/stop.pp | 52 - lib/facter/pacemaker_node_name.rb | 10 - lib/pacemaker/options.rb | 31 - lib/pacemaker/options.yaml | 106 -- lib/pacemaker/pcs/cluster_property.rb | 40 - lib/pacemaker/pcs/common.rb | 48 - lib/pacemaker/pcs/operation_default.rb | 40 - lib/pacemaker/pcs/pcsd_auth.rb | 64 -- lib/pacemaker/pcs/resource_default.rb | 40 - lib/pacemaker/type.rb | 184 ---- lib/pacemaker/wait.rb | 238 ----- lib/pacemaker/xml/cib.rb | 123 --- lib/pacemaker/xml/constraint_colocations.rb | 43 - lib/pacemaker/xml/constraint_locations.rb | 104 -- lib/pacemaker/xml/constraint_orders.rb | 43 - lib/pacemaker/xml/constraints.rb | 77 -- lib/pacemaker/xml/debug.rb | 138 --- lib/pacemaker/xml/helpers.rb | 139 --- lib/pacemaker/xml/nodes.rb | 47 - lib/pacemaker/xml/operation_default.rb | 56 -- lib/pacemaker/xml/primitives.rb | 384 ------- lib/pacemaker/xml/properties.rb | 52 - lib/pacemaker/xml/resource_default.rb | 56 -- lib/pacemaker/xml/status.rb | 277 ----- lib/pacemaker/xml/xml.rb | 65 -- .../functions/pacemaker_cluster_nodes.rb | 191 ---- .../functions/pacemaker_cluster_options.rb | 28 - .../pacemaker_resource_parameters.rb | 26 - .../parser/functions/pcmk_cluster_setup.rb | 62 -- .../parser/functions/pcmk_nodes_added.rb | 101 -- .../pacemaker_colocation/pacemaker_noop.rb | 6 - .../pacemaker_colocation/pacemaker_xml.rb | 170 ---- .../pacemaker_location/pacemaker_noop.rb | 6 - .../pacemaker_location/pacemaker_xml.rb | 168 ---- .../pacemaker_nodes/pacemaker_noop.rb | 6 - .../provider/pacemaker_nodes/pacemaker_xml.rb | 130 --- lib/puppet/provider/pacemaker_noop.rb | 55 - .../pacemaker_online/pacemaker_noop.rb | 6 - .../pacemaker_online/pacemaker_xml.rb | 25 - .../pacemaker_noop.rb | 6 - .../pacemaker_pcs.rb | 51 - .../pacemaker_xml.rb | 77 -- .../pacemaker_order/pacemaker_noop.rb | 6 - .../provider/pacemaker_order/pacemaker_xml.rb | 229 ----- lib/puppet/provider/pacemaker_pcs.rb | 27 - .../pacemaker_pcsd_auth/pacemaker_noop.rb | 6 - .../pacemaker_pcsd_auth/pacemaker_pcs.rb | 120 --- .../pacemaker_property/pacemaker_noop.rb | 6 - .../pacemaker_property/pacemaker_pcs.rb | 51 - .../pacemaker_property/pacemaker_xml.rb | 77 -- .../pacemaker_resource/pacemaker_noop.rb | 6 - .../pacemaker_resource/pacemaker_xml.rb | 345 ------- .../pacemaker_noop.rb | 6 - .../pacemaker_pcs.rb | 51 - .../pacemaker_xml.rb | 78 -- lib/puppet/provider/pacemaker_xml.rb | 48 - lib/puppet/provider/pcmk_bundle/default.rb | 295 ------ lib/puppet/provider/pcmk_common.rb | 432 -------- .../provider/pcmk_constraint/default.rb | 82 -- lib/puppet/provider/pcmk_property/default.rb | 86 -- lib/puppet/provider/pcmk_remote/default.rb | 132 --- lib/puppet/provider/pcmk_resource/default.rb | 280 ------ .../provider/pcmk_resource_default/pcs.rb | 40 - .../provider/pcmk_resource_op_default/pcs.rb | 40 - lib/puppet/provider/pcmk_stonith/default.rb | 142 --- .../provider/pcmk_stonith_level/default.rb | 76 -- lib/puppet/provider/service/pacemaker_noop.rb | 6 - 
lib/puppet/provider/service/pacemaker_xml.rb | 368 ------- lib/puppet/type/pacemaker_colocation.rb | 92 -- lib/puppet/type/pacemaker_location.rb | 106 -- lib/puppet/type/pacemaker_nodes.rb | 87 -- lib/puppet/type/pacemaker_online.rb | 20 - .../type/pacemaker_operation_default.rb | 48 - lib/puppet/type/pacemaker_order.rb | 165 --- lib/puppet/type/pacemaker_pcsd_auth.rb | 100 -- lib/puppet/type/pacemaker_property.rb | 52 - lib/puppet/type/pacemaker_resource.rb | 235 ----- lib/puppet/type/pacemaker_resource_default.rb | 56 -- lib/puppet/type/pcmk_bundle.rb | 180 ---- lib/puppet/type/pcmk_constraint.rb | 80 -- lib/puppet/type/pcmk_property.rb | 62 -- lib/puppet/type/pcmk_remote.rb | 137 --- lib/puppet/type/pcmk_resource.rb | 155 --- lib/puppet/type/pcmk_resource_default.rb | 81 -- lib/puppet/type/pcmk_resource_op_default.rb | 81 -- lib/puppet/type/pcmk_stonith.rb | 116 --- lib/puppet/type/pcmk_stonith_level.rb | 99 -- lib/serverspec/type/pacemaker_colocation.rb | 57 -- lib/serverspec/type/pacemaker_location.rb | 63 -- .../type/pacemaker_operation_default.rb | 38 - lib/serverspec/type/pacemaker_order.rb | 85 -- lib/serverspec/type/pacemaker_property.rb | 38 - lib/serverspec/type/pacemaker_resource.rb | 201 ---- .../type/pacemaker_resource_default.rb | 38 - lib/serverspec/type/pacemaker_xml.rb | 64 -- lib/tools/console.rb | 18 - lib/tools/provider.rb | 24 - lib/tools/status.rb | 17 - manifests/constraint/base.pp | 173 ---- manifests/constraint/colocation.pp | 77 -- manifests/constraint/location.pp | 70 -- manifests/constraint/order.pp | 88 -- manifests/contain.pp | 17 - manifests/corosync.pp | 420 -------- manifests/init.pp | 55 - manifests/install.pp | 43 - manifests/new.pp | 120 --- manifests/new/firewall.pp | 108 -- manifests/new/install.pp | 53 - manifests/new/params.pp | 74 -- manifests/new/resource/filesystem.pp | 111 -- manifests/new/resource/ip.pp | 91 -- manifests/new/resource/route.pp | 101 -- manifests/new/service.pp | 83 -- manifests/new/setup.pp | 69 -- manifests/new/setup/auth_key.pp | 70 -- manifests/new/setup/config.pp | 151 --- manifests/new/setup/debian.pp | 56 -- manifests/new/setup/pcsd.pp | 220 ---- manifests/new/wrapper.pp | 214 ---- manifests/params.pp | 60 -- manifests/property.pp | 83 -- manifests/remote.pp | 178 ---- manifests/resource/bundle.pp | 200 ---- manifests/resource/filesystem.pp | 169 ---- manifests/resource/ip.pp | 186 ---- manifests/resource/lsb.pp | 150 --- manifests/resource/ocf.pp | 156 --- manifests/resource/remote.pp | 158 --- manifests/resource/route.pp | 169 ---- manifests/resource/service.pp | 155 --- manifests/resource/systemd.pp | 150 --- manifests/resource_defaults.pp | 74 -- manifests/resource_op_defaults.pp | 74 -- manifests/service.pp | 84 -- manifests/stonith.pp | 64 -- manifests/stonith/.gitkeep | 0 manifests/stonith/fence_amt.pp | 322 ------ manifests/stonith/fence_apc.pp | 293 ------ manifests/stonith/fence_apc_snmp.pp | 309 ------ manifests/stonith/fence_bladecenter.pp | 293 ------ manifests/stonith/fence_brocade.pp | 285 ------ manifests/stonith/fence_cisco_mds.pp | 309 ------ manifests/stonith/fence_cisco_ucs.pp | 293 ------ manifests/stonith/fence_compute.pp | 349 ------- manifests/stonith/fence_crosslink.pp | 234 ----- manifests/stonith/fence_drac5.pp | 293 ------ manifests/stonith/fence_eaton_snmp.pp | 309 ------ manifests/stonith/fence_eps.pp | 261 ----- manifests/stonith/fence_hpblade.pp | 293 ------ manifests/stonith/fence_ibmblade.pp | 309 ------ manifests/stonith/fence_idrac.pp | 221 ---- manifests/stonith/fence_ifmib.pp | 309 
------ manifests/stonith/fence_ilo.pp | 277 ----- manifests/stonith/fence_ilo2.pp | 277 ----- manifests/stonith/fence_ilo3.pp | 221 ---- manifests/stonith/fence_ilo4.pp | 221 ---- manifests/stonith/fence_ilo_mp.pp | 269 ----- manifests/stonith/fence_imm.pp | 221 ---- manifests/stonith/fence_intelmodular.pp | 309 ------ manifests/stonith/fence_ipdu.pp | 309 ------ manifests/stonith/fence_ipmilan.pp | 229 ----- manifests/stonith/fence_ironic.pp | 186 ---- manifests/stonith/fence_kdump.pp | 165 --- manifests/stonith/fence_kubevirt.pp | 274 ----- manifests/stonith/fence_redfish.pp | 357 ------- manifests/stonith/fence_rhevm.pp | 293 ------ manifests/stonith/fence_rsb.pp | 269 ----- manifests/stonith/fence_scsi.pp | 165 --- manifests/stonith/fence_virt.pp | 189 ---- manifests/stonith/fence_vmware_soap.pp | 285 ------ manifests/stonith/fence_watchdog.pp | 130 --- manifests/stonith/fence_wti.pp | 285 ------ manifests/stonith/fence_xvm.pp | 278 ----- manifests/stonith/level.pp | 90 -- metadata.json | 50 - .../bundle_support-2fec55cad0f44ace.yaml | 6 - ...e_add_xenial_support-6ff98a4d9fd83e62.yaml | 3 - .../fence_kubevirt-435249b87ac6c4a1.yaml | 4 - .../pcsd_bind_addr-2e4c6da53262f72a.yaml | 4 - .../update-resources-ae2ef4f9e75e8699.yaml | 16 - ...ify_on_create_remote-b4abeace7018480d.yaml | 5 - releasenotes/source/0.6.x.rst | 6 - releasenotes/source/0.7.x.rst | 6 - releasenotes/source/1.1.x.rst | 6 - releasenotes/source/1.5.x.rst | 6 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/conf.py | 265 ----- releasenotes/source/index.rst | 22 - releasenotes/source/unreleased.rst | 5 - setup.cfg | 14 - setup.py | 23 - .../nodesets/disabled/centos-511-x64.yml | 9 - .../nodesets/disabled/centos-66-x64-pe.yml | 11 - .../nodesets/disabled/centos-66-x64.yml | 9 - .../nodesets/disabled/centos-72-x64.yml | 9 - .../nodesets/disabled/debian-78-x64.yml | 9 - .../nodesets/disabled/debian-82-x64.yml | 9 - .../disabled/ubuntu-server-1204-x64.yml | 9 - .../disabled/ubuntu-server-1404-x64.yml | 9 - spec/acceptance/nodesets/nodepool-centos7.yml | 10 - spec/acceptance/nodesets/nodepool-trusty.yml | 10 - spec/acceptance/nodesets/nodepool-xenial.yml | 10 - .../nodesets/vagrant-centos-7.2-64.yml | 10 - .../nodesets/vagrant-ubuntu-14.04-64.yml | 10 - spec/acceptance/pacemaker_colocation_spec.rb | 89 -- spec/acceptance/pacemaker_location_spec.rb | 119 --- spec/acceptance/pacemaker_node_name_spec.rb | 18 - .../pacemaker_operation_default_spec.rb | 30 - spec/acceptance/pacemaker_order_spec.rb | 85 -- spec/acceptance/pacemaker_property_spec.rb | 44 - .../pacemaker_resource_default_spec.rb | 30 - spec/acceptance/pacemaker_resource_spec.rb | 245 ----- spec/acceptance/pacemaker_service_spec.rb | 131 --- spec/acceptance/pacemaker_setup_spec.rb | 13 - spec/classes/pacemaker_firewall_spec.rb | 138 --- spec/classes/pacemaker_install_spec.rb | 92 -- .../pacemaker_resource_defaults_spec.rb | 52 - .../pacemaker_resource_op_defaults_spec.rb | 52 - spec/classes/pacemaker_service_spec.rb | 53 - spec/classes/pacemaker_setup_auth_key_spec.rb | 90 -- spec/classes/pacemaker_setup_config_spec.rb | 198 ---- spec/classes/pacemaker_setup_debian_spec.rb | 42 - spec/classes/pacemaker_setup_pcsd_spec.rb | 111 -- spec/classes/pacemaker_setup_spec.rb | 36 - spec/classes/pacemaker_spec.rb | 32 - spec/defines/pacemaker_contain_spec.rb | 31 - .../pacemaker_resource_filesystem_spec.rb | 106 -- spec/defines/pacemaker_resource_ip_spec.rb | 120 --- spec/defines/pacemaker_resource_route_spec.rb | 83 -- .../pacemaker_stonith_fence_ipmilan_spec.rb 
| 102 -- spec/defines/pacemaker_stonith_level_spec.rb | 86 -- spec/defines/pacemaker_wrapper_spec.rb | 352 ------- .../functions/pacemaker_cluster_nodes_spec.rb | 119 --- .../pacemaker_cluster_options_spec.rb | 34 - .../pacemaker_resource_parameters_spec.rb | 21 - spec/functions/pcmk_cluster_setup_spec.rb | 41 - spec/functions/pcmk_nodes_added_spec.rb | 83 -- spec/spec_helper.rb | 14 - spec/spec_helper_acceptance.rb | 49 - spec/unit/puppet/provider/cib-orig.xml | 238 ----- spec/unit/puppet/provider/cib.xml | 860 ---------------- .../provider/pacemaker_colocation/xml_spec.rb | 81 -- .../provider/pacemaker_location/xml_spec.rb | 180 ---- .../provider/pacemaker_nodes/xml_spec.rb | 103 -- .../puppet/provider/pacemaker_noop_spec.rb | 31 - .../provider/pacemaker_online/xmk_spec.rb | 33 - .../pacemaker_operation_default/pcs_spec.rb | 54 - .../pacemaker_operation_default/xml_spec.rb | 55 - .../provider/pacemaker_order/xml_spec.rb | 112 --- .../puppet/provider/pacemaker_pcs_spec.rb | 255 ----- .../provider/pacemaker_pcsd_auth/pcs_spec.rb | 154 --- .../provider/pacemaker_property/pcs_spec.rb | 54 - .../provider/pacemaker_property/xml_spec.rb | 55 - .../provider/pacemaker_resource/xml_spec.rb | 510 ---------- .../pacemaker_resource_default/pcs_spec.rb | 54 - .../pacemaker_resource_default/xml_spec.rb | 55 - .../puppet/provider/pacemaker_xml_spec.rb | 914 ----------------- spec/unit/puppet/provider/pcmk_common_spec.rb | 65 -- .../provider/service/pacemaker_xml_spec.rb | 340 ------- .../puppet/type/pacemaker_colocation_spec.rb | 102 -- .../puppet/type/pacemaker_location_spec.rb | 215 ---- spec/unit/puppet/type/pacemaker_nodes_spec.rb | 52 - .../unit/puppet/type/pacemaker_online_spec.rb | 34 - .../type/pacemaker_operation_default_spec.rb | 45 - spec/unit/puppet/type/pacemaker_order_spec.rb | 99 -- .../puppet/type/pacemaker_pcsd_auth_spec.rb | 55 - .../puppet/type/pacemaker_property_spec.rb | 45 - .../type/pacemaker_resource_default_spec.rb | 45 - .../puppet/type/pacemaker_resource_spec.rb | 178 ---- .../type/pacemaker_colocation_spec.rb | 31 - .../type/pacemaker_location_spec.rb | 67 -- .../type/pacemaker_operation_default_spec.rb | 27 - .../serverspec/type/pacemaker_order_spec.rb | 58 -- .../type/pacemaker_property_spec.rb | 35 - .../type/pacemaker_resource_default_spec.rb | 27 - .../type/pacemaker_resource_spec.rb | 178 ---- templates/corosync.conf.erb | 181 ---- templates/debian/cman_default.erb | 2 - templates/debian/corosync_default.erb | 2 - templates/debian/pacemaker_default.erb | 1 - templates/debian/pacemaker_service.erb | 5 - templates/ocf_handler.sh.erb | 133 --- tox.ini | 12 - 372 files changed, 10 insertions(+), 40425 deletions(-) delete mode 100644 .fixtures.yml delete mode 100644 .gitignore delete mode 100644 .rspec delete mode 100644 .rubocop.yml delete mode 100644 .travis.yml delete mode 100644 .zuul.yaml delete mode 100644 CONTRIBUTING.md delete mode 100644 Gemfile delete mode 100644 LICENSE delete mode 100644 README.md create mode 100644 README.rst delete mode 100644 Rakefile delete mode 100644 TODO.md delete mode 100755 agent_generator/agent_generator.rb delete mode 100755 agent_generator/generate_manifests.sh delete mode 100644 agent_generator/src_xml/fence_amt.xml delete mode 100644 agent_generator/src_xml/fence_apc.xml delete mode 100644 agent_generator/src_xml/fence_apc_snmp.xml delete mode 100644 agent_generator/src_xml/fence_bladecenter.xml delete mode 100644 agent_generator/src_xml/fence_brocade.xml delete mode 100644 agent_generator/src_xml/fence_cisco_mds.xml delete mode 
100644 agent_generator/src_xml/fence_cisco_ucs.xml delete mode 100644 agent_generator/src_xml/fence_compute.xml delete mode 100644 agent_generator/src_xml/fence_crosslink.xml delete mode 100644 agent_generator/src_xml/fence_drac5.xml delete mode 100644 agent_generator/src_xml/fence_eaton_snmp.xml delete mode 100644 agent_generator/src_xml/fence_eps.xml delete mode 100644 agent_generator/src_xml/fence_hpblade.xml delete mode 100644 agent_generator/src_xml/fence_ibmblade.xml delete mode 100644 agent_generator/src_xml/fence_idrac.xml delete mode 100644 agent_generator/src_xml/fence_ifmib.xml delete mode 100644 agent_generator/src_xml/fence_ilo.xml delete mode 100644 agent_generator/src_xml/fence_ilo2.xml delete mode 100644 agent_generator/src_xml/fence_ilo3.xml delete mode 100644 agent_generator/src_xml/fence_ilo4.xml delete mode 100644 agent_generator/src_xml/fence_ilo_mp.xml delete mode 100644 agent_generator/src_xml/fence_imm.xml delete mode 100644 agent_generator/src_xml/fence_intelmodular.xml delete mode 100644 agent_generator/src_xml/fence_ipdu.xml delete mode 100644 agent_generator/src_xml/fence_ipmilan.xml delete mode 100644 agent_generator/src_xml/fence_ironic.xml delete mode 100644 agent_generator/src_xml/fence_kdump.xml delete mode 100644 agent_generator/src_xml/fence_kubevirt.xml delete mode 100644 agent_generator/src_xml/fence_redfish.xml delete mode 100644 agent_generator/src_xml/fence_rhevm.xml delete mode 100644 agent_generator/src_xml/fence_rsb.xml delete mode 100644 agent_generator/src_xml/fence_scsi.xml delete mode 100644 agent_generator/src_xml/fence_virt.xml delete mode 100644 agent_generator/src_xml/fence_vmware_soap.xml delete mode 100644 agent_generator/src_xml/fence_watchdog.xml delete mode 100644 agent_generator/src_xml/fence_wti.xml delete mode 100644 agent_generator/src_xml/fence_xvm.xml delete mode 100755 agent_generator/update_sources.sh delete mode 100644 agent_generator/variables.sh delete mode 100644 bindep.txt delete mode 100644 doc/requirements.txt delete mode 100644 examples/pacemaker/host.pp delete mode 100644 examples/pacemaker/setup.pp delete mode 100644 examples/pacemaker_colocation/create.pp delete mode 100644 examples/pacemaker_colocation/delete.pp delete mode 100755 examples/pacemaker_colocation/show.sh delete mode 100644 examples/pacemaker_colocation/update.pp delete mode 100644 examples/pacemaker_location/create.pp delete mode 100644 examples/pacemaker_location/delete.pp delete mode 100755 examples/pacemaker_location/show.sh delete mode 100644 examples/pacemaker_location/update.pp delete mode 100644 examples/pacemaker_operation_default/create.pp delete mode 100644 examples/pacemaker_operation_default/delete.pp delete mode 100755 examples/pacemaker_operation_default/show.sh delete mode 100644 examples/pacemaker_operation_default/update.pp delete mode 100644 examples/pacemaker_order/create.pp delete mode 100644 examples/pacemaker_order/delete.pp delete mode 100755 examples/pacemaker_order/show.sh delete mode 100644 examples/pacemaker_order/update.pp delete mode 100644 examples/pacemaker_property/create.pp delete mode 100644 examples/pacemaker_property/delete.pp delete mode 100755 examples/pacemaker_property/show.sh delete mode 100644 examples/pacemaker_property/update.pp delete mode 100644 examples/pacemaker_resource/create.pp delete mode 100644 examples/pacemaker_resource/delete.pp delete mode 100755 examples/pacemaker_resource/show.sh delete mode 100644 examples/pacemaker_resource/update.pp delete mode 100644 
examples/pacemaker_resource_default/create.pp delete mode 100644 examples/pacemaker_resource_default/delete.pp delete mode 100755 examples/pacemaker_resource_default/show.sh delete mode 100644 examples/pacemaker_resource_default/update.pp delete mode 100644 examples/service/clean.pp delete mode 100644 examples/service/start.pp delete mode 100644 examples/service/stop.pp delete mode 100644 lib/facter/pacemaker_node_name.rb delete mode 100644 lib/pacemaker/options.rb delete mode 100644 lib/pacemaker/options.yaml delete mode 100644 lib/pacemaker/pcs/cluster_property.rb delete mode 100644 lib/pacemaker/pcs/common.rb delete mode 100644 lib/pacemaker/pcs/operation_default.rb delete mode 100644 lib/pacemaker/pcs/pcsd_auth.rb delete mode 100644 lib/pacemaker/pcs/resource_default.rb delete mode 100644 lib/pacemaker/type.rb delete mode 100644 lib/pacemaker/wait.rb delete mode 100644 lib/pacemaker/xml/cib.rb delete mode 100644 lib/pacemaker/xml/constraint_colocations.rb delete mode 100644 lib/pacemaker/xml/constraint_locations.rb delete mode 100644 lib/pacemaker/xml/constraint_orders.rb delete mode 100644 lib/pacemaker/xml/constraints.rb delete mode 100644 lib/pacemaker/xml/debug.rb delete mode 100644 lib/pacemaker/xml/helpers.rb delete mode 100644 lib/pacemaker/xml/nodes.rb delete mode 100644 lib/pacemaker/xml/operation_default.rb delete mode 100644 lib/pacemaker/xml/primitives.rb delete mode 100644 lib/pacemaker/xml/properties.rb delete mode 100644 lib/pacemaker/xml/resource_default.rb delete mode 100644 lib/pacemaker/xml/status.rb delete mode 100644 lib/pacemaker/xml/xml.rb delete mode 100644 lib/puppet/parser/functions/pacemaker_cluster_nodes.rb delete mode 100644 lib/puppet/parser/functions/pacemaker_cluster_options.rb delete mode 100644 lib/puppet/parser/functions/pacemaker_resource_parameters.rb delete mode 100644 lib/puppet/parser/functions/pcmk_cluster_setup.rb delete mode 100644 lib/puppet/parser/functions/pcmk_nodes_added.rb delete mode 100644 lib/puppet/provider/pacemaker_colocation/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_colocation/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_location/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_location/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_nodes/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_nodes/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_online/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_online/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_operation_default/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_operation_default/pacemaker_pcs.rb delete mode 100644 lib/puppet/provider/pacemaker_operation_default/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_order/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_order/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_pcs.rb delete mode 100644 lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_pcs.rb delete mode 100644 lib/puppet/provider/pacemaker_property/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_property/pacemaker_pcs.rb delete mode 100644 lib/puppet/provider/pacemaker_property/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_resource/pacemaker_noop.rb delete mode 100644 
lib/puppet/provider/pacemaker_resource/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_resource_default/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/pacemaker_resource_default/pacemaker_pcs.rb delete mode 100644 lib/puppet/provider/pacemaker_resource_default/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pacemaker_xml.rb delete mode 100644 lib/puppet/provider/pcmk_bundle/default.rb delete mode 100644 lib/puppet/provider/pcmk_common.rb delete mode 100644 lib/puppet/provider/pcmk_constraint/default.rb delete mode 100644 lib/puppet/provider/pcmk_property/default.rb delete mode 100644 lib/puppet/provider/pcmk_remote/default.rb delete mode 100644 lib/puppet/provider/pcmk_resource/default.rb delete mode 100644 lib/puppet/provider/pcmk_resource_default/pcs.rb delete mode 100644 lib/puppet/provider/pcmk_resource_op_default/pcs.rb delete mode 100644 lib/puppet/provider/pcmk_stonith/default.rb delete mode 100644 lib/puppet/provider/pcmk_stonith_level/default.rb delete mode 100644 lib/puppet/provider/service/pacemaker_noop.rb delete mode 100644 lib/puppet/provider/service/pacemaker_xml.rb delete mode 100644 lib/puppet/type/pacemaker_colocation.rb delete mode 100644 lib/puppet/type/pacemaker_location.rb delete mode 100644 lib/puppet/type/pacemaker_nodes.rb delete mode 100644 lib/puppet/type/pacemaker_online.rb delete mode 100644 lib/puppet/type/pacemaker_operation_default.rb delete mode 100644 lib/puppet/type/pacemaker_order.rb delete mode 100644 lib/puppet/type/pacemaker_pcsd_auth.rb delete mode 100644 lib/puppet/type/pacemaker_property.rb delete mode 100644 lib/puppet/type/pacemaker_resource.rb delete mode 100644 lib/puppet/type/pacemaker_resource_default.rb delete mode 100644 lib/puppet/type/pcmk_bundle.rb delete mode 100644 lib/puppet/type/pcmk_constraint.rb delete mode 100644 lib/puppet/type/pcmk_property.rb delete mode 100644 lib/puppet/type/pcmk_remote.rb delete mode 100644 lib/puppet/type/pcmk_resource.rb delete mode 100644 lib/puppet/type/pcmk_resource_default.rb delete mode 100644 lib/puppet/type/pcmk_resource_op_default.rb delete mode 100644 lib/puppet/type/pcmk_stonith.rb delete mode 100644 lib/puppet/type/pcmk_stonith_level.rb delete mode 100644 lib/serverspec/type/pacemaker_colocation.rb delete mode 100644 lib/serverspec/type/pacemaker_location.rb delete mode 100644 lib/serverspec/type/pacemaker_operation_default.rb delete mode 100644 lib/serverspec/type/pacemaker_order.rb delete mode 100644 lib/serverspec/type/pacemaker_property.rb delete mode 100644 lib/serverspec/type/pacemaker_resource.rb delete mode 100644 lib/serverspec/type/pacemaker_resource_default.rb delete mode 100644 lib/serverspec/type/pacemaker_xml.rb delete mode 100644 lib/tools/console.rb delete mode 100644 lib/tools/provider.rb delete mode 100644 lib/tools/status.rb delete mode 100644 manifests/constraint/base.pp delete mode 100644 manifests/constraint/colocation.pp delete mode 100644 manifests/constraint/location.pp delete mode 100644 manifests/constraint/order.pp delete mode 100644 manifests/contain.pp delete mode 100644 manifests/corosync.pp delete mode 100644 manifests/init.pp delete mode 100644 manifests/install.pp delete mode 100644 manifests/new.pp delete mode 100644 manifests/new/firewall.pp delete mode 100644 manifests/new/install.pp delete mode 100644 manifests/new/params.pp delete mode 100644 manifests/new/resource/filesystem.pp delete mode 100644 manifests/new/resource/ip.pp delete mode 100644 manifests/new/resource/route.pp delete mode 100644 manifests/new/service.pp 
delete mode 100644 manifests/new/setup.pp delete mode 100644 manifests/new/setup/auth_key.pp delete mode 100644 manifests/new/setup/config.pp delete mode 100644 manifests/new/setup/debian.pp delete mode 100644 manifests/new/setup/pcsd.pp delete mode 100644 manifests/new/wrapper.pp delete mode 100644 manifests/params.pp delete mode 100644 manifests/property.pp delete mode 100644 manifests/remote.pp delete mode 100644 manifests/resource/bundle.pp delete mode 100644 manifests/resource/filesystem.pp delete mode 100644 manifests/resource/ip.pp delete mode 100644 manifests/resource/lsb.pp delete mode 100644 manifests/resource/ocf.pp delete mode 100644 manifests/resource/remote.pp delete mode 100644 manifests/resource/route.pp delete mode 100644 manifests/resource/service.pp delete mode 100644 manifests/resource/systemd.pp delete mode 100644 manifests/resource_defaults.pp delete mode 100644 manifests/resource_op_defaults.pp delete mode 100644 manifests/service.pp delete mode 100644 manifests/stonith.pp delete mode 100644 manifests/stonith/.gitkeep delete mode 100644 manifests/stonith/fence_amt.pp delete mode 100644 manifests/stonith/fence_apc.pp delete mode 100644 manifests/stonith/fence_apc_snmp.pp delete mode 100644 manifests/stonith/fence_bladecenter.pp delete mode 100644 manifests/stonith/fence_brocade.pp delete mode 100644 manifests/stonith/fence_cisco_mds.pp delete mode 100644 manifests/stonith/fence_cisco_ucs.pp delete mode 100644 manifests/stonith/fence_compute.pp delete mode 100644 manifests/stonith/fence_crosslink.pp delete mode 100644 manifests/stonith/fence_drac5.pp delete mode 100644 manifests/stonith/fence_eaton_snmp.pp delete mode 100644 manifests/stonith/fence_eps.pp delete mode 100644 manifests/stonith/fence_hpblade.pp delete mode 100644 manifests/stonith/fence_ibmblade.pp delete mode 100644 manifests/stonith/fence_idrac.pp delete mode 100644 manifests/stonith/fence_ifmib.pp delete mode 100644 manifests/stonith/fence_ilo.pp delete mode 100644 manifests/stonith/fence_ilo2.pp delete mode 100644 manifests/stonith/fence_ilo3.pp delete mode 100644 manifests/stonith/fence_ilo4.pp delete mode 100644 manifests/stonith/fence_ilo_mp.pp delete mode 100644 manifests/stonith/fence_imm.pp delete mode 100644 manifests/stonith/fence_intelmodular.pp delete mode 100644 manifests/stonith/fence_ipdu.pp delete mode 100644 manifests/stonith/fence_ipmilan.pp delete mode 100644 manifests/stonith/fence_ironic.pp delete mode 100644 manifests/stonith/fence_kdump.pp delete mode 100644 manifests/stonith/fence_kubevirt.pp delete mode 100644 manifests/stonith/fence_redfish.pp delete mode 100644 manifests/stonith/fence_rhevm.pp delete mode 100644 manifests/stonith/fence_rsb.pp delete mode 100644 manifests/stonith/fence_scsi.pp delete mode 100644 manifests/stonith/fence_virt.pp delete mode 100644 manifests/stonith/fence_vmware_soap.pp delete mode 100644 manifests/stonith/fence_watchdog.pp delete mode 100644 manifests/stonith/fence_wti.pp delete mode 100644 manifests/stonith/fence_xvm.pp delete mode 100644 manifests/stonith/level.pp delete mode 100644 metadata.json delete mode 100644 releasenotes/notes/bundle_support-2fec55cad0f44ace.yaml delete mode 100644 releasenotes/notes/feature_add_xenial_support-6ff98a4d9fd83e62.yaml delete mode 100644 releasenotes/notes/fence_kubevirt-435249b87ac6c4a1.yaml delete mode 100644 releasenotes/notes/pcsd_bind_addr-2e4c6da53262f72a.yaml delete mode 100644 releasenotes/notes/update-resources-ae2ef4f9e75e8699.yaml delete mode 100644 
releasenotes/notes/verify_on_create_remote-b4abeace7018480d.yaml delete mode 100644 releasenotes/source/0.6.x.rst delete mode 100644 releasenotes/source/0.7.x.rst delete mode 100644 releasenotes/source/1.1.x.rst delete mode 100644 releasenotes/source/1.5.x.rst delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 spec/acceptance/nodesets/disabled/centos-511-x64.yml delete mode 100644 spec/acceptance/nodesets/disabled/centos-66-x64-pe.yml delete mode 100644 spec/acceptance/nodesets/disabled/centos-66-x64.yml delete mode 100644 spec/acceptance/nodesets/disabled/centos-72-x64.yml delete mode 100644 spec/acceptance/nodesets/disabled/debian-78-x64.yml delete mode 100644 spec/acceptance/nodesets/disabled/debian-82-x64.yml delete mode 100644 spec/acceptance/nodesets/disabled/ubuntu-server-1204-x64.yml delete mode 100644 spec/acceptance/nodesets/disabled/ubuntu-server-1404-x64.yml delete mode 100644 spec/acceptance/nodesets/nodepool-centos7.yml delete mode 100644 spec/acceptance/nodesets/nodepool-trusty.yml delete mode 100644 spec/acceptance/nodesets/nodepool-xenial.yml delete mode 100644 spec/acceptance/nodesets/vagrant-centos-7.2-64.yml delete mode 100644 spec/acceptance/nodesets/vagrant-ubuntu-14.04-64.yml delete mode 100644 spec/acceptance/pacemaker_colocation_spec.rb delete mode 100644 spec/acceptance/pacemaker_location_spec.rb delete mode 100644 spec/acceptance/pacemaker_node_name_spec.rb delete mode 100644 spec/acceptance/pacemaker_operation_default_spec.rb delete mode 100644 spec/acceptance/pacemaker_order_spec.rb delete mode 100644 spec/acceptance/pacemaker_property_spec.rb delete mode 100644 spec/acceptance/pacemaker_resource_default_spec.rb delete mode 100644 spec/acceptance/pacemaker_resource_spec.rb delete mode 100644 spec/acceptance/pacemaker_service_spec.rb delete mode 100644 spec/acceptance/pacemaker_setup_spec.rb delete mode 100644 spec/classes/pacemaker_firewall_spec.rb delete mode 100644 spec/classes/pacemaker_install_spec.rb delete mode 100644 spec/classes/pacemaker_resource_defaults_spec.rb delete mode 100644 spec/classes/pacemaker_resource_op_defaults_spec.rb delete mode 100644 spec/classes/pacemaker_service_spec.rb delete mode 100644 spec/classes/pacemaker_setup_auth_key_spec.rb delete mode 100644 spec/classes/pacemaker_setup_config_spec.rb delete mode 100644 spec/classes/pacemaker_setup_debian_spec.rb delete mode 100644 spec/classes/pacemaker_setup_pcsd_spec.rb delete mode 100644 spec/classes/pacemaker_setup_spec.rb delete mode 100644 spec/classes/pacemaker_spec.rb delete mode 100644 spec/defines/pacemaker_contain_spec.rb delete mode 100644 spec/defines/pacemaker_resource_filesystem_spec.rb delete mode 100644 spec/defines/pacemaker_resource_ip_spec.rb delete mode 100644 spec/defines/pacemaker_resource_route_spec.rb delete mode 100644 spec/defines/pacemaker_stonith_fence_ipmilan_spec.rb delete mode 100644 spec/defines/pacemaker_stonith_level_spec.rb delete mode 100644 spec/defines/pacemaker_wrapper_spec.rb delete mode 100644 spec/functions/pacemaker_cluster_nodes_spec.rb delete mode 100644 spec/functions/pacemaker_cluster_options_spec.rb delete mode 100644 spec/functions/pacemaker_resource_parameters_spec.rb delete mode 100644 spec/functions/pcmk_cluster_setup_spec.rb delete mode 100644 spec/functions/pcmk_nodes_added_spec.rb delete mode 100644 
spec/spec_helper.rb delete mode 100644 spec/spec_helper_acceptance.rb delete mode 100644 spec/unit/puppet/provider/cib-orig.xml delete mode 100644 spec/unit/puppet/provider/cib.xml delete mode 100644 spec/unit/puppet/provider/pacemaker_colocation/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_location/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_nodes/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_noop_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_online/xmk_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_operation_default/pcs_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_operation_default/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_order/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_pcs_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_pcsd_auth/pcs_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_property/pcs_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_property/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_resource/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_resource_default/pcs_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_resource_default/xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pacemaker_xml_spec.rb delete mode 100644 spec/unit/puppet/provider/pcmk_common_spec.rb delete mode 100644 spec/unit/puppet/provider/service/pacemaker_xml_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_colocation_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_location_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_nodes_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_online_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_operation_default_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_order_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_pcsd_auth_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_property_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_resource_default_spec.rb delete mode 100644 spec/unit/puppet/type/pacemaker_resource_spec.rb delete mode 100644 spec/unit/serverspec/type/pacemaker_colocation_spec.rb delete mode 100644 spec/unit/serverspec/type/pacemaker_location_spec.rb delete mode 100644 spec/unit/serverspec/type/pacemaker_operation_default_spec.rb delete mode 100644 spec/unit/serverspec/type/pacemaker_order_spec.rb delete mode 100644 spec/unit/serverspec/type/pacemaker_property_spec.rb delete mode 100644 spec/unit/serverspec/type/pacemaker_resource_default_spec.rb delete mode 100644 spec/unit/serverspec/type/pacemaker_resource_spec.rb delete mode 100644 templates/corosync.conf.erb delete mode 100644 templates/debian/cman_default.erb delete mode 100644 templates/debian/corosync_default.erb delete mode 100644 templates/debian/pacemaker_default.erb delete mode 100644 templates/debian/pacemaker_service.erb delete mode 100644 templates/ocf_handler.sh.erb delete mode 100644 tox.ini diff --git a/.fixtures.yml b/.fixtures.yml deleted file mode 100644 index 5cb13039..00000000 --- a/.fixtures.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixtures: - repositories: - stdlib: - repo: "https://github.com/puppetlabs/puppetlabs-stdlib" - ref: "4.16.0" - firewall: "https://github.com/puppetlabs/puppetlabs-firewall" - symlinks: - pacemaker: "#{source_dir}" diff --git a/.gitignore b/.gitignore deleted file mode 100644 index c178b316..00000000 --- 
a/.gitignore +++ /dev/null @@ -1,27 +0,0 @@ -# Add patterns in here to exclude files created by tools integrated with this -# repository, such as test frameworks from the project's recommended workflow, -# rendered documentation and package builds. -# -# Don't add patterns to exclude files created by preferred personal tools -# (editors, IDEs, your operating system itself even). These should instead be -# maintained outside the repository, for example in a ~/.gitignore file added -# with: -# -# git config --global core.excludesfile '~/.gitignore' - -pkg/ -Gemfile.lock -vendor/ -spec/fixtures/ -.vagrant/ -.bundle/ -coverage/ -log/ -.idea/ -*.iml -.tox/ - -# documentation -.yardoc -.yardwarns -strings.json diff --git a/.rspec b/.rspec deleted file mode 100644 index 8c18f1ab..00000000 --- a/.rspec +++ /dev/null @@ -1,2 +0,0 @@ ---format documentation ---color diff --git a/.rubocop.yml b/.rubocop.yml deleted file mode 100644 index 395ff736..00000000 --- a/.rubocop.yml +++ /dev/null @@ -1,87 +0,0 @@ -AllCops: - Include: - - ./**/*.rb - Exclude: - - vendor/**/* - - pkg/**/* - - spec/fixtures/**/* - -# Configuration parameters: AllowURI, URISchemes. -Metrics/LineLength: - Max: 328 - -# 'Complexity' is very relative -Metrics/PerceivedComplexity: - Enabled: false - -# 'Complexity' is very relative -Metrics/CyclomaticComplexity: - Enabled: false - -# 'Complexity' is very relative -Metrics/AbcSize: - Enabled: false - -# Method length is not necessarily an indicator of code quality -Metrics/MethodLength: - Enabled: false - -# Module length is not necessarily an indicator of code quality -Metrics/ModuleLength: - Enabled: false - -# Class length is not necessarily an indicator of code quality -Metrics/ClassLength: - Enabled: false - -# dealbreaker: -Style/TrailingCommaInArguments: - Enabled: false -Style/TrailingCommaInLiteral: - Enabled: false -Style/ClosingParenthesisIndentation: - Enabled: false - -Lint/AmbiguousRegexpLiteral: - Enabled: true -Style/RegexpLiteral: - Enabled: true -Style/WordArray: - Enabled: true - -# this catches the cases of using `module` for parser functions, types, or -# providers -Style/ClassAndModuleChildren: - Enabled: false - -Style/Documentation: - Description: 'Document classes and non-namespace modules.' 
- Enabled: false - -# More comfortable block layouts -Style/BlockDelimiters: - Enabled: false - -Style/MultilineBlockLayout: - Enabled: false - -Style/GuardClause: - Enabled: false - -Style/NestedParenthesizedCalls: - Enabled: false - -Style/ClassAndModuleCamelCase: - Enabled: false - -Style/PredicateName: - Enabled: false - -Style/VariableName: - Enabled: false - -Style/MethodName: - Enabled: false - -Style/FormatString: - Enabled: false diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index abf320c8..00000000 --- a/.travis.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -sudo: false -language: ruby -cache: bundler -bundler_args: --without system_tests -before_install: rm Gemfile.lock || true -script: - - 'bundle exec rake $CHECK' -matrix: - fast_finish: true - include: - - rvm: 1.9.3 - env: PUPPET_VERSION="~> 3.0" STRICT_VARIABLES="yes" CHECK=test - - rvm: 2.1.8 - env: PUPPET_VERSION="~> 3.0" STRICT_VARIABLES="yes" CHECK=test - - rvm: 1.9.3 - env: PUPPET_VERSION="~> 3.0" STRICT_VARIABLES="yes" CHECK=test FUTURE_PARSER=yes - - rvm: 2.1.8 - env: PUPPET_VERSION="~> 3.0" STRICT_VARIABLES="yes" CHECK=test FUTURE_PARSER=yes - - rvm: 2.1.8 - env: PUPPET_VERSION="~> 4.0" STRICT_VARIABLES="yes" CHECK=test - - rvm: 2.2.4 - env: PUPPET_VERSION="~> 4.0" STRICT_VARIABLES="yes" CHECK=test - - rvm: 2.2.4 - env: PUPPET_VERSION="~> 4.0" STRICT_VARIABLES="yes" CHECK=rubocop - - rvm: 2.3.0 - env: PUPPET_VERSION="~> 4.0" STRICT_VARIABLES="yes" CHECK=test - allow_failures: - - rvm: 2.3.0 - env: PUPPET_VERSION="~> 4.0" STRICT_VARIABLES="yes" CHECK=test -notifications: - email: false -deploy: - provider: puppetforge - user: puppet - password: - secure: "" - on: - tags: true - # all_branches is required to use tags - all_branches: true - # Only publish if our main Ruby target builds - rvm: 1.9.3 - condition: "$FUTURE_PARSER = yes" diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 09d300df..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,19 +0,0 @@ -- project: - queue: tripleo - templates: - - puppet-openstack-check-jobs - - puppet-openstack-module-unit-jobs - - release-notes-jobs-python3 - check: - jobs: - - puppet-openstack-unit-7.16-centos-9-stream: - voting: false - - tripleo-ci-centos-9-scenario004-standalone: &scen4_vars - vars: - build_container_images: true - containers_base_image: registry.access.redhat.com/ubi9:latest - gate: - jobs: - - puppet-openstack-unit-7.16-centos-9-stream: - voting: false - - tripleo-ci-centos-9-scenario004-standalone: *scen4_vars diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 00deb27d..00000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,96 +0,0 @@ -This module has grown over time based on a range of contributions from -people using it. If you follow these contributing guidelines your patch -will likely make it into a release a little quicker. - - -## Contributing - -Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. [Contributor Code of Conduct](https://voxpupuli.org/coc/). - -1. Fork the repo. - -1. Create a separate branch for your change. - -1. Run the tests. We only take pull requests with passing tests, and - documentation. - -1. Add a test for your change. Only refactoring and documentation - changes require no new tests. If you are adding functionality - or fixing a bug, please add a test. - -1. Squash your commits down into logical components. Make sure to rebase - against the current master. - -1. 
Push the branch to your fork and submit a pull request. - -Please be prepared to repeat some of these steps as our contributors review -your code. - -## Dependencies - -The testing and development tools have a bunch of dependencies, -all managed by [bundler](http://bundler.io/) according to the -[Puppet support matrix](http://docs.puppetlabs.com/guides/platforms.html#ruby-versions). - -By default the tests use a baseline version of Puppet. - -If you have Ruby 2.x or want a specific version of Puppet, -you must set an environment variable such as: - - export PUPPET_VERSION="~> 4.2.0" - -Install the dependencies like so... - - bundle install - -## Syntax and style - -The test suite will run [Puppet Lint](http://puppet-lint.com/) and -[Puppet Syntax](https://github.com/gds-operations/puppet-syntax) to -check various syntax and style things. You can run these locally with: - - bundle exec rake lint - bundle exec rake validate - -## Running the unit tests - -The unit test suite covers most of the code; as mentioned above, please -add tests if you're adding new functionality. If you've not used -[rspec-puppet](http://rspec-puppet.com/) before then feel free to ask -about how best to test your new feature. - -To run all the unit tests: - - bundle exec rake spec SPEC_OPTS='--format documentation' - -To run a specific spec test set the `SPEC` variable: - - bundle exec rake spec SPEC=spec/foo_spec.rb - -To run the linter, the syntax checker and the unit tests: - - bundle exec rake test - - -## Integration tests - -The unit tests just check the code runs, not that it does exactly what -we want on a real machine. For that we're using -[beaker](https://github.com/puppetlabs/beaker). - -This fires up a new virtual machine (using vagrant) and runs a series of -simple tests against it after applying the module. You can run this -with: - - bundle exec rake acceptance - -This will run the tests on an Ubuntu 12.04 virtual machine. You can also -run the integration tests against CentOS 6.5 with: - - BEAKER_set=centos-64-x64 bundle exec rake acceptance - -If you don't want to have to recreate the virtual machine every time you -can use `BEAKER_DESTROY=no` and `BEAKER_PROVISION=no`. On the first run you will -at least need `BEAKER_PROVISION` set to yes (the default). The Vagrantfile -for the created virtual machines will be in `.vagrant/beaker_vagrant_files`.
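For example, assuming a virtual machine already provisioned by a previous run, the two variables can be combined with the acceptance task on one command line (an illustrative invocation, not taken from the original document):

    BEAKER_DESTROY=no BEAKER_PROVISION=no bundle exec rake acceptance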
- diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 838c70a5..00000000 --- a/Gemfile +++ /dev/null @@ -1,83 +0,0 @@ -source ENV['GEM_SOURCE'] || "https://rubygems.org" - -def location_for(place, fake_version = nil) - if place =~ /^(git[:@][^#]*)#(.*)/ - [fake_version, { :git => $1, :branch => $2, :require => false }].compact - elsif place =~ /^file:\/\/(.*)/ - ['>= 0', { :path => File.expand_path($1), :require => false }] - else - [place, { :require => false }] - end -end - -group :test do - gem 'net-telnet', '~> 0.1.1', :require => false - gem 'jwt', '~> 1.5.6', :require => false - gem 'nokogiri', '~> 1.6.0', :require => false - gem 'rake', '< 13.0.0', :require => false - gem 'dry-inflector', '~> 0.1.2', :require => false - gem 'signet', '~> 0.11.0', :require => false - gem 'minitest', '~> 5.11.3', :require => false - gem 'mocha', '< 2.0.0', :require => false - gem 'rspec-puppet', :require => false - gem 'puppet-lint', '~> 1.1.0', :require => false - gem 'metadata-json-lint', :require => false - # rspec-puppet-facts >= 1.9.5 requires ruby 2.1.0+ - if RUBY_VERSION.to_f >= 2.1 - gem 'rspec-puppet-facts', :require => false - else - gem 'rspec-puppet-facts', '< 1.9.5', :require => false - end - gem 'rspec', :require => false - gem 'rspec-puppet-utils', :require => false - gem 'puppet-lint-absolute_classname-check', '~> 2.0.0', :require => false - gem 'puppet-lint-leading_zero-check', :require => false - gem 'puppet-lint-trailing_comma-check', :require => false - gem 'puppet-lint-version_comparison-check', :require => false - gem 'puppet-lint-classes_and_types_beginning_with_digits-check', :require => false - gem 'puppet-lint-unquoted_string-check', :require => false - gem 'puppet-lint-variable_contains_upcase', :require => false - gem 'unicode-display_width', :require => false - gem 'puppetlabs_spec_helper', :require => false - gem 'serverspec', :require => false -end - -group :development do - gem 'pry' -end - -group :system_tests do - #TODO: to be removed when - #https://tickets.puppetlabs.com/browse/BKR-851 is resolved. - gem 'specinfra', '= 2.59.0' - - if beaker_version = ENV['BEAKER_VERSION'] - gem 'beaker', *location_for(beaker_version) - else - #NOTE(aschultz): beaker > 3.1.0 requires ruby 2.2.x and 3.0.0 had a bad - # serverspec reference. So pin to less than 3.0.0 - gem 'beaker', '< 3.0.0', :require => false - end - if beaker_rspec_version = ENV['BEAKER_RSPEC_VERSION'] - gem 'beaker-rspec', *location_for(beaker_rspec_version) - else - gem 'beaker-rspec', :require => false - end - gem 'beaker-puppet_install_helper', :require => false -end - - - -if facterversion = ENV['FACTER_GEM_VERSION'] - gem 'facter', facterversion.to_s, :require => false, :groups => [:test] -else - gem 'facter', :require => false, :groups => [:test] -end - -if puppetversion = ENV['PUPPET_GEM_VERSION'] - gem 'puppet', puppetversion, :require => false, :groups => [:test] -else - gem 'puppet', :require => false, :groups => [:test] -end - -# vim:ft=ruby diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 8d968b6c..00000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/README.md b/README.md
deleted file mode 100644
index 518da46d..00000000
--- a/README.md
+++ /dev/null
@@ -1,946 +0,0 @@
-# Puppet Pacemaker module
-
-This Puppet module is intended to work with the running Pacemaker cluster to manage its configuration. It can create, update and remove most of the configuration objects and query their status.
-
-The interface of the Puppet types in the module is loosely based on the **puppetlabs/corosync** types with the *cs_* prefix changed to the *pacemaker_* prefix, but it has been significantly reworked and is not compatible.
-
-**puppet-pacemaker** is much more sophisticated than the **puppetlabs/corosync** module and provides many debugging features, checks and configuration options, and it can work even when Puppet is being run on many cluster nodes at the same time and without either **crm** or **pcs** being installed.
-
-## License
-Apache 2.0
-
-# Pacemaker types
-
-These types are used to configure Pacemaker objects and are the core of this module. You can find some "interactive examples" of their usage in the *examples* folder.
-
-## pacemaker_resource
-
-This is the most important resource type. It creates, updates and removes Pacemaker primitives.
-
-### Parameters
-
-#### primitive_class
-
-The basic class of this primitive. It can be *ocf*, *lsb*, *systemd* or some others.
-Default: ocf
-
-#### primitive_provider
-
-The provider or vendor of the primitive. For the OCF class it can be *pacemaker*, *heartbeat* or a vendor-specific value.
-Default: pacemaker
-
-#### primitive_type
-
-The actual provider script or service to use. Should be equal to the OCF file name, or to the service name if other classes are used.
-Default: Stateful
-
-#### parameters
-
-The Hash of resource instance attribute names and their values. These attributes are used to configure the running service and, usually, only the OCF class supports them.
-
-Example:
-
-```puppet
-{
-  'a' => '1',
-  'b' => '2',
-},
-```
-
-#### operation
-
-This data structure describes this primitive's operations and timeouts.
-
-Example:
-
-```puppet
-{
-  'monitor' => {
-    'interval' => '20',
-    'timeout' => '10'
-  },
-  'start' => {
-    'timeout' => '30'
-  },
-  'stop' => {
-    'timeout' => '30'
-  },
-}
-```
-
-Using an array and multiple monitors:
-
-```puppet
-[
-  {
-    'name' => 'monitor',
-    'interval' => '10',
-    'timeout' => '10',
-  },
-  {
-    'name' => 'monitor',
-    'interval' => '60',
-    'timeout' => '10',
-  },
-  {
-    'name' => 'start',
-    'timeout' => '30',
-  },
-  {
-    'name' => 'stop',
-    'timeout' => '30',
-  },
-]
-```
-
-#### metadata
-
-This hash can contain names and values of the primitive's meta attributes.
-
-Example:
-
-```puppet
-{
-  'migration-threshold' => '100',
-  'failure-timeout' => '120',
-}
-```
-
-#### complex_type
-
-A primitive can either be a *simple* one and run only as a single instance, or it can be a *clone* and have many instances, or it can be a *master* and be able to have master and slave states.
-Default: simple
-
-#### complex_metadata
-
-A hash of complex-type related metadata names and values.
-
-Example:
-
-```puppet
-{
-  'interleave' => true,
-  'master-max' => '1',
-}
-```
-
-#### debug
-
-This option makes supported providers omit any changes to the system. Providers will still retrieve the system state, compare it to the desired state from the catalog and try to sync the state if there are differences, but they will only show the destructive commands that they would be executing in the normal mode. It's better than Puppet's *noop* mode because it shows the sync actions and is useful for debugging.
-Default: false
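-
-Putting the parameters above together, a minimal declaration could look like the following sketch. The primitive name is purely illustrative and the attribute names simply follow the parameter headings above:
-
-```puppet
-pacemaker_resource { 'p_example' :
-  primitive_class    => 'ocf',
-  primitive_provider => 'pacemaker',
-  primitive_type     => 'Stateful',
-  parameters         => { 'a' => '1' },
-  operation          => {
-    'monitor' => { 'interval' => '20', 'timeout' => '10' },
-  },
-  metadata           => { 'migration-threshold' => '100' },
-  complex_type       => 'master',
-  complex_metadata   => { 'master-max' => '1' },
-}
-```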
-
-## pacemaker_location
-
-This type can manage location constraints, either the node and score based ones or the rule based ones. These constraints can control the primitive placement on nodes through priorities or rules.
-
-### Parameters
-
-#### primitive
-
-The name of the Pacemaker primitive of this location.
-
-#### score
-
-The score value for a node/score location.
-
-#### node
-
-The node name of the node/score location.
-
-#### rules
-
-The rules data structure.
-
-Example:
-
-```puppet
-[
-  {
-    'score' => '100',
-    'expressions' => [
-      {
-        'attribute' => 'test1',
-        'operation' => 'defined',
-      }
-    ]
-  },
-  {
-    'score' => '200',
-    'expressions' => [
-      {
-        'attribute' => 'test2',
-        'operation' => 'defined',
-      }
-    ]
-  }
-]
-```
-
-#### debug
-
-Don't actually make changes
-Default: false
-
-## pacemaker_colocation
-
-This type manages colocation constraints. If two resources are in a colocation they will always be on the same node. Note that colocation implies the start order because the second resource will always start after the first.
-
-### Parameters
-
-#### first
-
-The name of the first primitive
-
-#### second
-
-The name of the second primitive
-
-#### score
-
-The priority score of this constraint
-
-#### debug
-
-Don't actually make changes
-Default: false
-
-## pacemaker_order
-
-This type can manage the order constraints. These constraints control the start and stop ordering of resources. Order doesn't imply colocation and resources can run on different nodes.
-
-### Parameters
-
-#### first
-
-(Mandatory)
-The name of the first primitive.
-
-#### second
-
-(Mandatory)
-The name of the second primitive.
-
-#### score
-
-The priority score of this constraint. If greater than zero, the constraint is mandatory. Otherwise it is only a suggestion. Used for Pacemaker version 1.0 and below.
-Default: undef
-
-#### first_action
-
-The action that the first resource must complete before the second action can be initiated for the then resource. Allowed values: start, stop, promote, demote.
-Default: undef (means start)
-
-#### second_action
-
-The action that the then resource can execute only after the first action on the first resource has completed. Allowed values: start, stop, promote, demote.
-Default: undef (means the value of the first action)
-
-#### kind
-
-How to enforce the constraint. Allowed values:
-
-* **optional**: Just a suggestion. Only applies if both resources are executing the specified actions. Any change in state by the first resource will have no effect on the then resource.
-* **mandatory**: Always. If first does not perform first-action, then will not be allowed to perform then-action. If first is restarted, then (if running) will be stopped beforehand and started afterward.
-* **serialize**: Ensure that no two stop/start actions occur concurrently for the resources. First and then can start in either order, but one must complete starting before the other can be started. A typical use case is when resource start-up puts a high load on the host.
-
-Used only with Pacemaker version 1.1 and above.
-Default: undef
-
-#### symmetrical
-
-If true, the reverse of the constraint applies for the opposite action (for example, if B starts after A starts, then B stops before A stops).
-
-Default: undef (means true)
-
-#### require_all
-
-Whether all members of the set must be active before continuing.
-
-Default: undef (means true)
-
-#### debug
-
-Don't actually make changes
-Default: false
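-
-For example, the two constraint types described above could be combined to keep a virtual IP and a service together and start them in order. The resource names here are purely illustrative:
-
-```puppet
-pacemaker_colocation { 'service-with-vip' :
-  first  => 'vip__public',
-  second => 'p_service',
-  score  => '200',
-}
-
-pacemaker_order { 'vip-before-service' :
-  first  => 'vip__public',
-  second => 'p_service',
-  kind   => 'mandatory',
-}
-```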
-
-## pacemaker_operation_default
-
-This little type controls the default operation properties of the cluster resources. For example, you can set the default *timeout* for every operation without its own configured *timeout* value.
-
-### parameters
-
-#### name
-
-The default property name
-
-#### value
-
-The default property value
-
-#### debug
-
-Don't actually make changes
-Default: false
-
-Example:
-
-```puppet
-pacemaker_operation_default { 'timeout' : value => '30' }
-```
-
-## pacemaker_resource_default
-
-This little type controls the default meta-attributes of all resources without their own defined values.
-
-### parameters
-
-#### name
-
-The default property name
-
-#### value
-
-The default property value
-
-#### debug
-
-Don't actually make changes
-Default: false
-
-Example:
-
-```puppet
-pacemaker_resource_default { 'resource-stickiness' : value => '100' }
-```
-
-## pacemaker_property
-
-This tiny type can manage the cluster-wide properties.
-
-### parameters
-
-#### name
-
-The property name
-
-#### value
-
-The property value
-
-#### debug
-
-Don't actually make changes
-Default: false
-
-Example:
-
-```puppet
-pacemaker_property { 'stonith-enabled' :
-  value => false,
-}
-pacemaker_property { 'no-quorum-policy' :
-  value => 'ignore',
-}
-```
-
-
-## pacemaker_online
-
-This little resource can wait until the cluster has settled and is ready to be configured. It can be useful in some cases, perhaps as an anchor, but most other types' *xml* providers can wait for the cluster on their own.
-
-Example:
-
-```puppet
-pacemaker_online { 'setup-finished' :}
-```
-
-## service (pacemaker provider)
-
-This type uses the standard *service* type from the Puppet distribution but implements the custom *pacemaker* provider. It can be used to start and stop Pacemaker services the same way Puppet starts and stops system services.
-
-It can query the service status, either on the entire cluster or on the local node, and start and stop single, cloned and master services.
-
-There are also two special features:
-- Adding location constraints. This provider can add the location constraint to allow the primitive to run on the current node. It is needed in an asymmetric cluster configuration where services are not allowed to start anywhere unless explicitly allowed to.
-- Disabling the basic service. For example, you have the *apache* primitive service in your cluster and are using an OCF script to manage it. In this case you will not want another instance of *apache* to be started by the system init scripts or startup service. The provider will detect the running basic service and will stop it and disable its auto-run before trying to start the cluster service.
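-
-For example, assuming the *apache* primitive from the previous paragraph already exists in the cluster, its status could then be controlled like an ordinary system service through the *pacemaker* service provider described above:
-
-```puppet
-service { 'apache' :
-  ensure   => 'running',
-  enable   => true,
-  provider => 'pacemaker',
-}
-```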
-
-## pacemaker_nodes
-
-This type is very special and designed to add and remove corosync 2 nodes without restarting the service by providing a data structure like this:
-
-```puppet
-{
-  'node-1' => { 'id' => '1', 'ip' => '192.168.0.1'},
-  'node-2' => { 'id' => '2', 'ip' => '192.168.0.2'},
-}
-```
-
-Most likely you should never use this type.
-
-## Pacemaker providers
-
-Each *pacemaker_* type may have up to three different providers:
-
-- *xml* provider. This provider is based on the *pacemaker* library XML parsing and generating capabilities and in most cases requires only *cibadmin* to download the XML CIB and apply patches, but can use *crm_attribute* too. These tools are written in C and are the core parts of the Pacemaker project and most likely will be present on every system.
-
-- *pcs* provider. These providers are designed around the *pcs* cluster management tool usually found on Red Hat family systems. They should not be as complex as the *xml* providers, but *pcs* may not be available on your distribution. Currently they are implemented only for a few types and they are disabled because there is no reason to actually use them.
-
-- *noop* provider. These providers do absolutely nothing, completely disabling the resource if the provider is manually set to *noop*. Such a resource will not fail even if there is no Pacemaker installed. It can be useful if you want to turn off several resources. Puppet's *noop* meta-attribute will not do the same thing because it still does the retrieve phase and will fail if the state cannot be obtained.
-
-## pacemaker::wrapper
-
-This definition can be applied to any Puppet managed service, even from a third party module, and makes this service a Pacemaker managed service without modifying the Puppet code.
-
-The wrapper can also create the OCF script from a Puppet file or template, or the script can be obtained elsewhere. Actually, wrappers are only practical for OCF managed services, because lsb, systemd or upstart services can be managed directly by the cluster.
-
-It can also create *ocf_handlers*. The OCF handler is a special shell script that can call the OCF script with all environment variables and parameters set. The handler can be used to take manual control over the Pacemaker managed service and start and stop it without the cluster. It can be useful for debugging or during disaster recovery.
-
-### Parameters
-
-#### ensure
-(optional) Create or remove the files
-Default: present
-
-#### ocf_root_path
-(optional) Path to the ocf folder
-Default: /usr/lib/ocf
-
-#### primitive_class
-(optional) Class of the created primitive
-Default: ocf
-
-#### primitive_provider
-(optional) Provider of the created primitive
-Default: pacemaker
-
-#### primitive_type
-(optional) Type of the created primitive. Set this to your OCF script.
-Default: Stateful
-
-#### prefix
-(optional) Use the p_ prefix for the Pacemaker primitive. There is no need to use it since the service provider can disable the basic service on its own.
-Default: false
-
-#### parameters
-(optional) Instance attributes hash of the primitive
-Default: undef
-
-#### operations
-(optional) Operations hash of the primitive
-Default: undef
-
-#### metadata
-(optional) Primitive meta-attributes hash
-Default: undef
-
-#### complex_metadata
-(optional) Meta-attributes of the complex primitive
-Default: undef
-
-#### complex_type
-(optional) Set this to 'clone' or 'master' to create a complex primitive
-Default: undef
-
-#### use_handler
-(optional) Whether the handler script should be created
-Default: true
-
-#### handler_root_path
-(optional) Where the handler should be placed
-Default: /usr/local/bin
-
-#### ocf_script_template
-(optional) Generate the OCF script from this template
-Default: undef
-
-#### ocf_script_file
-(optional) Download the OCF script from this file
-Default: undef
-
-#### create_primitive
-(optional) Whether the Pacemaker primitive should be created
-Default: true
-
-#### service_provider
-(optional) The name of the Pacemaker service provider to be set for this service.
-Default: pacemaker
-
-For example, if you have a simple service:
-
-```puppet
-service { 'apache' :
-  ensure => 'running',
-  enable => true,
-}
-```
-
-You can convert it to a Pacemaker service just by adding this definition:
-
-```puppet
-pacemaker::wrapper { 'apache' :
-  primitive_type => 'apache',
-  parameters     => {
-    'port' => '80',
-  },
-  operations     => {
-    'monitor' => {
-      'interval' => '10',
-    },
-  },
-}
-```
-
-Provided there is an ocf:pacemaker:apache script with the port parameter, the *apache* Pacemaker primitive will be created and started and the basic *apache* service will be disabled and stopped.
-
-## STONITH
-
-STONITH manifests are auto-generated from the XML source files by the generator script.
-
-```bash
-rake generate_stonith
-```
-
-The generated defined types can be found in *manifests/stonith*. Every STONITH implementation has different parameters.
-
-Example:
-
-```puppet
-class { "pacemaker::stonith::ipmilan" :
-  address => "10.10.10.100",
-  username => "admin",
-  password => "admin",
-}
-```
-
-# Development
-
-## Library structure
-
-You can find these folders inside **lib**:
-
-- *facter* contains the fact **pacemaker_node_name**. It is equal to the node name from the Pacemaker point of view. It may be equal to either $::hostname or $::fqdn.
-
-- *pacemaker* contains the Pacemaker library files. The Pacemaker module functions are split into submodules according to their role. There are also *xml* and *pcs* groups of files related to either the *pcs* or the *xml* provider, and several common files.
-
-- *puppet* contains the Puppet types and providers. They use the functions from the Pacemaker library.
-
-- *serverspec* contains the custom ServerSpec types to work with Pacemaker. They use the same library the Puppet types and providers do. These types are used in the acceptance tests to validate that Pacemaker has really been configured and that its configuration contains the desired elements and parameters.
-
-- *tools* contains two interactive tools: console and status. Console can be used to manually run the library functions either to debug them or to configure the cluster by hand. Status uses the library functions to implement something like the pcs or crm status command to see the current cluster status.
-
-## Data flow
-
-When the catalog is being compiled an instance of each type will be created and the property and parameter values will be assigned.
-
-At this stage the values can be validated. If the property has a *validate* function it will be called to check if the value is correct, or an exception can be raised. After the validation the *munge* function will be called if the values need to be changed or converted somehow. If the property accepts an array value every element will be validated and then munged separately.
-
-When the catalog is compiled and delivered to the node Puppet will start to apply it.
-
-### Data retrieval
-
-A Puppet type will try to retrieve the current state first. It will either use the *prefetch* mechanics if it is enabled or will simply walk through every resource in the catalog calling the *exists?* functions and then other getter functions to determine the current system state.
-
-If prefetch is used, it will assign every provider generated by the *instances* function to the corresponding resource in the catalog. During the transaction the resource will be able to use the already acquired data, speeding the Puppet run up a little.
-Without prefetch, each provider will receive the system state separately when its resource is processed.
-
-#### Complex providers
-
-Providers: pacemaker_resource, pacemaker_location, pacemaker_colocation, pacemaker_order.
-
-These providers use the *retrieve_data* function to get the configuration and status data from the library and convert it to the form used in the Puppet type by filling the *property_hash*. This happens either during prefetch or when the *exists?* function is called. Other getter functions will just take their values from the *property_hash* after it was filled with data.
-
-#### Simple providers
-
-Providers: pacemaker_property, pacemaker_resource_default, pacemaker_operation_default, pacemaker_online.
-
-These providers are much simpler. There is no *retrieve_data* function and the values are just passed as the property_hash to the provider from *instances* if prefetch is used and then are taken from this hash by the getter functions. If there is no prefetch and the *property_hash* is empty the values are retrieved from the library directly by the getters. Actually there is only one getter for *value* and an implicit getter for *ensure*, or no getter at all for the non-ensurable *pacemaker_online*.
-
-#### Library
-
-Both complex and simple providers use the library functions to get the current state of the system. There is a *main data structure* for each entity the library can work with. For example, the resources use the *primitives* structure.
-
-Every provider can either take the values directly from this structure or it can use one of the many value helpers and predicate functions such as *primitive_type* or *primitive_is_complex?*. Most of these helper functions take the resource name as an argument, try to find the requested values in the data structure and return them.
-
-The main data structures are formed by functions and their values are memoized and returned from the cache every time they are called again. Sometimes, when new values should be acquired from the system, this memoization can be dropped by calling the *cib_reset* function.
-
-Every data structure gets its values by parsing the CIB XML. This XML is obtained by calling the *cibadmin -Q* command. Then a *REXML* document is created from this data and saved too. It can be accessed by the *cib* function, or you can even set new XML text using the *cib=* function if you want the library to use prepared XML data instead of retrieving new data.
-
-Data structures are formed by using CIB section filter functions like *cib_section_primitives* which return the requested part of the CIB. Then these objects are parsed into the data structures.
-
-For a library user, in most cases there is no need to work with anything but the main data structures and the helper getters and predicates.
-
-These are the main data structures:
-
-- **primitives** The list of primitives and their configurations.
-- **node_status** The current primitive status by node.
-- **constraints** All types of constraints and their parameters.
-- **constraint_colocations** Filtered colocation constraints.
-- **constraint_locations** Filtered location constraints.
-- **constraint_orders** Filtered order constraints.
-- **nodes** Cluster nodes and their ids.
-- **operation_defaults** Defined operation defaults and their values.
-- **resource_defaults** Defined resource defaults and their values.
-- **cluster_properties** Defined cluster properties and their values.
-
-PCS-based versions of the data structures:
-
-- **pcs_operation_defaults** Defined operation defaults and their values.
-- **pcs_resource_defaults** Defined resource defaults and their values.
-- **pcs_cluster_properties** Defined cluster properties and their values.
-
-### Data matching
-
-After the provider has retrieved the current system state one way or another and its getters are able to return the values, the type starts to check whether these values are equal to the desired ones.
-
-For every property the value will be retrieved by its getter function in the provider and compared to the value the type got from the catalog using the *insync?* function. Usually there is no need to change its behaviour and this function can be left unimplemented and taken from the parent implementation, but in some cases the comparison should use a special function to check if the data structures are equal, if conversion or filtering is required, and the custom *insync?* should somehow determine whether *is* is equal to *should* or not. The functions *is_to_s* and *should_to_s* will be used to format the property change message in the Puppet log.
-
-### Data syncing
-
-If the retrieved data for the property was different from the desired one, or if the resource doesn't exist at all, the type will try to sync the values.
-
-If the resource was found not to exist the *create* method will be called. It should create the new resource with all parameters or fill the property hash with them. If the resource should be removed the *destroy* function will be called. It should either actually destroy the resource or clear the property hash and set ensure to absent.
-
-If the resource exists and should not be removed but has incorrect parameter values the setters will be used to set the properties to the desired values. Each setter can either set the value directly or modify the property hash.
-
-Finally, the *flush* function will be called if it's defined. This function should use the values from the property hash to actually change the system state by creating, removing or updating the resource. If getters and setters are not using the property hash and are making changes directly there is no need for the *flush* function.
-
-#### Complex providers
-
-Complex providers use the property hash to set the values and the *flush* function to modify the Pacemaker configuration. When the *property_hash* is formed by the *create* function or a setter function, the *flush* method should convert the values from the property hash to a library-friendly data structure. Then the XML generator function can be called to convert this structure to an XML patch that can be applied to the CIB, and the *cibadmin --patch* command will be called to apply it. If the resource should be removed, a small XML patch can be applied by the remove function directly.
-
-All command calls that change the system should be run as their safe versions. They will not be executed if the debug parameter is enabled and will just be shown in the log.
-
-#### Simple providers
-
-Simple providers do not use the *flush* function and their setters modify the system directly. XML generators are not used either and the values are set using the *crm_attribute* command calls. The service provider can also use *crm_attribute* to change the service status.
-
-PCS versions of these providers use the *pcs* command calls for the same purpose.
-PCS providers should use their own main data structures and are designed to be as simple as possible.
-
-### Special providers
-
-The providers of the *service* and *pacemaker_nodes* types work very differently from the others.
-
-The service provider is not ensurable and cannot create services but can control their status. It will use the library to get the status of the service's primitive, try to start or stop it, and then will wait for this action to succeed. It is also capable of adding the service location constraints using the special library function and stopping and disabling the basic service using another provider instance.
-
-The pacemaker_nodes provider uses the *nodes* structure but works mostly with Corosync nodes using the *corosync-cmapctl* tool of the Corosync 2 installation. It will match the existing nodes to the desired node list and will remove all extra Corosync nodes and add the missing ones. It can also remove the extra Pacemaker nodes, but adding new Pacemaker nodes is not required because Pacemaker will handle it on its own, and therefore it is disabled.
-
-## Custom configuration
-
-Some aspects of the providers' behaviour can be controlled by *options.yaml*. This file can be found at *lib/pacemaker/options.yaml* and contains all set options and their descriptions.
-
-## Testing and debugging
-
-### Specs
-
-Most of the library code base has Ruby specs, as do the Puppet types and providers.
-
-- *unit/puppet* Contains the specs for the Puppet types and providers as well as the spec for the whole Pacemaker library and the fixture XML file. Most of the library, type and provider functions are tested here.
-
-- *unit/serverspec* Contains the specs for the ServerSpec types. They are used to check that these types work correctly and also indirectly check the library.
-
-- *classes* and *defined* have rspec-puppet tests for the classes and definitions.
-
-- *acceptance* These tests use the ServerSpec types to check that the module actually configures the cluster correctly on a virtual system. First, Corosync and Pacemaker are installed on the newly created system, then the test manifests in the *examples* folder are applied to check if the resources can be successfully created, updated and removed. Each time, the specs look into the Pacemaker configuration to ensure that the resources are present and have the correct properties.
-
-### ServerSpec types
-
-- Pcmk_resource
-- Pcmk_location
-- Pcmk_colocation
-- Pcmk_order
-- Pcmk_property
-- Pcmk_resource_default
-- Pcmk_operation_default
-
-You can find the description of the properties in the actual type files and examples in *spec/serverspec* and *spec/acceptance*.
-
-### Manual testing
-
-The library provides debug checkpoints for a lot of function calls and their output can be seen in the Puppet debug log.
-
-The service provider uses the *cluster_debug_report* function to output a formatted report of the current cluster state.
-
- Pacemaker debug block start at 'test'
- -> Clone primitive: 'p_neutron-plugin-openvswitch-agent-clone'
- node-1: START (L) | node-2: STOP | node-3: STOP
- -> Simple primitive: 'p_ceilometer-alarm-evaluator'
- node-1: STOP | node-2: STOP (F) | node-3: STOP (F)
- -> Simple primitive: 'p_heat-engine'
- node-1: START (L) | node-2: STOP | node-3: STOP
- -> Simple primitive: 'p_ceilometer-agent-central'
- node-1: STOP | node-2: STOP (F) | node-3: STOP (F)
- -> Simple primitive: 'vip__management'
- node-1: START (L) | node-2: STOP (L) | node-3: STOP (L)
- -> Clone primitive: 'ping_vip__public-clone'
- node-1: START (L) | node-2: START (L) | node-3: START (L)
- -> Clone primitive: 'p_neutron-l3-agent-clone'
- node-1: START (L) | node-2: STOP | node-3: STOP
- -> Clone primitive: 'p_neutron-metadata-agent-clone'
- node-1: START (L) | node-2: STOP | node-3: STOP
- -> Clone primitive: 'p_mysql-clone'
- node-1: START (L) | node-2: START (L) | node-3: STOP
- -> Simple primitive: 'p_neutron-dhcp-agent'
- node-1: START (L) | node-2: STOP | node-3: STOP
- -> Simple primitive: 'vip__public'
- node-1: START (L) | node-2: STOP (L) | node-3: STOP (L)
- -> Clone primitive: 'p_haproxy-clone'
- node-1: START (L) | node-2: START (L) | node-3: STOP
- -> Master primitive: 'p_rabbitmq-server-master'
- node-1: MASTER (L) | node-2: START (L) | node-3: STOP
- * symmetric-cluster: false
- * no-quorum-policy: ignore
- Pacemaker debug block end at 'test'
-
-- (L) The location constraint for this resource is created on this node
-- (F) This resource has failed on this node
-- (M) This resource is not managed
-
-Inserting this function into other providers can be helpful if you need to see the status of all surrounding resources.
-
-Using the **debug** property of most resources can help you to debug the providers without damaging the system configuration.
-
-## Links
-
-- [Pacemaker Explained](http://clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/)
-- [Pacemaker Cluster from Scratch](http://clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/)
-- [Puppet Types and Providers](https://puppet.com/docs/puppet/5.5/complete_resource_example.html)
-- [RSpec Puppet Test](http://rspec-puppet.com/)
-- [ServerSpec Tests](http://serverspec.org/)
-- [RSpec-Beaker Acceptance Tests](https://github.com/puppetlabs/beaker-rspec)
-- [source code repository](https://git.openstack.org/cgit/openstack/puppet-pacemaker)
-- [Development](https://docs.openstack.org/puppet-openstack-guide/latest/)
-- [Release Notes](https://docs.openstack.org/releasenotes/puppet-pacemaker)
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..4ee2c5f1
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,10 @@
+This project is no longer maintained.
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+
+For any further questions, please email
+openstack-discuss@lists.openstack.org or join #openstack-dev on
+OFTC.
diff --git a/Rakefile b/Rakefile deleted file mode 100644 index a8a6a87e..00000000 --- a/Rakefile +++ /dev/null @@ -1,47 +0,0 @@ -require 'puppetlabs_spec_helper/rake_tasks' -require 'puppet-lint/tasks/puppet-lint' -require 'puppet-syntax/tasks/puppet-syntax' -require 'metadata-json-lint/rake_task' - -# require 'puppet-strings/rake_tasks' -# require 'rubocop/rake_task' -# RuboCop::RakeTask.new - -PuppetLint.configuration.log_format = '%{path}:%{linenumber}:%{check}:%{KIND}:%{message}' -PuppetLint.configuration.fail_on_warnings = true -PuppetLint.configuration.send('disable_140chars') -PuppetLint.configuration.send('disable_class_inherits_from_params_class') -PuppetLint.configuration.send('disable_documentation') -PuppetLint.configuration.send('disable_single_quote_string_with_variables') -PuppetSyntax.fail_on_deprecation_notices = false - -exclude_paths = %w( - pkg/**/* - vendor/**/* - spec/**/* -) -PuppetLint.configuration.ignore_paths = exclude_paths -PuppetSyntax.exclude_paths = exclude_paths - -desc 'Run acceptance tests' -RSpec::Core::RakeTask.new(:acceptance) do |t| - t.pattern = 'spec/acceptance' -end - -desc 'Run metadata_lint, lint, syntax, and spec tests.' -task test: [ - :metadata_lint, - :lint, - :syntax, - :spec, -] - -desc 'Generate the Stonith modules' -task :generate_stonith do - sh './agent_generator/generate_manifests.sh' -end - -ENV['BEAKER_debug'] = 'yes' -ENV['BEAKER_set'] = 'vagrant-ubuntu-14.04-64' unless ENV['BEAKER_set'] -ENV['BEAKER_destroy'] = 'onpass' unless ENV['BEAKER_destroy'] -ENV['BEAKER_provision'] = 'yes' unless ENV['BEAKER_provision'] diff --git a/TODO.md b/TODO.md deleted file mode 100644 index 7525106d..00000000 --- a/TODO.md +++ /dev/null @@ -1,12 +0,0 @@ -* add pacemaker_group type -* pacemaker_location add date_expressions support -* pacemaker_location rules format/validation -* pacemaker_resource convert complex to simple and back -* pacemaker_resource add utilization support -* cleanup unused methods from pacemaker_nodes provider -* unit tests for location, colocation, order autorequire functions -* change tests behaviour according to the options and test several possible options -* noop provider is not working for non-ensurable types -* colocation/location/order will prevent its primitives from being removed. remove constraints first? -* primitive should use similar functions to constraint_location_add/remove to reduce code duplication -* primitive_is_started? and primitive_is_managed don't support resource defaults and management-mode diff --git a/agent_generator/agent_generator.rb b/agent_generator/agent_generator.rb deleted file mode 100755 index 46d8cac9..00000000 --- a/agent_generator/agent_generator.rb +++ /dev/null @@ -1,250 +0,0 @@ -#!/usr/bin/ruby - -# Usage: -# -# * install a fence agent package e.g. fence-agents-ilo2 -# * fence_ilo2 -o metadata > ilo.xml -# * fence-generator.rb ilo.xml fence_ilo2 fence-agents-ilo2 -# [ XML metadata, name of the class, name of the package for dependency check ] - -require 'rexml/document' - -class FencingMetadataParser - def initialize(filename, agentName, packageName) - @agentName = agentName - @packageName = packageName - file = File.new(filename) - @doc = REXML::Document.new file - @params = [] - @params_max_len = 14 # pcmk_host_list - end - - def getPackageName - @packageName - end - - def getAgentName - @agentName - end - - def getParameters - ## result have to be array as order should be preserved - return @params unless @params.empty? 
- @doc.elements.each('resource-agent/parameters/parameter') { |p| - param = {} - param['name'] = REXML::XPath.match(p, 'string(./@name)')[0] - @params_max_len = param['name'].length if param['name'].length > @params_max_len - param['type'] = REXML::XPath.match(p, 'string(./content/@type)')[0] - ## if 'default' is list then we can not enter it as parameter !! - ## this is problem only for 'cmd_prompt' - param['default'] = REXML::XPath.match(p, 'string(./content/@default)')[0] - param['description'] = REXML::XPath.match(p, 'string(./shortdesc)')[0] - ## remove parameters that are not usable during automatic execution - @params.push(param) unless %w(help version).include?(param['name']) - } - @params - end - - def getMaxLen - @params_max_len - end -end - -class ManifestGenerator - def initialize(parser) - @parser = parser - end - - def generate - puts <<-eos -# == Define: pacemaker::stonith::#{@parser.getAgentName} -# -# Module for managing Stonith for #{@parser.getAgentName}. -# -# WARNING: Generated by "rake generate_stonith", manual changes will -# be lost. -# -# === Parameters -# -#{getManifestDocumentation}# [*interval*] -# Interval between tries. -# -# [*ensure*] -# The desired state of the resource. -# -# [*tries*] -# The number of tries. -# -# [*try_sleep*] -# Time to sleep between tries. -# -# [*pcmk_host_list*] -# List of Pacemaker hosts. -# -# [*meta_attr*] -# (optional) String of meta attributes -# Defaults to undef -# -# [*deep_compare*] -# Enable deep comparing of resources and bundles -# When set to true a resource will be compared in full (options, meta parameters,..) -# to the existing one and in case of difference it will be repushed to the CIB -# Defaults to false -# -# [*update_settle_secs*] -# When deep_compare is enabled and puppet updates a resource, this -# parameter represents the number (in seconds) to wait for the cluster to settle -# after the resource update. -# Defaults to 600 (seconds) -# -# === Dependencies -# None -# -# === Authors -# -# Generated by rake generate_stonith task. -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -define pacemaker::stonith::#{@parser.getAgentName} ( -#{getManifestParameters} - $deep_compare = false, - $update_settle_secs = 600, -) { -#{getVariableValues} -#{getPcmkHostList} - $meta_attr_value_chunk = $meta_attr ? { - undef => '', - default => "meta ${meta_attr}", - } - - # $title can be a mac address, remove the colons for pcmk resource name - $safe_title = regsubst($title, ':', '', 'G') - - Exec<| title == 'wait-for-settle' |> -> Pcmk_stonith<||> - - $param_string = "#{getChunks} op monitor interval=${interval} ${meta_attr_value_chunk}" - -#{getPackageSnippet} -#{getManifestCreate}} -eos - end - - def getManifestCreate - agent_name = @parser.getAgentName == 'fence_watchdog' ? 
'\'watchdog\'' : "\"stonith-#{@parser.getAgentName}-${safe_title}\"" - text = '' - text += " pcmk_stonith { #{agent_name}:\n" - text += " ensure => $ensure,\n" - text += " stonith_type => '#{@parser.getAgentName}',\n" - text += " pcmk_host_list => $pcmk_host_value_chunk,\n" - text += " pcs_param_string => $param_string,\n" - text += " tries => $tries,\n" - text += " try_sleep => $try_sleep,\n" - text += " deep_compare => $deep_compare,\n" - text += " update_settle_secs => $update_settle_secs,\n" - text += " }\n" - text - end - - def getPcmkHostList - text = '' - if @parser.getAgentName == 'fence_watchdog' - text += " $pcmk_host_value_chunk = '$(crm_node -l |awk \\'{print $2}\\' |paste -sd, -)'\n" - else - text += " $pcmk_host_value_chunk = $pcmk_host_list ? {\n" - text += " undef => '$(/usr/sbin/crm_node -n)',\n" - text += " default => $pcmk_host_list,\n" - text += " }\n" - end - text - end - - def getPackageSnippet - agent_name = @parser.getAgentName == 'fence_watchdog' ? '\'watchdog\'' : "\"stonith-#{@parser.getAgentName}-${safe_title}\"" - text = '' - if @parser.getPackageName != 'None' - text += " if $ensure != 'absent' {\n" - text += " ensure_packages('#{@parser.getPackageName}', { ensure => 'installed' })\n" - text += " Package['#{@parser.getPackageName}'] -> Pcmk_stonith[#{agent_name}]\n" - text += " }" - end - text - end - - def getManifestDocumentation - text = '' - @parser.getParameters.each { |p| - text += "# [*#{p['name']}*]\n" - text += "# #{p['description']}\n#\n" - } - text - end - - def getManifestParameters - text = '' - @parser.getParameters.each { |p| - text += format_param(p['name']) - } - - text += "\n" - text += format_param('meta_attr', 'undef') - text += format_param('interval', "'60s'") - text += format_param('ensure', 'present') - text += format_param('pcmk_host_list') - text += "\n" - text += format_param('tries') - text += format_param('try_sleep') - - text - end - - def getVariableValues - text = '' - @parser.getParameters.each { |p| - text += " $#{p['name']}_chunk = $#{p['name']} ? {\n" - text += " undef => '',\n" - text += " default => \"#{p['name']}=\\\"${#{p['name']}}\\\"\",\n" - text += " }\n" - } - - text - end - - def getChunks - text = '' - @parser.getParameters.each { |p| - text += "${#{p['name']}_chunk} " - } - text - end - - private - - def format_param(param, value = 'undef') - " $%-#{@parser.getMaxLen}s = %s,\n" % [param, value] - end -end - -if ARGV.length != 3 - puts 'You have to enter three arguments: path to metadata, name of fence agent and fence agent package' - exit 1 -end - -metadata, agentName, packageName = ARGV -# e.g. 
parser = FencingMetadataParser.new("ilo.xml", "fence_ilo", "fence-agents-ilo2") -parser = FencingMetadataParser.new(metadata, agentName, packageName) -ManifestGenerator.new(parser).generate diff --git a/agent_generator/generate_manifests.sh b/agent_generator/generate_manifests.sh deleted file mode 100755 index 7db5afca..00000000 --- a/agent_generator/generate_manifests.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# This scripts generates fence agent manifests from their XML -# descriptions - -set -exuo pipefail - -generator_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -source "$generator_dir/variables.sh" - -for cmd_pkg in "${cmd_pkg_map[@]}"; do - cmd=${cmd_pkg%%:*} - pkg=${cmd_pkg#*:} - - "$generator_dir/agent_generator.rb" "$generator_dir/src_xml/$cmd.xml" $cmd $pkg > "$generator_dir/../manifests/stonith/$cmd.pp" -done diff --git a/agent_generator/src_xml/fence_amt.xml b/agent_generator/src_xml/fence_amt.xml deleted file mode 100644 index 26c2aa85..00000000 --- a/agent_generator/src_xml/fence_amt.xml +++ /dev/null @@ -1,172 +0,0 @@ - - -fence_amt is an I/O Fencing agent which can be used with Intel AMT. This agent calls support software amttool(http://www.kraxel.org/cgit/amtterm/). -http://www.intel.com/ - - - - - TCP/UDP port to use for connection with device - - - - - IP address or hostname of fencing device (together with --port-as-ip) - - - - - Forces agent to use IPv6 addresses only - - - - - IP Address or Hostname - - - - - Forces agent to use IPv4 addresses only - - - - - - Method to fence (onoff|cycle) - - - - - Script to retrieve password - - - - - Login password or passphrase - - - - - - Change the default boot behavior of the machine. - - - - - Fencing Action - - - - - IP address or hostname of fencing device (together with --port-as-ip) - - - - - IP Address or Hostname - - - - - Login password or passphrase - - - - - Script to retrieve password - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds for cmd prompt after login - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Path to amttool binary - - - - - Make "port/plug" to be an alias to IP address - - - - - Count of attempts to retry power on - - - - - Use sudo (without password) when calling 3rd party sotfware. - - - - - Use sudo (without password) when calling 3rd party sotfware. - - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_apc.xml b/agent_generator/src_xml/fence_apc.xml deleted file mode 100644 index 74830c52..00000000 --- a/agent_generator/src_xml/fence_apc.xml +++ /dev/null @@ -1,142 +0,0 @@ - - -fence_apc is an I/O Fencing agent which can be used with the APC network power switch. It logs into device via telnet/ssh and reboots a specified outlet. Lengthy telnet/ssh connections should be avoided while a GFS cluster is running because the connection will block any necessary fencing actions. 
-http://www.apc.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Force Python regex for command prompt - - - - - SSH connection - - - - - Physical plug number, name of virtual machine or UUID - - - - - Physical switch number on device - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_apc_snmp.xml b/agent_generator/src_xml/fence_apc_snmp.xml deleted file mode 100644 index 87f21d30..00000000 --- a/agent_generator/src_xml/fence_apc_snmp.xml +++ /dev/null @@ -1,151 +0,0 @@ - - -fence_apc_snmp is an I/O Fencing agent which can be used with the APC network power switch. It logs into a device via SNMP and reboots a specified outlet. It supports SNMP v1 and v3 with all combinations of authenticity/privacy settings. -http://www.apc.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Specifies SNMP version to use (1,2c,3) - - - - - Set the community string - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Set authentication protocol (MD5|SHA) - - - - - Set security level (noAuthNoPriv|authNoPriv|authPriv) - - - - - Set privacy protocol (DES|AES) - - - - - Set privacy protocol password - - - - - Script to run to retrieve privacy password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_bladecenter.xml b/agent_generator/src_xml/fence_bladecenter.xml deleted file mode 100644 index b1b94951..00000000 --- a/agent_generator/src_xml/fence_bladecenter.xml +++ /dev/null @@ -1,142 +0,0 @@ - - -fence_bladecenter is an I/O Fencing agent which can be used with IBM Bladecenters with recent enough firmware that includes telnet support. It logs into a Brocade chasis via telnet or ssh and uses the command line interface to power on and off blades. 
-http://www.ibm.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Force Python regex for command prompt - - - - - SSH connection - - - - - Physical plug number, name of virtual machine or UUID - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Missing port returns OFF instead of failure - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_brocade.xml b/agent_generator/src_xml/fence_brocade.xml deleted file mode 100644 index 140dda5f..00000000 --- a/agent_generator/src_xml/fence_brocade.xml +++ /dev/null @@ -1,136 +0,0 @@ - - -fence_brocade is an I/O Fencing agent which can be used with Brocade FC switches. It logs into a Brocade switch via telnet and disables a specified port. Disabling the port which a machine is connected to effectively fences that machine. Lengthy telnet connections to the switch should be avoided while a GFS cluster is running because the connection will block any necessary fencing actions. After a fence operation has taken place the fenced machine can no longer connect to the Brocade FC switch. When the fenced machine is ready to be brought back into the GFS cluster (after reboot) the port on the Brocade FC switch needs to be enabled. This can be done by running fence_brocade and specifying the enable action -http://www.brocade.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Force Python regex for command prompt - - - - - SSH connection - - - - - Physical plug number, name of virtual machine or UUID - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_cisco_mds.xml b/agent_generator/src_xml/fence_cisco_mds.xml deleted file mode 100644 index d444b604..00000000 --- a/agent_generator/src_xml/fence_cisco_mds.xml +++ /dev/null @@ -1,150 +0,0 @@ - - -fence_cisco_mds is an I/O Fencing agent which can be used with any Cisco MDS 9000 series with SNMP enabled device. 
-http://www.cisco.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Specifies SNMP version to use (1,2c,3) - - - - - Set the community string - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Set authentication protocol (MD5|SHA) - - - - - Set security level (noAuthNoPriv|authNoPriv|authPriv) - - - - - Set privacy protocol (DES|AES) - - - - - Set privacy protocol password - - - - - Script to run to retrieve privacy password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_cisco_ucs.xml b/agent_generator/src_xml/fence_cisco_ucs.xml deleted file mode 100644 index bbaacc8a..00000000 --- a/agent_generator/src_xml/fence_cisco_ucs.xml +++ /dev/null @@ -1,142 +0,0 @@ - - -fence_cisco_ucs is an I/O Fencing agent which can be used with Cisco UCS to fence machines. -http://www.cisco.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - SSL connection - - - - - Disable TLS negotiation - - - - - Physical plug number, name of virtual machine or UUID - - - - - Additional path needed to access suborganization - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - SSL connection with verifying fence device's certificate - - - - - SSL connection without verifying fence device's certificate - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_compute.xml b/agent_generator/src_xml/fence_compute.xml deleted file mode 100644 index 2f3cf630..00000000 --- a/agent_generator/src_xml/fence_compute.xml +++ /dev/null @@ -1,177 +0,0 @@ - - -Used to tell Nova that compute nodes are down and to reschedule flagged instances - - - - - - Keystone Admin Tenant or v3 Project - - - - - Keystone Admin Auth URL - - - - - Physical plug number, name of virtual machine or UUID - - - - - Script to retrieve password - - - - - Region Name - - - - - Login password or passphrase - - - - - Nova Endpoint type - - - - - Fencing Action - - - - - Login Name - - - - - Physical plug number, name of virtual machine or UUID - - - - - Login Name - - - - - Login password or passphrase 
- - - - - Script to retrieve password - - - - - Allow Insecure TLS Requests - - - - - DNS domain in which hosts live - - - - - Keystone v3 Project Domain - - - - - Keystone v3 User Domain - - - - - Allow instances to be evacuated - - - - - Disable functionality for dealing with shared storage - - - - - Only record the target as needing evacuation - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds before fencing is started - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Count of attempts to retry power on - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_crosslink.xml b/agent_generator/src_xml/fence_crosslink.xml deleted file mode 100644 index e2669ed4..00000000 --- a/agent_generator/src_xml/fence_crosslink.xml +++ /dev/null @@ -1,109 +0,0 @@ - - -This agent helps two-node clusters to tackle the situation where one node lost power, cannot be fenced by telling pacemaker that if the node is not reachable over the crosslink cable, we can assume it is dead - - - - - - Fencing action - - - - - Cross-cable IP - - - - - Physical plug number on device, UUID or identification of machine - - - - - Physical plug number on device, UUID or identification of machine - - - - - No ICMP reply in 5 seconds -> Node is considered dead - - - - - Disable logging to stderr. Does not affect --verbose or --debug-file or logging to syslog. - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by 'list' operation - - - - - Wait X seconds before fencing is started - - - - - Wait X seconds for cmd prompt after login - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Count of attempts to retry power on - - - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_drac5.xml b/agent_generator/src_xml/fence_drac5.xml deleted file mode 100644 index 35e00e0c..00000000 --- a/agent_generator/src_xml/fence_drac5.xml +++ /dev/null @@ -1,142 +0,0 @@ - - -fence_drac5 is an I/O Fencing agent which can be used with the Dell Remote Access Card v5 or CMC (DRAC). This device provides remote access to controlling power to a server. It logs into the DRAC through the telnet/ssh interface of the card. By default, the telnet interface is not enabled. 
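For illustration only, a status query against a DRAC over SSH might look like the sketch below; the address, credentials and the --ssh option spelling are assumptions based on the common fence-agents command line, not on anything in this repository.

# Hypothetical sketch: check power status of a DRAC5-managed server over SSH.
# 192.0.2.10, root and secret are placeholders; long-option names can differ
# between fence-agents releases.
fence_drac5 --ip 192.0.2.10 --username root --password secret --ssh --action status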
-http://www.dell.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Force Python regex for command prompt - - - - - SSH connection - - - - - Force DRAC version to use - - - - - Physical plug number, name of virtual machine or UUID - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_eaton_snmp.xml b/agent_generator/src_xml/fence_eaton_snmp.xml deleted file mode 100644 index a7052576..00000000 --- a/agent_generator/src_xml/fence_eaton_snmp.xml +++ /dev/null @@ -1,151 +0,0 @@ - - -fence_eaton_snmp is an I/O Fencing agent which can be used with the Eaton network power switch. It logs into a device via SNMP and reboots a specified outlet. It supports SNMP v1 and v3 with all combinations of authenticity/privacy settings. -http://powerquality.eaton.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Specifies SNMP version to use (1,2c,3) - - - - - Set the community string - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Set authentication protocol (MD5|SHA) - - - - - Set security level (noAuthNoPriv|authNoPriv|authPriv) - - - - - Set privacy protocol (DES|AES) - - - - - Set privacy protocol password - - - - - Script to run to retrieve privacy password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_eps.xml b/agent_generator/src_xml/fence_eps.xml deleted file mode 100644 index f7a9c898..00000000 --- a/agent_generator/src_xml/fence_eps.xml +++ /dev/null @@ -1,123 +0,0 @@ - - -fence_eps is an I/O Fencing agent which can be used with the ePowerSwitch 8M+ power switch to fence connected machines. Fence agent works ONLY on 8M+ device, because this is only one, which has support for hidden page feature. -.TP -Agent basically works by connecting to hidden page and pass appropriate arguments to GET request. This means, that hidden page feature must be enabled and properly configured. 
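For context, the parameter names listed in these metadata files are the same keys the cluster stack feeds to an agent as key=value pairs on stdin; a minimal sketch for fence_eps, with placeholder values and an assumed hidden_page parameter name, could look like this.

# Hypothetical sketch: drive fence_eps through its stdin interface
# (all values are placeholders; parameter names may vary by release).
printf 'action=status\nip=192.0.2.20\nusername=admin\npassword=secret\nhidden_page=hidden.htm\n' | fence_eps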
-http://www.epowerswitch.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Name of hidden page - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_hpblade.xml b/agent_generator/src_xml/fence_hpblade.xml deleted file mode 100644 index 5e20fba3..00000000 --- a/agent_generator/src_xml/fence_hpblade.xml +++ /dev/null @@ -1,142 +0,0 @@ - - -fence_hpblade is an I/O Fencing agent which can be used with HP BladeSystem. It logs into an enclosure via telnet or ssh and uses the command line interface to power on and off blades. -http://www.hp.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Force Python regex for command prompt - - - - - SSH connection - - - - - Physical plug number, name of virtual machine or UUID - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Missing port returns OFF instead of failure - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ibmblade.xml b/agent_generator/src_xml/fence_ibmblade.xml deleted file mode 100644 index d711a0ee..00000000 --- a/agent_generator/src_xml/fence_ibmblade.xml +++ /dev/null @@ -1,151 +0,0 @@ - - -fence_ibmblade is an I/O Fencing agent which can be used with IBM BladeCenter chassis. It issues SNMP Set request to BladeCenter chassis, rebooting, powering up or down the specified Blade Server. 
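A hypothetical invocation of fence_ibmblade, assuming a placeholder chassis address, community string and blade number, might be:

# Hypothetical sketch: power off blade 3 through the BladeCenter chassis
# management module via SNMP (values are placeholders).
fence_ibmblade --ip 192.0.2.30 --community private --plug 3 --action off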
-http://www.ibm.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Specifies SNMP version to use (1,2c,3) - - - - - Set the community string - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Set authentication protocol (MD5|SHA) - - - - - Set security level (noAuthNoPriv|authNoPriv|authPriv) - - - - - Set privacy protocol (DES|AES) - - - - - Set privacy protocol password - - - - - Script to run to retrieve privacy password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_idrac.xml b/agent_generator/src_xml/fence_idrac.xml deleted file mode 100644 index 4faead68..00000000 --- a/agent_generator/src_xml/fence_idrac.xml +++ /dev/null @@ -1,94 +0,0 @@ - - - - - - - -fence_ipmilan is an I/O Fencing agent which can be used with machines controlled by IPMI. This agent calls support software using ipmitool (http://ipmitool.sf.net/). - -To use fence_ipmilan with HP iLO 3 or HP iLO 4 you have to enable lanplus option (lanplus / -P) and increase wait after operation to 4 seconds (power_wait=4 / -T 4) -http://www.intel.com - - - - - IPMI Lan Auth type (md5, password, or none) - - - - - IPMI Lan IP to talk to - - - - - Password (if required) to control power on IPMI device - - - - - Script to retrieve password (if required) - - - - - Use Lanplus to improve security of connection - - - - - Username/Login (if required) to control power on IPMI device - - - - - Operation to perform. Valid operations: on, off, reboot, status, list, diag, monitor or metadata - - - - - Timeout (sec) for IPMI operation - - - - - Ciphersuite to use (same as ipmitool -C parameter) - - - - - Method to fence (onoff or cycle) - - - - - Wait X seconds after on/off operation - - - - - Wait X seconds before fencing is started - - - - - Privilege level on IPMI device - - - - - Verbose mode - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ifmib.xml b/agent_generator/src_xml/fence_ifmib.xml deleted file mode 100644 index f4da8ba4..00000000 --- a/agent_generator/src_xml/fence_ifmib.xml +++ /dev/null @@ -1,152 +0,0 @@ - - -fence_ifmib is an I/O Fencing agent which can be used with any SNMP IF-MIB capable device. -.P -It was written with managed ethernet switches in mind, in order to fence iSCSI SAN connections. However, there are many devices that support the IF-MIB interface. The agent uses IF-MIB::ifAdminStatus to control the state of an interface. 
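As an illustrative sketch, fencing a node by downing the switch port that carries its iSCSI traffic could look like the following; the SNMP option spellings and all values are assumptions, not taken from this module.

# Hypothetical sketch: administratively disable interface 17 on an SNMP-managed
# switch (SNMPv2c; address, community string and port index are placeholders).
fence_ifmib --ip 192.0.2.40 --snmp-version 2c --community private --plug 17 --action off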
-http://www.ietf.org/wg/concluded/ifmib.html - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Specifies SNMP version to use (1,2c,3) - - - - - Set the community string - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Set authentication protocol (MD5|SHA) - - - - - Set security level (noAuthNoPriv|authNoPriv|authPriv) - - - - - Set privacy protocol (DES|AES) - - - - - Set privacy protocol password - - - - - Script to run to retrieve privacy password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ilo.xml b/agent_generator/src_xml/fence_ilo.xml deleted file mode 100644 index ac72e393..00000000 --- a/agent_generator/src_xml/fence_ilo.xml +++ /dev/null @@ -1,132 +0,0 @@ - - - -fence_ilo is an I/O Fencing agent used for HP servers with the Integrated Light Out (iLO) PCI card.The agent opens an SSL connection to the iLO card. Once the SSL connection is established, the agent is able to communicate with the iLO card through an XML stream. -http://www.hp.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - SSL connection - - - - - Disable TLS negotiation - - - - - Force ribcl version to use - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - SSL connection with verifying fence device's certificate - - - - - SSL connection without verifying fence device's certificate - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ilo2.xml b/agent_generator/src_xml/fence_ilo2.xml deleted file mode 100644 index fbeee809..00000000 --- a/agent_generator/src_xml/fence_ilo2.xml +++ /dev/null @@ -1,132 +0,0 @@ - - - -fence_ilo is an I/O Fencing agent used for HP servers with the Integrated Light Out (iLO) PCI card.The agent opens an SSL connection to the iLO card. Once the SSL connection is established, the agent is able to communicate with the iLO card through an XML stream. 
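For illustration, a status check against an iLO over SSL, skipping certificate verification, might look like the sketch below; the --ssl-insecure spelling and all values are assumptions that vary between fence-agents releases.

# Hypothetical sketch: query server status through the iLO XML interface
# (address and credentials are placeholders).
fence_ilo2 --ip 192.0.2.50 --username Administrator --password secret --ssl-insecure --action status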
-http://www.hp.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - SSL connection - - - - - Disable TLS negotiation - - - - - Force ribcl version to use - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - SSL connection with verifying fence device's certificate - - - - - SSL connection without verifying fence device's certificate - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ilo3.xml b/agent_generator/src_xml/fence_ilo3.xml deleted file mode 100644 index 89ad7d98..00000000 --- a/agent_generator/src_xml/fence_ilo3.xml +++ /dev/null @@ -1,94 +0,0 @@ - - - - - - - -fence_ipmilan is an I/O Fencing agent which can be used with machines controlled by IPMI. This agent calls support software using ipmitool (http://ipmitool.sf.net/). - -To use fence_ipmilan with HP iLO 3 or HP iLO 4 you have to enable lanplus option (lanplus / -P) and increase wait after operation to 4 seconds (power_wait=4 / -T 4) -http://www.intel.com - - - - - IPMI Lan Auth type (md5, password, or none) - - - - - IPMI Lan IP to talk to - - - - - Password (if required) to control power on IPMI device - - - - - Script to retrieve password (if required) - - - - - Use Lanplus to improve security of connection - - - - - Username/Login (if required) to control power on IPMI device - - - - - Operation to perform. Valid operations: on, off, reboot, status, list, diag, monitor or metadata - - - - - Timeout (sec) for IPMI operation - - - - - Ciphersuite to use (same as ipmitool -C parameter) - - - - - Method to fence (onoff or cycle) - - - - - Wait X seconds after on/off operation - - - - - Wait X seconds before fencing is started - - - - - Privilege level on IPMI device - - - - - Verbose mode - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ilo4.xml b/agent_generator/src_xml/fence_ilo4.xml deleted file mode 100644 index ec767ce4..00000000 --- a/agent_generator/src_xml/fence_ilo4.xml +++ /dev/null @@ -1,94 +0,0 @@ - - - - - - - -fence_ipmilan is an I/O Fencing agent which can be used with machines controlled by IPMI. This agent calls support software using ipmitool (http://ipmitool.sf.net/). - -To use fence_ipmilan with HP iLO 3 or HP iLO 4 you have to enable lanplus option (lanplus / -P) and increase wait after operation to 4 seconds (power_wait=4 / -T 4) -http://www.intel.com - - - - - IPMI Lan Auth type (md5, password, or none) - - - - - IPMI Lan IP to talk to - - - - - Password (if required) to control power on IPMI device - - - - - Script to retrieve password (if required) - - - - - Use Lanplus to improve security of connection - - - - - Username/Login (if required) to control power on IPMI device - - - - - Operation to perform. 
Valid operations: on, off, reboot, status, list, diag, monitor or metadata - - - - - Timeout (sec) for IPMI operation - - - - - Ciphersuite to use (same as ipmitool -C parameter) - - - - - Method to fence (onoff or cycle) - - - - - Wait X seconds after on/off operation - - - - - Wait X seconds before fencing is started - - - - - Privilege level on IPMI device - - - - - Verbose mode - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ilo_mp.xml b/agent_generator/src_xml/fence_ilo_mp.xml deleted file mode 100644 index 121db8e1..00000000 --- a/agent_generator/src_xml/fence_ilo_mp.xml +++ /dev/null @@ -1,127 +0,0 @@ - - - -http://www.hp.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - SSH connection - - - - - Force Python regex for command prompt - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_imm.xml b/agent_generator/src_xml/fence_imm.xml deleted file mode 100644 index 6795729b..00000000 --- a/agent_generator/src_xml/fence_imm.xml +++ /dev/null @@ -1,94 +0,0 @@ - - - - - - - -fence_ipmilan is an I/O Fencing agent which can be used with machines controlled by IPMI. This agent calls support software using ipmitool (http://ipmitool.sf.net/). - -To use fence_ipmilan with HP iLO 3 or HP iLO 4 you have to enable lanplus option (lanplus / -P) and increase wait after operation to 4 seconds (power_wait=4 / -T 4) -http://www.intel.com - - - - - IPMI Lan Auth type (md5, password, or none) - - - - - IPMI Lan IP to talk to - - - - - Password (if required) to control power on IPMI device - - - - - Script to retrieve password (if required) - - - - - Use Lanplus to improve security of connection - - - - - Username/Login (if required) to control power on IPMI device - - - - - Operation to perform. Valid operations: on, off, reboot, status, list, diag, monitor or metadata - - - - - Timeout (sec) for IPMI operation - - - - - Ciphersuite to use (same as ipmitool -C parameter) - - - - - Method to fence (onoff or cycle) - - - - - Wait X seconds after on/off operation - - - - - Wait X seconds before fencing is started - - - - - Privilege level on IPMI device - - - - - Verbose mode - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_intelmodular.xml b/agent_generator/src_xml/fence_intelmodular.xml deleted file mode 100644 index 4dd74635..00000000 --- a/agent_generator/src_xml/fence_intelmodular.xml +++ /dev/null @@ -1,153 +0,0 @@ - - -fence_intelmodular is an I/O Fencing agent which can be used with Intel Modular device (tested on Intel MFSYS25, should work with MFSYS35 as well). -.P -Note: Since firmware update version 2.7, SNMP v2 write support is removed, and replaced by SNMP v3 support. So agent now has default SNMP version 3. 
If you are using older firmware, please supply -d for command line and snmp_version option for your cluster.conf. -http://www.intel.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Specifies SNMP version to use (1,2c,3) - - - - - Set the community string - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Set authentication protocol (MD5|SHA) - - - - - Set security level (noAuthNoPriv|authNoPriv|authPriv) - - - - - Set privacy protocol (DES|AES) - - - - - Set privacy protocol password - - - - - Script to run to retrieve privacy password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ipdu.xml b/agent_generator/src_xml/fence_ipdu.xml deleted file mode 100644 index f46e08b7..00000000 --- a/agent_generator/src_xml/fence_ipdu.xml +++ /dev/null @@ -1,151 +0,0 @@ - - -fence_ipdu is an I/O Fencing agent which can be used with the IBM iPDU network power switch. It logs into a device via SNMP and reboots a specified outlet. It supports SNMP v3 with all combinations of authenticity/privacy settings. -http://www.ibm.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Physical plug number, name of virtual machine or UUID - - - - - Specifies SNMP version to use (1,2c,3) - - - - - Set the community string - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Set authentication protocol (MD5|SHA) - - - - - Set security level (noAuthNoPriv|authNoPriv|authPriv) - - - - - Set privacy protocol (DES|AES) - - - - - Set privacy protocol password - - - - - Script to run to retrieve privacy password - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ipmilan.xml b/agent_generator/src_xml/fence_ipmilan.xml deleted file mode 100644 index 209af283..00000000 --- a/agent_generator/src_xml/fence_ipmilan.xml +++ /dev/null @@ -1,99 +0,0 @@ - - - - - - - -fence_ipmilan is an I/O Fencing agent which can be used with machines controlled by IPMI. This agent calls support software using ipmitool (http://ipmitool.sf.net/). 
- -To use fence_ipmilan with HP iLO 3 or HP iLO 4 you have to enable lanplus option (lanplus / -P) and increase wait after operation to 4 seconds (power_wait=4 / -T 4) -http://www.intel.com - - - - - IPMI Lan Auth type (md5, password, or none) - - - - - IPMI Lan IP to talk to - - - - - IPMI Lan port to talk to - - - - - Password (if required) to control power on IPMI device - - - - - Script to retrieve password (if required) - - - - - Use Lanplus to improve security of connection - - - - - Username/Login (if required) to control power on IPMI device - - - - - Operation to perform. Valid operations: on, off, reboot, status, list, diag, monitor or metadata - - - - - Timeout (sec) for IPMI operation - - - - - Ciphersuite to use (same as ipmitool -C parameter) - - - - - Method to fence (onoff or cycle) - - - - - Wait X seconds after on/off operation - - - - - Wait X seconds before fencing is started - - - - - Privilege level on IPMI device - - - - - Verbose mode - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_ironic.xml b/agent_generator/src_xml/fence_ironic.xml deleted file mode 100644 index 0f4b96f1..00000000 --- a/agent_generator/src_xml/fence_ironic.xml +++ /dev/null @@ -1,61 +0,0 @@ - - -fence_ironic is an I/O Fencing agent which can be used with Ironic-managed nodes. - - - - - Specify (stdin) or increment (command line) debug level - - - - Keystone URL to authenticate against - - - - Keystone username to use for authentication - - - - Keystone password to use for authentication - - - - Keystone tenant name to use for authentication - - - - A mapping of UUIDs to node names - - - - - Fencing action (null, off, on, [reboot], status, list, monitor, metadata) - - - - - Fencing timeout (in seconds; default=30) - - - - - Fencing delay (in seconds; default=0) - - - - - Virtual Machine (domain name) to fence (deprecated; use port) - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_kdump.xml b/agent_generator/src_xml/fence_kdump.xml deleted file mode 100644 index c985cacf..00000000 --- a/agent_generator/src_xml/fence_kdump.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -The fence_kdump agent is intended to be used with with kdump service. -http://www.kernel.org/pub/linux/utils/kernel/kexec/ - - - - - Name or IP address of node to be fenced - - - - - Port number - - - - - Network family - - - - - Fencing action - - - - - Timeout in seconds - - - - - Print verbose output - - - - - Print version - - - - - Print usage - - - - - - - diff --git a/agent_generator/src_xml/fence_kubevirt.xml b/agent_generator/src_xml/fence_kubevirt.xml deleted file mode 100644 index 9e355084..00000000 --- a/agent_generator/src_xml/fence_kubevirt.xml +++ /dev/null @@ -1,134 +0,0 @@ - - -fence_kubevirt is an I/O Fencing agent for KubeVirt. -https://kubevirt.io/ - - - - - Fencing action - - - - - Physical plug number on device, UUID or identification of machine - - - - - Physical plug number on device, UUID or identification of machine - - - - - Use SSL connection without verifying certificate - - - - - Namespace of the KubeVirt machine. - - - - - Kubeconfig file path - - - - - Version of the KubeVirt API. - - - - - Disable logging to stderr. Does not affect --verbose or --debug-file or logging to syslog. - - - - - Verbose mode. Multiple -v flags can be stacked on the command line (e.g., -vvv) to increase verbosity. - - - - - Level of debugging detail in output. 
Defaults to the number of --verbose flags specified on the command line, or to 1 if verbose=1 in a stonith device configuration (i.e., on stdin). - - - - - Write debug information to given file - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by 'list' operation - - - - - Wait X seconds before fencing is started - - - - - Disable timeout (true/false) (default: true when run from Pacemaker 2.0+) - - - - - Wait X seconds for cmd prompt after login - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Sleep X seconds between status calls during a STONITH action - - - - - Count of attempts to retry power on - - - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_redfish.xml b/agent_generator/src_xml/fence_redfish.xml deleted file mode 100644 index 7ca457d5..00000000 --- a/agent_generator/src_xml/fence_redfish.xml +++ /dev/null @@ -1,182 +0,0 @@ - - -fence_redfish is an I/O Fencing agent which can be used with Out-of-Band controllers that support Redfish APIs. These controllers provide remote access to control power on a server. -http://www.dmtf.org - - - - - Fencing action - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - IP address or hostname of fencing device - - - - - IP address or hostname of fencing device - - - - - TCP/UDP port to use for connection with device - - - - - Login name - - - - - Login password or passphrase - - - - - Script to run to retrieve password - - - - - Login password or passphrase - - - - - Script to run to retrieve password - - - - - IP address or hostname of fencing device (together with --port-as-ip) - - - - - IP address or hostname of fencing device (together with --port-as-ip) - - - - - Base or starting Redfish URI - - - - - Use SSL connection with verifying certificate - - - - - Use SSL connection without verifying certificate - - - - - Use SSL connection with verifying certificate - - - - - Redfish Systems resource URI, i.e. /redfish/v1/Systems/System.Embedded.1 - - - - - Login name - - - - - Disable logging to stderr. Does not affect --verbose or --debug-file or logging to syslog. - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Wait X seconds before fencing is started - - - - - Wait X seconds for cmd prompt after login - - - - - Make "port/plug" to be an alias to IP address - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Count of attempts to retry power on - - - - - Path to gnutls-cli binary - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_rhevm.xml b/agent_generator/src_xml/fence_rhevm.xml deleted file mode 100644 index 8a281ef2..00000000 --- a/agent_generator/src_xml/fence_rhevm.xml +++ /dev/null @@ -1,142 +0,0 @@ - - -fence_rhevm is an I/O Fencing agent which can be used with RHEV-M REST API to fence virtual machines. 
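A hypothetical reboot of a manager-controlled virtual machine, with placeholder manager address, credentials and VM name, might look like:

# Hypothetical sketch: reboot a virtual machine through the RHEV-M/oVirt
# REST API over SSL (all values are placeholders).
fence_rhevm --ip rhevm.example.com --ssl --username admin@internal --password secret --plug myvm --action reboot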
-http://www.redhat.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - SSL connection - - - - - Disable TLS negotiation - - - - - Physical plug number, name of virtual machine or UUID - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - SSL connection with verifying fence device's certificate - - - - - SSL connection without verifying fence device's certificate - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Set HTTP Filter header to false - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_rsb.xml b/agent_generator/src_xml/fence_rsb.xml deleted file mode 100644 index 72166dbe..00000000 --- a/agent_generator/src_xml/fence_rsb.xml +++ /dev/null @@ -1,126 +0,0 @@ - - -fence_rsb is an I/O Fencing agent which can be used with the Fujitsu-Siemens RSB management interface. It logs into device via telnet/ssh and reboots a specified outlet. Lengthy telnet/ssh connections should be avoided while a GFS cluster is running because the connection will block any necessary fencing actions. -http://www.fujitsu.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - SSH connection - - - - - Force Python regex for command prompt - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_scsi.xml b/agent_generator/src_xml/fence_scsi.xml deleted file mode 100644 index 9be7db1c..00000000 --- a/agent_generator/src_xml/fence_scsi.xml +++ /dev/null @@ -1,48 +0,0 @@ - - -fence_scsi -http://www.t10.org - - - - - Use APTPL flag for registrations - - - - - List of devices to be used for fencing action - - - - - File to write error/debug messages - - - - - Wait X seconds before fencing is started - - - - - Key value to be used for fencing action - - - - - Fencing action - - - - - Name of node - - - - - - - - - diff --git a/agent_generator/src_xml/fence_virt.xml b/agent_generator/src_xml/fence_virt.xml deleted file mode 100644 index c712e939..00000000 --- a/agent_generator/src_xml/fence_virt.xml +++ /dev/null @@ -1,66 +0,0 @@ - - -fence_virt is an I/O Fencing agent which can be used withvirtual machines. 
- - - - - Specify (stdin) or increment (command line) debug level - - - - - Serial device (default=/dev/ttyS1) - - - - - Serial Parameters (default=115200,8N1) - - - - - VM Channel IP address (default=10.0.2.179) - - - - - Multicast or VMChannel IP port (default=1229) - - - - - Virtual Machine (domain name) to fence - - - - - Fencing action (null, off, on, [reboot], status, list, monitor, metadata) - - - - - Fencing timeout (in seconds; default=30) - - - - - Fencing delay (in seconds; default=0) - - - - - Virtual Machine (domain name) to fence (deprecated; use port) - - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_vmware_soap.xml b/agent_generator/src_xml/fence_vmware_soap.xml deleted file mode 100644 index 95a22dc1..00000000 --- a/agent_generator/src_xml/fence_vmware_soap.xml +++ /dev/null @@ -1,139 +0,0 @@ - - -fence_vmware_soap is an I/O Fencing agent which can be used with the virtual machines managed by VMWare products that have SOAP API v4.1+. -.P -Name of virtual machine (-n / port) has to be used in inventory path format (e.g. /datacenter/vm/Discovered virtual machine/myMachine). In the cases when name of yours VM is unique you can use it instead. Alternatively you can always use UUID to access virtual machine. -http://www.vmware.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - SSL connection - - - - - Disable TLS negotiation - - - - - Physical plug number, name of virtual machine or UUID - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - SSL connection with verifying fence device's certificate - - - - - SSL connection without verifying fence device's certificate - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_watchdog.xml b/agent_generator/src_xml/fence_watchdog.xml deleted file mode 100644 index 503dfeec..00000000 --- a/agent_generator/src_xml/fence_watchdog.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -fence_watchdog just provides -meta-data - actual fencing is done by the pacemaker internal watchdog agent. - - - - - Fencing Action - - - - - Ignored - - - - - Ignored - - - - - Display version information and exit - - - - - Display help and exit - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_wti.xml b/agent_generator/src_xml/fence_wti.xml deleted file mode 100644 index e83d5a44..00000000 --- a/agent_generator/src_xml/fence_wti.xml +++ /dev/null @@ -1,137 +0,0 @@ - - -fence_wti is an I/O Fencing agent which can be used with the WTI Network Power Switch (NPS). It logs into an NPS via telnet or ssh and boots a specified plug. Lengthy telnet connections to the NPS should be avoided while a GFS cluster is running because the connection will block any necessary fencing actions. 
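For context, agents like this one are normally wired into the cluster through pcs, which this module's providers also call; a minimal sketch, with placeholder values and parameter names taken loosely from the metadata above, could be:

# Hypothetical sketch: register a WTI power switch as a stonith device and map
# cluster nodes to its plugs (all values are placeholders).
pcs stonith create my-wti fence_wti ip=192.0.2.60 username=admin password=secret pcmk_host_map='node1:1;node2:2'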
-http://www.wti.com - - - - - IP Address or Hostname - - - - - Login Name - - - - - Login password or passphrase - - - - - Force Python regex for command prompt - - - - - SSH connection - - - - - Physical plug number, name of virtual machine or UUID - - - - - TCP/UDP port to use for connection with device - - - - - Forces agent to use IPv4 addresses only - - - - - Forces agent to use IPv6 addresses only - - - - - Script to retrieve password - - - - - Identity file for ssh - - - - - SSH options to use - - - - - Fencing Action - - - - - Verbose mode - - - - - Write debug information to given file - - - - - Display version information and exit - - - - - Display help and exit - - - - - Separator for CSV created by operation list - - - - - Test X seconds for status change after ON/OFF - - - - - Wait X seconds for cmd prompt after issuing command - - - - - Wait X seconds for cmd prompt after login - - - - - Wait X seconds after issuing ON/OFF - - - - - Wait X seconds before fencing is started - - - - - Count of attempts to retry power on - - - - - - - - - - - - diff --git a/agent_generator/src_xml/fence_xvm.xml b/agent_generator/src_xml/fence_xvm.xml deleted file mode 100644 index 15d00e60..00000000 --- a/agent_generator/src_xml/fence_xvm.xml +++ /dev/null @@ -1,86 +0,0 @@ - - -fence_xvm is an I/O Fencing agent which can be used withvirtual machines. - - - - - Specify (stdin) or increment (command line) debug level - - - - - IP Family ([auto], ipv4, ipv6) - - - - - Multicast address (default=225.0.0.12 / ff05::3:1) - - - - - TCP, Multicast, or VMChannel IP port (default=1229) - - - - - Multicast retransmit time (in 1/10sec; default=20) - - - - - Authentication (none, sha1, [sha256], sha512) - - - - - Packet hash strength (none, sha1, [sha256], sha512) - - - - - Shared key file (default=/etc/cluster/fence_xvm.key) - - - - - Virtual Machine (domain name) to fence - - - - - Treat [domain] as UUID instead of domain name. This is provided for compatibility with older fence_xvmd installations. - - - - - Fencing action (null, off, on, [reboot], status, list, monitor, metadata) - - - - - Fencing timeout (in seconds; default=30) - - - - - Fencing delay (in seconds; default=0) - - - - - Virtual Machine (domain name) to fence (deprecated; use port) - - - - - - - - - - - - - diff --git a/agent_generator/update_sources.sh b/agent_generator/update_sources.sh deleted file mode 100755 index 4b8d0a8a..00000000 --- a/agent_generator/update_sources.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# This script updates fence agent descriptions (XML files in src_xml -# directory). Running this will install and update fence agent -# packages to the latest version. 
- -set -exuo pipefail - -generator_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -source "$generator_dir/variables.sh" - -all_pkgs='' -for cmd_pkg in "${cmd_pkg_map[@]}"; do - pkg=${cmd_pkg#*:} - all_pkgs+="$pkg " -done - -sudo yum -y install $all_pkgs -sudo yum -y update $all_pkgs - -for cmd_pkg in "${cmd_pkg_map[@]}"; do - cmd=${cmd_pkg%%:*} - - $cmd -o metadata > "$generator_dir/src_xml/$cmd.xml" -done diff --git a/agent_generator/variables.sh b/agent_generator/variables.sh deleted file mode 100644 index 27d8bb95..00000000 --- a/agent_generator/variables.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -cmd_pkg_map=( - "fence_apc:fence-agents-apc" - "fence_apc_snmp:fence-agents-apc-snmp" - "fence_amt:None" - "fence_bladecenter:fence-agents-bladecenter" - "fence_brocade:fence-agents-brocade" - "fence_cisco_mds:fence-agents-cisco-mds" - "fence_cisco_ucs:fence-agents-cisco-ucs" - "fence_compute:fence-agents-compute" - "fence_crosslink:None" - "fence_drac5:fence-agents-drac5" - "fence_eaton_snmp:fence-agents-eaton-snmp" - "fence_eps:fence-agents-eps" - "fence_hpblade:fence-agents-hpblade" - "fence_ibmblade:fence-agents-ibmblade" - "fence_idrac:fence-agents-ipmilan" - "fence_ifmib:fence-agents-ifmib" - "fence_ilo:fence-agents-ilo2" - "fence_ilo2:fence-agents-ilo2" - "fence_ilo3:fence-agents-ipmilan" - "fence_ilo4:fence-agents-ipmilan" - "fence_ilo_mp:fence-agents-ilo-mp" - "fence_imm:fence-agents-ipmilan" - "fence_intelmodular:fence-agents-intelmodular" - "fence_ipdu:fence-agents-ipdu" - "fence_ipmilan:fence-agents-ipmilan" - "fence_ironic:None" - "fence_kdump:fence-agents-kdump" - "fence_kubevirt:None" - "fence_redfish:fence-agents-redfish" - "fence_rhevm:fence-agents-rhevm" - "fence_rsb:fence-agents-rsb" - "fence_scsi:fence-agents-scsi" - "fence_virt:fence-virt" - "fence_vmware_soap:fence-agents-vmware-soap" - "fence_watchdog:fence-agents-sbd" - "fence_wti:fence-agents-wti" - - # These have manual changes and need to be updated manually: - # "fence_xvm:fence-virt" - - # re fence_kubevirt: - # change to fence-agents-kubevirt when we have it with - # https://bugzilla.redhat.com/show_bug.cgi?id=1984803 -) diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 144c8b0a..00000000 --- a/bindep.txt +++ /dev/null @@ -1,23 +0,0 @@ -# This is a cross-platform list tracking distribution packages needed by tests; -# see http://docs.openstack.org/infra/bindep/ for additional information. 
- -libxml2-devel [test platform:rpm] -libxml2-dev [test platform:dpkg] -libxslt-devel [test platform:rpm] -libxslt1-dev [test platform:dpkg] -ruby-devel [test platform:rpm] -ruby-dev [test platform:dpkg] -zlib1g-dev [test platform:dpkg] -zlib-devel [test platform:rpm] -pacemaker-cli-utils [test platform:dpkg] -pacemaker-cli [test platform:rpm] -pcs [test platform:rpm] -pcs [test platform:dpkg] -fence-agents-redfish [test platform:rpm] -fence-agents-ipmilan [test platform:rpm] -fence-agents-kdump [test platform:rpm] -fence-agents-rhevm [test platform:rpm] -fence-agents [test platform:dpkg] -pacemaker [test platform:rpm] -pacemaker [test platform:dpkg] -puppet [build] diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 44cb2082..00000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -# This is required for the docs build jobs -sphinx>=2.0.0,!=2.1.0 # BSD -openstackdocstheme>=2.2.1 # Apache-2.0 - -# This is required for the releasenotes build jobs -reno>=3.1.0 # Apache-2.0 diff --git a/examples/pacemaker/host.pp b/examples/pacemaker/host.pp deleted file mode 100644 index e3eee4f2..00000000 --- a/examples/pacemaker/host.pp +++ /dev/null @@ -1,35 +0,0 @@ -class hosts ( - $hostname = 'node', -) { - resources { 'host': - purge => true, - } - - host { 'localhost' : - ip => '127.0.0.1', - host_aliases => [$hostname], - } -} - -class hostname ( - $hostname = 'node', -) { - - if $::osfamily == 'Debian' { - file { 'hostname' : - ensure => 'present', - path => '/etc/hostname', - content => "${hostname}\n", - } - } - - exec { 'set-hostname' : - command => "hostname ${hostname}", - unless => "test `uname -n` = '${hostname}'", - provider => 'shell', - } - -} - -include hosts -include hostname \ No newline at end of file diff --git a/examples/pacemaker/setup.pp b/examples/pacemaker/setup.pp deleted file mode 100644 index e7fac982..00000000 --- a/examples/pacemaker/setup.pp +++ /dev/null @@ -1,29 +0,0 @@ -class properties { - - pacemaker_property { 'stonith-enabled' : - ensure => 'present', - value => false, - } - - pacemaker_property { 'no-quorum-policy' : - ensure => 'present', - value => 'ignore', - } - -} - -include properties - -class { 'pacemaker::new' : - cluster_nodes => ['node'], - cluster_password => 'hacluster', - - # firewall is not needed on a signle node - firewall_corosync_manage => false, - firewall_pcsd_manage => false, -} - -Class['pacemaker::new'] -> -Class['properties'] - - diff --git a/examples/pacemaker_colocation/create.pp b/examples/pacemaker_colocation/create.pp deleted file mode 100644 index f83d1d1d..00000000 --- a/examples/pacemaker_colocation/create.pp +++ /dev/null @@ -1,43 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_colocation { - ensure => 'present', -} - -pacemaker_resource { 'colocation-test1' : - parameters => { - 'fake' => '1', - }, -} - -pacemaker_resource { 'colocation-test2' : - parameters => { - 'fake' => '2', - }, -} - -pacemaker_colocation { 'colocation-test2_with_and_after_colocation-test1' : - first => 'colocation-test1', - second => 'colocation-test2', - score => '200', -} - -pacemaker_resource { 'colocation-test3' : - parameters => { - 'fake' => '3', - }, -} - -pacemaker_colocation { 'colocation-test3_with_and_after_colocation-test1' : - first => 'colocation-test1', - second => 'colocation-test3', - score => '400', -} - -Pacemaker_resource<||> -> -Pacemaker_colocation<||> diff --git 
a/examples/pacemaker_colocation/delete.pp b/examples/pacemaker_colocation/delete.pp deleted file mode 100644 index 166b95ec..00000000 --- a/examples/pacemaker_colocation/delete.pp +++ /dev/null @@ -1,23 +0,0 @@ -Pacemaker_resource { - ensure => 'absent', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_colocation { - ensure => 'absent', -} - -pacemaker_resource { 'colocation-test1' :} - -pacemaker_resource { 'colocation-test2' :} - -pacemaker_colocation { 'colocation-test2_with_and_after_colocation-test1' :} - -pacemaker_resource { 'colocation-test3' :} - -pacemaker_colocation { 'colocation-test3_with_and_after_colocation-test1' :} - -Pacemaker_colocation<||> -> -Pacemaker_resource<||> diff --git a/examples/pacemaker_colocation/show.sh b/examples/pacemaker_colocation/show.sh deleted file mode 100755 index a93503da..00000000 --- a/examples/pacemaker_colocation/show.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -show() { - puppet resource pacemaker_colocation "${1}" - cibadmin --query --xpath "/cib/configuration/constraints/rsc_colocation[@id='${1}']" - echo '--------------------' -} - -show 'colocation-test2_with_and_after_colocation-test1' -show 'colocation-test3_with_and_after_colocation-test1' diff --git a/examples/pacemaker_colocation/update.pp b/examples/pacemaker_colocation/update.pp deleted file mode 100644 index a07ab2a3..00000000 --- a/examples/pacemaker_colocation/update.pp +++ /dev/null @@ -1,43 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_colocation { - ensure => 'present', -} - -pacemaker_resource { 'colocation-test1' : - parameters => { - 'fake' => '1', - }, -} - -pacemaker_resource { 'colocation-test2' : - parameters => { - 'fake' => '2', - }, -} - -pacemaker_colocation { 'colocation-test2_with_and_after_colocation-test1' : - first => 'colocation-test1', - second => 'colocation-test2', - score => '201', -} - -pacemaker_resource { 'colocation-test3' : - parameters => { - 'fake' => '3', - }, -} - -pacemaker_colocation { 'colocation-test3_with_and_after_colocation-test1' : - first => 'colocation-test1', - second => 'colocation-test3', - score => '401', -} - -Pacemaker_resource<||> -> -Pacemaker_colocation<||> diff --git a/examples/pacemaker_location/create.pp b/examples/pacemaker_location/create.pp deleted file mode 100644 index 66e95bc5..00000000 --- a/examples/pacemaker_location/create.pp +++ /dev/null @@ -1,51 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_location { - ensure => 'present', -} - -pacemaker_resource { 'location-test1' : - parameters => { - 'fake' => '1', - }, -} - -$rules = [ - { - 'score' => '100', - 'expressions' => [ - { - 'attribute' => 'a', - 'operation' => 'defined', - }, - ] - }, - { - 'score' => '200', - 'expressions' => [ - { - 'attribute' => 'b', - 'operation' => 'defined', - }, - ] - } -] - -pacemaker_location { 'location-test1_location_with_rule' : - primitive => 'location-test1', - rules => $rules, -} - -pacemaker_location { 'location-test1_location_with_score' : - primitive => 'location-test1', - node => $pacemaker_node_name, - score => '200', -} - -Pacemaker_resource<||> -> -Pacemaker_location<||> diff --git a/examples/pacemaker_location/delete.pp b/examples/pacemaker_location/delete.pp deleted file mode 100644 index 775211a5..00000000 --- 
a/examples/pacemaker_location/delete.pp +++ /dev/null @@ -1,19 +0,0 @@ -Pacemaker_resource { - ensure => 'absent', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_location { - ensure => 'absent', -} - -pacemaker_resource { 'location-test1' :} - -pacemaker_location { 'location-test1_location_with_rule' :} - -pacemaker_location { 'location-test1_location_with_score' :} - -Pacemaker_location<||> -> -Pacemaker_resource<||> diff --git a/examples/pacemaker_location/show.sh b/examples/pacemaker_location/show.sh deleted file mode 100755 index 5857dce7..00000000 --- a/examples/pacemaker_location/show.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -show() { - puppet resource pacemaker_location "${1}" - cibadmin --query --xpath "/cib/configuration/constraints/rsc_location[@id='${1}']" - echo '--------------------' -} - -show 'location-test1_location_with_rule' -show 'location-test1_location_with_score' diff --git a/examples/pacemaker_location/update.pp b/examples/pacemaker_location/update.pp deleted file mode 100644 index c5a7b39e..00000000 --- a/examples/pacemaker_location/update.pp +++ /dev/null @@ -1,51 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_location { - ensure => 'present', -} - -pacemaker_resource { 'location-test1' : - parameters => { - 'fake' => '1', - }, -} - -$rules = [ - { - 'score' => '101', - 'expressions' => [ - { - 'attribute' => 'a', - 'operation' => 'defined', - }, - ] - }, - { - 'score' => '201', - 'expressions' => [ - { - 'attribute' => 'b', - 'operation' => 'defined', - }, - ] - } -] - -pacemaker_location { 'location-test1_location_with_rule' : - primitive => 'location-test1', - rules => $rules, -} - -pacemaker_location { 'location-test1_location_with_score' : - primitive => 'location-test1', - node => $pacemaker_node_name, - score => '201', -} - -Pacemaker_resource<||> -> -Pacemaker_location<||> diff --git a/examples/pacemaker_operation_default/create.pp b/examples/pacemaker_operation_default/create.pp deleted file mode 100644 index 1416dd24..00000000 --- a/examples/pacemaker_operation_default/create.pp +++ /dev/null @@ -1,4 +0,0 @@ -pacemaker_operation_default { 'interval' : - ensure => 'present', - value => '300', -} diff --git a/examples/pacemaker_operation_default/delete.pp b/examples/pacemaker_operation_default/delete.pp deleted file mode 100644 index a6062179..00000000 --- a/examples/pacemaker_operation_default/delete.pp +++ /dev/null @@ -1,3 +0,0 @@ -pacemaker_operation_default { 'interval' : - ensure => 'absent', -} diff --git a/examples/pacemaker_operation_default/show.sh b/examples/pacemaker_operation_default/show.sh deleted file mode 100755 index 5cdfe60c..00000000 --- a/examples/pacemaker_operation_default/show.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -show() { - puppet resource pacemaker_operation_default "${1}" - cibadmin --query --xpath "/cib/configuration/op_defaults/meta_attributes/nvpair[@name='${1}']" - echo '--------------------' -} - -show 'interval' diff --git a/examples/pacemaker_operation_default/update.pp b/examples/pacemaker_operation_default/update.pp deleted file mode 100644 index 85251af7..00000000 --- a/examples/pacemaker_operation_default/update.pp +++ /dev/null @@ -1,4 +0,0 @@ -pacemaker_operation_default { 'interval' : - ensure => 'present', - value => '301', -} diff --git a/examples/pacemaker_order/create.pp b/examples/pacemaker_order/create.pp deleted file 
mode 100644 index 1f237222..00000000 --- a/examples/pacemaker_order/create.pp +++ /dev/null @@ -1,41 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_order { - ensure => 'present', -} - -pacemaker_resource { 'order-test1' : - parameters => { - 'fake' => '1', - }, -} - -pacemaker_resource { 'order-test2' : - parameters => { - 'fake' => '2', - }, -} - -pacemaker_order { 'order-test2_after_order-test1_score' : - first => 'order-test1', - second => 'order-test2', - score => '200', -} - -# Pacemaker 1.1+ -pacemaker_order { 'order-test2_after_order-test1_kind' : - first => 'order-test1', - first_action => 'promote', - second => 'order-test2', - second_action => 'demote', - kind => 'mandatory', - symmetrical => true, -} - -Pacemaker_resource<||> -> -Pacemaker_order<||> diff --git a/examples/pacemaker_order/delete.pp b/examples/pacemaker_order/delete.pp deleted file mode 100644 index c1c28fc8..00000000 --- a/examples/pacemaker_order/delete.pp +++ /dev/null @@ -1,21 +0,0 @@ -Pacemaker_resource { - ensure => 'absent', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_order { - ensure => 'absent', -} - -pacemaker_resource { 'order-test1' :} - -pacemaker_resource { 'order-test2' :} - -pacemaker_order { 'order-test2_after_order-test1_score' :} - -pacemaker_order { 'order-test2_after_order-test1_kind' :} - -Pacemaker_order<||> -> -Pacemaker_resource<||> diff --git a/examples/pacemaker_order/show.sh b/examples/pacemaker_order/show.sh deleted file mode 100755 index 746ccf6e..00000000 --- a/examples/pacemaker_order/show.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -show() { - puppet resource pacemaker_order "${1}" - cibadmin --query --xpath "/cib/configuration/constraints/rsc_order[@id='${1}']" - echo '--------------------' -} - -show 'order-test2_after_order-test1_score' -show 'order-test2_after_order-test1_kind' diff --git a/examples/pacemaker_order/update.pp b/examples/pacemaker_order/update.pp deleted file mode 100644 index 3b8faed0..00000000 --- a/examples/pacemaker_order/update.pp +++ /dev/null @@ -1,41 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', -} - -Pacemaker_order { - ensure => 'present', -} - -pacemaker_resource { 'order-test1' : - parameters => { - 'fake' => '1', - }, -} - -pacemaker_resource { 'order-test2' : - parameters => { - 'fake' => '2', - }, -} - -pacemaker_order { 'order-test2_after_order-test1_score' : - first => 'order-test1', - second => 'order-test2', - score => '201', -} - -# Pacemaker 1.1+ -pacemaker_order { 'order-test2_after_order-test1_kind' : - first => 'order-test1', - first_action => 'promote', - second => 'order-test2', - second_action => 'start', - kind => 'serialize', - symmetrical => true, -} - -Pacemaker_resource<||> -> -Pacemaker_order<||> diff --git a/examples/pacemaker_property/create.pp b/examples/pacemaker_property/create.pp deleted file mode 100644 index 4970baa0..00000000 --- a/examples/pacemaker_property/create.pp +++ /dev/null @@ -1,9 +0,0 @@ -pacemaker_property { 'cluster-delay' : - ensure => 'present', - value => '50', -} - -pacemaker_property { 'batch-limit' : - ensure => 'present', - value => '50', -} diff --git a/examples/pacemaker_property/delete.pp b/examples/pacemaker_property/delete.pp deleted file mode 100644 index 9bc7e0d6..00000000 --- a/examples/pacemaker_property/delete.pp 
+++ /dev/null @@ -1,7 +0,0 @@ -pacemaker_property { 'cluster-delay' : - ensure => 'absent', -} - -pacemaker_property { 'batch-limit' : - ensure => 'absent', -} diff --git a/examples/pacemaker_property/show.sh b/examples/pacemaker_property/show.sh deleted file mode 100755 index feee20dc..00000000 --- a/examples/pacemaker_property/show.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -show() { - puppet resource pacemaker_property "${1}" - cibadmin --query --xpath "/cib/configuration/crm_config/cluster_property_set/nvpair[@name='${1}']" - echo '--------------------' -} - -show 'cluster-delay' -show 'batch-limit' diff --git a/examples/pacemaker_property/update.pp b/examples/pacemaker_property/update.pp deleted file mode 100644 index 99063b58..00000000 --- a/examples/pacemaker_property/update.pp +++ /dev/null @@ -1,9 +0,0 @@ -pacemaker_property { 'cluster-delay' : - ensure => 'present', - value => '51', -} - -pacemaker_property { 'batch-limit' : - ensure => 'present', - value => '51', -} diff --git a/examples/pacemaker_resource/create.pp b/examples/pacemaker_resource/create.pp deleted file mode 100644 index 7195e325..00000000 --- a/examples/pacemaker_resource/create.pp +++ /dev/null @@ -1,108 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_provider => 'pacemaker', - primitive_type => 'Dummy', -} - -pacemaker_resource { 'test-simple1' : - parameters => { - 'fake' => '1', - }, -} - -pacemaker_resource { 'test-simple2' : - parameters => { - 'fake' => '2', - }, -} - -pacemaker_resource { 'test-simple-params1' : - parameters => { - 'fake' => '3', - }, - metadata => { - 'migration-threshold' => '3', - 'failure-timeout' => '120', - }, - operations => { - 'monitor' => { - 'interval' => '20', - 'timeout' => '10', - }, - 'start' => { - 'timeout' => '30', - }, - 'stop' => { - 'timeout' => '30', - }, - }, -} - -pacemaker_resource { 'test-simple-params2' : - parameters => { - 'fake' => '4', - }, - metadata => { - 'migration-threshold' => '3', - 'failure-timeout' => '120', - }, - operations => [ - { - 'name' => 'monitor', - 'interval' => '10', - 'timeout' => '10', - }, - { - 'name' => 'monitor', - 'interval' => '60', - 'timeout' => '10', - }, - { - 'name' => 'start', - 'timeout' => '30', - }, - { - 'name' => 'stop', - 'timeout' => '30', - }, - ], -} - -pacemaker_resource { 'test-clone' : - complex_type => 'clone', - complex_metadata => { - 'interleave' => true, - }, - parameters => { - 'fake' => '5', - }, -} - -pacemaker_resource { 'test-master' : - primitive_type => 'Stateful', - complex_type => 'master', - complex_metadata => { - 'interleave' => true, - 'master-max' => '1', - }, - parameters => { - 'fake' => '6', - }, -} - -pacemaker_resource { 'test-clone-change' : - primitive_type => 'Stateful', - complex_type => 'simple', - parameters => { - 'fake' => '7', - }, -} - -pacemaker_resource { 'test-master-change' : - primitive_type => 'Stateful', - complex_type => 'master', - parameters => { - 'fake' => '8', - }, -} \ No newline at end of file diff --git a/examples/pacemaker_resource/delete.pp b/examples/pacemaker_resource/delete.pp deleted file mode 100644 index 2cc5a0c6..00000000 --- a/examples/pacemaker_resource/delete.pp +++ /dev/null @@ -1,22 +0,0 @@ -Pacemaker_resource { - ensure => 'absent', - primitive_class => 'ocf', - primitive_provider => 'pacemaker', - primitive_type => 'Dummy', -} - -pacemaker_resource { 'test-simple1' :} - -pacemaker_resource { 'test-simple2' :} - -pacemaker_resource { 'test-simple-params1' :} - -pacemaker_resource { 
'test-simple-params2' :} - -pacemaker_resource { 'test-clone' :} - -pacemaker_resource { 'test-master' :} - -pacemaker_resource { 'test-clone-change' :} - -pacemaker_resource { 'test-master-change' :} diff --git a/examples/pacemaker_resource/show.sh b/examples/pacemaker_resource/show.sh deleted file mode 100755 index 0bc518fc..00000000 --- a/examples/pacemaker_resource/show.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh - -show() { - puppet resource pacemaker_resource "${1}" - cibadmin --query --xpath "/cib/configuration/resources/primitive[@id='${1}']" - echo '--------------------' -} - -show_clone() { - puppet resource pacemaker_resource "${1}" - cibadmin --query --xpath "/cib/configuration/resources/clone[@id='${1}-clone']" - echo '--------------------' -} - -show_master() { - puppet resource pacemaker_resource "${1}" - cibadmin --query --xpath "/cib/configuration/resources/master[@id='${1}-master']" - echo '--------------------' -} - -show 'test-simple1' -show 'test-simple2' -show 'test-simple-params1' -show 'test-simple-params2' -show_clone 'test-clone' -show_master 'test-master' diff --git a/examples/pacemaker_resource/update.pp b/examples/pacemaker_resource/update.pp deleted file mode 100644 index a97ffcb5..00000000 --- a/examples/pacemaker_resource/update.pp +++ /dev/null @@ -1,108 +0,0 @@ -Pacemaker_resource { - ensure => 'present', - primitive_class => 'ocf', - primitive_provider => 'pacemaker', - primitive_type => 'Dummy', -} - -pacemaker_resource { 'test-simple1' : - parameters => { - 'fake' => '2', - }, -} - -pacemaker_resource { 'test-simple2' : - parameters => { - 'fake' => '3', - }, -} - -pacemaker_resource { 'test-simple-params1' : - parameters => { - 'fake' => '4', - }, - metadata => { - 'migration-threshold' => '4', - 'failure-timeout' => '121', - }, - operations => { - 'monitor' => { - 'interval' => '21', - 'timeout' => '11', - }, - 'start' => { - 'timeout' => '31', - }, - 'stop' => { - 'timeout' => '31', - }, - }, -} - -pacemaker_resource { 'test-simple-params2' : - parameters => { - 'fake' => '5', - }, - metadata => { - 'migration-threshold' => '4', - 'failure-timeout' => '121', - }, - operations => [ - { - 'name' => 'monitor', - 'interval' => '11', - 'timeout' => '11', - }, - { - 'name' => 'monitor', - 'interval' => '61', - 'timeout' => '11', - }, - { - 'name' => 'start', - 'timeout' => '31', - }, - { - 'name' => 'stop', - 'timeout' => '31', - }, - ], -} - -pacemaker_resource { 'test-clone' : - complex_type => 'clone', - complex_metadata => { - 'interleave' => true, - }, - parameters => { - 'fake' => '6', - }, -} - -pacemaker_resource { 'test-master' : - primitive_type => 'Stateful', - complex_type => 'master', - complex_metadata => { - 'interleave' => true, - 'master-max' => '1', - }, - parameters => { - 'fake' => '7', - }, -} - -pacemaker_resource { 'test-clone-change' : - primitive_type => 'Stateful', - complex_type => 'clone', - parameters => { - 'fake' => '8', - }, -} - -pacemaker_resource { 'test-master-change' : - primitive_type => 'Stateful', - complex_type => 'simple', - parameters => { - 'fake' => '9', - }, -} \ No newline at end of file diff --git a/examples/pacemaker_resource_default/create.pp b/examples/pacemaker_resource_default/create.pp deleted file mode 100644 index 94b55cc6..00000000 --- a/examples/pacemaker_resource_default/create.pp +++ /dev/null @@ -1,4 +0,0 @@ -pacemaker_resource_default { 'resource-stickiness' : - ensure => 'present', - value => '100', -} diff --git a/examples/pacemaker_resource_default/delete.pp 
b/examples/pacemaker_resource_default/delete.pp deleted file mode 100644 index b7ec3599..00000000 --- a/examples/pacemaker_resource_default/delete.pp +++ /dev/null @@ -1,3 +0,0 @@ -pacemaker_resource_default { 'resource-stickiness' : - ensure => 'absent', -} diff --git a/examples/pacemaker_resource_default/show.sh b/examples/pacemaker_resource_default/show.sh deleted file mode 100755 index 675f178d..00000000 --- a/examples/pacemaker_resource_default/show.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -show() { - puppet resource pacemaker_resource_default "${1}" - cibadmin --query --xpath "/cib/configuration/rsc_defaults/meta_attributes/nvpair[@name='${1}']" - echo '--------------------' -} - -show 'resource-stickiness' diff --git a/examples/pacemaker_resource_default/update.pp b/examples/pacemaker_resource_default/update.pp deleted file mode 100644 index 1be81b82..00000000 --- a/examples/pacemaker_resource_default/update.pp +++ /dev/null @@ -1,4 +0,0 @@ -pacemaker_resource_default { 'resource-stickiness' : - ensure => 'present', - value => '101', -} diff --git a/examples/service/clean.pp b/examples/service/clean.pp deleted file mode 100644 index e02e1f2f..00000000 --- a/examples/service/clean.pp +++ /dev/null @@ -1,7 +0,0 @@ -pacemaker_resource { 'service-test1' : - ensure => 'absent', -} - -pacemaker_resource { 'service-test2' : - ensure => 'absent', -} diff --git a/examples/service/start.pp b/examples/service/start.pp deleted file mode 100644 index 2b910b9a..00000000 --- a/examples/service/start.pp +++ /dev/null @@ -1,52 +0,0 @@ -# Using the wrapper - -# a simple service -service { 'service-test1' : - ensure => 'running', - enable => true, -} - -# apply a wrapper -pacemaker::new::wrapper { 'service-test1' : - primitive_class => 'ocf', - primitive_provider => 'pacemaker', - primitive_type => 'Dummy', - - parameters => { - 'fake' => '1', - }, - - operations => { - 'monitor' => { - 'interval' => '10', - 'timeout' => '10', - }, - }, -} - -# Without the wrapper - -pacemaker_resource { 'service-test2' : - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', - parameters => { - 'fake' => '2', - }, - operations => { - 'monitor' => { - 'interval' => '10', - 'timeout' => '10', - }, - }, -} - -service { 'service-test2' : - ensure => 'running', - enable => true, - provider => 'pacemaker_xml', -} - -Pacemaker_resource['service-test2'] -> -Service['service-test2'] diff --git a/examples/service/stop.pp b/examples/service/stop.pp deleted file mode 100644 index c553efa3..00000000 --- a/examples/service/stop.pp +++ /dev/null @@ -1,52 +0,0 @@ -# Using the wrapper - -# a simple service -service { 'service-test1' : - ensure => 'stopped', - enable => true, -} - -# apply a wrapper -pacemaker::new::wrapper { 'service-test1' : - primitive_class => 'ocf', - primitive_provider => 'pacemaker', - primitive_type => 'Dummy', - - parameters => { - 'fake' => '1', - }, - - operations => { - 'monitor' => { - 'interval' => '10', - 'timeout' => '10', - }, - }, -} - -# Without the wrapper - -pacemaker_resource { 'service-test2' : - ensure => 'present', - primitive_class => 'ocf', - primitive_type => 'Dummy', - primitive_provider => 'pacemaker', - parameters => { - 'fake' => '2', - }, - operations => { - 'monitor' => { - 'interval' => '10', - 'timeout' => '10', - }, - }, -} - -service { 'service-test2' : - ensure => 'stopped', - enable => true, - provider => 'pacemaker_xml', -} - -Pacemaker_resource['service-test2'] -> -Service['service-test2'] diff --git 
a/lib/facter/pacemaker_node_name.rb b/lib/facter/pacemaker_node_name.rb deleted file mode 100644 index 7bd4c2c1..00000000 --- a/lib/facter/pacemaker_node_name.rb +++ /dev/null @@ -1,10 +0,0 @@ -require 'facter' - -# Do not call crm_node -n when running inside a container -if not File.exists?('/.dockerenv') and not File.exists?('/run/.containerenv') - Facter.add('pacemaker_node_name') do - setcode do - Facter::Core::Execution.exec 'crm_node -n 2>/dev/null' - end - end -end diff --git a/lib/pacemaker/options.rb b/lib/pacemaker/options.rb deleted file mode 100644 index 39a4f461..00000000 --- a/lib/pacemaker/options.rb +++ /dev/null @@ -1,31 +0,0 @@ -module Pacemaker - # pacemaker options submodule - # takes the options structure from the YAML file - # other options sources can be implemented here - module Options - # the YAML file with pacemaker options - # @return [String] - def self.pacemaker_options_file - File.join File.dirname(__FILE__), 'options.yaml' - end - - # pacemaker options structure (class level) - # @return [Hash] - def self.pacemaker_options - return @pacemaker_options if @pacemaker_options - @pacemaker_options = YAML.load_file pacemaker_options_file - end - - # pacemaker options structure (instance level) - # @return [Hash] - def pacemaker_options - Pacemaker::Options.pacemaker_options - end - - # maximum possible waiting time of retry functions - # @return [Integer] - def max_wait_time - pacemaker_options[:retry_count] * pacemaker_options[:retry_step] - end - end -end diff --git a/lib/pacemaker/options.yaml b/lib/pacemaker/options.yaml deleted file mode 100644 index a78a9cd2..00000000 --- a/lib/pacemaker/options.yaml +++ /dev/null @@ -1,106 +0,0 @@ ---- -# how many times a command should retry if it's failing -:retry_count: 360 - -# how long to wait between retries (seconds) -:retry_step: 5 - -# how long to wait for a single command to finish running (seconds) -:retry_timeout: 60 - -# count false or nil block return values as failures or only exceptions? -:retry_false_is_failure: true - -# raise error if no more retries left and command is still failing? -:retry_fail_on_timeout: true - -# what cluster properties should be shown on the debug status output -:debug_show_properties: -- symmetric-cluster -- no-quorum-policy - -# Show the debug messages for the resource operations status calculation -:debug_show_operations: false - -# don't actually do any changes to the system -# only show what command would have been run -:debug_enabled: false - -# how do we determine that the service have been started? -# :global - The service is running on any node -# :master - The service is running in the master mode on any node -# :local - The service is running on the local node -:start_mode_master: :master -:start_mode_clone: :global -:start_mode_simple: :global - -# what method should be used to stop the service? -# :global - Stop the running service by disabling it -# :local - Stop the locally running service by banning it on this node -# Note: by default restart does not stop services -# if they are not running locally on the node -:stop_mode_master: :local -:stop_mode_clone: :local -:stop_mode_simple: :global - -# what service is considered running? -# :global - The service is running on any node -# :local - The service is running on the local node -:status_mode_master: :local -:status_mode_clone: :local -:status_mode_simple: :global - -# cleanup the primitive during these actions? 
-:cleanup_on_start: true -:cleanup_on_stop: true - -# set the primitive status to stopped if there are failures -# forcing the primitive to be started again and cleaned up -# on this node -:cleanup_on_status: true - -# try to stop and disable the basic service on these provider actions -# the basic service is the service managed by the system -# init scripts or the upstart/systemd units -# in order to run the Pacemaker service the basic service -# should be stopped and disabled first so it will not mess -# with the OCF script -:disable_basic_service_on_status: false -:disable_basic_service_on_start: true -:disable_basic_service_on_stop: false - -# don't try to stop basic service for these primitive classes -# because they are based on the native service manager -# and the basic service and the Pacemaker service is the same thing -:native_based_primitive_classes: -- lsb -- systemd -- upstart - -# add location constraint to allow the service to run on the current node -# useful for asymmetric cluster mode -:add_location_constraint: true - -# restart the service only if it's running on this node -# and skip restart if it's running elsewhere -:restart_only_if_local: true - -# cleanup primitive only if it has failures -:cleanup_only_if_failures: true - -# use prefetch in the providers -:prefetch: false - -# Use additional idempotency checks before cibadmin create and delete actions -# It may be needed if many nodes are running at the same time -:cibadmin_idempotency_checks: true - -# Should the constraints auto require their primitives? -# It can cause unwanted dependency cycles. -:autorequire_primitives: false - -# meta attributes that are related to the primitive's service status -# and should be excluded from the configuration -:status_meta_attributes: -- target-role -- is-managed diff --git a/lib/pacemaker/pcs/cluster_property.rb b/lib/pacemaker/pcs/cluster_property.rb deleted file mode 100644 index 6d4b84bb..00000000 --- a/lib/pacemaker/pcs/cluster_property.rb +++ /dev/null @@ -1,40 +0,0 @@ -module Pacemaker - # this submodule contains "pcs" based function for cluster property provider - module PcsClusterProperty - # @return [String] - def pcs_cluster_properties_list - pcs 'property', 'list' - rescue Puppet::ExecutionFailure - '' - end - - # @return [Hash] - def pcs_cluster_properties - pcs_list_to_hash pcs_cluster_properties_list - end - - # @return [String,true,false,nil] - def pcs_cluster_property_value(name) - pcs_cluster_properties.fetch name.to_s, nil - end - - # @param name [String] - # @param value [String,true,false] - def pcs_cluster_property_set(name, value) - cmd = ['property', 'set', "#{name}=#{value}"] - retry_block { pcs_safe cmd } - end - - # @param name [String] - def pcs_cluster_property_delete(name) - cmd = ['property', 'unset', name] - retry_block { pcs_safe cmd } - end - - # @param name [String] - # @return [true,false] - def pcs_cluster_property_defined?(name) - pcs_cluster_properties.key? name.to_s - end - end -end diff --git a/lib/pacemaker/pcs/common.rb b/lib/pacemaker/pcs/common.rb deleted file mode 100644 index e9834346..00000000 --- a/lib/pacemaker/pcs/common.rb +++ /dev/null @@ -1,48 +0,0 @@ -module Pacemaker - # this submodule contains "pcs" based common functions - module PcsCommon - # check if debug is enabled either in the pacemaker options - # or the resource has the 'debug' parameter and it's enabled - # @return [TrueClass,FalseClass] - def debug_mode_enabled? 
- return true if pacemaker_options[:debug_enabled] - return true if @resource && @resource.parameters.keys.include?(:debug) && @resource[:debug] - false - end - - # safe pcs command - # @param args [Array] command arguments - # @return [String,NilClass] - def pcs_safe(*args) - command_line = (['pcs'] + args).join ' ' - if debug_mode_enabled? - debug "Exec: #{command_line}" - return - end - begin - pcs *args - rescue StandardError => exception - debug "Command execution have failed: #{command_line}" - raise exception - end - end - - # parse a list of "key: value" data to a hash - # @param list [String] - # @return [Hash] - def pcs_list_to_hash(list) - hash = {} - list.split("\n").each do |line| - line_arr = line.split ':' - next unless line_arr.length == 2 - name = line_arr[0].chomp.strip - value = line_arr[1].chomp.strip - next if name.empty? || value.empty? - value = false if value == 'false' - value = true if value == 'true' - hash.store name, value - end - hash - end - end -end diff --git a/lib/pacemaker/pcs/operation_default.rb b/lib/pacemaker/pcs/operation_default.rb deleted file mode 100644 index 7199f1b5..00000000 --- a/lib/pacemaker/pcs/operation_default.rb +++ /dev/null @@ -1,40 +0,0 @@ -module Pacemaker - # this submodule contains "pcs" based function for operation default provider - module PcsOperationDefault - # @return [String] - def pcs_operation_default_list - pcs 'resource', 'op', 'defaults' - rescue Puppet::ExecutionFailure - '' - end - - # @return [Hash] - def pcs_operation_defaults - pcs_list_to_hash pcs_operation_default_list - end - - # @return [String,true,false,nil] - def pcs_operation_default_value(name) - pcs_operation_defaults.fetch name.to_s, nil - end - - # @param name [String] - # @param value [String,true,false] - def pcs_operation_default_set(name, value) - cmd = ['resource', 'op', 'defaults', "#{name}=#{value}"] - retry_block { pcs_safe cmd } - end - - # @param name [String] - def pcs_operation_default_delete(name) - cmd = ['resource', 'op', 'defaults', "#{name}="] - retry_block { pcs_safe cmd } - end - - # @param name [String] - # @return [true,false] - def pcs_operation_default_defined?(name) - pcs_operation_defaults.key? name.to_s - end - end -end diff --git a/lib/pacemaker/pcs/pcsd_auth.rb b/lib/pacemaker/pcs/pcsd_auth.rb deleted file mode 100644 index 8789e9ab..00000000 --- a/lib/pacemaker/pcs/pcsd_auth.rb +++ /dev/null @@ -1,64 +0,0 @@ -module Pacemaker - # this submodule contains "pcs" based function for cluster property provider - module PcsPcsdAuth - # run the 'pcs cluster auth' command and capture - # the debug output, returned by the pcsd.cli ruby tool - # returns nil if could not get the data - # @param nodes [Array] the list of cluster nodes top auth - # @param username [String] - # @param password [String] - # @param force [String] auth even if already have been auth'ed - # @param local [String] auth only the local node - # @return [String,nil] - def pcs_auth_command(nodes, username, password, force=false, local=false) - command = %w(cluster auth --debug) - command << '--force' if force - command << '--local' if local - command += [ '-u', username ] - command += [ '-p', password ] - command += [nodes] - command.flatten! 
- - begin - output = pcs *command - rescue Puppet::ExecutionFailure => e - output = e.to_s - end - - return unless output - inside_debug_block = false - result = [] - output.split("\n").each do |line| - inside_debug_block = false if line =~ /--Debug (Output|Stdout) End--/ - result << line if inside_debug_block - inside_debug_block = true if line =~ /--Debug (Output|Stdout) Start--/ - end - return unless result.any? - result.join("\n") - end - - # parse the debug output of the pcs auth command - # to a hash of nodes and their statuses - # returns nil on error - # @param result [String] - # @return [Hash String>,nil] - def pcs_auth_parse(result) - result_structure = begin - JSON.load result - rescue StandardError - nil - end - return unless result_structure.is_a? Hash - responses = result_structure.fetch('data', {}).fetch('auth_responses', {}) - status_hash = {} - responses.each do |node, response| - next unless response.is_a? Hash - node_status = response['status'] - next unless node_status - status_hash.store node, node_status - end - status_hash - end - - end -end diff --git a/lib/pacemaker/pcs/resource_default.rb b/lib/pacemaker/pcs/resource_default.rb deleted file mode 100644 index 08bf04ad..00000000 --- a/lib/pacemaker/pcs/resource_default.rb +++ /dev/null @@ -1,40 +0,0 @@ -module Pacemaker - # this submodule contains "pcs" based function for resource default provider - module PcsResourceDefault - # @return [String] - def pcs_resource_default_list - pcs 'resource', 'defaults' - rescue Puppet::ExecutionFailure - '' - end - - # @return [Hash] - def pcs_resource_defaults - pcs_list_to_hash pcs_resource_default_list - end - - # @return [String,true,false,nil] - def pcs_resource_default_value(name) - pcs_resource_defaults.fetch name.to_s, nil - end - - # @param name [String] - # @param value [String,true,false] - def pcs_resource_default_set(name, value) - cmd = ['resource', 'defaults', "#{name}=#{value}"] - retry_block { pcs_safe cmd } - end - - # @param name [String] - def pcs_resource_default_delete(name) - cmd = ['resource', 'defaults', "#{name}="] - retry_block { pcs_safe cmd } - end - - # @param name [String] - # @return [true,false] - def pcs_resource_default_defined?(name) - pcs_resource_defaults.key? name.to_s - end - end -end diff --git a/lib/pacemaker/type.rb b/lib/pacemaker/type.rb deleted file mode 100644 index b08c4e95..00000000 --- a/lib/pacemaker/type.rb +++ /dev/null @@ -1,184 +0,0 @@ -module Pacemaker - # contains functions that can be included to the pacemaker types - module Type - # output IS and SHOULD values for debugging - # @param is [Object] the current value of the parameter - # @param should [Object] the catalog value of the parameter - # @param tag [String] log tag comment to trace calls - def insync_debug(is, should, tag = nil) - debug "insync?: #{tag}" if tag - debug "IS: #{is.inspect} #{is.class}" - debug "SH: #{should.inspect} #{should.class}" - end - - # return inspected data structure, used in should_to_s and is_to_s functions - # @param data [Object] - # @return [String] - def inspect_to_s(data) - data.inspect - end - - # convert data structure's keys and values to strings - # @param data [Object] - # @return [Object] - def stringify_data(data) - if data.is_a? Hash - new_data = {} - data.each do |key, value| - new_data.store stringify_data(key), stringify_data(value) - end - data.clear - data.merge! new_data - elsif data.is_a? Array - data.map! do |element| - stringify_data element - end - elsif data.is_a? 
Set - raise "unexpected Set data: #{data}" - else - data.to_s - end - end - - # Maintains an array of operation hashes as if it was a sorted set. These - # are in Array-of-Hash format ({ 'name' => 'monitor', 'interval' => ...}), - # not { 'monitor' => {...} } ones. The unicity is done on the name and - # interval operation keys. The input is expected to have been stringified - # and munged. - # - # Modifies the operations argument and returns it. - # - # We can't use a real Set as it doesn't serialize correctly in Puppet's - # transaction store. This datastructure is always small, so performance - # is irrelevant. - def add_to_operations_array(operations, new_op) - operations.delete_if { |op| op['name'] == new_op['name'] && op['interval'] == new_op['interval'] } - operations << new_op - operations.sort_by! { |op| "#{op['name']} #{op['interval']}" } - end - - # Munges the input into an Array of munged operations. - # @param [Hash,Array] operations_input parameter value from catalog - def munge_operations_array(operations_input) - operations_input = stringify_data(operations_input) - operations_input = [operations_input] unless operations_input.is_a? Array - operations = [] - operations_input.each do |operation| - # operations were provided as an array of hashes - if operation.is_a? Hash and operation['name'] - munge_operation operation - add_to_operations_array(operations, operation) - elsif operation.is_a? Hash - # operations were provided as a hash of hashes - operation.each do |operation_name, operation_data| - raise "invalid operation in a hash of hashes: #{operation_data}" unless operation_data.is_a? Hash - operation = {} - if operation_name.include? ':' - operation_name_array = operation_name.split(':') - operation_name = operation_name_array[0] - if not operation_data['role'] and operation_name_array[1] - operation_data['role'] = operation_name_array[1] - end - end - operation['name'] = operation_name - operation.merge! operation_data - munge_operation operation - add_to_operations_array(operations, operation) if operation.any? - end - else - raise "invalid pacemaker_resource.operations input: #{operations_input}" - end - end - operations - end - - # munge a single operations hash - # @param [Hash] operation - def munge_operation(operation) - raise "invalid pacemaker_resource.operations element: #{operation}" unless operation.is_a? Hash - operation['name'] = 'monitor' unless operation['name'] - operation['interval'] = '0' unless operation['name'] == 'monitor' - operation['interval'] = '0' unless operation['interval'] - operation['role'].capitalize! if operation['role'] - operation - end - - # compare meta_attribute hashes excluding status meta attributes - # @param is [Hash] - # @param should [Hash] - # @return [TrueClass,FalseClass] - def compare_meta_attributes(is, should) - return unless is.is_a?(Hash) && should.is_a?(Hash) - is_without_state = is.reject do |k, _v| - pacemaker_options[:status_meta_attributes].include? k.to_s - end - should_without_state = should.reject do |k, _v| - pacemaker_options[:status_meta_attributes].include? k.to_s - end - result = is_without_state == should_without_state - debug "compare_meta_attributes: #{result}" - result - end - - # sort operations array before insync? - # to make different order and same data arrays equal - # @param is [Array] - # @param should [Array] - # @return [TrueClass,FalseClass] - def compare_operations(is, should) - is = is.first if is.is_a? Array - should = should.first if should.is_a? 
Array - result = (is == should) - debug "compare_operations: #{result}" - result - end - - # remove status related meta attributes - # from the meta attributes hash - # @param attributes_from [Hash] - # @return [Hash] - def munge_meta_attributes(attributes_from) - attributes_to = {} - attributes_from.each do |name, parameters| - next if pacemaker_options[:status_meta_attributes].include? name - attributes_to.store name, parameters - end - attributes_to - end - - # normalize a single location rule - # @param rule [Hash] rule structure - # @param rule_number [Integer] rule index number - # @param title [String] constraint name - # @return [Hash] normalized rule structure - def munge_rule(rule, rule_number, title) - rule['id'] = "#{title}-rule-#{rule_number}" unless rule['id'] - rule['boolean-op'] = 'or' unless rule['boolean-op'] - rule['score'].gsub! 'inf', 'INFINITY' if rule['score'] - if rule['expressions'] - unless rule['expressions'].is_a? Array - expressions_array = [] - expressions_array << rule['expressions'] - rule['expressions'] = expressions_array - end - expression_number = 0 - rule['expressions'].each do |expression| - unless expression['id'] - expression['id'] = "#{title}-rule-#{rule_number}-expression-#{expression_number}" - end - expression_number += 1 - end - end - rule - end - - # remove "-clone" or "-master" suffix - # and "role" suffix (:Master, :Slave) from a primitive's name - # @param primitive [String] - # @return [String] - def primitive_base_name(primitive) - primitive = primitive.split(':').first - primitive.gsub(/-clone$|-master$/, '') - end - end -end diff --git a/lib/pacemaker/wait.rb b/lib/pacemaker/wait.rb deleted file mode 100644 index 62147af5..00000000 --- a/lib/pacemaker/wait.rb +++ /dev/null @@ -1,238 +0,0 @@ -module Pacemaker - # functions that can wait for something repeatedly - # polling the system status until the condition is met - module Wait - # retry the given command until it runs without errors - # or for RETRY_COUNT times with RETRY_STEP sec step - # print cluster status report on fail - # @param options [Hash] - def retry_block(options = {}) - options = pacemaker_options.merge options - - options[:retry_count].times do - begin - out = Timeout.timeout(options[:retry_timeout]) { yield } - if options[:retry_false_is_failure] - return out if out - else - return out - end - rescue => e - debug "Execution failure: #{e.message}" - end - sleep options[:retry_step] - end - raise "Execution timeout after #{options[:retry_count] * options[:retry_step]} seconds!" if options[:retry_fail_on_timeout] - end - - # wait for pacemaker to become online - # @param comment [String] log tag comment to trace calls - def wait_for_online(comment = nil) - message = "Waiting #{max_wait_time} seconds for Pacemaker to become online" - message += " (#{comment})" if comment - debug message - retry_block do - cib_reset 'wait_for_online' - online? - end - debug 'Pacemaker is online' - end - - # wait until a primitive has known status - # @param primitive [String] primitive name - # @param node [String] on this node if given - def wait_for_status(primitive, node = nil) - message = "Waiting #{max_wait_time} seconds for a known status of '#{primitive}'" - message += " on node '#{node}'" if node - debug message - retry_block do - cib_reset 'wait_for_status' - !primitive_status(primitive).nil? 
- end - message = "Primitive '#{primitive}' has status '#{primitive_status primitive}'" - message += " on node '#{node}'" if node - debug message - end - - # wait for primitive to start - # if node is given then start on this node - # @param primitive [String] primitive id - # @param node [String] on this node if given - def wait_for_start(primitive, node = nil) - message = "Waiting #{max_wait_time} seconds for the service '#{primitive}' to start" - message += " on node '#{node}'" if node - debug message - retry_block do - cib_reset 'wait_for_start' - primitive_is_running? primitive, node - end - message = "Service '#{primitive}' have started" - message += " on node '#{node}'" if node - debug message - end - - # wait for primitive to start as a master - # if node is given then start as a master on this node - # @param primitive [String] primitive id - # @param node [String] on this node if given - def wait_for_master(primitive, node = nil) - message = "Waiting #{max_wait_time} seconds for the service '#{primitive}' to start master" - message += " on node '#{node}'" if node - debug message - retry_block do - cib_reset 'wait_for_master' - primitive_has_master_running? primitive, node - end - message = "Service '#{primitive}' have started master" - message += " on node '#{node}'" if node - debug message - end - - # wait for primitive to stop - # if node is given then start on this node - # @param primitive [String] primitive id - # @param node [String] on this node if given - def wait_for_stop(primitive, node = nil) - message = "Waiting #{max_wait_time} seconds for the service '#{primitive}' to stop" - message += " on node '#{node}'" if node - debug message - retry_block do - cib_reset 'wait_for_stop' - result = primitive_is_running? primitive, node - result.is_a? FalseClass - end - message = "Service '#{primitive}' was stopped" - message += " on node '#{node}'" if node - debug message - end - - # add a new primitive to CIB - # and wait for it to be actually created - # @param xml [String, REXML::Element] XML block to add - # @param primitive [String] the id of the new primitive - # @param scope [String] XML root scope - def wait_for_primitive_create(xml, primitive, scope = 'resources') - message = "Waiting #{max_wait_time} seconds for the primitive '#{primitive}' to be created" - debug message - retry_block do - if pacemaker_options[:cibadmin_idempotency_checks] - cib_reset 'wait_for_primitive_create' - break true if primitive_exists? primitive - end - cibadmin_create xml, scope - end - message = "Primitive '#{primitive}' was created" - debug message - end - - # remove a primitive from CIB - # and wait for it to be actually removed - # @param xml [String, REXML::Element] XML block to remove - # @param primitive [String] the id of the removed primitive - # @param scope [String] XML root scope - def wait_for_primitive_remove(xml, primitive, scope = 'resources') - message = "Waiting #{max_wait_time} seconds for the primitive '#{primitive}' to be removed" - debug message - retry_block do - if pacemaker_options[:cibadmin_idempotency_checks] - cib_reset 'wait_for_primitive_remove' - break true unless primitive_exists? 
primitive - end - cibadmin_delete xml, scope - end - message = "Primitive '#{primitive}' was removed" - debug message - end - - # update a primitive in CIB - # and wait for it to be actually updated - # @param xml [String, REXML::Element] XML block to update - # @param primitive [String] the id of the updated primitive - # @param scope [String] XML root scope - def wait_for_primitive_update(xml, primitive, scope = 'resources') - message = "Waiting #{max_wait_time} seconds for the primitive '#{primitive}' to be updated" - debug message - retry_block do - # replace action is already idempotent - cibadmin_replace xml, scope - end - message = "Primitive '#{primitive}' was updated" - debug message - end - - # add a new constraint to CIB - # and wait for it to be actually created - # @param xml [String, REXML::Element] XML block to add - # @param constraint [String] the id of the new constraint - # @param scope [String] XML root scope - def wait_for_constraint_create(xml, constraint, scope = 'constraints') - message = "Waiting #{max_wait_time} seconds for the constraint '#{constraint}' to be created" - debug message - retry_block do - if pacemaker_options[:cibadmin_idempotency_checks] - cib_reset 'wait_for_constraint_create' - break true if constraint_exists? constraint - end - cibadmin_create xml, scope - end - message = "Constraint '#{constraint}' was created" - debug message - end - - # remove a constraint from CIB - # and wait for it to be actually removed - # @param xml [String, REXML::Element] XML block to remove - # @param constraint [String] the id of the removed constraint - # @param scope [String] XML root scope - def wait_for_constraint_remove(xml, constraint, scope = 'constraints') - message = "Waiting #{max_wait_time} seconds for the constraint '#{constraint}' to be removed" - debug message - retry_block do - if pacemaker_options[:cibadmin_idempotency_checks] - cib_reset 'wait_for_constraint_remove' - break true unless constraint_exists? constraint - end - cibadmin_delete xml, scope - end - message = "Constraint '#{constraint}' was removed" - debug message - end - - # update a constraint in CIB - # and wait for it to be actually updated - # @param xml [String, REXML::Element] XML block to update - # @param constraint [String] the id of the updated constraint - # @param scope [String] XML root scope - def wait_for_constraint_update(xml, constraint, scope = 'constraints') - message = "Waiting #{max_wait_time} seconds for the constraint '#{constraint}' to be updated" - debug message - retry_block do - # replace action is already idempotent - cibadmin_replace xml, scope - end - message = "Constraint '#{constraint}' was updated" - debug message - end - - # check if pacemaker is online and we can work with it - # * pacemaker is online if cib can be downloaded - # * dc_version attribute can be obtained - # * DC have been designated - # Times out a stuck command calls and catches failed command calls - # @return [TrueClass,FalseClass] - def online? - Timeout.timeout(pacemaker_options[:retry_timeout]) do - return false unless dc_version - return false unless dc - return false unless cib_section_node_state - true - end - rescue Puppet::ExecutionFailure => e - debug "Cluster is offline: #{e.message}" - false - rescue Timeout::Error - debug 'Online check timeout!' 
- false - end - end -end diff --git a/lib/pacemaker/xml/cib.rb b/lib/pacemaker/xml/cib.rb deleted file mode 100644 index e3426fe3..00000000 --- a/lib/pacemaker/xml/cib.rb +++ /dev/null @@ -1,123 +0,0 @@ -# the Pacemaker module contains many submodules separated -# by the preformed functions. They are later merged -# together in the 'provided' file -module Pacemaker - # this submodule contains the basic functions - # for low-level actions with CIB data - module Cib - # get the raw CIB from Pacemaker - # @return [String] cib xml data - def raw_cib - raw_cib = cibadmin '-Q' - raise 'Could not dump CIB XML!' if !raw_cib || raw_cib == '' - raw_cib - end - - # REXML::Document of the CIB data - # @return [REXML::Document] at '/' - def cib - return @cib if @cib - @cib = REXML::Document.new(raw_cib) - end - - # insert a new cib xml data instead of retrieving it - # can be used either for prefetching or for debugging - # @param cib [String,REXML::Document] CIB XML text or element - def cib=(cib) - @cib = if cib.is_a? REXML::Document - cib - else - REXML::Document.new(cib) - end - end - - # check id the CIB is retrieved and memorized - # @return [TrueClass,FalseClass] - def cib? - !@cib.nil? - end - - # add a new XML element to CIB - # @param xml [String, REXML::Element] XML block to add - # @param scope [String] XML root scope - def cibadmin_create(xml, scope = nil) - xml = xml_pretty_format xml if xml.is_a? REXML::Element - options = %w(--force --sync-call --create) - options += ['--scope', scope.to_s] if scope - cibadmin_safe options, '--xml-text', xml.to_s - end - - # delete the XML element to CIB - # @param xml [String, REXML::Element] XML block to delete - # @param scope [String] XML root scope - def cibadmin_delete(xml, scope = nil) - xml = xml_pretty_format xml if xml.is_a? REXML::Element - options = %w(--force --sync-call --delete) - options += ['--scope', scope.to_s] if scope - cibadmin_safe options, '--xml-text', xml.to_s - end - - # modify the XML element - # @param xml [String, REXML::Element] XML element to modify - # @param scope [String] XML root scope - def cibadmin_modify(xml, scope = nil) - xml = xml_pretty_format xml if xml.is_a? REXML::Element - options = %w(--force --sync-call --modify) - options += ['--scope', scope.to_s] if scope - cibadmin_safe options, '--xml-text', xml.to_s - end - - # replace the XML element - # @param xml [String, REXML::Element] XML element to replace - # @param scope [String] XML root scope - def cibadmin_replace(xml, scope = nil) - xml = xml_pretty_format xml if xml.is_a? REXML::Element - options = %w(--force --sync-call --replace) - options += ['--scope', scope.to_s] if scope - cibadmin_safe options, '--xml-text', xml.to_s - end - - # get the name of the DC (Designated Controller) node - # used to determine if the cluster have elected one and is ready - # @return [String, nil] - def dc - cib_element = cib.elements['/cib'] - return unless cib_element - dc_node_id = cib_element.attribute('dc-uuid') - return unless dc_node_id - return if dc_node_id == 'NONE' - dc_node_id.to_s - end - - # get the dc_version string from the CIB configuration - # used to determine that the cluster have finished forming a correct cib structure - # uses an independent command call because CIB may not be ready yet - # @return [String, nil] - def dc_version - dc_version = crm_attribute '-q', '--type', 'crm_config', '--query', '--name', 'dc-version' - return if dc_version.empty? 
- dc_version - end - - # reset all mnemoization variables - # to force pacemaker to reload all the data structures - # @param comment [String] log file comment tag to trace calls - def cib_reset(comment = nil) - message = 'Call: cib_reset' - message += " (#{comment})" if comment - debug message - - @cib = nil - - @primitives_structure = nil - @locations_structure = nil - @colocations_structure = nil - @orders_structure = nil - @node_status_structure = nil - @cluster_properties_structure = nil - @nodes_structure = nil - @resource_defaults_structure = nil - @operation_defaults_structure = nil - end - end -end diff --git a/lib/pacemaker/xml/constraint_colocations.rb b/lib/pacemaker/xml/constraint_colocations.rb deleted file mode 100644 index 3c7a2cc5..00000000 --- a/lib/pacemaker/xml/constraint_colocations.rb +++ /dev/null @@ -1,43 +0,0 @@ -module Pacemaker - # functions related to colocations constraints - # main structure "constraint_colocations" - module ConstraintColocations - # get colocation constraints and use mnemoization on the list - # @return [Hash Hash>] - def constraint_colocations - return @colocations_structure if @colocations_structure - @colocations_structure = constraints 'rsc_colocation' - end - - # check if colocation constraint exists - # @param id [String] the constraint id - # @return [TrueClass,FalseClass] - def constraint_colocation_exists?(id) - constraint_colocations.key? id - end - - # add a colocation constraint - # @param colocation_structure [Hash String>] the location data structure - def constraint_colocation_add(colocation_structure) - colocation_patch = xml_document - colocation_element = xml_rsc_colocation colocation_structure - raise "Could not create XML patch from colocation '#{colocation_structure.inspect}'!" unless colocation_element - colocation_patch.add_element colocation_element - wait_for_constraint_create xml_pretty_format(colocation_patch.root), colocation_structure['id'] - end - - # remove a colocation constraint - # @param id [String] the constraint id - def constraint_colocation_remove(id) - wait_for_constraint_remove "\n", id - end - - # generate rsc_colocation elements from data structure - # @param data [Hash] - # @return [REXML::Element] - def xml_rsc_colocation(data) - return unless data && data.is_a?(Hash) - xml_element 'rsc_colocation', data, 'type' - end - end -end diff --git a/lib/pacemaker/xml/constraint_locations.rb b/lib/pacemaker/xml/constraint_locations.rb deleted file mode 100644 index 0488527a..00000000 --- a/lib/pacemaker/xml/constraint_locations.rb +++ /dev/null @@ -1,104 +0,0 @@ -module Pacemaker - # functions related to locations constraints - # main structure "constraint_locations" - module ConstraintLocations - # construct the constraint unique name - # from primitive's and node's names - # @param primitive [String] - # @param node [String] - # @return [String] - def service_location_name(primitive, node) - "#{primitive}-on-#{node}" - end - - # check if service location exists for this primitive on this node - # @param primitive [String] the primitive's name - # @param node [String] the node's name - # @return [true,false] - def service_location_exists?(primitive, node) - id = service_location_name primitive, node - constraint_location_exists? 
id - end - - # add a location constraint to enable a service on a node - # @param primitive [String] the primitive's name - # @param node [String] the node's name - # @param score [Numeric,String] score value - def service_location_add(primitive, node, score = 100) - location_structure = { - 'id' => service_location_name(primitive, node), - 'node' => node, - 'rsc' => primitive, - 'score' => score, - } - constraint_location_add location_structure - end - - # remove the service location on this node - # @param primitive [String] the primitive's name - # @param node [String] the node's name - def service_location_remove(primitive, node) - id = service_location_name primitive, node - constraint_location_remove id - end - - # get location constraints and use mnemoization on the list - # @return [Hash Hash>] - def constraint_locations - return @locations_structure if @locations_structure - @locations_structure = constraints 'rsc_location' - end - - # add a location constraint - # @param location_structure [Hash String>] the location data structure - def constraint_location_add(location_structure) - location_patch = xml_document - location_element = xml_rsc_location location_structure - raise "Could not create XML patch from location '#{location_structure.inspect}'!" unless location_element - location_patch.add_element location_element - wait_for_constraint_create xml_pretty_format(location_patch.root), location_structure['id'] - end - - # remove a location constraint - # @param id [String] the constraint id - def constraint_location_remove(id) - wait_for_constraint_remove "\n", id - end - - # check if locations constraint exists - # @param id [String] the constraint id - # @return [TrueClass,FalseClass] - def constraint_location_exists?(id) - constraint_locations.key? id - end - - # generate rsc_location elements from data structure - # @param data [Hash] - # @return [REXML::Element] - def xml_rsc_location(data) - return unless data && data.is_a?(Hash) - # create an element from the top level hash and skip 'rules' attribute - # because if should be processed as children elements and useless 'type' attribute - rsc_location_element = xml_element 'rsc_location', data, %w(rules type) - - # there are no rule elements - return rsc_location_element unless data['rules'] && data['rules'].respond_to?(:each) - - # create a rule element with attributes and treat expressions as children elements - sort_data(data['rules']).each do |rule| - next unless rule.is_a? Hash - rule_element = xml_element 'rule', rule, 'expressions' - # add expression children elements to the rule element if the are present - if rule['expressions'] && rule['expressions'].respond_to?(:each) - sort_data(rule['expressions']).each do |expression| - next unless expression.is_a? 
Hash - expression_element = xml_element 'expression', expression - rule_element.add_element expression_element - end - end - rsc_location_element.add_element rule_element - end - rsc_location_element - end - end -end diff --git a/lib/pacemaker/xml/constraint_orders.rb b/lib/pacemaker/xml/constraint_orders.rb deleted file mode 100644 index bb882f3f..00000000 --- a/lib/pacemaker/xml/constraint_orders.rb +++ /dev/null @@ -1,43 +0,0 @@ -module Pacemaker - # functions related to constraint_orders constraints - # main structure "constraint_orders" - module ConstraintOrders - # get order constraints and use memoization on the list - # @return [Hash Hash>] - def constraint_orders - return @orders_structure if @orders_structure - @orders_structure = constraints 'rsc_order' - end - - # check if order constraint exists - # @param id [String] the constraint id - # @return [TrueClass,FalseClass] - def constraint_order_exists?(id) - constraint_orders.key? id - end - - # add a order constraint - # @param order_structure [Hash String>] the location data structure - def constraint_order_add(order_structure) - order_patch = xml_document - order_element = xml_rsc_order order_structure - raise "Could not create XML patch from colocation '#{order_structure.inspect}'!" unless order_element - order_patch.add_element order_element - wait_for_constraint_create xml_pretty_format(order_patch.root), order_structure['id'] - end - - # remove an order constraint - # @param id [String] the constraint id - def constraint_order_remove(id) - wait_for_constraint_remove "\n", id - end - - # generate rsc_order elements from data structure - # @param data [Hash] - # @return [REXML::Element] - def xml_rsc_order(data) - return unless data && data.is_a?(Hash) - xml_element 'rsc_order', data, 'type' - end - end -end diff --git a/lib/pacemaker/xml/constraints.rb b/lib/pacemaker/xml/constraints.rb deleted file mode 100644 index 7bcf1e0f..00000000 --- a/lib/pacemaker/xml/constraints.rb +++ /dev/null @@ -1,77 +0,0 @@ -module Pacemaker - # functions related to constraints (order, location, colocation) - # main structure "constraints" - # this structure is used by other specific location colocation and order - # submodules to form their data structures - module Constraints - # get all 'rsc_location', 'rsc_order' and 'rsc_colocation' sections from CIB - # @return [Array] at /cib/configuration/constraints/* - def cib_section_constraints - REXML::XPath.match cib, '//constraints/*' - end - - # get all rule elements from the constraint element - # @return [Array] at /cib/configuration/constraints/*/rule - def cib_section_constraint_rules(constraint) - return unless constraint.is_a? REXML::Element - REXML::XPath.match constraint, 'rule' - end - - # parse constraint rule elements to the rule structure - # @param element [REXML::Element] - # @return [Hash Hash>] - def decode_constraint_rules(element) - rules = cib_section_constraint_rules element - return [] unless rules.any? - rules_array = [] - rules.each do |rule| - rule_structure = attributes_to_hash rule - next unless rule_structure['id'] - rule_expressions = children_elements_to_array rule, 'expression' - rule_structure.store 'expressions', rule_expressions if rule_expressions - rules_array << rule_structure - end - rules_array.sort_by { |rule| rule['id'] } - end - - # decode a single constraint element to the data structure - # @param element [REXML::Element] - # @return [Hash String>] - def decode_constraint(element) - return unless element.is_a? 
REXML::Element - return unless element.attributes['id'] - return unless element.name - - constraint_structure = attributes_to_hash element - constraint_structure.store 'type', element.name - - rules = decode_constraint_rules element - constraint_structure.store 'rules', rules if rules.any? - constraint_structure - end - - # constraints found in the CIB - # filter them by the provided tag name - # @param type [String] filter this location type - # @return [Hash Hash>] - def constraints(type = nil) - constraints = {} - cib_section_constraints.each do |constraint| - constraint_structure = decode_constraint constraint - next unless constraint_structure - next unless constraint_structure['id'] - next if type && !(constraint_structure['type'] == type) - constraint_structure.delete 'type' - constraints.store constraint_structure['id'], constraint_structure - end - constraints - end - - # check if a constraint exists - # @param id [String] the constraint id - # @return [TrueClass,FalseClass] - def constraint_exists?(id) - constraints.key? id - end - end -end diff --git a/lib/pacemaker/xml/debug.rb b/lib/pacemaker/xml/debug.rb deleted file mode 100644 index 7cc895e6..00000000 --- a/lib/pacemaker/xml/debug.rb +++ /dev/null @@ -1,138 +0,0 @@ -module Pacemaker - # debug related functions - # "cluster_debug_report" the main debug text generation function - # "safe_methods" are used to debug providers without making any actual changes to the system - module Debug - # check if debug is enabled either in the pacemaker options - # or the resource has the 'debug' parameter and it's enabled - # @return [TrueClass,FalseClass] - def debug_mode_enabled? - return true if pacemaker_options[:debug_enabled] - return true if @resource && @resource.parameters.keys.include?(:debug) && @resource[:debug] - false - end - - # Call a Puppet shell command method with wrappers - # If debug is enabled, show what would be executed and don't actually - # run the command. Used to debug commands that should modify the system - # and don't return any data. Should never be use with commands that retrieve data. - # If a command have failed, show command and the arguments and the raise the exception. - # The actual commands methods should be created by the provider's "commands" helper. - # @param cmd [Symbol, String] command name - # @param args [Array] command arguments - # @return [String,NilClass] - def safe_method(cmd, *args) - cmd = cmd.to_sym unless cmd.is_a? Symbol - command_line = ([cmd.to_s] + args).join ' ' - if debug_mode_enabled? 
- debug "Exec: #{command_line}" - return - end - begin - send cmd, *args - rescue StandardError => exception - debug "Command execution have failed: #{command_line}" - raise exception - end - end - - # safe cibadmin command - # @param args [Array] command arguments - # @return [String,NilClass] - def cibadmin_safe(*args) - safe_method :cibadmin, *args - end - - # safe crm_node command - # @param args [Array] command arguments - # @return [String,NilClass] - def crm_node_safe(*args) - safe_method :crm_node, *args - end - - # safe cmapctl command - # @param args [Array] command arguments - # @return [String,NilClass] - def cmapctl_safe(*args) - safe_method :cmapctl, *args - end - - # safe crm_resource command - # @param args [Array] command arguments - # @return [String,NilClass] - def crm_resource_safe(*args) - safe_method :crm_resource, *args - end - - # safe crm_attribute command - # @param args [Array] command arguments - # @return [String,NilClass] - def crm_attribute_safe(*args) - safe_method :crm_attribute, *args - end - - ################################################################################ - - # form a cluster status report for debugging - # "(L)" - location constraint for this primitive is present on this node - # "(F)" - the primitive is not running and have failed on this node - # "(M)" - this primitive is not managed - # @param tag [String] log comment tag to to trace calls - # @return [String] - def cluster_debug_report(tag = nil) - return unless cib? - report = "\n" - report += 'Pacemaker debug block start' - report += " at '#{tag}'" if tag - report += "\n" - primitives_status_by_node.each do |primitive, data| - primitive_name = primitive - next unless primitives.key? primitive - primitive_name = primitives[primitive]['name'] if primitives[primitive]['name'] - primitive_type = 'Simple' - primitive_type = 'Clone' if primitive_is_clone? primitive - primitive_type = 'Master' if primitive_is_master? primitive - - report += "-> #{primitive_type} primitive: '#{primitive_name}'" - report += ' (M)' unless primitive_is_managed? primitive - report += "\n" - nodes = [] - data.keys.sort.each do |node_name| - node_status_string = data.fetch node_name - node_status_string = '?' unless node_status_string.is_a? String - node_status_string = node_status_string.upcase - node_block = "#{node_name}: #{node_status_string}" - node_block += ' (F)' if primitive_has_failures? primitive, node_name - node_block += ' (L)' if service_location_exists? primitive_full_name(primitive), node_name - nodes << node_block - end - report += ' ' + nodes.join(' | ') + "\n" - end - pacemaker_options[:debug_show_properties].each do |p| - report += "* #{p}: #{cluster_property_value p}\n" if cluster_property_defined? 
p - end - report += 'Pacemaker debug block end' - report += " at '#{tag}'" if tag - report + "\n" - end - - # Generate the report message for the operation status calculation - # @param [Array] operations - # @param [Hash String>] resource - # @param [String] node_name - # @return [String] - def resource_operations_report(operations, resource, node_name) - report = "Operations status debug start for the node: '#{node_name}'\n" - report += "Resource: '#{resource['id']}'\n" - operations.each do |operation| - type = operation.fetch('operation', '?').capitalize - rc_code = operation.fetch('rc-code', '?') - op_code = operation.fetch('op-status', '?') - report += "* #{type.ljust 7}: rc:#{rc_code} op:#{op_code}\n" - end - report += "Status: #{resource['status']} Failed: #{resource['failed']}\n" - report + "Operations status debug end for the node: '#{node_name}'\n" - end - - end -end diff --git a/lib/pacemaker/xml/helpers.rb b/lib/pacemaker/xml/helpers.rb deleted file mode 100644 index d020b2ac..00000000 --- a/lib/pacemaker/xml/helpers.rb +++ /dev/null @@ -1,139 +0,0 @@ -module Pacemaker - # misc helper methods used in other submodules - module Helpers - # convert elements's attributes to hash - # @param element [REXML::Element] - # @return [Hash String>] - def attributes_to_hash(element, hash = {}) - element.attributes.each do |a, v| - next if a == '__crm_diff_marker__' - hash.store a.to_s, v.to_s - end - hash - end - - # convert element's children to hash - # of their attributes using key and hash key - # @param element [REXML::Element] - # @param key use this attribute as hash key - # @param tag get only this type of children - # @return [Hash String>] - def children_elements_to_hash(element, key, tag = nil) - return unless element.is_a? REXML::Element - elements = {} - children = element.get_elements tag - return elements unless children - children.each do |child| - child_structure = attributes_to_hash child - name = child_structure[key] - next unless name - elements.store name, child_structure - end - elements - end - - # convert element's children to array of their attributes - # @param element [REXML::Element] - # @param tag [String] get only this type of children - # @return [Array] - def children_elements_to_array(element, tag = nil) - return unless element.is_a? REXML::Element - elements = [] - children = element.get_elements tag - return elements unless children - children.each do |child| - child_structure = attributes_to_hash child - next unless child_structure['id'] - elements << child_structure - end - elements - end - - # copy value from one hash_like structure to another - # if the value is present - # @param from[Hash] - # @param from_key [String,Symbol] - # @param to [Hash] - # @param to_key [String,Symbol,NilClass] - def copy_value(from, from_key, to, to_key = nil) - value = from[from_key] - return value unless value - to_key = from_key unless to_key - to[to_key] = value - value - end - - # sort hash of hashes into an array of hashes - # by one of the subhash's attributes - # @param data [Hash Hash>] - # @param key [String] - # @return [Array] - def sort_data(data, key = 'id') - data = data.values if data.is_a? 
Hash - data.sort do |x, y| - break 0 unless x[key] && y[key] - x[key] <=> y[key] - end - end - - # return service status value expected by Puppet - # puppet wants :running or :stopped symbol - # @param primitive [String] primitive id - # @param node [String] on this node if given - # @return [:running,:stopped] - def get_primitive_puppet_status(primitive, node = nil) - if primitive_is_running? primitive, node - :running - else - :stopped - end - end - - # return service enabled status value expected by Puppet - # puppet wants :true or :false symbols - # @param primitive [String] - # @return [:true,:false] - def get_primitive_puppet_enable(primitive) - if primitive_is_managed? primitive - :true - else - :false - end - end - - # import the library representation of the attributes structure - # to the Puppet one - def import_attributes_structure(attributes) - return unless attributes.respond_to? :each - hash = {} - attributes.each do |attribute| - if attribute.is_a?(Array) && attribute.length == 2 - attribute = attribute[1] - end - next unless attribute['name'] && attribute['value'] - hash.store attribute['name'], attribute['value'] - end - hash - end - - # export the Puppet representation of attributes - # to the library one - # @param hash [Hash] attributes (name => value) - # @param attributes_id_tag [String] attributes name for id naming - # @return [Hash,NilClass] - def export_attributes_structure(hash, attributes_id_tag) - return unless hash.is_a? Hash - attributes = {} - hash.each do |attribute_name, attribute_value| - id_components = [resource[:name], attributes_id_tag, attribute_name] - id_components.reject!(&:nil?) - attribute_structure = {} - attribute_structure['id'] = id_components.join '-' - attribute_structure['name'] = attribute_name - attribute_structure['value'] = attribute_value - attributes.store attribute_name, attribute_structure - end - attributes - end - end -end diff --git a/lib/pacemaker/xml/nodes.rb b/lib/pacemaker/xml/nodes.rb deleted file mode 100644 index 357b197e..00000000 --- a/lib/pacemaker/xml/nodes.rb +++ /dev/null @@ -1,47 +0,0 @@ -module Pacemaker - # functions related to the cluster nodes - # main structure "nodes" with node's names and ids - module Nodes - # get nodes CIB section - # @return [REXML::Element] at /cib/configuration/nodes - def cib_section_nodes - REXML::XPath.match cib, '/cib/configuration/nodes/*' - end - - # hostname of the current node - # @return [String] - def node_name - return @node_name if @node_name - @node_name = crm_node('-n').chomp.strip - end - - alias hostname node_name - - # the nodes structure - # uname => id - # @return [Hash Hash>] - def nodes - return @nodes_structure if @nodes_structure - @nodes_structure = {} - cib_section_nodes.each do |node_block| - node = attributes_to_hash node_block - next unless node['id'] && node['uname'] - @nodes_structure.store node['uname'], node - end - @nodes_structure - end - - # the name of the current DC node - # @return [String,nil] - def dc_name - dc_node_id = dc - return unless dc_node_id - nodes.each do |node, attrs| - next unless attrs['id'] == dc_node_id - return node - end - nil - end - - end -end diff --git a/lib/pacemaker/xml/operation_default.rb b/lib/pacemaker/xml/operation_default.rb deleted file mode 100644 index 429bc55f..00000000 --- a/lib/pacemaker/xml/operation_default.rb +++ /dev/null @@ -1,56 +0,0 @@ -module Pacemaker - # functions related to the operation defaults - # main structure "operation_defaults" - module OperationDefault - # get operation defaults CIB section 
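Most of the CIB handling in these helpers reduces to two moves: flatten an element's attributes into a plain hash, and key its nvpair children by their name attribute. A minimal standalone sketch of that pattern using Ruby's bundled REXML; the method names here are illustrative, not the module's own:

```
require 'rexml/document'

# Flatten an element's attributes into a Hash of strings
# (same idea as attributes_to_hash above).
def attrs_to_hash(element)
  hash = {}
  element.attributes.each { |name, value| hash[name.to_s] = value.to_s }
  hash
end

# Key the children of a given tag by one of their attributes
# (same idea as children_elements_to_hash above).
def children_by(element, key, tag)
  element.get_elements(tag).each_with_object({}) do |child, out|
    data = attrs_to_hash(child)
    out[data[key]] = data if data[key]
  end
end

xml = <<-XML
<meta_attributes id="p-meta_attributes">
  <nvpair id="p-meta_attributes-target-role" name="target-role" value="Stopped"/>
  <nvpair id="p-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
XML

element = REXML::Document.new(xml).root
puts children_by(element, 'name', 'nvpair')['target-role']['value']
# => Stopped
```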
- # @return [REXML::Element] - def cib_section_operation_defaults - REXML::XPath.match(cib, '/cib/configuration/op_defaults/meta_attributes').first - end - - # the main 'operation_defaults' structure - # contains defaults operations and their values - # @return [Hash] - def operation_defaults - return @operation_defaults_structure if @operation_defaults_structure - @operation_defaults_structure = children_elements_to_hash cib_section_operation_defaults, 'name' - @operation_defaults_structure = {} unless @operation_defaults_structure - @operation_defaults_structure - end - - # extract a single operation default attribute value - # returns nil if it have not been set - # @param attribute_name [String] - # @return [String, nil] - def operation_default_value(attribute_name) - return unless operation_default_defined? attribute_name - operation_defaults[attribute_name]['value'] - end - - # set a single operation default value - # @param attribute_name [String] - # @param attribute_value [String] - def operation_default_set(attribute_name, attribute_value) - options = ['--quiet', '--type', 'op_defaults', '--attr-name', attribute_name] - options += ['--attr-value', attribute_value] - retry_block { crm_attribute_safe options } - end - - # remove a defined operation default attribute - # @param attribute_name [String] - def operation_default_delete(attribute_name) - options = ['--quiet', '--type', 'op_defaults', '--attr-name', attribute_name] - options += ['--delete-attr'] - retry_block { crm_attribute_safe options } - end - - # check if this operation default attribute have been defined - # @param attribute_name [String] - # @return [true,false] - def operation_default_defined?(attribute_name) - return false unless operation_defaults.key? attribute_name - return false unless operation_defaults[attribute_name].is_a?(Hash) && operation_defaults[attribute_name]['value'] - true - end - end -end diff --git a/lib/pacemaker/xml/primitives.rb b/lib/pacemaker/xml/primitives.rb deleted file mode 100644 index 697d40fe..00000000 --- a/lib/pacemaker/xml/primitives.rb +++ /dev/null @@ -1,384 +0,0 @@ -module Pacemaker - # function related to the primitives configuration - # main structure "primitives" - module Primitives - # get all 'primitive' sections from CIB - # @return [Array] at /cib/configuration/resources/primitive - def cib_section_primitives - REXML::XPath.match cib, '//primitive' - end - - # sets the meta attribute of a primitive - # @param primitive [String] primitive's id - # @param attribute [String] atttibute's name - # @param value [String] attribute's value - def set_primitive_meta_attribute(primitive, attribute, value) - options = ['--quiet', '--resource', primitive] - options += ['--set-parameter', attribute, '--meta', '--parameter-value', value] - retry_block { crm_resource_safe options } - end - - # disable this primitive - # @param primitive [String] what primitive to disable - def disable_primitive(primitive) - set_primitive_meta_attribute primitive, 'target-role', 'Stopped' - end - - alias stop_primitive disable_primitive - - # enable this primitive - # @param primitive [String] what primitive to enable - def enable_primitive(primitive) - set_primitive_meta_attribute primitive, 'target-role', 'Started' - end - - alias start_primitive enable_primitive - - # manage this primitive - # @param primitive [String] what primitive to manage - def manage_primitive(primitive) - set_primitive_meta_attribute primitive, 'is-managed', 'true' - end - - # unmanage this primitive - # @param primitive 
[String] what primitive to unmanage - def unmanage_primitive(primitive) - set_primitive_meta_attribute primitive, 'is-managed', 'false' - end - - # ban this primitive - # @param primitive [String] what primitive to ban - # @param node [String] on which node this primitive should be banned - def ban_primitive(primitive, node) - options = ['--quiet', '--resource', primitive, '--node', node] - options += ['--ban'] - retry_block { crm_resource_safe options } - end - - # unban this primitive - # @param primitive [String] what primitive to unban - # @param node [String] on which node this primitive should be unbanned - def unban_primitive(primitive, node) - options = ['--quiet', '--resource', primitive, '--node', node] - options += ['--clear'] - retry_block { crm_resource_safe options } - end - - alias clear_primitive unban_primitive - - # move this primitive - # @param primitive [String] what primitive to un-move - # @param node [String] to which node the primitive should be moved - def move_primitive(primitive, node) - options = ['--quiet', '--resource', primitive, '--node', node] - options += ['--move'] - retry_block { crm_resource_safe options } - end - - # un-move this primitive - # @param primitive [String] what primitive to un-move - # @param node [String] from which node the primitive should be un-moved - def unmove_primitive(primitive, node) - options = ['--quiet', '--resource', primitive, '--node', node] - options += ['--un-move'] - retry_block { crm_resource_safe options } - end - - # cleanup this primitive - # @param primitive [String] what primitive to cleanup - # @param node [String] on which node to cleanup (optional) - # cleanups on every node if node is not given - def cleanup_primitive(primitive, node = nil) - options = ['--quiet', '--resource', primitive] - options += ['--node', node] if node - options += ['--cleanup'] - retry_block { crm_resource_safe options } - end - - # the list of complex types the library should - # read from the CIB and parse their meta-data - # @return [Array] - def read_complex_types - %w(clone master group) - end - - # the list of complex type the library should - # be able to create XML elements for - # @return [Array] - def write_complex_types - %w(clone master) - end - - ############################################################################## - - # get primitives configuration structure with primitives and their attributes - # @return [Hash Hash>] - def primitives - return @primitives_structure if @primitives_structure - @primitives_structure = {} - cib_section_primitives.each do |primitive| - id = primitive.attributes['id'] - next unless id - primitive_structure = attributes_to_hash primitive - primitive_structure.store 'name', id - - if read_complex_types.include?(primitive.parent.name) && primitive.parent.attributes['id'] - complex_structure = { - 'id' => primitive.parent.attributes['id'], - 'type' => primitive.parent.name - } - - complex_meta_attributes = primitive.parent.elements['meta_attributes'] - if complex_meta_attributes - complex_meta_attributes_structure = children_elements_to_hash complex_meta_attributes, 'name', 'nvpair' - complex_structure.store 'meta_attributes', complex_meta_attributes_structure - end - - primitive_structure.store 'name', complex_structure['id'] unless complex_structure['type'] == 'group' - primitive_structure.store 'complex', complex_structure - end - - instance_attributes = primitive.elements['instance_attributes'] - if instance_attributes - instance_attributes_structure = children_elements_to_hash 
instance_attributes, 'name', 'nvpair' - primitive_structure.store 'instance_attributes', instance_attributes_structure - end - - meta_attributes = primitive.elements['meta_attributes'] - if meta_attributes - meta_attributes_structure = children_elements_to_hash meta_attributes, 'name', 'nvpair' - primitive_structure.store 'meta_attributes', meta_attributes_structure - end - - operations = primitive.elements['operations'] - if operations - operations_structure = parse_operations operations - primitive_structure.store 'operations', operations_structure - end - - @primitives_structure.store id, primitive_structure - end - @primitives_structure - end - - # parse the operations structure of a primitive - # @param operations_element [REXML::Element] - # @return [Hash] - def parse_operations(operations_element) - return unless operations_element.is_a? REXML::Element - operations = {} - ops = operations_element.get_elements 'op' - return operations unless ops.any? - ops.each do |op| - op_structure = attributes_to_hash op - id = op_structure['id'] - next unless id - instance_attributes = op.elements['instance_attributes'] - if instance_attributes - instance_attributes_structure = children_elements_to_hash instance_attributes, 'name', 'nvpair' - if instance_attributes_structure.key? 'OCF_CHECK_LEVEL' - value = instance_attributes_structure.fetch('OCF_CHECK_LEVEL', {}).fetch('value', nil) - op_structure['OCF_CHECK_LEVEL'] = value if value - end - end - operations.store id, op_structure - end - operations - end - - # check if primitive exists in the configuration - # @param primitive primitive id or name - def primitive_exists?(primitive) - primitives.key? primitive - end - - # return primitive class - # @param primitive [String] primitive id - # @return [String,nil] primitive class - def primitive_class(primitive) - return unless primitive_exists? primitive - primitives[primitive]['class'] - end - - # return primitive type - # @param primitive [String] primitive id - # @return [String,nil] primitive type - def primitive_type(primitive) - return unless primitive_exists? primitive - primitives[primitive]['type'] - end - - # return primitive provider - # @param primitive [String] primitive id - # @return [String,nil] primitive type - def primitive_provider(primitive) - return unless primitive_exists? primitive - primitives[primitive]['provider'] - end - - # return primitive complex type - # or nil is the primitive is simple - # @param primitive [String] primitive id - # @return [Symbol] primitive complex type - def primitive_complex_type(primitive) - return unless primitive_is_complex? primitive - primitives[primitive]['complex']['type'].to_sym - end - - # return the full name of the complex primitive - # or just a name for a simple primitive - # @return [String] primitive type - def primitive_full_name(primitive) - return unless primitive_exists? primitive - primitives[primitive]['name'] - end - - # check if primitive is clone or master - # primitives in groups are not considered complex - # despite having the complex structure - # @param primitive [String] primitive id - # @return [TrueClass,FalseClass] - def primitive_is_complex?(primitive) - return unless primitive_exists? primitive - return false unless primitives[primitive].key? 'complex' - primitives[primitive]['complex']['type'] != 'group' - end - - # reverse of the complex? 
predicate - # but returns nil if resource doesn't exist - # @param primitive [String] primitive id - # @return [TrueClass,FalseClass] - def primitive_is_simple?(primitive) - return unless primitive_exists? primitive - ! primitive_is_complex?(primitive) - end - - # check if the primitive is assigned to a group - # @param primitive [String] primitive id - # @return [TrueClass,FalseClass] - def primitive_in_group?(primitive) - return unless primitive_exists? primitive - return false unless primitives[primitive].key? 'complex' - primitives[primitive]['complex']['type'] == 'group' - end - - # get the group name of the primitive - # returns nil if primitive is not in a group - # @return [String,nil] primitive group - def primitive_group(primitive) - return unless primitive_in_group? primitive - primitives[primitive]['complex']['id'] - end - - # check if primitive is clone - # @param primitive [String] primitive id - # @return [TrueClass,FalseClass] - def primitive_is_clone?(primitive) - is_complex = primitive_is_complex? primitive - return is_complex unless is_complex - primitives[primitive]['complex']['type'] == 'clone' - end - - # check if primitive is master - # @param primitive [String] primitive id - # @return [TrueClass,FalseClass] - def primitive_is_master?(primitive) - is_complex = primitive_is_complex? primitive - return is_complex unless is_complex - primitives[primitive]['complex']['type'] == 'master' - end - - # determine if primitive is managed - # @param primitive [String] primitive id - # @return [TrueClass,FalseClass] - def primitive_is_managed?(primitive) - return unless primitive_exists? primitive - is_managed = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('is-managed', {}).fetch('value', 'true') - is_managed == 'true' - end - - # determine if primitive has target-state started - # @param primitive [String] primitive id - # @return [TrueClass,FalseClass] - def primitive_is_started?(primitive) - return unless primitive_exists? primitive - target_role = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('target-role', {}).fetch('value', 'Started') - target_role == 'Started' - end - - # generate a new XML element object - # and fill it with the primitive data from - # the provided primitive structure - # @param data [Hash] primitive_structure - # @return [REXML::Element] - def xml_primitive(data) - raise "Primitive data should be a hash! Got: #{data.inspect}" unless data.is_a? Hash - primitive_skip_attributes = %w(name parent instance_attributes operations meta_attributes utilization) - primitive_element = xml_element 'primitive', data, primitive_skip_attributes - - # instance attributes - if data['instance_attributes'].respond_to?(:each) && data['instance_attributes'].any? - instance_attributes_document = xml_document 'instance_attributes', primitive_element - instance_attributes_document.add_attribute 'id', data['id'] + '-instance_attributes' - sort_data(data['instance_attributes']).each do |instance_attribute| - instance_attribute_element = xml_element 'nvpair', instance_attribute - instance_attributes_document.add_element instance_attribute_element if instance_attribute_element - end - end - - # meta attributes - if data['meta_attributes'].respond_to?(:each) && data['meta_attributes'].any? 
- complex_meta_attributes_document = xml_document 'meta_attributes', primitive_element - complex_meta_attributes_document.add_attribute 'id', data['id'] + '-meta_attributes' - sort_data(data['meta_attributes']).each do |meta_attribute| - meta_attribute_element = xml_element 'nvpair', meta_attribute - complex_meta_attributes_document.add_element meta_attribute_element if meta_attribute_element - end - end - - # operations - if data['operations'].respond_to?(:each) && data['operations'].any? - operations_document = xml_document 'operations', primitive_element - sort_data(data['operations']).each do |operation| - operation_element = xml_element 'op', operation, %w(OCF_CHECK_LEVEL) - if operation.key? 'OCF_CHECK_LEVEL' - instance_attributes_document = xml_document 'instance_attributes', operation_element - instance_attributes_id = operation['id'] + '-instance_attributes' - instance_attributes_document.add_attribute 'id', instance_attributes_id - ocf_check_level_id = instance_attributes_id + '-OCF_CHECK_LEVEL' - ocf_check_level_structure = { - 'id' => ocf_check_level_id, - 'name' => 'OCF_CHECK_LEVEL', - 'value' => operation['OCF_CHECK_LEVEL'], - } - ocf_check_level_element = xml_element 'nvpair', ocf_check_level_structure - instance_attributes_document.add_element ocf_check_level_element if ocf_check_level_element - end - operations_document.add_element operation_element if operation_element - end - end - - # complex structure - if data['complex'].is_a?(Hash) && write_complex_types.include?(data['complex']['type'].to_s) - skip_complex_attributes = 'type' - complex_tag_name = data['complex']['type'].to_s - complex_element = xml_element complex_tag_name, data['complex'], skip_complex_attributes - - # complex meta attributes - if data['complex']['meta_attributes'].respond_to?(:each) && data['complex']['meta_attributes'].any? - complex_meta_attributes_document = xml_document 'meta_attributes', complex_element - complex_meta_attributes_document.add_attribute 'id', data['complex']['id'] + '-meta_attributes' - sort_data(data['complex']['meta_attributes']).each do |meta_attribute| - complex_meta_attribute_element = xml_element 'nvpair', meta_attribute - complex_meta_attributes_document.add_element complex_meta_attribute_element if complex_meta_attribute_element - end - end - - complex_element.add_element primitive_element - return complex_element - end - - primitive_element - end - end -end diff --git a/lib/pacemaker/xml/properties.rb b/lib/pacemaker/xml/properties.rb deleted file mode 100644 index fc3223ba..00000000 --- a/lib/pacemaker/xml/properties.rb +++ /dev/null @@ -1,52 +0,0 @@ -module Pacemaker - # functions related to the cluster properties - # main structure "cluster_properties" - module Properties - # get cluster property CIB section - # @return [REXML::Element] - def cib_section_cluster_property - REXML::XPath.match(cib, '/cib/configuration/crm_config/cluster_property_set').first - end - - # get cluster property structure - # @return [Hash Hash>] - def cluster_properties - return @cluster_properties_structure if @cluster_properties_structure - @cluster_properties_structure = children_elements_to_hash cib_section_cluster_property, 'name' - end - - # get the value of a cluster property by it's name - # @param property_name [String] the name of the property - # @return [String] - def cluster_property_value(property_name) - return unless cluster_property_defined? 
property_name - cluster_properties[property_name]['value'] - end - - # set the value to this cluster's property - # @param property_name [String] the name of the property - # @param property_value [String] the value of the property - def cluster_property_set(property_name, property_value) - options = ['--quiet', '--type', 'crm_config', '--name', property_name] - options += ['--update', property_value] - retry_block { crm_attribute_safe options } - end - - # delete this cluster's property - # @param property_name [String] the name of the property - def cluster_property_delete(property_name) - options = ['--quiet', '--type', 'crm_config', '--name', property_name] - options += ['--delete'] - retry_block { crm_attribute_safe options } - end - - # check if this property has a value - # @param property_name [String] the name of the property - # @return [TrueClass,FalseClass] - def cluster_property_defined?(property_name) - return false unless cluster_properties.key? property_name - return false unless cluster_properties[property_name].is_a?(Hash) && cluster_properties[property_name]['value'] - true - end - end -end diff --git a/lib/pacemaker/xml/resource_default.rb b/lib/pacemaker/xml/resource_default.rb deleted file mode 100644 index d569bf91..00000000 --- a/lib/pacemaker/xml/resource_default.rb +++ /dev/null @@ -1,56 +0,0 @@ -module Pacemaker - # functions related to the resource defaults - # main structure "resource_defaults" - module ResourceDefault - # get resource defaults CIB section - # @return [REXML::Element] - def cib_section_resource_defaults - REXML::XPath.match(cib, '/cib/configuration/rsc_defaults/meta_attributes').first - end - - # the main 'resource_defaults' structure - # contains defaults operations and their values - # @return [Hash] - def resource_defaults - return @resource_defaults_structure if @resource_defaults_structure - @resource_defaults_structure = children_elements_to_hash cib_section_resource_defaults, 'name' - @resource_defaults_structure = {} unless @resource_defaults_structure - @resource_defaults_structure - end - - # extract a single resource default attribute value - # returns nil if it have not been set - # @param attribute_name [String] - # @return [String, nil] - def resource_default_value(attribute_name) - return unless resource_default_defined? attribute_name - resource_defaults[attribute_name]['value'] - end - - # set a single resource default value - # @param attribute_name [String] - # @param attribute_value [String] - def resource_default_set(attribute_name, attribute_value) - options = ['--quiet', '--type', 'rsc_defaults', '--attr-name', attribute_name] - options += ['--attr-value', attribute_value] - retry_block { crm_attribute_safe options } - end - - # remove a defined resource default attribute - # @param attribute_name [String] - def resource_default_delete(attribute_name) - options = ['--quiet', '--type', 'rsc_defaults', '--attr-name', attribute_name] - options += ['--delete-attr'] - retry_block { crm_attribute_safe options } - end - - # check if this resource default attribute have been defined - # @param attribute_name [String] - # @return [true,false] - def resource_default_defined?(attribute_name) - return false unless resource_defaults.key? 
attribute_name - return false unless resource_defaults[attribute_name].is_a?(Hash) && resource_defaults[attribute_name]['value'] - true - end - end -end diff --git a/lib/pacemaker/xml/status.rb b/lib/pacemaker/xml/status.rb deleted file mode 100644 index 7efc42a7..00000000 --- a/lib/pacemaker/xml/status.rb +++ /dev/null @@ -1,277 +0,0 @@ -module Pacemaker - # functions related to the primitive and node status - # main structure "node_status" - module Status - # get lrm_rsc_ops section from lrm_resource section CIB section - # @param lrm_resource [REXML::Element] - # at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource[@id="resource-name"]/lrm_rsc_op - # @return [REXML::Element] - def cib_section_lrm_rsc_ops(lrm_resource) - return unless lrm_resource.is_a? REXML::Element - REXML::XPath.match lrm_resource, 'lrm_rsc_op' - end - - # get node_state CIB section - # @return [REXML::Element] at /cib/status/node_state - def cib_section_node_state - REXML::XPath.match cib, '//node_state' - end - - # get lrm_rsc_ops section from lrm_resource section CIB section - # @param lrm [REXML::Element] - # at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource - # @return [REXML::Element] - def cib_section_lrm_resources(lrm) - return unless lrm.is_a? REXML::Element - REXML::XPath.match lrm, 'lrm_resources/lrm_resource' - end - - # determine the status of a single operation - # @param op [Hash String>] - # @return ['start','stop','master',nil] - def operation_status(op) - # skip pendings ops - # we should waqit until status becomes known - return if op['op-status'] == '-1' - - if op['operation'] == 'monitor' - # for monitor operation status is determined by its rc-code - # 0 - start, 8 - master, 7 - stop, else - error - case op['rc-code'] - when '0' - 'start' - when '7' - 'stop' - when '8' - 'master' - else - # not entirely correct but count failed monitor as 'stop' - 'stop' - end - elsif %w(start stop promote demote).include? 
op['operation'] - # if the operation was not successful the status is unknown - # it will be determined by the next monitor - # if Pacemaker is unable to bring the resource to a known state - # it can use STONITH on this node if it's configured - return unless op['rc-code'] == '0' - # for a successful start/stop/promote/demote operations - # we use use master instead of promote and start instead of demote - if op['operation'] == 'promote' - 'master' - elsif op['operation'] == 'demote' - 'start' - else - op['operation'] - end - end - end - - # determine resource status by parsing its operations - # it goes from the first operation to the last updating - # status if it's defined in the end there will be the - # actual status of this primitive - # @param ops [Array] - # @return ['start','stop','master',nil] - # nil means that the status is unknown - def determine_primitive_status(ops) - status = nil - ops.each do |op| - op_status = operation_status op - status = op_status if op_status - end - status - end - - # decode lrm_resources section of CIB - # @param lrm_resources [REXML::Element] - # @param [String] node_name - # @return [Hash Hash>] - def decode_lrm_resources(lrm_resources, node_name=nil) - resources = {} - lrm_resources.each do |lrm_resource| - resource = attributes_to_hash lrm_resource - id = resource['id'] - next unless id - lrm_rsc_ops = cib_section_lrm_rsc_ops lrm_resource - next unless lrm_rsc_ops - ops = decode_lrm_rsc_ops lrm_rsc_ops - resource.store 'ops', ops - resource.store 'status', determine_primitive_status(ops) - resource.store 'failed', failed_operations_found?(ops) - debug resource_operations_report ops, resource, node_name if pacemaker_options[:debug_show_operations] - resources.store id, resource - end - resources - end - - # decode lrm_rsc_ops section of the resource's CIB - # @param lrm_rsc_ops [REXML::Element] - # @return [Array] - def decode_lrm_rsc_ops(lrm_rsc_ops) - ops = [] - lrm_rsc_ops.each do |lrm_rsc_op| - op = attributes_to_hash lrm_rsc_op - next unless op['call-id'] - ops << op - end - ops.sort { |a, b| a['call-id'].to_i <=> b['call-id'].to_i } - end - - # get nodes_status structure with resources and their statuses - # @return [Hash Hash>] - def node_status - return @node_status_structure if @node_status_structure - @node_status_structure = {} - cib_section_node_state.each do |node_state| - node = attributes_to_hash node_state - node_name = node['uname'] - next unless node_name - lrm = node_state.elements['lrm'] - next unless lrm - lrm_resources = cib_section_lrm_resources lrm - next unless lrm_resources - resources = decode_lrm_resources lrm_resources, node_name - node.store 'primitives', resources - @node_status_structure.store node_name, node - end - @node_status_structure - end - - # check if operations have same failed operations - # that should be cleaned up later - # @param ops [Array] - # @return [TrueClass,FalseClass] - def failed_operations_found?(ops) - ops.each do |op| - # skip pending ops - next if op['op-status'] == '-1' - - # skip useless ops - next unless %w(start stop monitor promote).include? op['operation'] - - # are there failed start, stop - if %w(start stop promote).include? op['operation'] - return true if op['rc-code'] != '0' - end - - # are there failed monitors - if op['operation'] == 'monitor' - return true unless %w(0 7 8).include? 
op['rc-code'] - end - end - false - end - - # get a status of a primitive on the entire cluster - # of on a node if node name param given - # @param primitive [String] - # @param node [String] - # @return [String] - def primitive_status(primitive, node = nil) - if node - node_status - .fetch(node, {}) - .fetch('primitives', {}) - .fetch(primitive, {}) - .fetch('status', nil) - else - statuses = [] - node_status.each do |_node_name, node_status| - status = node_status.fetch('primitives', {}) - .fetch(primitive, {}) - .fetch('status', nil) - statuses << status - end - status_values = { - 'stop' => 0, - 'start' => 1, - 'master' => 2, - } - statuses.max_by do |status| - return nil unless status - status_values[status] - end - end - end - - # does this primitive have failed operations? - # @param primitive [String] primitive name - # @param node [String] on this node if given - # @return [TrueClass,FalseClass] - def primitive_has_failures?(primitive, node = nil) - return unless primitive_exists? primitive - if node - node_status - .fetch(node, {}) - .fetch('primitives', {}) - .fetch(primitive, {}) - .fetch('failed', nil) - else - node_status.each do |_k, v| - failed = v.fetch('primitives', {}) - .fetch(primitive, {}) - .fetch('failed', nil) - return true if failed - end - false - end - end - - # determine if a primitive is running on the entire cluster - # of on a node if node name param given - # @param primitive [String] primitive id - # @param node [String] on this node if given - # @return [TrueClass,FalseClass] - def primitive_is_running?(primitive, node = nil) - return unless primitive_exists? primitive - status = primitive_status primitive, node - return status unless status - %w(start master).include? status - end - - # check if primitive is running as a master - # either anywhere or on the give node - # @param primitive [String] primitive id - # @param node [String] on this node if given - # @return [TrueClass,FalseClass] - def primitive_has_master_running?(primitive, node = nil) - is_master = primitive_is_master? primitive - return is_master unless is_master - status = primitive_status primitive, node - return status unless status - status == 'master' - end - - # generate the report of primitive statuses by node - # @return [Hash] - def primitives_status_by_node - report = {} - return unless node_status.is_a? Hash - node_status.each do |node_name, node_data| - primitives_of_node = node_data['primitives'] - next unless primitives_of_node.is_a? Hash - primitives_of_node.each do |primitive, primitive_data| - primitive_status = primitive_data['status'] - report[primitive] = {} unless report[primitive].is_a? Hash - report[primitive][node_name] = primitive_status - end - end - report - end - - # Get the list on node names where this primitive - # has the specified status. 
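The status decoding above is essentially a lookup over operation names and rc-codes (0 = started, 7 = stopped, 8 = master), folded over the operation history in call-id order. A compact sketch of that fold with hypothetical method names; it skips pending operations the same way operation_status does:

```
# Map one LRM operation record to a primitive state, or nil if unknown.
def op_state(op)
  return nil if op['op-status'] == '-1'  # pending: state not yet known
  if op['operation'] == 'monitor'
    { '0' => 'start', '7' => 'stop', '8' => 'master' }.fetch(op['rc-code'], 'stop')
  elsif %w(start stop promote demote).include? op['operation']
    return nil unless op['rc-code'] == '0'  # failed: wait for the next monitor
    { 'promote' => 'master', 'demote' => 'start' }.fetch(op['operation'], op['operation'])
  end
end

# Fold the (call-id sorted) history; the last known state wins.
def fold_status(ops)
  ops.reduce(nil) { |status, op| op_state(op) || status }
end

history = [
  { 'operation' => 'start',   'rc-code' => '0', 'op-status' => '0' },
  { 'operation' => 'monitor', 'rc-code' => '8', 'op-status' => '0' },
]
puts fold_status(history)
# => master
```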
- # @param [String] primitive - # @param [String,Symbol] expected_status (stop/start/master) - # @return [Array] The array of node names where the primitive has this status - def primitive_has_status_on(primitive, expected_status = 'start') - expected_status = expected_status.to_s.downcase - primitive_status_by_node = primitives_status_by_node[primitive] - primitive_status_by_node.inject([]) do |found_nodes, node_and_status| - next found_nodes unless node_and_status.last == expected_status - found_nodes << node_and_status.first - end - end - - end -end diff --git a/lib/pacemaker/xml/xml.rb b/lib/pacemaker/xml/xml.rb deleted file mode 100644 index 9cc37d95..00000000 --- a/lib/pacemaker/xml/xml.rb +++ /dev/null @@ -1,65 +0,0 @@ -module Pacemaker - # functions that are used to generate XML documents and create XML patches - module Xml - # create a new xml document - # @param path [String,Array] create this sequence of path elements - # @param root [REXML::Document] use existing element as a root instead of creating a new one - # @return [REXML::Element] element point to the last path component - # use .root to get the document root - def xml_document(path = [], root = nil) - root = REXML::Document.new unless root - element = root - path = Array(path) unless path.is_a? Array - path.each do |component| - element = element.add_element component - end - element - end - - # convert hash to xml element - # @param tag [String] what xml tag to create - # @param hash [Hash] attributes data structure - # @param skip_attributes [String,Array] skip these hash keys - # @return [REXML::Element] - def xml_element(tag, hash, skip_attributes = nil) - return unless hash.is_a? Hash - element = REXML::Element.new tag.to_s - hash.each do |attribute, value| - attribute = attribute.to_s - # skip attributes that were specified to be skipped - next if skip_attributes == attribute || - (skip_attributes.respond_to?(:include?) && skip_attributes.include?(attribute)) - # skip array and hash values. add only scalar ones - next if value.is_a?(Array) || value.is_a?(Hash) - element.add_attribute attribute, value - end - element - end - - # output xml element as the actual xml text with indentation - # @param element [REXML::Element] - # @return [String] - def xml_pretty_format(element) - return unless element.is_a? REXML::Element - formatter = REXML::Formatters::Pretty.new - formatter.compact = true - xml = '' - formatter.write element, xml - xml + "\n" - end - end -end - -# external REXML module patching -module REXML - # make REXML's attributes to be sorted by their name - # when iterating through them instead of randomly placing them each time - # it's required to generate stable XML texts for unit testing - class Attributes - def each_value # :yields: attribute - keys.sort.each do |key| - yield fetch key - end - end - end -end diff --git a/lib/puppet/parser/functions/pacemaker_cluster_nodes.rb b/lib/puppet/parser/functions/pacemaker_cluster_nodes.rb deleted file mode 100644 index b1b2f0f3..00000000 --- a/lib/puppet/parser/functions/pacemaker_cluster_nodes.rb +++ /dev/null @@ -1,191 +0,0 @@ -module Puppet::Parser::Functions - newfunction( - :pacemaker_cluster_nodes, - type: :rvalue, - arity: -2, - doc: <<-eof -Convert different forms of node list to the required form. - -Input data can be: -* String 'n1 n2a,n2b n3' -* Array ['n1', ['n2a','n2b'], 'n3'] -* Hash { 'name' => 'n1', 'ring0' => '192.168.0.1' } - -Hash can have optional keys: name, id, ip, votes, - -Node id will be autogenerated unless id key is provided. 
- -Internal nodes structure example: - ``` - [ - { - name: 'node1', - id: '1', - votes: '2', - ring0: '192.168.0.1', - ring1: '172.16.0.1', - }, - { - name: 'node2', - id: '2', - votes: '1', - ring0: '192.168.0.2', - ring1: '172.16.0.2', - }, - { - name: 'node3', - id: '3', - votes: '1', - ring0: '192.168.0.3', - ring1: '172.16.0.3', - } - ] - # All fields except at least one ring address are optional - ``` - -If neither 'ring0' nor 'ring1' are found fields 'ip' and 'name' will be used. - -Output forms: -* hash - output the hash of the node ids and their data (used for corosync config template) -* list - output the space ad comma separated nodes and interfaces (used for "pcs cluster setup") -* array - output the plain array of nodes (used for pacemaker_auth or "pcs cluster auth") - eof - ) do |args| - nodes = args[0] - form = args[1] || 'hash' - form = form.to_s.downcase - raise(Puppet::Error, 'Nodes are not provided!') if [String, Hash, Array].include? nodes.class and nodes.empty? - forms = %w(list array hash) - raise(Puppet::Error, "Unknown form: '#{form}'") unless forms.include? form - - array_formatter = lambda do |structure| - list = [] - structure.each do |node| - next unless node.is_a? Hash - list << node['ring0'] if node['ring0'] - list << node['ring1'] if node['ring1'] - end - list.flatten.compact.uniq - end - - list_formatter = lambda do |structure| - list = [] - structure.each do |node| - node_rings = [] - node_rings[0] = node['ring0'] if node['ring0'] - node_rings[1] = node['ring1'] if node['ring1'] - list << node_rings.join(',') - end - list.join ' ' - end - - hash_formatter = lambda do |structure| - hash = {} - structure.each do |node| - id = node['id'] - next unless id - hash[id] = node - end - hash - end - - node_split_to_rings = lambda do |node| - node = node.to_s.chomp.strip - rings = node.split ',' - node_hash = {} - node_hash['ring0'] = rings[0].to_s if rings[0] and rings[0] != '' - node_hash['ring1'] = rings[1].to_s if rings[1] and rings[1] != '' - node_hash - end - - node_hash_process = lambda do |node| - ring0 = node['ring0'] - ring1 = node['ring1'] - ring0 = node['ip'] if node['ip'] and not ring0 - ring0 = node['name'] if node['name'] and not ring0 - node_hash = {} - node_hash['ring0'] = ring0.to_s if ring0 - node_hash['ring1'] = ring1.to_s if ring1 - node_hash['name'] = node['name'].to_s if node['name'] - node_hash['id'] = node['id'].to_s if node['id'] - node_hash['vote'] = node['vote'].to_s if node['vote'] - node_hash - end - - string_parser = lambda do |string| - string = string.to_s.chomp.strip - node_list = [] - string.split.each do |node| - node_hash = node_split_to_rings.call node - next unless node_hash['ring0'] or node_hash['ring1'] - node_list << node_hash - end - node_list - end - - array_parser = lambda do |array| - array = [array] unless array.is_a? Array - node_list = [] - array.each do |node| - if node.is_a? Array - node_hash = {} - node_hash['ring0'] = node[0].to_s if node[0] - node_hash['ring1'] = node[1].to_s if node[1] - elsif node.is_a? Hash - node_hash = node_hash_process.call node - else - node_hash = node_split_to_rings.call node.to_s - end - next unless node_hash['ring0'] or node_hash['ring1'] - node_list << node_hash - end - node_list - end - - hash_parser = lambda do |hash| - raise(Puppet::Error, "Data is not a hash: #{hash.inspect}") unless hash.is_a? 
Hash - node_list = [] - hash.each do |node_name, node| - node = node.dup - node['name'] = node_name if node_name and not node['name'] - node_hash = node_hash_process.call node - next unless node_hash['ring0'] or node_hash['ring1'] - node_list << node_hash - end - node_list - end - - set_node_ids = lambda do |structure| - next_id = 1 - structure.each do |node| - unless node['id'] - node['id'] = next_id.to_s - next_id += 1 - end - end - end - - structure = [] - - if nodes.is_a? String - structure = string_parser.call nodes - elsif nodes.is_a? Array - structure = array_parser.call nodes - elsif nodes.is_a? Hash - structure = hash_parser.call nodes - else - raise(Puppet::Error, "Got unsupported nodes input data: #{nodes.inspect}") - end - - set_node_ids.call structure - - if form == 'hash' - hash_formatter.call structure - elsif form == 'list' - list_formatter.call structure - elsif form == 'array' - array_formatter.call structure - end - - end -end diff --git a/lib/puppet/parser/functions/pacemaker_cluster_options.rb b/lib/puppet/parser/functions/pacemaker_cluster_options.rb deleted file mode 100644 index e545b320..00000000 --- a/lib/puppet/parser/functions/pacemaker_cluster_options.rb +++ /dev/null @@ -1,28 +0,0 @@ -module Puppet::Parser::Functions - newfunction( - :pacemaker_cluster_options, - type: :rvalue, - arity: 1, - doc: <<-eof -Convert the cluster options to the "pcs cluster create" CLI options string -eof - ) do |args| - options = args[0] - break '' unless options - break options if options.is_a? String - if options.is_a? Hash - options_array = [] - options.each do |option, value| - option = "--#{option}" unless option.start_with? '--' - if value.is_a? TrueClass or value.is_a? FalseClass - options_array << option if value - else - options_array << option - options_array << value - end - end - options = options_array - end - [options].flatten.join ' ' - end -end diff --git a/lib/puppet/parser/functions/pacemaker_resource_parameters.rb b/lib/puppet/parser/functions/pacemaker_resource_parameters.rb deleted file mode 100644 index e1125182..00000000 --- a/lib/puppet/parser/functions/pacemaker_resource_parameters.rb +++ /dev/null @@ -1,26 +0,0 @@ -module Puppet::Parser::Functions - newfunction( - :pacemaker_resource_parameters, - type: :rvalue, - arity: -1, - doc: <<-eof -Gather resource parameters and their values - eof - ) do |args| - parameters = {} - args.flatten.each_slice(2) do |key, value| - if value.nil? and key.is_a? Hash - parameters.merge! key - else - next if key.nil? - next if key == '' - next if value.nil? 
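pacemaker_cluster_options above turns an options hash into the flag string handed to "pcs cluster setup", mirroring the rule that boolean values become bare flags while everything else keeps its value. A standalone sketch of the same conversion (illustrative function name, no Puppet plumbing):

```
def cluster_options_string(options)
  return '' unless options
  return options if options.is_a? String
  args = []
  options.each do |option, value|
    option = "--#{option}" unless option.to_s.start_with? '--'
    if value == true || value == false
      args << option if value          # booleans become bare flags
    else
      args << option << value          # anything else keeps its value
    end
  end
  args.join ' '
end

puts cluster_options_string('wait_for_all' => true, 'token' => 10_000)
# => --wait_for_all --token 10000
```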
- next if value == '' - next if value == :undef - key = key.to_s - parameters.store key, value - end - end - parameters - end -end diff --git a/lib/puppet/parser/functions/pcmk_cluster_setup.rb b/lib/puppet/parser/functions/pcmk_cluster_setup.rb deleted file mode 100644 index 677d1f7e..00000000 --- a/lib/puppet/parser/functions/pcmk_cluster_setup.rb +++ /dev/null @@ -1,62 +0,0 @@ -require_relative '../../provider/pcmk_common' - -module Puppet::Parser::Functions - newfunction( - :pcmk_cluster_setup, - type: :rvalue, - arity: -1, - doc: <<-eof -Input data cluster_members string separated by a space: -* String A space-separated string containing a list of node names -* String A list containing either a single string (single ip) or a list of strings - (multiple ipaddresses) associated to each cluster node -* String the version of pcs used - -Output forms: -* string - Output A string to be used in the cluster setup call to pcs - eof - ) do |args| - nodes = args[0] - addr_list = args[1] - pcs_version = args[2] - fail "pcmk_cluster_setup: Got unsupported nodes input data: #{nodes.inspect}" if not nodes.is_a? String - fail "pcmk_cluster_setup: Got unsupported addr_list input data: #{addr_list.inspect}" if not addr_list.is_a? Array - fail "pcmk_cluster_setup: Got unsupported version input data: #{pcs_version.inspect}" if not pcs_version.is_a? String - node_list = nodes.split() - fail "pcmk_cluster_setup: node list and addr list should be of the same size when defined and not empty" if addr_list.size > 0 and addr_list.size != node_list.size - # pcs 0.10 supports knet clusters which require addresses to be specified - if pcs_version =~ /0.10/ - # If the addr_list was specified we need to return a string in the form of - # node1 addr=1.2.3.4 node2 addr=1.2.3.5 addr=1.2.3.6 node3 addr=1.2.3.7 - if addr_list.size > 0 - ret = '' - node_list.zip(addr_list).each do |node, ip| - # addr can be '1.2.3.4' or ['1.2.3.4', '1.2.3.5'] or - if ip.is_a? String - addr = "addr=#{ip}" - elsif ip.is_a? Array - addr = '' - ip.each do |i| - addr += "addr=#{i}" - addr += " " if not i.equal?(ip.last) - end - else - fail "pcmk_cluster_setup: One of the addresses in addr_list is neither a String nor an Array" - end - ret += "#{node} #{addr}" - ret += " " if not node.equal?(node_list.last) - end - # only node_list is specified so we just return the original string - else - ret = nodes.strip() - end - ret - elsif pcs_version =~ /0.9/ - # With pcs 0.9 only non-knet clusters are supported, aka only one address can be used - # so we take the node name as we always did - nodes.strip() - else - fail("pcmk_cluster_setup: pcs #{pcs_version} is unsupported") - end - end -end diff --git a/lib/puppet/parser/functions/pcmk_nodes_added.rb b/lib/puppet/parser/functions/pcmk_nodes_added.rb deleted file mode 100644 index 5bd22478..00000000 --- a/lib/puppet/parser/functions/pcmk_nodes_added.rb +++ /dev/null @@ -1,101 +0,0 @@ -module Puppet::Parser::Functions - newfunction( - :pcmk_nodes_added, - type: :rvalue, - arity: -1, - doc: <<-eof -Input data cluster_members string separated by a space: -* String A space-separated string containing a list of node names -* String A list containing either a single string (single ip) or a list of strings - (multiple ipaddresses) associated to each cluster node -* String the version of pcs used -* Output of `crm_node -l` (only used to ease unit testing) (optional) - -Output forms: -* array - output the plain array of nodes that have been added compared - to the running cluster. 
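For pcs 0.10 (knet), pcmk_cluster_setup above expands each node into "<name> addr=<ring0> [addr=<ring1> ...]". A trimmed-down sketch of that expansion, omitting the input validation and the pcs 0.9 branch; node and address values are hypothetical:

```
def knet_setup_args(node_list, addr_list)
  node_list.zip(addr_list).map do |node, addrs|
    ring_args = Array(addrs).map { |ip| "addr=#{ip}" }.join(' ')
    "#{node} #{ring_args}"
  end.join(' ')
end

nodes = %w(ctrl-0 ctrl-1 ctrl-2)
addrs = ['192.168.0.1', ['192.168.0.2', '172.16.0.2'], '192.168.0.3']
puts knet_setup_args(nodes, addrs)
# => ctrl-0 addr=192.168.0.1 ctrl-1 addr=192.168.0.2 addr=172.16.0.2 ctrl-2 addr=192.168.0.3
```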
It returns an empty array in case the - cluster is not set up or if crm_node return an error - eof - ) do |args| - # no point in doing this if the crm_node executable does not exist - return [] if Facter::Util::Resolution.which('crm_node') == nil - nodes = args[0] - addr_list = args[1] - pcs_version = args[2] - crm_node_list = args[3] - unless nodes.is_a? String - fail "Got unsupported nodes input data: #{nodes.inspect}" - end - unless addr_list.is_a? Array - fail "Got unsupported addr_list input data: #{addr_list.inspect}" - end - if crm_node_list && !crm_node_list.kind_of?(String) then - fail "Got unsupported crm_node_list #{crm_node_list.inspect}" - end - node_list = nodes.split() - fail "pcmk_cluster_setup: node list and addr list should be of the same size when defined and not empty" if addr_list.size > 0 and addr_list.size != node_list.size - - if crm_node_list && crm_node_list.kind_of?(String) then - return [] if crm_node_list.empty? - crm_nodes_output = crm_node_list - else - # A typical crm_node -l output is like the following: - # [root@foobar-0 ~]# crm_node -l - # 3 foobar-2 member - # 1 foobar-0 member - # 2 foobar-1 lost - crm_nodes_output = `crm_node -l` - # if the command fails we certainly did not add any nodes - return [] if $?.exitstatus != 0 - end - Puppet.debug("pcmk_nodes_added: crm_nodes_output #{crm_nodes_output}") - - crm_nodes = [] - crm_nodes_output.lines.each { |line| - (id, node, state, _) = line.split(" ").collect(&:strip) - valid_states = %w(member lost) - state.downcase! if state - crm_nodes.push(node.strip) if valid_states.include? state - } - nodes_added = node_list - crm_nodes - - if pcs_version =~ /0.10/ - # If the addr_list was specified we need to return a list in the form of - # ['node1 addr=1.2.3.4', 'node2 addr=1.2.3.5 addr=1.2.3.6', 'node3 addr=1.2.3.7'] - if addr_list.size > 0 - ret = [] - nodes_addrs_added = node_list.zip(addr_list) - .select { |node_addr| nodes_added.include?(node_addr[0]) } - nodes_addrs_added.each do |node_addr| - node = node_addr[0] - ip = node_addr[1] - # addr can be '1.2.3.4' or ['1.2.3.4', '1.2.3.5'] or - if ip.is_a? String - addr = "addr=#{ip}" - elsif ip.is_a? 
Array - addr = '' - ip.each do |i| - addr += "addr=#{i}" - addr += " " if not i.equal?(ip.last) - end - else - fail "pcmk_nodes_added: One of the addresses in addr_list is neither a String nor an Array" - end - ret << "#{node} #{addr}" - end - # only node_added is specified so we just return the original string - else - ret = nodes_added - end - elsif pcs_version =~ /0.9/ - # With pcs 0.9 only non-knet clusters are supported, aka only one address can be used - # so we take the node name as we always did - ret = nodes_added - else - fail("pcmk_nodes_added: pcs #{pcs_version} is unsupported") - end - - Puppet.debug("pcmk_nodes_added: #{ret} [#{node_list} - #{crm_nodes}]") - ret - end -end diff --git a/lib/puppet/provider/pacemaker_colocation/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_colocation/pacemaker_noop.rb deleted file mode 100644 index ba5be717..00000000 --- a/lib/puppet/provider/pacemaker_colocation/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_colocation).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_colocation/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_colocation/pacemaker_xml.rb deleted file mode 100644 index 88333b90..00000000 --- a/lib/puppet/provider/pacemaker_colocation/pacemaker_xml.rb +++ /dev/null @@ -1,170 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_colocation).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc 'Specific provider for a rather specific type since I currently have no plan to - abstract corosync/pacemaker vs. keepalived. This provider will check the state - of current primitive colocations on the system; add, delete, or adjust various - aspects.' - - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - - attr_accessor :property_hash - attr_accessor :resource - - def self.instances - debug 'Call: self.instances' - proxy_instance = new - instances = [] - proxy_instance.constraint_colocations.map do |title, data| - parameters = {} - debug "Prefetch constraint_colocation: #{title}" - proxy_instance.retrieve_data data, parameters - instance = new(parameters) - instance.cib = proxy_instance.cib - instances << instance - end - instances - end - - def self.prefetch(catalog_instances) - debug 'Call: self.prefetch' - return unless pacemaker_options[:prefetch] - discovered_instances = instances - discovered_instances.each do |instance| - next unless catalog_instances.key? 
instance.name - catalog_instances[instance.name].provider = instance - end - end - - # retrieve data from library to the target_structure - # @param data [Hash] extracted colocation data - # will extract the current colocation data unless a value is provided - # @param target_structure [Hash] copy data to this structure - # defaults to the property_hash of this provider - def retrieve_data(data = nil, target_structure = property_hash) - data = constraint_colocations.fetch resource[:name], {} unless data - target_structure[:name] = data['id'] if data['id'] - target_structure[:ensure] = :present - target_structure[:first] = data['with-rsc'] if data['with-rsc'] - target_structure[:first] += ":#{data['with-rsc-role']}" if data['with-rsc-role'] - target_structure[:second] = data['rsc'] if data['rsc'] - target_structure[:second] += ":#{data['rsc-role']}" if data['rsc-role'] - target_structure[:score] = data['score'] if data['score'] - end - - def exists? - debug 'Call: exists?' - out = constraint_colocation_exists? resource[:name] - retrieve_data - debug "Return: #{out}" - out - end - - # check if the colocation ensure is set to present - # @return [TrueClass,FalseClass] - def present? - property_hash[:ensure] == :present - end - - # Create just adds our resource to the property_hash and flush will take care - # of actually doing the work. - def create - debug 'Call: create' - self.property_hash = { - name: resource[:name], - ensure: :absent, - first: resource[:first], - second: resource[:second], - score: resource[:score], - } - end - - # Unlike create we actually immediately delete the item. - def destroy - debug 'Call: destroy' - constraint_colocation_remove resource[:name] - property_hash.clear - cluster_debug_report "#{resource} destroy" - end - - # Getter that obtains the our score that should have been populated by - # prefetch or instances (depends on if your using puppet resource or not). - def score - property_hash[:score] - end - - # Getters that obtains the first and second primitives and score in our - # ordering definition that have been populated by prefetch or instances - # (depends on if your using puppet resource or not). - def first - property_hash[:first] - end - - def second - property_hash[:second] - end - - # Our setters for the first and second primitives and score. Setters are - # used when the resource already exists so we just update the current value - # in the property hash and doing this marks it to be flushed. - def first=(should) - property_hash[:first] = should - end - - def second=(should) - property_hash[:second] = should - end - - def score=(should) - property_hash[:score] = should - end - - # Flush is triggered on anything that has been detected as being - # modified in the property_hash. It generates a temporary file with - # the updates that need to be made. The temporary file is then used - # as stdin for the crm command. - def flush - debug 'Call: flush' - return unless property_hash && property_hash.any? - - unless property_hash[:name] && property_hash[:score] && property_hash[:first] && property_hash[:second] - raise 'Data does not contain all the required fields!' - end - - unless primitive_exists? primitive_base_name property_hash[:first] - raise "Primitive '#{property_hash[:first]}' does not exist!" - end - - unless primitive_exists? primitive_base_name property_hash[:second] - raise "Primitive '#{property_hash[:second]}' does not exist!" 
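The flush method just below turns the type's first/second parameters (each optionally "primitive:role") into the rsc/with-rsc attributes of an rsc_colocation element. A standalone sketch of that mapping with hypothetical resource names:

```
def colocation_structure(name, first, second, score)
  with_rsc, with_rsc_role = first.split ':'
  rsc, rsc_role = second.split ':'
  structure = { 'id' => name, 'score' => score, 'rsc' => rsc, 'with-rsc' => with_rsc }
  structure['rsc-role'] = rsc_role if rsc_role
  structure['with-rsc-role'] = with_rsc_role if with_rsc_role
  structure
end

puts colocation_structure('vip-with-haproxy', 'haproxy-clone', 'vip:Master', 'INF').inspect
# => {"id"=>"vip-with-haproxy", "score"=>"INF", "rsc"=>"vip", "with-rsc"=>"haproxy-clone", "rsc-role"=>"Master"}
```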
- end - - colocation_structure = {} - colocation_structure['id'] = property_hash[:name] - colocation_structure['score'] = property_hash[:score] - - first_element_array = property_hash[:first].split ':' - second_element_array = property_hash[:second].split ':' - - colocation_structure['rsc'] = second_element_array[0] - colocation_structure['rsc-role'] = second_element_array[1] if second_element_array[1] - colocation_structure['with-rsc'] = first_element_array[0] - colocation_structure['with-rsc-role'] = first_element_array[1] if first_element_array[1] - - colocation_patch = xml_document - colocation_element = xml_rsc_colocation colocation_structure - raise "Could not create XML patch for '#{resource}'" unless colocation_element - colocation_patch.add_element colocation_element - - if present? - wait_for_constraint_update xml_pretty_format(colocation_patch.root), colocation_structure['id'] - else - wait_for_constraint_create xml_pretty_format(colocation_patch.root), colocation_structure['id'] - end - cluster_debug_report "#{resource} flush" - end -end diff --git a/lib/puppet/provider/pacemaker_location/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_location/pacemaker_noop.rb deleted file mode 100644 index a4da3c29..00000000 --- a/lib/puppet/provider/pacemaker_location/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_location).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_location/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_location/pacemaker_xml.rb deleted file mode 100644 index c391d04a..00000000 --- a/lib/puppet/provider/pacemaker_location/pacemaker_xml.rb +++ /dev/null @@ -1,168 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_location).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc 'Specific provider for a rather specific type since I currently have no plan to - abstract corosync/pacemaker vs. keepalived. This provider will check the state - of current primitive colocations on the system; add, delete, or adjust various aspects.' - - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - - attr_accessor :property_hash - attr_accessor :resource - - def self.instances - debug 'Call: self.instances' - proxy_instance = new - instances = [] - proxy_instance.constraint_locations.map do |title, data| - parameters = {} - debug "Prefetch constraint_location: #{title}" - proxy_instance.retrieve_data data, parameters - instance = new(parameters) - instance.cib = proxy_instance.cib - instances << instance - end - instances - end - - def self.prefetch(catalog_instances) - debug 'Call: self.prefetch' - return unless pacemaker_options[:prefetch] - discovered_instances = instances - discovered_instances.each do |instance| - next unless catalog_instances.key? 
instance.name - catalog_instances[instance.name].provider = instance - end - end - - # retrieve data from library to the target_structure - # @param data [Hash] extracted location data - # will extract the current location data unless a value is provided - # @param target_structure [Hash] copy data to this structure - # defaults to the property_hash of this provider - def retrieve_data(data = nil, target_structure = property_hash) - debug 'Call: retrieve_data' - data = constraint_locations.fetch resource[:name], {} unless data - target_structure[:ensure] = :present - target_structure[:name] = data['id'] if data['id'] - target_structure[:primitive] = data['rsc'] if data['rsc'] - target_structure[:node] = data['node'] if data['node'] - target_structure[:score] = data['score'] if data['score'] - target_structure[:rules] = data['rules'] if data['rules'] - end - - def exists? - debug 'Call: exists?' - out = constraint_location_exists? resource[:name] - retrieve_data - debug "Return: #{out}" - out - end - - # check if the location ensure is set to present - # @return [TrueClass,FalseClass] - def present? - property_hash[:ensure] == :present - end - - # Create just adds our resource to the property_hash and flush will take care - # of actually doing the work. - def create - debug 'Call: create' - self.property_hash = { - name: resource[:name], - ensure: :absent, - primitive: resource[:primitive], - node: resource[:node], - score: resource[:score], - rules: resource[:rules], - } - end - - # Unlike create we actually immediately delete the item. - def destroy - debug 'Call: destroy' - constraint_location_remove resource[:name] - property_hash.clear - cluster_debug_report "#{resource} destroy" - end - - # Getter that obtains the primitives array for us that should have - # been populated by prefetch or instances (depends on if your using - # puppet resource or not). - def primitive - property_hash[:primitive] - end - - def score - property_hash[:score] - end - - def rules - property_hash[:rules] - end - - def node - property_hash[:node] - end - - # Our setters for the primitives array and score. Setters are used when the - # resource already exists so we just update the current value in the property - # hash and doing this marks it to be flushed. - def rules=(should) - property_hash[:rules] = should - end - - def primitives=(should) - property_hash[:primitive] = should - end - - def score=(should) - property_hash[:score] = should - end - - def node=(should) - property_hash[:node] = should - end - - # Flush is triggered on anything that has been detected as being - # modified in the property_hash. It generates a temporary file with - # the updates that need to be made. The temporary file is then used - # as stdin for the crm command. - def flush - debug 'Call: flush' - return unless property_hash && property_hash.any? - - unless primitive_exists? primitive_base_name property_hash[:primitive] - raise "Primitive '#{property_hash[:primitive]}' does not exist!" - end - - unless property_hash[:name] && property_hash[:primitive] && - (property_hash[:rules] || (property_hash[:score] && property_hash[:node])) - raise 'Data does not contain all the required fields!' 
- end - - location_structure = {} - location_structure['id'] = property_hash[:name] - location_structure['rsc'] = property_hash[:primitive] - location_structure['score'] = property_hash[:score] if property_hash[:score] - location_structure['node'] = property_hash[:node] if property_hash[:node] - location_structure['rules'] = property_hash[:rules] if property_hash[:rules] - - location_patch = xml_document - location_element = xml_rsc_location location_structure - raise "Could not create XML patch for '#{resource}'" unless location_element - location_patch.add_element location_element - - if present? - wait_for_constraint_update xml_pretty_format(location_patch.root), location_structure['id'] - else - wait_for_constraint_create xml_pretty_format(location_patch.root), location_structure['id'] - end - cluster_debug_report "#{resource} flush" - end -end diff --git a/lib/puppet/provider/pacemaker_nodes/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_nodes/pacemaker_noop.rb deleted file mode 100644 index 3859473d..00000000 --- a/lib/puppet/provider/pacemaker_nodes/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_nodes).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_nodes/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_nodes/pacemaker_xml.rb deleted file mode 100644 index 6e4b885c..00000000 --- a/lib/puppet/provider/pacemaker_nodes/pacemaker_xml.rb +++ /dev/null @@ -1,130 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_nodes).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - commands cmapctl: '/usr/sbin/corosync-cmapctl' - commands cibadmin: '/usr/sbin/cibadmin' - commands crm_node: '/usr/sbin/crm_node' - commands crm_attribute: '/usr/sbin/crm_attribute' - - def cmapctl_nodelist - cmapctl '-b', 'nodelist.node' - end - - def nodes_data - return {} unless @resource[:nodes].is_a? 
Hash - @resource[:nodes] - end - - # retrieve the current Corosync nodes - # the node number to match the "id" and the ring address lines - # @return - def corosync_nodes_structure - return @corosync_nodes_structure if @corosync_nodes_structure - nodes = {} - cmapctl_nodelist.split("\n").each do |line| - if line =~ /^nodelist\.node\.(\d+)\.nodeid\s+\(u32\)\s+=\s+(\d+)/ - # this is the 'id' line - node_number = Regexp.last_match(1) - node_id = Regexp.last_match(2) - nodes[node_number] = {} unless nodes[node_number] - nodes[node_number]['id'] = node_id - nodes[node_number]['number'] = node_number - end - if line =~ /^nodelist\.node\.(\d+)\.ring(\d+)_addr\s+\(str\)\s+=\s+(\S+)/ - node_number = Regexp.last_match(1) - ring_number = Regexp.last_match(2) - node_ip_addr = Regexp.last_match(3) - nodes[node_number] = {} unless nodes[node_number] - key = "ring#{ring_number}" - nodes[node_number][key] = node_ip_addr - end - end - @corosync_nodes_structure = {} - nodes.values.each do |node| - id = node['id'] - next unless id - @corosync_nodes_structure[id] = node - end - @corosync_nodes_structure - end - - # ids and name of current Pacemaker nodes - # @return - def pacemaker_nodes_structure - @pacemaker_nodes_structure = {} - nodes.each do |name, node| - id = node['id'] - next unless name && id - @pacemaker_nodes_structure.store id, name - end - @pacemaker_nodes_structure - end - - def pacemaker_nodes_reset - @corosync_nodes_structure = nil - @pacemaker_nodes_structure = nil - @resource_nodes_structure = nil - @node_name = nil - end - - def next_corosync_node_number - number = corosync_nodes_structure.inject(0) do |max, node| - number = node.last['number'].to_i - max = number if number > max - max - end - number += 1 - number.to_s - end - - def remove_pacemaker_node_record(node_name) - cibadmin_safe '--delete', '--scope', 'nodes', '--xml-text', "" - end - - def remove_pacemaker_node_state(node_name) - cibadmin_safe '--delete', '--scope', 'status', '--xml-text', "" - end - - def remove_location_constraints(node_name) - cibadmin_safe '--delete', '--scope', 'constraints', '--xml-text', "" - end - - def remove_corosync_node_record(node_number) - cmapctl_safe '-D', "nodelist.node.#{node_number}" - rescue => e - debug "Failed: #{e.message}" - end - - def add_corosync_node_record(node_number, node_id, ring0 = nil, ring1 = nil) - cmapctl_safe '-s', "nodelist.node.#{node_number}.nodeid", 'u32', node_id - cmapctl_safe '-s', "nodelist.node.#{node_number}.ring0_addr", 'str', ring0 if ring0 - cmapctl_safe '-s', "nodelist.node.#{node_number}.ring1_addr", 'str', ring1 if ring1 - end - - def remove_pacemaker_node(node_name) - debug "Remove the pacemaker node: '#{node_name}'" - remove_pacemaker_node_record node_name - remove_pacemaker_node_state node_name - remove_location_constraints node_name - pacemaker_nodes_reset - end - - def remove_corosync_node(node_id) - debug "Remove the corosync node: '#{node_id}'" - node_number = corosync_nodes_structure.fetch(node_id, {}).fetch('number') - raise "Could not get the node_number of the node_id: '#{node_id}'!" unless node_number - remove_corosync_node_record node_number - pacemaker_nodes_reset - end - - def add_corosync_node(node_id) - debug "Add corosync node: '#{node_id}'" - node_number = next_corosync_node_number - raise "Could not find node_id: '#{node_id}' in the resource data!" unless nodes_data[node_id].is_a? 
Hash - ring0 = nodes_data[node_id]['ring0'] - ring1 = nodes_data[node_id]['ring1'] - add_corosync_node_record node_number, node_id, ring0, ring1 - pacemaker_nodes_reset - end - -end diff --git a/lib/puppet/provider/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_noop.rb deleted file mode 100644 index 0543b3f8..00000000 --- a/lib/puppet/provider/pacemaker_noop.rb +++ /dev/null @@ -1,55 +0,0 @@ -# this is the abstract provider to create "noop" providers for pacemaker types -# if a "noop" provider is used for a resource it will do nothing when applied -# neither the retrieving nor the modification phase -class Puppet::Provider::PacemakerNoop < Puppet::Provider - attr_accessor :property_hash - attr_accessor :resource - - # stub "exists?" method that returns "true" and logs its calls - # @return [true] - def exists? - debug 'Call: exists?' - make_property_methods - out = true - debug "Return: #{out}" - out - end - - # stub "creat" method method cleans the property hash - # should never be actually called because exists? always returns true - def create - debug 'Call: create' - self.property_hash = {} - end - - # stub "destroy" method cleans the property hash - def destroy - debug 'Call: destroy' - self.property_hash = {} - end - - # stub "flush" method does nothing - def flush - debug 'Call: flush' - end - - # this method creates getters and setters for each - # of the resource properties - # works directly with resource parameter values instead of property_hash - def make_property_methods - properties = resource.properties.map(&:name) - properties.each do |property| - next if property == :ensure - self.class.send :define_method, property do - debug "Call: #{property}" - out = resource[property] - debug "Return: #{out.inspect}" - out - end - self.class.send :define_method, "#{property}=" do |value| - debug "Call: #{property}=#{value.inspect}" - resource[property] = value - end - end - end -end diff --git a/lib/puppet/provider/pacemaker_online/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_online/pacemaker_noop.rb deleted file mode 100644 index 8c4287d1..00000000 --- a/lib/puppet/provider/pacemaker_online/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_online).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_online/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_online/pacemaker_xml.rb deleted file mode 100644 index 818b115a..00000000 --- a/lib/puppet/provider/pacemaker_online/pacemaker_xml.rb +++ /dev/null @@ -1,25 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_online).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc 'Use pacemaker library to wait for the cluster to become online before trying to do something with it.' - - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - - # get the cluster status - # @return [Symbol] - def status - if online? 
- :online - else - :offline - end - end - - # wait for the cluster to become online - # is status is set to :online - # @param value [Symbol] - def status=(value) - wait_for_online 'pacemaker_online' if value == :online - end -end diff --git a/lib/puppet/provider/pacemaker_operation_default/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_operation_default/pacemaker_noop.rb deleted file mode 100644 index 024005b4..00000000 --- a/lib/puppet/provider/pacemaker_operation_default/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_operation_default).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_operation_default/pacemaker_pcs.rb b/lib/puppet/provider/pacemaker_operation_default/pacemaker_pcs.rb deleted file mode 100644 index ef634a97..00000000 --- a/lib/puppet/provider/pacemaker_operation_default/pacemaker_pcs.rb +++ /dev/null @@ -1,51 +0,0 @@ -require_relative '../pacemaker_pcs' - -Puppet::Type.type(:pacemaker_operation_default).provide(:pcs, parent: Puppet::Provider::PacemakerPCS) do - desc 'Manages default values for pacemaker operation options via pcs' - - commands pcs: 'pcs' - - # disable this provider - confine(true: false) - - def self.instances - debug 'Call: self.instances' - proxy_instance = new - instances = [] - proxy_instance.pcs_operation_defaults.map do |title, value| - parameters = {} - debug "Prefetch: #{title}" - parameters[:ensure] = :present - parameters[:value] = value - parameters[:name] = title - instance = new(parameters) - instances << instance - end - instances - end - - def create - debug 'Call: create' - self.value = @resource[:value] - end - - def destroy - debug 'Call: destroy' - pcs_operation_default_delete @resource[:name] - end - - def exists? - debug 'Call: exists?' - pcs_operation_default_defined? @resource[:name] - end - - def value - debug 'Call: value' - pcs_operation_default_value @resource[:name] - end - - def value=(value) - debug "Call: value=#{value}" - pcs_operation_default_set @resource[:name], value - end -end diff --git a/lib/puppet/provider/pacemaker_operation_default/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_operation_default/pacemaker_xml.rb deleted file mode 100644 index 8b436407..00000000 --- a/lib/puppet/provider/pacemaker_operation_default/pacemaker_xml.rb +++ /dev/null @@ -1,77 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_operation_default).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc 'Specific operation_default for a rather specific type since I currently have no plan to - abstract corosync/pacemaker vs. keepalived. This op_defaults will check the state - of Corosync cluster configuration properties.' 
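The noop provider shown a little earlier builds its getters and setters at runtime with define_method, reading and writing the resource parameters directly instead of going through a property_hash. A small self-contained sketch of that metaprogramming technique, using a hypothetical NoopSketch class and made-up property names:

class NoopSketch
  attr_reader :resource

  def initialize(resource)
    @resource = resource
  end

  # define a reader and a writer for each named property,
  # both backed directly by the resource hash
  def self.make_property_methods(properties)
    properties.each do |property|
      define_method(property) { resource[property] }
      define_method("#{property}=") { |value| resource[property] = value }
    end
  end

  make_property_methods [:score, :node]
end

sketch = NoopSketch.new({ score: '100', node: 'node-1' })
sketch.score            # => "100"
sketch.node = 'node-2'  # writes straight back to the resource hash
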
- - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - - attr_accessor :property_hash - attr_accessor :resource - - def self.instances - debug 'Call: self.instances' - wait_for_online 'pacemaker_operation_default' - proxy_instance = new - instances = [] - proxy_instance.operation_defaults.map do |title, data| - parameters = {} - debug "Prefetch: #{title}" - parameters[:ensure] = :present - parameters[:value] = data['value'] - parameters[:name] = title - instance = new(parameters) - instance.cib = proxy_instance.cib - instances << instance - end - instances - end - - def self.prefetch(catalog_instances) - debug 'Call: self.prefetch' - return unless pacemaker_options[:prefetch] - discovered_instances = instances - discovered_instances.each do |instance| - next unless catalog_instances.key? instance.name - catalog_instances[instance.name].provider = instance - end - end - - # @return [true,false] - def exists? - debug 'Call: exists?' - wait_for_online 'pacemaker_operation_default' - return property_hash[:ensure] == :present if property_hash[:ensure] - out = operation_default_defined? resource[:name] - debug "Return: #{out}" - out - end - - def create - debug 'Call: create' - self.value = resource[:value] - end - - def destroy - debug 'Call: destroy' - operation_default_delete resource[:name] - end - - def value - debug 'Call: value' - return property_hash[:value] if property_hash[:value] - out = operation_default_value resource[:name] - debug "Return: #{out}" - out - end - - def value=(should) - debug "Call: value=#{should}" - operation_default_set resource[:name], should - end -end diff --git a/lib/puppet/provider/pacemaker_order/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_order/pacemaker_noop.rb deleted file mode 100644 index a3ba6025..00000000 --- a/lib/puppet/provider/pacemaker_order/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_order).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_order/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_order/pacemaker_xml.rb deleted file mode 100644 index 260840f8..00000000 --- a/lib/puppet/provider/pacemaker_order/pacemaker_xml.rb +++ /dev/null @@ -1,229 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_order).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc <<-eof -Specific provider for a rather specific type since I currently have no plan to -abstract corosync/pacemaker vs. keepalived. This provider will check the state -of current primitive start orders on the system; add, delete, or adjust various -aspects. 
-eof - - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - - attr_accessor :property_hash - attr_accessor :resource - - def self.instances - debug 'Call: self.instances' - proxy_instance = new - instances = [] - proxy_instance.constraint_orders.map do |title, data| - parameters = {} - debug "Prefetch constraint_order: #{title}" - proxy_instance.retrieve_data data, parameters - instance = new(parameters) - instance.cib = proxy_instance.cib - instances << instance - end - instances - end - - def self.prefetch(catalog_instances) - debug 'Call: self.prefetch' - return unless pacemaker_options[:prefetch] - discovered_instances = instances - discovered_instances.each do |instance| - next unless catalog_instances.key? instance.name - catalog_instances[instance.name].provider = instance - end - end - - # retrieve data from library to the target_structure - # @param data [Hash] extracted order data - # will extract the current order data unless a value is provided - # @param target_structure [Hash] copy data to this structure - # defaults to the property_hash of this provider - def retrieve_data(data = nil, target_structure = property_hash) - data = constraint_orders.fetch resource[:name], {} unless data - target_structure[:name] = data['id'] if data['id'] - target_structure[:ensure] = :present - target_structure[:first] = data['first'] if data['first'] - target_structure[:second] = data['then'] if data['then'] - target_structure[:first_action] = data['first-action'].downcase if data['first-action'] - target_structure[:second_action] = data['then-action'].downcase if data['then-action'] - target_structure[:score] = data['score'] if data['score'] - target_structure[:kind] = data['kind'].downcase if data['kind'] - target_structure[:symmetrical] = data['symmetrical'].downcase if data['symmetrical'] - target_structure[:require_all] = data['require-all'].downcase if data['require-all'] - end - - def exists? - debug 'Call: exists?' - out = constraint_order_exists? resource[:name] - retrieve_data - debug "Return: #{out}" - out - end - - # check if the order ensure is set to present - # @return [TrueClass,FalseClass] - def present? - property_hash[:ensure] == :present - end - - # Create just adds our resource to the property_hash and flush will take care - # of actually doing the work. - def create - debug 'Call: create' - self.property_hash = { - name: resource[:name], - ensure: :absent, - first: resource[:first], - second: resource[:second], - first_action: resource[:first_action], - second_action: resource[:second_action], - score: resource[:score], - kind: resource[:kind], - symmetrical: resource[:symmetrical], - require_all: resource[:require_all], - } - end - - # Unlike create we actually immediately delete the item. - def destroy - debug 'Call: destroy' - constraint_order_remove resource[:name] - property_hash.clear - cluster_debug_report "#{resource} destroy" - end - - # Getters that obtains the first and second primitives and score in our - # ordering definition that have been populated by prefetch or instances - # (depends on if your using puppet resource or not). - def first - property_hash[:first] - end - - def second - property_hash[:second] - end - - def first_action - if property_hash[:first_action].respond_to? 
:to_sym - property_hash[:first_action].to_sym - else - property_hash[:first_action] - end - end - - def second_action - if property_hash[:second_action].respond_to? :to_sym - property_hash[:second_action].to_sym - else - property_hash[:second_action] - end - end - - def score - property_hash[:score] - end - - def kind - if property_hash[:kind].respond_to? :to_sym - property_hash[:kind].to_sym - else - property_hash[:kind] - end - end - - def symmetrical - property_hash[:symmetrical] - end - - def require_all - property_hash[:require_all] - end - - # Our setters for the first and second primitives and score. Setters are - # used when the resource already exists so we just update the current value - # in the property hash and doing this marks it to be flushed. - def first=(should) - property_hash[:first] = should - end - - def second=(should) - property_hash[:second] = should - end - - def first_action=(should) - property_hash[:first_action] = should - end - - def second_action=(should) - property_hash[:second_action] = should - end - - def score=(should) - property_hash[:score] = should - end - - def kind=(should) - property_hash[:kind] = should - end - - def symmetrical=(should) - property_hash[:symmetrical] = should - end - - def require_all=(should) - property_hash[:require_all] = should - end - - # Flush is triggered on anything that has been detected as being - # modified in the property_hash. It generates a temporary file with - # the updates that need to be made. The temporary file is then used - # as stdin for the crm command. - def flush - debug 'Call: flush' - return unless property_hash && property_hash.any? - - unless primitive_exists? primitive_base_name property_hash[:first] - raise "Primitive '#{property_hash[:first]}' does not exist!" - end - - unless primitive_exists? primitive_base_name property_hash[:second] - raise "Primitive '#{property_hash[:second]}' does not exist!" - end - - unless property_hash[:name] && property_hash[:first] && property_hash[:second] - raise 'Data does not contain all the required fields!' - end - - order_structure = {} - order_structure['id'] = name - order_structure['first'] = first - order_structure['then'] = second - order_structure['first-action'] = first_action.to_s if first_action - order_structure['then-action'] = second_action.to_s if second_action - order_structure['score'] = score.to_s if score - order_structure['kind'] = kind.to_s.capitalize if kind - order_structure['symmetrical'] = symmetrical.to_s unless symmetrical.nil? - order_structure['require-all'] = require_all.to_s unless require_all.nil? - - order_patch = xml_document - order_element = xml_rsc_order order_structure - raise "Could not create XML patch for '#{resource}'" unless order_element - order_patch.add_element order_element - - if present? 
- wait_for_constraint_update xml_pretty_format(order_patch.root), order_structure['id'] - else - wait_for_constraint_create xml_pretty_format(order_patch.root), order_structure['id'] - end - cluster_debug_report "#{resource} flush" - end -end diff --git a/lib/puppet/provider/pacemaker_pcs.rb b/lib/puppet/provider/pacemaker_pcs.rb deleted file mode 100644 index 6e1f1f31..00000000 --- a/lib/puppet/provider/pacemaker_pcs.rb +++ /dev/null @@ -1,27 +0,0 @@ -require 'rexml/document' -require 'rexml/formatters/pretty' -require 'timeout' -require 'yaml' - -require_relative '../../pacemaker/pcs/resource_default' -require_relative '../../pacemaker/pcs/operation_default' -require_relative '../../pacemaker/pcs/cluster_property' -require_relative '../../pacemaker/pcs/pcsd_auth' -require_relative '../../pacemaker/pcs/common' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/wait' - -# the parent provider for all other pcs providers -class Puppet::Provider::PacemakerPCS < Puppet::Provider - # include instance methods from the pcs library files - include Pacemaker::PcsCommon - include Pacemaker::PcsResourceDefault - include Pacemaker::PcsOperationDefault - include Pacemaker::PcsClusterProperty - include Pacemaker::PcsPcsdAuth - include Pacemaker::Wait - include Pacemaker::Options - - # include class methods from the pacemaker options - extend Pacemaker::Options -end diff --git a/lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_noop.rb deleted file mode 100644 index 16fe9c47..00000000 --- a/lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_pcsd_auth).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_pcs.rb b/lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_pcs.rb deleted file mode 100644 index 6a02c989..00000000 --- a/lib/puppet/provider/pacemaker_pcsd_auth/pacemaker_pcs.rb +++ /dev/null @@ -1,120 +0,0 @@ -require_relative '../pacemaker_pcs' -require 'json' - -Puppet::Type.type(:pacemaker_pcsd_auth).provide(:pcs, parent: Puppet::Provider::PacemakerPCS) do - desc 'Authenticate the nodes using the "pcs" command' - - commands pcs: 'pcs' - commands crm_node: 'crm_node' - - attr_reader :resource - - # these statuses are considered to be successful - # @return [Array] - def success_node_statuses - %w(already_authorized ok) - end - - # checks if all nodes in the cluster are already authenticated, - # or only the local one if :whole is not enabled - # @return [true,false] - def success - validate_input - nodes_status = cluster_auth - - success = whole_auth_success? nodes_status - - unless success or resource[:whole] - success = local_auth_success? 
nodes_status - end - - show_cluster_auth_status nodes_status - success - end - - # if the initial check was not successful - # retry the auth command until it succeeds - # or time runs out - # @param value [true,false] - def success=(value) - # if the resource success value is not true don't do anything - return unless value - - if resource[:whole] - debug "Waiting #{max_wait_time} seconds for the whole cluster to authenticate" - else - debug "Waiting #{max_wait_time} seconds for the local node to authenticate" - end - - # auth may succeed if the missing nodes come online - # if password is not correct, wait for someone or something to change the password - retry_block { success } - - if resource[:whole] - debug 'The whole cluster authentication was successful!' - else - debug 'The local node have been successfully authenticated!' - end - end - - # show the debug block with the cluster auth status - # @param nodes_status [Hash] - def show_cluster_auth_status(nodes_status) - message = "\nCluster auth status debug start\n" - nodes_status.each do |node, status| - success = success_node_statuses.include? status - prefix = success ? 'OK ' : 'FAIL' - message += "#{prefix} #{node} (#{status})" - message += ' <- this node' if node_name == node - message += "\n" - end - message += 'Cluster auth status debug end' - debug message - end - - def validate_input - fail 'Both username and password should be provided!' unless resource[:username] and resource[:password] - resource[:nodes] = [resource[:nodes]] if resource[:nodes].is_a? String - fail 'At least one node should be provided!' unless resource[:nodes].is_a? Array and resource[:nodes].any? - end - - def cluster_auth - debug 'Call: cluster_auth' - result = pcs_auth_command( - resource[:nodes], - resource[:username], - resource[:password], - resource[:force], - resource[:local], - ) - nodes_status = pcs_auth_parse(result) - fail "Could not parse the result of the cluster auth command: '#{result}'" unless nodes_status.is_a? Hash and nodes_status.any? - nodes_status - end - - # get the Pacemaker name of the current node - # @return [String] - def node_name - return @node_name if @node_name - @node_name = crm_node('-n').chomp.strip - end - - # check if the local node auth have been successful - # or was already done before - # @param nodes_status [Hash] - # @return [true,false] - def local_auth_success?(nodes_status) - success_node_statuses.include? nodes_status[node_name] - end - - # check if all cluster nodes have been successfully authenticated - # or have already been authenticated before - # @param nodes_status [Hash] - # @return [true,false] - def whole_auth_success?(nodes_status) - resource[:nodes].all? do |node| - success_node_statuses.include? 
nodes_status[node] - end - end - -end diff --git a/lib/puppet/provider/pacemaker_property/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_property/pacemaker_noop.rb deleted file mode 100644 index d9dd944d..00000000 --- a/lib/puppet/provider/pacemaker_property/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_property).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_property/pacemaker_pcs.rb b/lib/puppet/provider/pacemaker_property/pacemaker_pcs.rb deleted file mode 100644 index b1908a60..00000000 --- a/lib/puppet/provider/pacemaker_property/pacemaker_pcs.rb +++ /dev/null @@ -1,51 +0,0 @@ -require_relative '../pacemaker_pcs' - -Puppet::Type.type(:pacemaker_property).provide(:pcs, parent: Puppet::Provider::PacemakerPCS) do - desc 'Manages default values for pacemaker operation options via pcs' - - commands pcs: 'pcs' - - # disable this provider - confine(true: false) - - def self.instances - debug 'Call: self.instances' - proxy_instance = new - instances = [] - proxy_instance.pcs_cluster_properties.map do |title, value| - parameters = {} - debug "Prefetch: #{title}" - parameters[:ensure] = :present - parameters[:value] = value - parameters[:name] = title - instance = new(parameters) - instances << instance - end - instances - end - - def create - debug 'Call: create' - self.value = @resource[:value] - end - - def destroy - debug 'Call: destroy' - pcs_cluster_property_delete @resource[:name] - end - - def exists? - debug 'Call: exists?' - pcs_cluster_property_defined? @resource[:name] - end - - def value - debug 'Call: value' - pcs_cluster_property_value @resource[:name] - end - - def value=(value) - debug "Call: value=#{value}" - pcs_cluster_property_set @resource[:name], value - end -end diff --git a/lib/puppet/provider/pacemaker_property/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_property/pacemaker_xml.rb deleted file mode 100644 index 30f757e3..00000000 --- a/lib/puppet/provider/pacemaker_property/pacemaker_xml.rb +++ /dev/null @@ -1,77 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_property).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc 'Specific provider for a rather specific type since I currently have no plan to - abstract corosync/pacemaker vs. keepalived. This provider will check the state - of Corosync cluster configuration properties.' - - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - - attr_accessor :property_hash - attr_accessor :resource - - def self.instances - debug 'Call: self.instances' - wait_for_online 'pacemaker_property' - proxy_instance = new - instances = [] - proxy_instance.cluster_properties.map do |title, data| - parameters = {} - debug "Prefetch: #{title}" - parameters[:ensure] = :present - parameters[:value] = data['value'] - parameters[:name] = title - instance = new(parameters) - instance.cib = proxy_instance.cib - instances << instance - end - instances - end - - def self.prefetch(catalog_instances) - debug 'Call: self.prefetch' - return unless pacemaker_options[:prefetch] - discovered_instances = instances - discovered_instances.each do |instance| - next unless catalog_instances.key? 
instance.name - catalog_instances[instance.name].provider = instance - end - end - - # @return [true,false] - def exists? - debug 'Call: exists?' - wait_for_online 'pacemaker_property' - return property_hash[:ensure] == :present if property_hash[:ensure] - out = cluster_property_defined? resource[:name] - debug "Return: #{out}" - out - end - - def create - debug 'Call: create' - self.value = resource[:value] - end - - def destroy - debug 'Call: destroy' - cluster_property_delete resource[:name] - end - - def value - debug 'Call: value' - return property_hash[:value] if property_hash[:value] - out = cluster_property_value resource[:name] - debug "Return: #{out}" - out - end - - def value=(should) - debug "Call: value=#{should}" - cluster_property_set resource[:name], should - end -end diff --git a/lib/puppet/provider/pacemaker_resource/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_resource/pacemaker_noop.rb deleted file mode 100644 index 849ab8fe..00000000 --- a/lib/puppet/provider/pacemaker_resource/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_resource).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_resource/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_resource/pacemaker_xml.rb deleted file mode 100644 index ceb555e8..00000000 --- a/lib/puppet/provider/pacemaker_resource/pacemaker_xml.rb +++ /dev/null @@ -1,345 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_resource).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc <<-eof -Specific provider for a rather specific type since I currently have no -plan to abstract corosync/pacemaker vs. keepalived. Primitives in -Corosync are the thing we desire to monitor; websites, ipaddresses, -databases, etc, etc. Here we manage the creation and deletion of -these primitives. We will accept a hash for what Corosync calls -operations and parameters. A hash is used instead of constucting a -better model since these values can be almost anything.' - eof - - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - - attr_accessor :property_hash - attr_accessor :resource - - def self.instances - debug 'Call: self.instances' - proxy_instance = new - instances = [] - proxy_instance.primitives.map do |title, data| - parameters = {} - debug "Prefetch resource: #{title}" - proxy_instance.retrieve_data data, parameters - instance = new(parameters) - instance.cib = proxy_instance.cib - instances << instance - end - instances - end - - def self.prefetch(catalog_instances) - debug 'Call: self.prefetch' - return unless pacemaker_options[:prefetch] - discovered_instances = instances - discovered_instances.each do |instance| - next unless catalog_instances.key? 
instance.name - catalog_instances[instance.name].provider = instance - end - end - - # retrieve data from library to the target_structure - # @param data [Hash] extracted primitive data - # will extract the current primitive data unless a value is provided - # @param target_structure [Hash] copy data to this structure - # defaults to the property_hash of this provider - def retrieve_data(data = nil, target_structure = property_hash) - debug 'Call: retrieve_data' - data = primitives.fetch resource[:name], {} unless data - target_structure[:ensure] = :present - target_structure[:complex_type] = :simple - copy_value data, 'id', target_structure, :name - copy_value data, 'class', target_structure, :primitive_class - copy_value data, 'provider', target_structure, :primitive_provider - copy_value data, 'type', target_structure, :primitive_type - - if data['complex'] - data_complex_type = data['complex']['type'].to_sym - target_structure[:complex_type] = data_complex_type if complex_types.include? data_complex_type - complex_metadata = import_attributes_structure data['complex']['meta_attributes'] - target_structure[:complex_metadata] = complex_metadata if complex_metadata - end - - if data['instance_attributes'] - parameters_data = import_attributes_structure data['instance_attributes'] - if parameters_data && parameters_data.is_a?(Hash) - target_structure[:parameters] = parameters_data if parameters_data - end - end - - if data['meta_attributes'] - metadata_data = import_attributes_structure data['meta_attributes'] - if metadata_data && metadata_data.is_a?(Hash) - target_structure[:metadata] = metadata_data - end - end - - if data['operations'] - operations_data = [] - data['operations'].each do |_id, operation| - operation.delete 'id' - operation = munge_operation(operation) - add_to_operations_array(operations_data, operation) - end - target_structure[:operations] = operations_data - end - end - - def exists? - debug 'Call: exists?' - out = primitive_exists? resource[:name] - retrieve_data - debug "Return: #{out}" - out - end - - # check if the location ensure is set to present - # @return [TrueClass,FalseClass] - def present? - property_hash[:ensure] == :present - end - - # check if the complex type of the resource is changing - # and we have to recreate it - # @return [true,false] - def complex_change? - current_complex_type = primitive_complex_type(name) || :simple - current_complex_type != complex_type - end - - # is this primitive complex? - # @return [true,false] - def is_complex? - complex_types.include? complex_type - end - - # list of the actually supported complex types - # @return [Array] - def complex_types - [:clone, :master] - end - - # Create just adds our resource to the property_hash and flush will take care - # of actually doing the work. - def create - debug 'Call: create' - self.property_hash = { - ensure: :absent, - name: resource[:name], - } - - parameters = [ - :primitive_class, - :primitive_provider, - :primitive_type, - :parameters, - :operations, - :metadata, - :complex_type, - :complex_metadata, - ] - - parameters.each do |parameter| - send "#{parameter}=".to_sym, resource[parameter] - end - end - - # use cibadmin to remove the XML section describing this primitive - def remove_primitive - return unless primitive_exists? resource[:name] - stop_service - primitive_tag = 'primitive' - primitive_tag = primitive_complex_type resource[:name] if primitive_is_complex? 
resource[:name] - wait_for_primitive_remove "<#{primitive_tag} id='#{primitive_full_name resource[:name]}'/>\n", resource[:name] - property_hash[:ensure] = :absent - end - - # stop the primitive before its removal - def stop_service - stop_primitive primitive_full_name resource[:name] - cleanup_primitive primitive_full_name resource[:name] - wait_for_stop resource[:name] - end - - # Unlike create we actually immediately delete the item. Corosync forces us - # to "stop" the primitive before we are able to remove it. - def destroy - debug 'Call: destroy' - remove_primitive - property_hash.clear - cluster_debug_report "#{resource} destroy" - end - - # Getters that obtains the parameters and operations defined in our primitive - # that have been populated by prefetch or instances (depends on if your using - # puppet resource or not). - def parameters - property_hash[:parameters] - end - - def operations - property_hash[:operations] - end - - def metadata - property_hash[:metadata] - end - - def complex_metadata - property_hash[:complex_metadata] - end - - def complex_type - if property_hash[:complex_type].respond_to? :to_sym - property_hash[:complex_type].to_sym - else - property_hash[:complex_type] - end - end - - def primitive_class - property_hash[:primitive_class] - end - - def primitive_provider - property_hash[:primitive_provider] - end - - def primitive_type - property_hash[:primitive_type] - end - - def full_name - if is_complex? - "#{name}-#{complex_type}" - else - name - end - end - - # Our setters for parameters and operations. Setters are used when the - # resource already exists so we just update the current value in the - # property_hash and doing this marks it to be flushed. - def parameters=(should) - property_hash[:parameters] = should - end - - def operations=(should) - property_hash[:operations] = should - end - - def metadata=(should) - property_hash[:metadata] = should - end - - def complex_metadata=(should) - property_hash[:complex_metadata] = should - end - - def complex_type=(should) - property_hash[:complex_type] = should - end - - def primitive_class=(should) - property_hash[:primitive_class] = should - end - - def primitive_provider=(should) - property_hash[:primitive_provider] = should - end - - def primitive_type=(should) - property_hash[:primitive_type] = should - end - - # Flush is triggered on anything that has been detected as being - # modified in the property_hash. It generates a temporary file with - # the updates that need to be made. The temporary file is then used - # as stdin for the crm command. We have to do a bit of munging of our - # operations and parameters hash to eventually flatten them into a string - # that can be used by the crm command. - def flush - debug 'Call: flush' - return unless property_hash && property_hash.any? - - unless primitive_class && primitive_type - raise 'Primitive class and type should be present!' - end - - # if the complex type is changing we have to remove the resource - # and create a new one with the correct complex type - if complex_change? - debug 'Changing the complex type of the primitive. First remove and then create it!' - remove_primitive - end - - # basic primitive structure - primitive_structure = {} - primitive_structure['id'] = name - primitive_structure['name'] = full_name - primitive_structure['class'] = primitive_class - primitive_structure['provider'] = primitive_provider if primitive_provider - primitive_structure['type'] = primitive_type - - # complex structure - if is_complex? 
- complex_structure = {} - complex_structure['type'] = complex_type - complex_structure['id'] = full_name - - # complex meta_attributes structure - if complex_metadata && complex_metadata.any? - meta_attributes_structure = export_attributes_structure complex_metadata, 'meta_attributes' - complex_structure['meta_attributes'] = meta_attributes_structure if meta_attributes_structure - end - primitive_structure['complex'] = complex_structure - end - - # operations structure - if operations && operations.any? - raise "expected operations to be an array" unless operations.is_a? Array - primitive_structure['operations'] = {} - operations.each do |operation| - raise "expected operations members to be hashes" unless operation.is_a? Hash - unless operation['id'] - # there is no id provided, generate it - id_components = [name, operation['name'], operation['interval']] - id_components.reject!(&:nil?) - operation['id'] = id_components.join '-' - end - primitive_structure['operations'].store operation['id'], operation - end - end - - # instance_attributes structure - if parameters && parameters.any? - instance_attributes_structure = export_attributes_structure parameters, 'instance_attributes' - primitive_structure['instance_attributes'] = instance_attributes_structure if instance_attributes_structure - end - - # meta_attributes structure - if metadata && metadata.any? - meta_attributes_structure = export_attributes_structure metadata, 'meta_attributes' - primitive_structure['meta_attributes'] = meta_attributes_structure - end - - # create and apply XML patch - primitive_patch = xml_document - primitive_element = xml_primitive primitive_structure - raise "Could not create XML patch for '#{resource}'" unless primitive_element - primitive_patch.add_element primitive_element - if present? 
- wait_for_primitive_update xml_pretty_format(primitive_patch.root), primitive_structure['id'] - else - wait_for_primitive_create xml_pretty_format(primitive_patch.root), primitive_structure['id'] - end - cluster_debug_report "#{resource} flush" - end -end diff --git a/lib/puppet/provider/pacemaker_resource_default/pacemaker_noop.rb b/lib/puppet/provider/pacemaker_resource_default/pacemaker_noop.rb deleted file mode 100644 index 6465b8ea..00000000 --- a/lib/puppet/provider/pacemaker_resource_default/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:pacemaker_resource_default).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/pacemaker_resource_default/pacemaker_pcs.rb b/lib/puppet/provider/pacemaker_resource_default/pacemaker_pcs.rb deleted file mode 100644 index 8be94f33..00000000 --- a/lib/puppet/provider/pacemaker_resource_default/pacemaker_pcs.rb +++ /dev/null @@ -1,51 +0,0 @@ -require_relative '../pacemaker_pcs' - -Puppet::Type.type(:pacemaker_resource_default).provide(:pcs, parent: Puppet::Provider::PacemakerPCS) do - desc 'Manages default values for pacemaker resource options via pcs' - - commands pcs: 'pcs' - - # disable this provider - confine(true: false) - - def self.instances - debug 'Call: self.instances' - proxy_instance = new - instances = [] - proxy_instance.pcs_resource_defaults.map do |title, value| - parameters = {} - debug "Prefetch: #{title}" - parameters[:ensure] = :present - parameters[:value] = value - parameters[:name] = title - instance = new(parameters) - instances << instance - end - instances - end - - def create - debug 'Call: create' - self.value = @resource[:value] - end - - def destroy - debug 'Call: destroy' - pcs_resource_default_delete @resource[:name] - end - - def exists? - debug 'Call: exists?' - pcs_resource_default_defined? @resource[:name] - end - - def value - debug 'Call: value' - pcs_resource_default_value @resource[:name] - end - - def value=(value) - debug "Call: value=#{value}" - pcs_resource_default_set @resource[:name], value - end -end diff --git a/lib/puppet/provider/pacemaker_resource_default/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_resource_default/pacemaker_xml.rb deleted file mode 100644 index df395b45..00000000 --- a/lib/puppet/provider/pacemaker_resource_default/pacemaker_xml.rb +++ /dev/null @@ -1,78 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:pacemaker_resource_default).provide(:xml, parent: Puppet::Provider::PacemakerXML) do - desc 'Specific resource_default for a rather specific type since I currently have no plan to - abstract corosync/pacemaker vs. keepalived. This rsc_defaults will check the state - of Corosync cluster configuration properties.' 
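When an operation in the primitive structure above carries no id of its own, the removed resource provider derives one by joining the primitive name, the operation name, and the interval, dropping any missing parts. A one-method sketch of that derivation ('p_haproxy' is just an illustrative primitive name):

# mirror the id generation shown above: join the non-nil parts with '-'
def operation_id(primitive_name, operation)
  [primitive_name, operation['name'], operation['interval']].compact.join('-')
end

operation_id('p_haproxy', 'name' => 'monitor', 'interval' => '10s')  # => "p_haproxy-monitor-10s"
operation_id('p_haproxy', 'name' => 'start')                         # => "p_haproxy-start"
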
- - commands cibadmin: 'cibadmin' - commands crm_attribute: 'crm_attribute' - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - - attr_accessor :property_hash - attr_accessor :resource - - def self.instances - debug 'Call: self.instances' - wait_for_online 'pacemaker_resource_default' - proxy_instance = new - instances = [] - proxy_instance.resource_defaults.map do |title, data| - parameters = {} - debug "Prefetch: #{title}" - parameters[:ensure] = :present - parameters[:value] = data['value'] - parameters[:name] = title - instance = new(parameters) - instance.cib = proxy_instance.cib - instances << instance - end - instances - end - - def self.prefetch(catalog_instances) - debug 'Call: self.prefetch' - return unless pacemaker_options[:prefetch] - discovered_instances = instances - discovered_instances.each do |instance| - next unless catalog_instances.key? instance.name - catalog_instances[instance.name].provider = instance - end - end - - # @return [true,false] - def exists? - debug 'Call: exists?' - wait_for_online 'pacemaker_resource_default' - return property_hash[:ensure] == :present if property_hash[:ensure] - out = resource_default_defined? resource[:name] - debug "Return: #{out}" - out - end - - def create - debug 'Call: create' - self.value = resource[:value] - end - - def destroy - debug 'Call: destroy' - resource_default_delete resource[:name] - end - - def value - debug 'Call: value' - return property_hash[:value] if property_hash[:value] - out = resource_default_value resource[:name] - debug "Return: #{out}" - out - end - - def value=(should) - debug "Call: value=#{should}" - raise 'There is no value!' unless should - resource_default_set resource[:name], should - end -end diff --git a/lib/puppet/provider/pacemaker_xml.rb b/lib/puppet/provider/pacemaker_xml.rb deleted file mode 100644 index cf85a915..00000000 --- a/lib/puppet/provider/pacemaker_xml.rb +++ /dev/null @@ -1,48 +0,0 @@ -require 'rexml/document' -require 'rexml/formatters/pretty' -require 'timeout' -require 'yaml' - -require_relative '../../pacemaker/xml/cib' -require_relative '../../pacemaker/xml/constraints' -require_relative '../../pacemaker/xml/constraint_colocations' -require_relative '../../pacemaker/xml/constraint_locations' -require_relative '../../pacemaker/xml/constraint_orders' -require_relative '../../pacemaker/xml/helpers' -require_relative '../../pacemaker/xml/nodes' -require_relative '../../pacemaker/xml/primitives' -require_relative '../../pacemaker/xml/properties' -require_relative '../../pacemaker/xml/resource_default' -require_relative '../../pacemaker/xml/operation_default' -require_relative '../../pacemaker/xml/status' -require_relative '../../pacemaker/xml/debug' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/wait' -require_relative '../../pacemaker/xml/xml' -require_relative '../../pacemaker/type' - -# the parent provider for all other pacemaker providers -# includes all functions from all submodules -class Puppet::Provider::PacemakerXML < Puppet::Provider - # include instance methods from the pacemaker library files - include Pacemaker::Cib - include Pacemaker::Constraints - include Pacemaker::ConstraintOrders - include Pacemaker::ConstraintLocations - include Pacemaker::ConstraintColocations - include Pacemaker::Helpers - include Pacemaker::Nodes - include Pacemaker::Options - include Pacemaker::Primitives - include Pacemaker::Properties - include Pacemaker::Debug - include 
Pacemaker::ResourceDefault - include Pacemaker::OperationDefault - include Pacemaker::Status - include Pacemaker::Wait - include Pacemaker::Xml - include Pacemaker::Type - - # include class methods from the pacemaker options - extend Pacemaker::Options -end diff --git a/lib/puppet/provider/pcmk_bundle/default.rb b/lib/puppet/provider/pcmk_bundle/default.rb deleted file mode 100644 index 186353ec..00000000 --- a/lib/puppet/provider/pcmk_bundle/default.rb +++ /dev/null @@ -1,295 +0,0 @@ -require_relative '../pcmk_common' - -Puppet::Type.type(:pcmk_bundle).provide(:default) do - desc 'A bundle resource definition for pacemaker' - - def _storage_maps_cmd(storage_maps, update=false) - return '' if storage_maps == nil or storage_maps.empty? - cmd = '' - if update - add = ' add ' - else - add = ' ' - end - storage_maps.each do | key, value | - cmd += ' storage-map' + add + 'id=' + key + \ - ' source-dir=' + value['source-dir'] + \ - ' target-dir=' + value['target-dir'] - options = value['options'] - if not_empty_string(options) - cmd += ' options=' + options - end - end - cmd - end - - def build_pcs_bundle_cmd(update=false) - image = @resource[:image] - replicas = @resource[:replicas] - masters = @resource[:masters] - promoted_max = @resource[:promoted_max] - container_options = @resource[:container_options] - options = @resource[:options] - run_command = @resource[:run_command] - storage_maps = @resource[:storage_maps] - network = @resource[:network] - location_rule = @resource[:location_rule] - container_backend = @resource[:container_backend] - - if update - create_cmd = 'update' - docker_cmd = '' - else - create_cmd = 'create' - docker_cmd = container_backend - end - - if @resource[:force] - force_cmd = '--force ' - else - force_cmd = '' - end - # Build the 'pcs resource create' command. Check out the pcs man page :-) - cmd = force_cmd + 'resource bundle ' + create_cmd + ' ' + @resource[:name] + ' container ' + docker_cmd + ' image=' + @resource[:image] - if replicas - cmd += " replicas=#{replicas}" - end - if masters - cmd += " masters=#{masters}" - end - if promoted_max - if update - cmd += " masters=" - end - cmd += " promoted-max=#{promoted_max}" - end - if options - cmd += ' options="' + options + '"' - end - if run_command - cmd += ' run-command="' + run_command + '"' - end - if container_options - cmd += ' ' + container_options - end - - # When we're updating a bundle we first dump the CIB, then - # we remove all the *current* storage-maps for the resource - # and then we add back the storage-maps passed to us - cmd += _storage_maps_cmd(storage_maps, update) - if network - cmd += ' network ' + network - end - cmd - end - - def build_pcs_bundle_pruning - cmd = 'resource bundle update ' + @resource[:name] - # In case of updates due to how pcs manages storage, we need to first remove all - # the *current* existing storage maps and then readd the puppet defined ones - live_storage_maps = pcmk_get_bundle_storage_map(@resource[:name]) - if live_storage_maps and !live_storage_maps.empty? 
- live_storage_maps.each do | key, value | - cmd += ' storage-map remove ' + value['id'] - end - return cmd - end - return '' - end - - ### overloaded methods - def initialize(*args) - super(*args) - Puppet.debug("puppet-pacemaker: initialize()") - # Hash to store the existance state of each resource or location - @resources_state = {} - @locations_state = {} - end - - def create_bundle_and_location(location_rule, needs_update=false) - if needs_update then - cmd = build_pcs_bundle_cmd(update=true) - pcmk_update_resource(@resource, cmd, build_pcs_bundle_pruning(), @resource[:update_settle_secs]) - else - cmd = build_pcs_bundle_cmd() - if location_rule then - pcs('create', @resource[:name], "#{cmd} --disabled", @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - location_rule_create() - pcs('create', @resource[:name], "resource enable #{@resource[:name]}", @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - else - pcs('create', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - end - end - - def create - # We need to probe the existance of both location and resource - # because we do not know why we're being created (if for both or - # only for one) - did_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - did_location_exist = @locations_state[@resource[:name]] == PCMK_NOCHANGENEEDED - Puppet.debug("Create: resource exists #{@resources_state[@resource[:name]]} location exists #{@locations_state[@resource[:name]]}") - needs_update = @resources_state[@resource[:name]] == PCMK_CHANGENEEDED - - cmd = build_pcs_bundle_cmd() - - # If both the resource and the location do not exist, we create them both - # if a location_rule is specified otherwise only the resource - if not did_location_exist and not did_resource_exist - create_bundle_and_location(location_rule, needs_update) - # If the location_rule already existed, we only create the resource - elsif did_location_exist and not did_resource_exist - create_bundle_and_location(false, needs_update) - # The location_rule does not exist and the resource does exist - elsif not did_location_exist and did_resource_exist - if location_rule - location_rule_create() - end - else - raise Puppet::Error, "Invalid create: #{@resource[:name]} resource exists #{did_resource_exist} " - "location exists #{did_location_exist} - location_rule #{location_rule}" - end - end - - def destroy - # Any corresponding location rules will be deleted by - # pcs automatically, if present - cmd = 'resource delete ' + @resource[:name] - pcs('delete', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def exists? - @locations_state[@resource[:name]] = location_exists? - @resources_state[@resource[:name]] = resource_exists? - did_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - did_location_exist = @locations_state[@resource[:name]] == PCMK_NOCHANGENEEDED - Puppet.debug("Exists: bundle #{@resource[:name]} exists "\ - "#{@resources_state[@resource[:name]]} "\ - "location exists #{@locations_state[@resource[:name]]} "\ - "deep_compare: #{@resource[:deep_compare]}") - if did_resource_exist and did_location_exist - return true - end - return false - end - - def resource_exists? 
- cmd = 'resource ' + pcs_config_or_show() + ' ' + @resource[:name] + ' > /dev/null 2>&1' - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - if ret == false then - return PCMK_NOTEXISTS - end - if @resource[:deep_compare] and pcmk_resource_has_changed?(@resource, build_pcs_bundle_cmd(update=true), build_pcs_bundle_pruning(), true) then - return PCMK_CHANGENEEDED - end - return PCMK_NOCHANGENEEDED - end - - def location_exists? - # If no location_rule is specified then we treat it as if it - # always exists - if not @resource[:location_rule] - return PCMK_NOCHANGENEEDED - end - constraint_name = 'location-' + @resource[:name] - cmd = "constraint list | grep #{constraint_name} > /dev/null 2>&1" - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - return ret == false ? PCMK_NOTEXISTS : PCMK_NOCHANGENEEDED - end - - def location_rule_create() - location_cmd = build_pcs_location_rule_cmd(@resource) - Puppet.debug("location_rule_create: #{location_cmd}") - pcs('create', @resource[:name], location_cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - - ### property methods - - # It isn't an easy road if you want to make these true - # puppet-like resource properties. Here is a start if you are feeling brave: - # https://github.com/cwolferh/puppet-pacemaker/blob/pcmk_resource_improvements_try0/lib/puppet/provider/pcmk_resource/default.rb#L64 - def image - @resource[:image] - end - - def image=(value) - end - - def replicas - @resource[:replicas] - end - - def replicas=(value) - end - - def masters - @resource[:masters] - end - - def masters=(value) - end - - def promoted_max - @resource[:promoted_max] - end - - def promoted_max=(value) - end - - def options - @resource[:options] - end - - def options=(value) - end - - def container_options - @resource[:container_options] - end - - def container_options=(value) - end - - def run_command - @resource[:run_command] - end - - def run_command=(value) - end - - def storage_maps - @resource[:storage_maps] - end - - def storage_maps=(value) - end - - def network - @resource[:network] - end - - def network=(value) - end - - def location_rule - @resource[:location_rule] - end - - def location_rule=(value) - end - - def container_backend - @resource[:container_backend] - end - - def container_backend=(value) - end -end diff --git a/lib/puppet/provider/pcmk_common.rb b/lib/puppet/provider/pcmk_common.rb deleted file mode 100644 index f8a8e5f6..00000000 --- a/lib/puppet/provider/pcmk_common.rb +++ /dev/null @@ -1,432 +0,0 @@ -require 'digest' -require 'rexml/document' - -# Constants that represent the state of a resource/constraint -PCMK_NOCHANGENEEDED = 0 unless defined? PCMK_NOCHANGENEEDED -PCMK_NOTEXISTS = 1 unless defined? PCMK_NOTEXISTS -PCMK_CHANGENEEDED = 2 unless defined? PCMK_CHANGENEEDED - -# Base temporary CIB backup folder -PCMK_TMP_BASE = "/var/lib/pacemaker/cib" unless defined? PCMK_TMP_BASE - -# Let's use pcs from PATH when it is set: -# Useful to run pcs from a different path when using -# containers -if ENV.has_key?('PATH') - prefix_path = '' -else - prefix_path = '/usr/sbin/' -end - -PCS_BIN = "#{prefix_path}pcs" unless defined? PCS_BIN -CRMDIFF_BIN = "#{prefix_path}crm_diff" unless defined? CRMDIFF_BIN -CRMNODE_BIN = "#{prefix_path}crm_node" unless defined? 
CRMNODE_BIN -CRMSIMULATE_BIN = "#{prefix_path}crm_simulate" unless defined? CRMSIMULATE_BIN -CRMRESOURCE_BIN = "#{prefix_path}crm_resource" unless defined? CRMRESOURCE_BIN -TIMEOUT_BIN = "#{prefix_path}timeout" unless defined? TIMEOUT_BIN - -# Use pcs_cli_version() as opposed to a facter so that if the pcs -# package gets installed during the puppet run everything still works -# as expected. Returns empty string if pcs command does not exist -def pcs_cli_version() - begin - pcs_cli_version = `#{PCS_BIN} --version` - rescue Errno::ENOENT - pcs_cli_version = '' - end - return pcs_cli_version -end - -# returns 'show' or 'config' depending on the pcs version -# In case pcs returns '' we choose the more recent 'config' default -# (Although in that case it is likely it will fail differently anyways) -def pcs_config_or_show() - if Puppet::Util::Package.versioncmp(pcs_cli_version(), '0.10.0') < 0 - return 'show' - else - return 'config' - end -end - -def crm_node_l() - begin - nodes = `#{CRMNODE_BIN} -l` - rescue - nodes = '' - end - return nodes -end - - -# Ruby 2.5 has dropped Dir::Tmpname.make_tmpname -# https://github.com/ruby/ruby/commit/25d56ea7b7b52dc81af30c92a9a0e2d2dab6ff27 -def pcmk_tmpname((prefix, suffix), n) - #Dir::Tmpname.make_tmpname (prefix, suffix), n - prefix = (String.try_convert(prefix) or - raise ArgumentError, "unexpected prefix: #{prefix.inspect}") - suffix &&= (String.try_convert(suffix) or - raise ArgumentError, "unexpected suffix: #{suffix.inspect}") - t = Time.now.strftime("%Y%m%d") - path = "#{prefix}#{t}-#{$$}-#{rand(0x100000000).to_s(36)}".dup - path << "-#{n}" if n - path << suffix if suffix - path -end - -def delete_cib(cib) - FileUtils.rm(cib, :force => true) - FileUtils.rm("#{cib}.orig", :force => true) -end - -# backs up the current cib and returns the temporary file name where it -# was stored. Besides the temporary file it also makes an identical copy -# called temporary file + ".orig" -def backup_cib() - # We use the pacemaker CIB folder because of its restricted access permissions - cib = pcmk_tmpname("#{PCMK_TMP_BASE}/puppet-cib-backup", nil) - cmd = "#{PCS_BIN} cluster cib #{cib}" - output = `#{cmd} 2>&1` - ret = $? - if not ret.success? - msg = "backup_cib: Running: #{cmd} failed with code: #{ret.exitstatus} -> #{output}." \ - " Either the cluster was not running or the versions of pcmk/pcs between host and container are not matching" - FileUtils.rm(cib, :force => true) - raise Puppet::Error, msg - end - Puppet.debug("backup_cib: #{cmd} returned #{output}") - FileUtils.cp cib, "#{cib}.orig" - return cib -end - -# Pushes the cib file back to the cluster and removes the cib files -# returns the pcs cluster cib-push return code. If the cib file and its -# original counterpart are the exact same push_cib() is a no-op. -# The pcs cluster-cib syntax with "diff-against" is used only if pcs supports -# it (it helps to minimize the chances that a cib-push might fail due -# to us trying to push a too old CIB) -def push_cib(cib) - cib_digest = Digest::SHA2.file(cib) - cib_orig_digest = Digest::SHA2.file("#{cib}.orig") - if cib_digest == cib_orig_digest - Puppet.debug("push_cib: #{cib} and #{cib}.orig were identical, skipping") - delete_cib(cib) - return 0 - end - has_diffagainst = `#{PCS_BIN} cluster cib-push --help`.include? 'diff-against' - cmd = "#{PCS_BIN} cluster cib-push #{cib}" - if has_diffagainst - cmd += " diff-against=#{cib}.orig" - end - output = `#{cmd} 2>&1` - ret = $? - delete_cib(cib) - if not ret.success? 
- msg = "push_cib: Running: #{cmd} failed with code: #{ret.exitstatus} -> #{output}" - Puppet.debug("push_cib failed: #{msg}") - end - - Puppet.debug("push_cib: #{cmd} returned #{ret.exitstatus} -> #{output}") - return ret.exitstatus -end - -def pcs(name, resource_name, cmd, tries=1, try_sleep=0, - verify_on_create=false, post_success_sleep=0) - if name.start_with?("create") && verify_on_create - return pcs_create_with_verify(name, resource_name, cmd, tries, try_sleep) - end - max_tries = tries - max_tries.times do |try| - begin - try_text = max_tries > 1 ? "try #{try+1}/#{max_tries}: " : '' - cib = backup_cib() - Puppet.debug("#{try_text}#{PCS_BIN} -f #{cib} #{cmd}") - pcs_out = `#{PCS_BIN} -f #{cib} #{cmd} 2>&1` - if name.include?('show') - delete_cib(cib) - # return output for good exit or false for failure. - return $?.exitstatus == 0 ? pcs_out : false - end - if $?.exitstatus == 0 - # If push_cib failed, we stay in the loop and keep trying - if push_cib(cib) == 0 - sleep post_success_sleep - return pcs_out - end - end - Puppet.debug("Error: #{pcs_out}") - rescue Puppet::Error - Puppet.debug("cib_backup failed. Retrying #{try_text}") - end - if try == max_tries-1 - # need to consider the case that pcs_out was always nil due to cib_backup() always failing - delete_cib(cib) if cib - if pcs_out == nil - pcs_out_line = '' - else - pcs_out_line = pcs_out.lines.first ? pcs_out.lines.first.chomp! : '' - end - raise Puppet::Error, "pcs -f #{cib} #{cmd} failed: #{pcs_out_line}. Too many tries" - end - if try_sleep > 0 - Puppet.debug("Sleeping for #{try_sleep} seconds between tries") - sleep try_sleep - end - end -end - -def pcs_without_push(name, resource_name, cmd, tries=1, try_sleep=0, post_success_sleep=0) - max_tries = tries - max_tries.times do |try| - try_text = max_tries > 1 ? "try #{try+1}/#{max_tries}: " : '' - Puppet.debug("#{try_text}#{PCS_BIN} #{cmd}") - pcs_out = `#{PCS_BIN} #{cmd} 2>&1` - if $?.exitstatus == 0 - sleep post_success_sleep - return pcs_out - else - Puppet.debug("Error: #{pcs_out}") - sleep try_sleep - end - if try == max_tries-1 - pcs_out_line = pcs_out.lines.first ? pcs_out.lines.first.chomp! : '' - raise Puppet::Error, "pcs #{name} failed: #{pcs_out_line}" - end - end -end - -def pcs_create_with_verify(name, resource_name, cmd, tries=1, try_sleep=0) - max_tries = tries - max_tries.times do |try| - try_text = max_tries > 1 ? "try #{try+1}/#{max_tries}: " : '' - Puppet.debug("#{try_text}#{PCS_BIN} #{cmd}") - pcs_out = `#{PCS_BIN} #{cmd} 2>&1` - if $?.exitstatus == 0 - sleep try_sleep - cmd_show = "#{PCS_BIN} resource " + pcs_config_or_show() + " " + resource_name - Puppet.debug("Verifying with: "+cmd_show) - `#{cmd_show}` - if $?.exitstatus == 0 - return pcs_out - else - Puppet.debug("Warning: verification of pcs resource creation failed") - end - else - Puppet.debug("Error: #{pcs_out}") - sleep try_sleep - end - if try == max_tries-1 - pcs_out_line = pcs_out.lines.first ? pcs_out.lines.first.chomp! : '' - raise Puppet::Error, "pcs #{name} failed: #{pcs_out_line}" - end - end -end - -def not_empty_string(p) - p && p.kind_of?(String) && ! p.empty? 
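# NOTE (editorial sketch, not part of the deleted file): this small predicate is
# what gates every optional command fragment assembled by the providers (meta/op/
# clone parameters, location rules and so on); nil, non-string values and empty
# strings are all treated as "not set". For example:
#   not_empty_string(nil)          # => nil   (falsey)
#   not_empty_string('')           # => false
#   not_empty_string(:meta)        # => false (not a String)
#   not_empty_string('meta a=b')   # => true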
-end - -# Returns the pcs command to create the location rule -def build_pcs_location_rule_cmd(resource, force=false) - # The name that pcs will create is location-[-{clone,master}] - location_rule = resource[:location_rule] - location_cmd = 'constraint location ' - if resource.propertydefined?(:bundle) - location_cmd += resource[:bundle] - else - location_cmd += resource[:name] - if resource.propertydefined?(:clone_params) - location_cmd += '-clone' - elsif resource.propertydefined?(:master_params) - location_cmd += '-master' - end - end - location_cmd += ' rule' - if location_rule['resource_discovery'] - location_cmd += " resource-discovery=#{location_rule['resource_discovery']}" - end - if location_rule['score'] - location_cmd += " score=#{location_rule['score']}" - end - if location_rule['score_attribute'] - location_cmd += " score-attribure=#{location_rule['score_attribute']}" - end - if location_rule['expression'] - location_cmd += " " + location_rule['expression'].join(' ') - end - if force - location_cmd += ' --force' - end - Puppet.debug("build_pcs_location_rule_cmd: #{location_cmd}") - location_cmd -end - -# This method runs a pcs command on an offline cib -# Much simpler logic compared to pcs() -# return output for good exit or false for failure. -def pcs_offline(cmd, cib) - pcs_out = `#{PCS_BIN} -f #{cib} #{cmd}` - Puppet.debug("pcs_offline: #{PCS_BIN} -f #{cib} #{cmd}. Output: #{pcs_out}") - return $?.exitstatus == 0 ? pcs_out : false -end - -# This is a loop that simply tries to push a CIB a number of time -# on to the live cluster. It does not remove the CIB except in the Error -# case. Returns nothing in case of success and errors out in case of errors -def push_cib_offline(cib, tries=1, try_sleep=0, post_success_sleep=0) - tries.times do |try| - try_text = tries > 1 ? "try #{try+1}/#{tries}: " : '' - Puppet.debug("pcs_cib_offline push #{try_text}") - if push_cib(cib) == 0 - sleep post_success_sleep - return - end - Puppet.debug("Error: #{pcs_out}") - if try == tries-1 - delete_cib(cib) - raise Puppet::Error, "push_cib_offline for #{cib} failed" - end - if try_sleep > 0 - Puppet.debug("Sleeping for #{try_sleep} seconds between tries") - sleep try_sleep - end - end -end - -# returns the storage map for the resource as a dictionary -def pcmk_get_bundle_storage_map(resource) - storage_xpath = "/cib/configuration/resources/bundle[@id='#{resource}']/storage/storage-mapping" - cib = backup_cib() - cibxml = File.read(cib) - storage_doc = REXML::Document.new cibxml - ret = {} - REXML::XPath.each(storage_doc, storage_xpath) do |element| - attrs = {} - element.attributes.each do |key, value| - attrs[key] = value - end - ret[attrs['id']] = attrs - end - delete_cib(cib) - Puppet.debug("pcmk_get_bundle_storage_map #{resource} returned #{ret}") - ret -end - -# This function will return true when a CIB diff xml has an empty meta_attribute change (either -# addition or removal). It does so by veryfiying that the diff has an empty meta_attribute node -# and when that is the case it verifies that the corresponding meta_attributes -# for the resource in the CIB is indeed either non-existing or has no children -def has_empty_meta_attributes?(cibfile, element) - # First we verify that the cib diff does contain an empty meta_attributes node, like this: - # - # - # - if element.attributes.has_key?('operation') and \ - ['delete', 'create'].include? 
element.attributes['operation'] and \ - element.attributes.has_key?('path') - path = element.attributes['path'] - element.each_element('//meta_attributes') do |meta| - # If the meta_attributes was an empty set we verify that it is so in the CIB as well - # and if that is the case we return true - if not meta.has_elements? - begin - meta_id = meta.attributes['id'] - orig_cib = File.read(cibfile) - meta_doc = REXML::Document.new orig_cib - meta_xpath = "//meta_attributes[@id='#{meta_id}']" - meta_search = meta_doc.get_elements(meta_xpath) - # If there are not meta_attributes at all or if we have a tag but it has no elements - # then we return true - if meta_search.empty? or (meta_search.length() > 0 and not meta_search[0].has_elements?) - Puppet.debug("has_empty_meta_attributes? detected and empty meta_attribute change and empty meta_attribute in the CIB, skipping: #{meta_id}") - return true - end - rescue - # Should there be any kind of exception in the code above we take - # the slightly safer path and we simply return false which implies - # updating the CIB and pushing it to the live cluster - return false - end - end - end - end - return false -end - -# This given a cib (and it's .orig copy) and a resource name, this method returns true if pacemaker -# will restart the resource false if no action will be taken by pacemaker -def pcmk_restart_resource_ng?(resource_name, cib) - cmd = "#{CRMDIFF_BIN} --cib -o #{cib}.orig -n #{cib}" - cmd_out = `#{cmd}` - ret = $?.exitstatus - # crm_diff returns 0 for no differences, 1 for differences, other return codes - # for errors - if not [0, 1].include? ret - delete_cib(cib) - raise Puppet::Error, "#{cmd} failed with (#{ret}): #{cmd_out}" - end - # If crm_diff says there are no differences (ret code 0), we can just - # exit and state that nothing needs restarting - return false if ret == 0 - # In case the return code is 1 we will need to make sure that the resource - # we were passed is indeed involved in the change detected by crm_diff - graph_doc = REXML::Document.new cmd_out - # crm_diff --cib -o cib-orig.xml -n cib-vip-update.xml | \ - # xmllint --xpath '/diff/change[@operation and contains(@path, "ip-192.168.24.6")]/change-result' - - xpath_query = "/diff/change[@operation and @operation != 'move' and contains(@path, \"@id='#{resource_name}'\")]" - REXML::XPath.each(graph_doc, xpath_query) do |element| - # We need to check for removals of empty meta_attribute tags and ignore those - # See https://bugzilla.redhat.com/show_bug.cgi?id=1568353 for pcs creating those spurious empty tags - next if has_empty_meta_attributes?(cib, element) - return true - end - return false -end - -# This method takes a resource and a creation command and does the following -# 1. Deletes the resource from the offline CIB -# 2. Recreates the resource on the offline CIB -# 3. Verifies if the pacemaker will restart the resource and returns true if the answer is a yes -def pcmk_resource_has_changed?(resource, cmd_update, cmd_pruning='', is_bundle=false) - cib = backup_cib() - if not_empty_string(cmd_pruning) - ret = pcs_offline(cmd_pruning, cib) - if ret == false - delete_cib(cib) - raise Puppet::Error, "pcmk_update_resource #{cmd_pruning} returned error on #{resource[:name]}. This should never happen." - end - end - ret = pcs_offline(cmd_update, cib) - if ret == false - delete_cib(cib) - raise Puppet::Error, "pcmk_resource_has_changed? #{cmd_update} returned error #{resource[:name]}. This should never happen." 
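# NOTE (editorial sketch, not part of the deleted file): the call that follows
# hands the rebuilt offline CIB to pcmk_restart_resource_ng?, which leans on
# crm_diff's exit-code convention (0 = no differences, 1 = differences, anything
# else = error). Condensed, and ignoring the empty meta_attributes filtering, the
# decision it makes is roughly:
#   out = `#{CRMDIFF_BIN} --cib -o #{cib}.orig -n #{cib}`
#   case $?.exitstatus
#   when 0 then false                                     # identical, no restart
#   when 1 then out.include?("@id='#{resource[:name]}'")  # change touches us?
#   else raise Puppet::Error, "crm_diff failed"
#   end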
- end - ret = pcmk_restart_resource_ng?(resource[:name], cib) - Puppet.debug("pcmk_resource_has_changed (ng version) returned #{ret} for resource #{resource[:name]}") - delete_cib(cib) - return ret -end - -# This function will update a resource by making a cib backup, -# running a pruning command first and then running the update command. -# Finally it pushes the CIB back to the cluster. -def pcmk_update_resource(resource, cmd_update, cmd_pruning='', settle_timeout_secs=600) - cib = backup_cib() - if not_empty_string(cmd_pruning) - ret = pcs_offline(cmd_pruning, cib) - if ret == false - delete_cib(cib) - raise Puppet::Error, "pcmk_update_resource #{cmd_pruning} returned error on #{resource[:name]}. This should never happen." - end - end - ret = pcs_offline(cmd_update, cib) - if ret == false - delete_cib(cib) - raise Puppet::Error, "pcmk_update_resource #{cmd_update} returned error on #{resource[:name]}. This should never happen." - end - push_cib_offline(cib, resource[:tries], resource[:try_sleep], resource[:post_success_sleep]) - cmd = "#{TIMEOUT_BIN} #{settle_timeout_secs} #{CRMRESOURCE_BIN} --wait" - cmd_out = `#{cmd}` - ret = $?.exitstatus - Puppet.debug("pcmk_update_resource: #{cmd} returned (#{ret}): #{cmd_out}") - delete_cib(cib) -end diff --git a/lib/puppet/provider/pcmk_constraint/default.rb b/lib/puppet/provider/pcmk_constraint/default.rb deleted file mode 100644 index b3ce8f95..00000000 --- a/lib/puppet/provider/pcmk_constraint/default.rb +++ /dev/null @@ -1,82 +0,0 @@ -require_relative '../pcmk_common' - -Puppet::Type.type(:pcmk_constraint).provide(:default) do - desc 'A base constraint definition for a pacemaker constraint' - - ### overloaded methods - def create - resource_name = @resource[:name].gsub(':', '.') - case @resource[:constraint_type] - when :location - resource_resource = @resource[:resource].gsub(':', '.') - resource_location = @resource[:location].gsub(':', '.') - cmd = 'constraint location add ' + resource_name + ' ' + resource_resource + ' ' + @resource[:location] + ' ' + @resource[:score] - when :colocation - resource_resource = @resource[:resource].gsub(':', '.') - resource_location = @resource[:location].gsub(':', '.') - if @resource[:master_slave] - cmd = 'constraint colocation add ' + resource_resource + ' with master ' + resource_location + ' ' + @resource[:score] - else - cmd = 'constraint colocation add ' + resource_resource + ' with ' + resource_location + ' ' + @resource[:score] - end - when :order - first_resource = @resource[:first_resource].gsub(':', '.') - second_resource = @resource[:second_resource].gsub(':', '.') - constraint_params = @resource[:constraint_params] - cmd = 'constraint order ' + @resource[:first_action] + ' ' + first_resource + ' then ' + @resource[:second_action] + ' ' + second_resource - if not_empty_string(constraint_params) - cmd += ' ' + constraint_params - end - else - fail(String(@resource[:constraint_type]) + ' is an invalid location type') - end - - # do pcs create - pcs('create constraint', resource_name, cmd, @resource[:tries], @resource[:try_sleep]) - end - - def destroy - resource_name = @resource[:name].gsub(':', '.') - case @resource[:constraint_type] - when :location - cmd = 'constraint location remove ' + resource_name - when :colocation - resource_resource = @resource[:resource].gsub(':', '.') - resource_location = @resource[:location].gsub(':', '.') - cmd = 'constraint colocation remove ' + resource_resource + ' ' + resource_location - when :order - first_resource = @resource[:first_resource].gsub(':', '.') - 
second_resource = @resource[:second_resource].gsub(':', '.') - cmd = 'constraint order remove ' + first_resource + ' ' + second_resource - end - - pcs('constraint delete', resource_name, cmd, @resource[:tries], @resource[:try_sleep]) - end - - def exists? - resource_name = @resource[:name].gsub(':', '.') - cmd = 'constraint ' + String(@resource[:constraint_type]) + ' show --full' - pcs_out = pcs('show', resource_name, cmd) - # find the constraint - for line in pcs_out.lines.each do - case @resource[:constraint_type] - when :location - return true if line.include? resource_name - when :colocation - resource_location = @resource[:location].gsub(':', '.') - resource_resource = @resource[:resource].gsub(':', '.') - if @resource[:master_slave] - # pacemaker 2.1 started returning Promoted instead of Master - # so we need to cater to both - return true if line.include? resource_resource + ' with ' + resource_location and (line.include? "with-rsc-role:Master" or line.include? "with-rsc-role:Promoted") - else - return true if line.include? resource_resource + ' with ' + resource_location - end - when :order - return true if line.include? resource_name - end - end - # return false if constraint not found - false - end -end diff --git a/lib/puppet/provider/pcmk_property/default.rb b/lib/puppet/provider/pcmk_property/default.rb deleted file mode 100644 index b8122455..00000000 --- a/lib/puppet/provider/pcmk_property/default.rb +++ /dev/null @@ -1,86 +0,0 @@ -require_relative '../pcmk_common' - -Puppet::Type.type(:pcmk_property).provide(:default) do - desc 'A base resource definition for a pacemaker property' - - ### overloaded methods - def create - property = @resource[:property] - node = @resource[:node] - value = @resource[:value] - if not_empty_string(node) - if Puppet::Util::Package.versioncmp(pcs_cli_version(), '0.10.0') >= 0 - cmd = "node attribute #{node}" - else - cmd = "property set --node #{node}" - end - else - cmd = "property set" - end - if not_empty_string(@resource[:force]) - cmd += " --force" - end - cmd += " #{property}=#{value}" - ret = pcs('create', @resource[:property], cmd, @resource[:tries], @resource[:try_sleep]) - Puppet.debug("property create: #{cmd} -> #{ret}") - return ret - end - - def destroy - property = @resource[:property] - node = @resource[:node] - if not_empty_string(node) - if Puppet::Util::Package.versioncmp(pcs_cli_version(), '0.10.0') >= 0 - cmd = "node attribute #{node} #{property}=" - else - cmd = "property unset --node #{node} #{property}" - end - else - cmd = "property unset #{property}" - end - ret = pcs('delete', @resource[:property], cmd, @resource[:tries], @resource[:try_sleep]) - Puppet.debug("property destroy: #{cmd} -> #{ret}") - return ret - end - - def exists? - property = @resource[:property] - node = @resource[:node] - # If the goal is to have the property present, we need to make sure - # exists? 
returns false in case the property exists but has a different value - if @resource[:ensure] == :present - # This forces the value to be a string (might be a bool) - value = "#{@resource[:value]}" - else - value = '' - end - if Puppet::Util::Package.versioncmp(pcs_cli_version(), '0.10.0') >= 0 and not_empty_string(node) - cmd = "node attribute #{node}" - else - cmd = "property show" - end - # We need to distinguish between per node properties and global ones as the output is - # different: - # Cluster Properties: - # cluster-infrastructure: corosync - # cluster-name: tripleo_cluster - # dc-version: 1.1.19-8.el7-c3c624ea3d - # have-watchdog: false - # maintenance-mode: false - # redis_REPL_INFO: controller-0 - # stonith-enabled: false - # Node Attributes: - # controller-0: cinder-volume-role=true galera-role=true haproxy-role=true rabbitmq-role=true redis-role=true rmq-node-attr-last-known-rabbitmq=rabbit@controller-0 - # controller-1: cinder-volume-role=true galera-role=true haproxy-role=true rabbitmq-role=true redis-role=true rmq-node-attr-last-known-rabbitmq=rabbit@controller-1 - # controller-2: cinder-volume-role=true galera-role=true haproxy-role=true rabbitmq-role=true redis-role=true rmq-node-attr-last-known-rabbitmq=rabbit@controller-2 - if not_empty_string(node) - cmd += " | grep -e '#{node}:.*#{property}=#{value}'" - else - cmd += " | grep -e '#{property}:.*#{value}'" - end - cmd += " > /dev/null 2>&1" - ret = pcs('show', @resource[:property], cmd, @resource[:tries], @resource[:try_sleep]) - Puppet.debug("property exists: #{cmd} -> #{ret}") - return ret == false ? false : true - end -end diff --git a/lib/puppet/provider/pcmk_remote/default.rb b/lib/puppet/provider/pcmk_remote/default.rb deleted file mode 100644 index 9aa4e111..00000000 --- a/lib/puppet/provider/pcmk_remote/default.rb +++ /dev/null @@ -1,132 +0,0 @@ -require_relative '../pcmk_common' - -Puppet::Type.type(:pcmk_remote).provide(:default) do - desc 'A remote resource definition for pacemaker resource' - - def build_pcs_auth_cmd() - if @resource[:pcs_user] == '' or @resource[:pcs_password] == '' - raise(Puppet::Error, "When using the new pcs cluster node backend for remotes " + - "'pcs_user' and 'pcs_password must be both defined (#{@resource[:pcs_user]} " + - "#{@resource[:pcs_password]})") - end - # Build the 'pcs resource create' command. Check out the pcs man page :-) - cmd = 'host auth ' + @resource[:name] - if not_empty_string(@resource[:remote_address]) - cmd += ' addr=' + @resource[:remote_address] - end - cmd += ' -u ' + @resource[:pcs_user] + ' -p "' + @resource[:pcs_password] + '"' - cmd - end - - def build_pcs_remote_cmd() - resource_params = @resource[:resource_params] - meta_params = @resource[:meta_params] - op_params = @resource[:op_params] - - # Build the 'pcs resource create' command. 
Check out the pcs man page :-) - cmd = 'cluster node add-remote ' + @resource[:name] - if not_empty_string(@resource[:remote_address]) - cmd += ' ' + @resource[:remote_address] - end - # reconnect_interval always has a default - cmd += " reconnect_interval=#{@resource[:reconnect_interval]}" - if not_empty_string(resource_params) - cmd += ' ' + resource_params - end - if not_empty_string(meta_params) - cmd += ' meta ' + meta_params - end - if not_empty_string(op_params) - cmd += ' op ' + op_params - end - cmd - end - - ### overloaded methods - def initialize(*args) - super(*args) - Puppet.debug("puppet-pacemaker: initialize()") - # Hash to store the existance state of each resource - @resources_state = {} - end - - def create - did_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - - cmd_auth = build_pcs_auth_cmd() - cmd_remote = build_pcs_remote_cmd() - - pcs_without_push('create', @resource[:name], cmd_auth, @resource[:tries], - @resource[:try_sleep], @resource[:post_success_sleep]) - pcs_without_push('create', @resource[:name], cmd_remote, @resource[:tries], - @resource[:try_sleep], @resource[:post_success_sleep]) - end - - def destroy - cmd = 'cluster node delete-remote ' + @resource[:name] - pcs_without_push('delete', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:post_success_sleep]) - end - - def exists? - @resources_state[@resource[:name]] = resource_exists? - did_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - Puppet.debug("Exists: resource #{@resource[:name]} exists "\ - "#{@resources_state[@resource[:name]]} "\ - "resource deep_compare: #{@resource[:deep_compare]}") - if did_resource_exist - return true - end - return false - end - - def resource_exists? - cmd = 'resource ' + pcs_config_or_show() + ' ' + @resource[:name] + ' > /dev/null 2>&1' - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], false, @resource[:post_success_sleep]) - if ret == false then - return PCMK_NOTEXISTS - end - return PCMK_NOCHANGENEEDED - end - - ### property methods - - # It isn't an easy road if you want to make these true - # puppet-like resource properties. 
Here is a start if you are feeling brave: - # https://github.com/cwolferh/puppet-pacemaker/blob/pcmk_resource_improvements_try0/lib/puppet/provider/pcmk_resource/default.rb#L64 - def resource_params - @resource[:resource_params] - end - - def resource_params=(value) - end - - def op_params - @resource[:op_params] - end - - def op_params=(value) - end - - def meta_params - @resource[:meta_params] - end - - def meta_params=(value) - end - - def reconnect_interval - @resource[:reconnect_interval] - end - - def reconnect_interval=(value) - end - - def remote_address - @resource[:remote_address] - end - - def remote_address=(value) - end -end diff --git a/lib/puppet/provider/pcmk_resource/default.rb b/lib/puppet/provider/pcmk_resource/default.rb deleted file mode 100644 index e396001d..00000000 --- a/lib/puppet/provider/pcmk_resource/default.rb +++ /dev/null @@ -1,280 +0,0 @@ -require_relative '../pcmk_common' - -Puppet::Type.type(:pcmk_resource).provide(:default) do - desc 'A base resource definition for a pacemaker resource' - - def build_pcs_resource_cmd(update=false) - resource_params = @resource[:resource_params] - meta_params = @resource[:meta_params] - op_params = @resource[:op_params] - clone_params = @resource[:clone_params] - group_params = @resource[:group_params] - master_params = @resource[:master_params] - location_rule = @resource[:location_rule] - bundle = @resource[:bundle] - - suffixes = 0 - if clone_params then suffixes +=1 end - if master_params then suffixes +=1 end - if group_params then suffixes +=1 end - if suffixes > 1 - raise(Puppet::Error, "May only define one of clone_params, "+ - "master_params and group_params") - end - if update - create_cmd = ' update ' - else - create_cmd = ' create ' - end - - if @resource[:force] - force_cmd = '--force ' - else - force_cmd = '' - end - # Build the 'pcs resource create' command. 
Check out the pcs man page :-) - cmd = force_cmd + 'resource' + create_cmd + @resource[:name] + ' ' + @resource[:resource_type] - if @resource[:resource_type] == 'remote' - if not_empty_string(@resource[:remote_address]) - cmd += ' server=' + @resource[:remote_address] - end - # reconnect_interval always has a default - cmd += " reconnect_interval=#{@resource[:reconnect_interval]}" - end - if not_empty_string(resource_params) - cmd += ' ' + resource_params - end - if not_empty_string(meta_params) - cmd += ' meta ' + meta_params - end - if not_empty_string(op_params) - cmd += ' op ' + op_params - end - # When a bundle is specified we may not specify clone, master or group - if bundle - cmd += ' bundle ' + bundle - else - if clone_params - # pcs 0.10/pcmk 2.0 removed the --clone option - if Puppet::Util::Package.versioncmp(pcs_cli_version(), '0.10.0') >= 0 - cmd += ' clone' - else - cmd += ' --clone' - end - if not_empty_string(clone_params) - cmd += ' ' + clone_params - end - end - if not_empty_string(group_params) - cmd += ' --group ' + group_params - end - if master_params - cmd += ' --master' - if not_empty_string(master_params) - cmd += ' ' + master_params - end - end - end - cmd - end - - ### overloaded methods - def initialize(*args) - super(*args) - Puppet.debug("puppet-pacemaker: initialize()") - # Hash to store the existance state of each resource or location - @resources_state = {} - @locations_state = {} - end - - def create_resource_and_location(location_rule, needs_update=false) - if needs_update then - cmd = build_pcs_resource_cmd(update=true) - pcmk_update_resource(@resource, cmd, '', @resource[:update_settle_secs]) - else - cmd = build_pcs_resource_cmd() - if location_rule then - pcs('create', @resource[:name], "#{cmd} --disabled", @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - location_rule_create() - pcs('create', @resource[:name], "resource enable #{@resource[:name]}", @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - else - pcs('create', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - end - end - - def create - # We need to probe the existance of both location and resource - # because we do not know why we're being created (if for both or - # only for one) - did_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - did_location_exist = @locations_state[@resource[:name]] == PCMK_NOCHANGENEEDED - Puppet.debug("Create: resource exists #{@resources_state[@resource[:name]]} location exists #{@locations_state[@resource[:name]]}") - needs_update = @resources_state[@resource[:name]] == PCMK_CHANGENEEDED - - cmd = build_pcs_resource_cmd() - - # If both the resource and the location do not exist, we create them both - # if a location_rule is specified otherwise only the resource - if not did_location_exist and not did_resource_exist - create_resource_and_location(location_rule, needs_update) - # If the location_rule already existed, we only create the resource - elsif did_location_exist and not did_resource_exist - create_resource_and_location(false, needs_update) - # The location_rule does not exist and the resource does exist - elsif not did_location_exist and did_resource_exist - if location_rule - location_rule_create() - end - else - raise Puppet::Error, "Invalid create: #{@resource[:name]} resource exists #{did_resource_exist} " - "location exists 
#{did_location_exist} - location_rule #{location_rule}" - end - end - - def destroy - # Any corresponding location rules will be deleted by - # pcs automatically, if present - cmd = 'resource delete ' + @resource[:name] - pcs('delete', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def exists? - @locations_state[@resource[:name]] = location_exists? - @resources_state[@resource[:name]] = resource_exists? - did_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - did_location_exist = @locations_state[@resource[:name]] == PCMK_NOCHANGENEEDED - Puppet.debug("Exists: resource #{@resource[:name]} exists "\ - "#{@resources_state[@resource[:name]]} "\ - "location exists #{@locations_state[@resource[:name]]} "\ - "resource deep_compare: #{@resource[:deep_compare]}") - if did_resource_exist and did_location_exist - return true - end - return false - end - - def resource_exists? - cmd = 'resource ' + pcs_config_or_show() + ' ' + @resource[:name] + ' > /dev/null 2>&1' - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - if ret == false then - return PCMK_NOTEXISTS - end - if @resource[:deep_compare] and pcmk_resource_has_changed?(@resource, build_pcs_resource_cmd(update=true), '') then - return PCMK_CHANGENEEDED - end - return PCMK_NOCHANGENEEDED - end - - def location_exists? - bundle = @resource[:bundle] - # If no location_rule is specified then we treat it as if it - # always exists - if not @resource[:location_rule] - return PCMK_NOCHANGENEEDED - end - if bundle - constraint_name = 'location-' + bundle - else - constraint_name = 'location-' + @resource[:name] - if @resource[:clone_params] - constraint_name += '-clone' - elsif @resource[:master_params] - constraint_name += '-master' - end - end - cmd = "constraint list | grep #{constraint_name} > /dev/null 2>&1" - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - return ret == false ? PCMK_NOTEXISTS : PCMK_NOCHANGENEEDED - end - - def location_rule_create() - location_cmd = build_pcs_location_rule_cmd(@resource) - Puppet.debug("location_rule_create: #{location_cmd}") - pcs('create', @resource[:name], location_cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - - ### property methods - - # It isn't an easy road if you want to make these true - # puppet-like resource properties. 
Here is a start if you are feeling brave: - # https://github.com/cwolferh/puppet-pacemaker/blob/pcmk_resource_improvements_try0/lib/puppet/provider/pcmk_resource/default.rb#L64 - def resource_params - @resource[:resource_params] - end - - def resource_params=(value) - end - - def op_params - @resource[:op_params] - end - - def op_params=(value) - end - - def meta_params - @resource[:meta_params] - end - - def meta_params=(value) - end - - def group_params - @resource[:group_params] - end - - def group_params=(value) - end - - def master_params - @resource[:master_params] - end - - def master_params=(value) - end - - def clone_params - @resource[:clone_params] - end - - def clone_params=(value) - end - - def location_rule - @resource[:location_rule] - end - - def location_rule=(value) - end - - def reconnect_interval - @resource[:reconnect_interval] - end - - def reconnect_interval=(value) - end - - def remote_address - @resource[:remote_address] - end - - def remote_address=(value) - end - - def bundle - @resource[:bundle] - end - - def bundle=(value) - end - -end diff --git a/lib/puppet/provider/pcmk_resource_default/pcs.rb b/lib/puppet/provider/pcmk_resource_default/pcs.rb deleted file mode 100644 index aa3b4d7e..00000000 --- a/lib/puppet/provider/pcmk_resource_default/pcs.rb +++ /dev/null @@ -1,40 +0,0 @@ -require_relative '../pcmk_common' - -# Currently the implementation is somewhat naive (will not work great -# with ensure => absent, unless the correct current value is also -# specified). For more proper handling, prefetching should be -# implemented and `value` should be switched from a param to a -# property. This should be possible to do without breaking the -# interface of the resource type. -Puppet::Type.type(:pcmk_resource_default).provide(:pcs) do - desc 'Manages default values for pacemaker resource options via pcs' - - def create - name = @resource[:name] - value = @resource[:value] - - cmd = "resource defaults #{name}='#{value}'" - - pcs('create', name, cmd, @resource[:tries], @resource[:try_sleep], - @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def destroy - name = @resource[:name] - - cmd = "resource defaults #{name}=" - pcs('create', name, cmd, @resource[:tries], @resource[:try_sleep], - @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def exists? - name = @resource[:name] - value = @resource[:value] - - cmd = "resource defaults | grep '^#{name}: #{value}\$'" - Puppet.debug("defaults exists #{cmd}") - status = pcs('show', name, cmd, @resource[:tries], @resource[:try_sleep], - @resource[:verify_on_create], @resource[:post_success_sleep]) - return status == false ? false : true - end -end diff --git a/lib/puppet/provider/pcmk_resource_op_default/pcs.rb b/lib/puppet/provider/pcmk_resource_op_default/pcs.rb deleted file mode 100644 index 90115ce3..00000000 --- a/lib/puppet/provider/pcmk_resource_op_default/pcs.rb +++ /dev/null @@ -1,40 +0,0 @@ -require_relative '../pcmk_common' - -# Currently the implementation is somewhat naive (will not work great -# with ensure => absent, unless the correct current value is also -# specified). For more proper handling, prefetching should be -# implemented and `value` should be switched from a param to a -# property. This should be possible to do without breaking the -# interface of the resource type. 
-Puppet::Type.type(:pcmk_resource_op_default).provide(:pcs) do - desc 'Manages default values for pacemaker resource operations via pcs' - - def create - name = @resource[:name] - value = @resource[:value] - - cmd = "resource op defaults #{name}='#{value}'" - - pcs('create', name, cmd, @resource[:tries], @resource[:try_sleep], - @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def destroy - name = @resource[:name] - - cmd = "resource defaults #{name}=" - pcs('create', name, cmd, @resource[:tries], @resource[:try_sleep], - @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def exists? - name = @resource[:name] - value = @resource[:value] - - cmd = "resource op defaults | grep '^#{name}: #{value}\$'" - Puppet.debug("defaults exists #{cmd}") - status = pcs('show', name, cmd, @resource[:tries], @resource[:try_sleep], - @resource[:verify_on_create], @resource[:post_success_sleep]) - return status == false ? false : true - end -end diff --git a/lib/puppet/provider/pcmk_stonith/default.rb b/lib/puppet/provider/pcmk_stonith/default.rb deleted file mode 100644 index 55d43531..00000000 --- a/lib/puppet/provider/pcmk_stonith/default.rb +++ /dev/null @@ -1,142 +0,0 @@ -require_relative '../pcmk_common' - -Puppet::Type.type(:pcmk_stonith).provide(:default) do - desc 'A base resource definition for a pacemaker stonith' - - def initialize(*args) - super(*args) - Puppet.debug("puppet-pacemaker: initialize()") - # Hash to store the existance state of each resource or location - @resources_state = {} - @locations_state = {} - end - - def build_pcs_resource_cmd(update=false) - - name = @resource[:name] - stonith_type = @resource[:stonith_type] - pcmk_host_list = @resource[:pcmk_host_list] - pcs_param_string = @resource[:pcs_param_string] - - if update - create_cmd = ' update ' - else - create_cmd = ' create ' - end - - cmd = 'stonith' + create_cmd + name + ' ' + stonith_type + ' ' - if not_empty_string(pcmk_host_list) - cmd += 'pcmk_host_list=' + pcmk_host_list + ' ' - end - cmd += @resource[:pcs_param_string] - - end - - def create_resource_and_location(location_rule=false, needs_update=false) - if needs_update then - cmd = build_pcs_resource_cmd(update=true) - pcs('update', name, cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - else - cmd = build_pcs_resource_cmd() - if location_rule then - pcs('create', name, "#{cmd} --disabled", @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - stonith_location_rule_create() - pcs('create', name, "resource enable #{name}", @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - else - pcs('create', name, cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - end - end - - - def create - - # We need to probe the existance of both location and resource - # because we do not know why we're being created (if for both or - # only for one) - did_stonith_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - did_stonith_location_exist = @locations_state[@resource[:name]] == PCMK_NOCHANGENEEDED - - Puppet.debug("Create: stonith exists #{did_stonith_resource_exist} location exists #{did_stonith_location_exist}") - - needs_update = @resources_state[@resource[:name]] == PCMK_CHANGENEEDED - - # If both the stonith resource and the location do not exist, we create them both - # if a 
location_rule is specified otherwise only the resource - if not did_stonith_location_exist and not did_stonith_resource_exist - create_resource_and_location(true, needs_update) - # If the location_rule already existed, we only create the resource - elsif did_stonith_location_exist and not did_stonith_resource_exist - create_resource_and_location(false, needs_update) - # The location_rule does not exist and the resource does exist - elsif not did_stonith_location_exist and did_stonith_resource_exist - stonith_location_rule_create() - else - raise Puppet::Error, "Invalid create: #{name} stonith resource exists #{did_stonith_resource_exist} " - "stonith location exists #{did_stonith_location_exist}" - end - end - - def destroy - # Any corresponding location rules will be deleted by - # pcs automatically, if present - cmd = 'resource delete ' + @resource[:name] - pcs('delete', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def exists? - @locations_state[@resource[:name]] = stonith_location_exists? - @resources_state[@resource[:name]] = stonith_resource_exists? - did_stonith_resource_exist = @resources_state[@resource[:name]] == PCMK_NOCHANGENEEDED - did_stonith_location_exist = @locations_state[@resource[:name]] == PCMK_NOCHANGENEEDED - - Puppet.debug("Exists: stonith resource exists #{did_stonith_resource_exist} location exists #{did_stonith_location_exist}") - if did_stonith_resource_exist and did_stonith_location_exist - return true - end - return false - end - - def stonith_resource_exists? - cmd = 'stonith ' + pcs_config_or_show() + ' ' + @resource[:name] + ' > /dev/null 2>&1' - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - if ret == false then - return PCMK_NOTEXISTS - end - if @resource[:deep_compare] and pcmk_resource_has_changed?(@resource, build_pcs_resource_cmd(update=true), '') then - return PCMK_CHANGENEEDED - end - return PCMK_NOCHANGENEEDED - end - - def stonith_location_exists? - # We automatically create the resource location constraint only in the case when - # pcmk_host_list is not empty - if not_empty_string(@resource[:pcmk_host_list]) - constraint_name = "#{@resource[:name]}" - cmd = "constraint location | grep #{constraint_name} > /dev/null 2>&1" - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - return ret == false ? 
false : true - else - return true - end - end - - def stonith_location_rule_create() - pcmk_host_list = @resource[:pcmk_host_list] - nodes_count = crm_node_l().lines.size - if not_empty_string(pcmk_host_list) and nodes_count > 1 and @resource[:name] != 'watchdog' - location_cmd = "constraint location #{@resource[:name]} avoids #{pcmk_host_list}=10000" - Puppet.debug("stonith_location_rule_create: #{location_cmd}") - pcs('create', @resource[:name], location_cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - end -end diff --git a/lib/puppet/provider/pcmk_stonith_level/default.rb b/lib/puppet/provider/pcmk_stonith_level/default.rb deleted file mode 100644 index aa0449bf..00000000 --- a/lib/puppet/provider/pcmk_stonith_level/default.rb +++ /dev/null @@ -1,76 +0,0 @@ -require_relative '../pcmk_common' - -Puppet::Type.type(:pcmk_stonith_level).provide(:default) do - desc 'A base resource definition for a pacemaker stonith level definition' - - ### overloaded methods - def create - level = @resource[:level] - target = @resource[:target] - stonith_resources = @resource[:stonith_resources] - res = stonith_resources.join(',') - cmd = 'stonith level add ' + level.to_s + ' ' + target + ' ' + res - - destroy if does_level_exist? - pcs('create', "#{name}-#{target}-#{res}", cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def destroy - # Any corresponding location rules will be deleted by - # pcs automatically, if present - target = @resource[:target] - level = @resource[:level] - cmd = 'stonith level remove ' + level.to_s + ' ' + target - pcs('delete', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - end - - def does_level_exist? - # stonith level output is a bit cumbersome to parse: - # Target: overcloud-galera-0 - # Level 1 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - # Target: overcloud-novacompute-0 - # Level 1 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - # Level 2 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - # Target: overcloud-rabbit-0 - # Level 2 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - target = @resource[:target] - level = @resource[:level] - stonith_resources = @resource[:stonith_resources] - res = stonith_resources.join(',') - # The below cmd return the "Level X - ...." strings after the Target: string until the next - # Target: string (or until the bottom of the file if it is the last Target in the output - cmd = 'stonith level | sed -n "/^Target: ' + target + '$/,/^Target:/{/^Target: ' + target + '$/b;/^Target:/b;p}"' - cmd += ' | grep -e "Level[[:space:]]*' + level.to_s + '"' - Puppet.debug("Exists: does level exist with something else #{level} #{target} #{res} -> #{cmd}") - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - - return ret == false ? false : true - end - - def exists? 
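# NOTE (editorial sketch, not part of the deleted file): as in does_level_exist?
# above, the sed/grep pipeline built below first narrows `pcs stonith level`
# output to the block that follows "Target: <target>" and then looks for an exact
# "Level <n> - <res1,res2>" line. Assuming the raw output in a local `output`,
# a rough pure-Ruby equivalent of that filter would be:
#   block = output.split(/^Target: /).find { |b| b.start_with?("#{target}\n") }
#   block.to_s.lines.any? { |l| l =~ /^\s*Level\s+#{level}\s+-\s+#{Regexp.escape(res)}\s*$/ }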
- # stonith level output is a bit cumbersome to parse: - # Target: overcloud-galera-0 - # Level 1 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - # Target: overcloud-novacompute-0 - # Level 1 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - # Level 2 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - # Target: overcloud-rabbit-0 - # Level 2 - stonith-fence_ipmilan-006809859383,stonith-fence_compute-fence-nova - target = @resource[:target] - level = @resource[:level] - stonith_resources = @resource[:stonith_resources] - res = stonith_resources.join(',') - Puppet.debug("Exists: stonith level exists #{level} #{target} #{res}") - # The below cmd return the "Level X - ...." strings after the Target: string until the next - # Target: string (or until the bottom of the file if it is the last Target in the output - cmd = 'stonith level | sed -n "/^Target: ' + target + '$/,/^Target:/{/^Target: ' + target + '$/b;/^Target:/b;p}"' - cmd += ' | grep -e "Level[[:space:]]*' + level.to_s + '[[:space:]]*-[[:space:]]*' + res + '"' - ret = pcs('show', @resource[:name], cmd, @resource[:tries], - @resource[:try_sleep], @resource[:verify_on_create], @resource[:post_success_sleep]) - - return ret == false ? false : true - end -end diff --git a/lib/puppet/provider/service/pacemaker_noop.rb b/lib/puppet/provider/service/pacemaker_noop.rb deleted file mode 100644 index 12cf03df..00000000 --- a/lib/puppet/provider/service/pacemaker_noop.rb +++ /dev/null @@ -1,6 +0,0 @@ -require_relative '../pacemaker_noop' - -Puppet::Type.type(:service).provide(:noop, parent: Puppet::Provider::PacemakerNoop) do - # disable this provider - confine(true: false) -end diff --git a/lib/puppet/provider/service/pacemaker_xml.rb b/lib/puppet/provider/service/pacemaker_xml.rb deleted file mode 100644 index 1ea8aa26..00000000 --- a/lib/puppet/provider/service/pacemaker_xml.rb +++ /dev/null @@ -1,368 +0,0 @@ -require_relative '../pacemaker_xml' - -Puppet::Type.type(:service).provide(:pacemaker_xml, parent: Puppet::Provider::PacemakerXML) do - has_feature :enableable - has_feature :refreshable - - commands crm_node: 'crm_node' - commands crm_resource: 'crm_resource' - commands crm_attribute: 'crm_attribute' - commands cibadmin: 'cibadmin' - - # original title of the service - # @return [String] - def service_title - @resource.title - end - - # original name of the service - # in most cases will be equal to the title - # but can be different - # @return [String] - def service_name - resource[:name] - end - - # check if the service name is the same as service title - # @return [true,false] - def name_equals_title? - service_title == service_name - end - - # find a primitive name that is present in the CIB - # or nil if none is present - # @return [String,nil] - def pick_existing_name(*names) - names.flatten.find do |name| - primitive_exists? name - end - end - - # generate a list of strings the service name could be written as - # perhaps, one of them could be found in the CIB - # @param name [String] - # @return [Array] - def service_name_variations(name) - name = name.to_s - variations = [] - variations << name - variations << if name.start_with? 'p_' - name.gsub(/^p_/, '') - else - "p_#{name}" - end - - base_name = primitive_base_name name - unless base_name == name - variations << base_name - variations << if base_name.start_with? 
'p_' - base_name.gsub(/^p_/, '') - else - "p_#{base_name}" - end - end - variations - end - - # get the correct name of the service primitive - # @return [String] - def name - return @name if @name - @name = pick_existing_name service_name_variations(service_title), service_name_variations(service_name) - if @name - message = "Using CIB name '#{@name}' for primitive '#{service_title}'" - message += " with name '#{service_name}'" unless name_equals_title? - debug message - else - message = "Primitive '#{service_title}'" - message += " with name '#{service_name}'" unless name_equals_title? - message += ' was not found in CIB!' - raise message - end - @name - end - - # full name of the primitive - # if resource is complex use group name - # @return [String] - def full_name - return @full_name if @full_name - if primitive_is_complex? name - full_name = primitive_full_name name - debug "Using full name '#{full_name}' for complex primitive '#{name}'" - @full_name = full_name - else - @full_name = name - end - end - - # name of the basic service without 'p_' prefix - # used to disable the basic service. - # Uses "name" property if it's not the same as title - # because most likely it will be the real system service name - # @return [String] - def basic_service_name - return @basic_service_name if @basic_service_name - basic_service_name = name - basic_service_name = service_name unless name_equals_title? - if basic_service_name.start_with? 'p_' - basic_service_name = basic_service_name.gsub(/^p_/, '') - end - debug "Using '#{basic_service_name}' as the basic service name for the primitive '#{name}'" - @basic_service_name = basic_service_name - end - - # cleanup a primitive and - # wait until cleanup finishes - def cleanup - cleanup_primitive full_name, hostname - wait_for_status name - end - - # run the disable basic service action only - # if it's enabled fot this provider action - # and is globally enabled too - # @param [Symbol] action (:start/:stop/:status) - def disable_basic_service_on_action(action) - if action == :start - return unless pacemaker_options[:disable_basic_service_on_start] - elsif action == :stop - return unless pacemaker_options[:disable_basic_service_on_stop] - elsif action == :status - return unless pacemaker_options[:disable_basic_service_on_status] - else - fail "Action '#{action}' is incorrect!" - end - - disable_basic_service - end - - # called by Puppet to determine if the service - # is running on the local node - # @return [:running,:stopped] - def status - debug "Call: 'status' for Pacemaker service '#{name}' on node '#{hostname}'" - disable_basic_service_on_action :status - - cib_reset 'service_status' - wait_for_online 'service_status' - - out = if primitive_is_master? name - service_status_mode pacemaker_options[:status_mode_master] - elsif primitive_is_clone? name - service_status_mode pacemaker_options[:status_mode_clone] - else - service_status_mode pacemaker_options[:status_mode_simple] - end - - if pacemaker_options[:add_location_constraint] - if out == :running && (!service_location_exists? full_name, hostname) - debug 'Location constraint is missing. Service status set to "stopped".' - out = :stopped - end - end - - if pacemaker_options[:cleanup_on_status] - if out == :running and primitive_has_failures? name, hostname - debug "Primitive: '#{name}' has failures on the node: '#{hostname}' Service status set to 'stopped'." 
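# NOTE (editorial sketch, not part of the deleted file): downgrading the reported
# status to :stopped here has a knock-on effect: Puppet then calls start(), and
# with cleanup_on_start (or cleanup_only_if_failures) enabled that path cleans up
# the failed primitive before starting it again. Roughly:
#   :running + failures -> report :stopped -> start() -> cleanup -> start_primitive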
- out = :stopped - end - end - - debug "Return: '#{out}' (#{out.class})" - debug cluster_debug_report "#{@resource} status" - out - end - - # called by Puppet to start the service - def start - debug "Call 'start' for Pacemaker service '#{name}' on node '#{hostname}'" - disable_basic_service_on_action :start - - enable unless primitive_is_managed? name - - if pacemaker_options[:cleanup_on_start] - if !pacemaker_options[:cleanup_only_if_failures] || primitive_has_failures?(name, hostname) - cleanup - end - end - - if pacemaker_options[:add_location_constraint] - service_location_add full_name, hostname unless service_location_exists? full_name, hostname - end - - unban_primitive name, hostname - start_primitive name - start_primitive full_name - - if primitive_is_master? name - debug "Choose master start for Pacemaker service '#{name}'" - wait_for_master name - else - service_start_mode pacemaker_options[:start_mode_simple] - end - debug cluster_debug_report "#{@resource} start" - end - - # called by Puppet to stop the service - def stop - debug "Call 'stop' for Pacemaker service '#{name}' on node '#{hostname}'" - disable_basic_service_on_action :stop - - enable unless primitive_is_managed? name - - if pacemaker_options[:cleanup_on_stop] - if !pacemaker_options[:cleanup_only_if_failures] || primitive_has_failures?(name, hostname) - cleanup - end - end - - if pacemaker_options[:add_location_constraint] - service_location_remove full_name, hostname if service_location_exists? full_name, hostname - end - - if primitive_is_master? name - service_stop_mode pacemaker_options[:stop_mode_master] - elsif primitive_is_clone? name - service_stop_mode pacemaker_options[:stop_mode_clone] - else - service_stop_mode pacemaker_options[:stop_mode_simple] - end - debug cluster_debug_report "#{@resource} stop" - end - - # called by Puppet to restart the service - def restart - debug "Call 'restart' for Pacemaker service '#{name}' on node '#{hostname}'" - if pacemaker_options[:restart_only_if_local] && (!primitive_is_running? name, hostname) - Puppet.info "Pacemaker service '#{name}' is not running on node '#{hostname}'. Skipping restart!" - return - end - - begin - stop - rescue - nil - ensure - start - end - end - - # wait for the service to start using - # the selected method. - # @param mode [:global, :master, :local] - def service_start_mode(mode = :global) - if mode == :master - debug "Choose master start for Pacemaker service '#{name}'" - wait_for_master name - elsif mode == :local - debug "Choose local start for Pacemaker service '#{name}' on node '#{hostname}'" - wait_for_start name, hostname - elsif mode == :global - debug "Choose global start for Pacemaker service '#{name}'" - wait_for_start name - else - raise "Unknown service start mode '#{mode}'" - end - end - - # wait for the service to stop using - # the selected method. - # @param mode [:global, :master, :local] - def service_stop_mode(mode = :global) - if mode == :local - debug "Choose local stop for Pacemaker service '#{name}' on node '#{hostname}'" - ban_primitive name, hostname - wait_for_stop name, hostname - elsif mode == :global - debug "Choose global stop for Pacemaker service '#{name}'" - stop_primitive name - wait_for_stop name - else - raise "Unknown service stop mode '#{mode}'" - end - end - - # determine the status of the service using - # the selected method. 
- # @param mode [:global, :master, :local] - # @return [:running,:stopped] - def service_status_mode(mode = :local) - if mode == :local - debug "Choose local status for Pacemaker service '#{name}' on node '#{hostname}'" - get_primitive_puppet_status name, hostname - elsif mode == :global - debug "Choose global status for Pacemaker service '#{name}'" - get_primitive_puppet_status name - else - raise "Unknown service status mode '#{mode}'" - end - end - - # called by Puppet to enable the service - def enable - debug "Call 'enable' for Pacemaker service '#{name}' on node '#{hostname}'" - manage_primitive name - end - - # called by Puppet to disable the service - def disable - debug "Call 'disable' for Pacemaker service '#{name}' on node '#{hostname}'" - unmanage_primitive name - end - - alias_method :manual_start, :disable - - # called by Puppet to determine if the service is enabled - # @return [:true,:false] - def enabled? - debug "Call 'enabled?' for Pacemaker service '#{name}' on node '#{hostname}'" - out = get_primitive_puppet_enable name - debug "Return: '#{out}' (#{out.class})" - out - end - - # create an extra provider instance to deal with the basic service - # the provider will be chosen to match the current system - # @return [Puppet::Type::Service::Provider] - def extra_provider(provider_name = nil) - return @extra_provider if @extra_provider - begin - param_hash = {} - param_hash.store :name, basic_service_name - param_hash.store :provider, provider_name if provider_name - type = Puppet::Type::Service.new param_hash - @extra_provider = type.provider - rescue => e - Puppet.info "Could not get extra provider for Pacemaker primitive '#{name}': #{e.message}" - @extra_provider = nil - end - end - - # disable and stop the basic service - def disable_basic_service - # skip native-based primitive classes - if pacemaker_options[:native_based_primitive_classes].include?(primitive_class name) - Puppet.info "Not stopping basic service '#{basic_service_name}', since its Pacemaker primitive is using primitive_class '#{primitive_class name}'" - return - end - - return unless extra_provider - begin - if extra_provider.enableable? && extra_provider.enabled? == :true - Puppet.info "Disable basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'" - extra_provider.disable - else - Puppet.info "Basic service '#{extra_provider.name}' is disabled as reported by '#{extra_provider.class.name}' provider" - end - if extra_provider.status == :running - Puppet.info "Stop basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'" - extra_provider.stop - else - Puppet.info "Basic service '#{extra_provider.name}' is stopped as reported by '#{extra_provider.class.name}' provider" - end - rescue => e - Puppet.info "Could not disable basic service for Pacemaker primitive '#{name}' using '#{extra_provider.class.name}' provider: #{e.message}" - end - end -end diff --git a/lib/puppet/type/pacemaker_colocation.rb b/lib/puppet/type/pacemaker_colocation.rb deleted file mode 100644 index be3d4a29..00000000 --- a/lib/puppet/type/pacemaker_colocation.rb +++ /dev/null @@ -1,92 +0,0 @@ -require 'puppet/parameter/boolean' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_colocation) do - desc 'Type for manipulating corosync/pacemaker colocation. Colocation - is the grouping together of a set of primitives so that they travel - together when one of them fails. 
For instance, if a web server vhost - is colocated with a specific ip address and the web server software - crashes, the ip address with migrate to the new host with the vhost. - - More information on Corosync/Pacemaker colocation can be found here: - - * http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_ensuring_resources_run_on_the_same_host.html' - - include Pacemaker::Options - include Pacemaker::Type - - ensurable - - newparam(:name) do - desc "Identifier of the colocation entry. This value needs to be unique - across the entire Corosync/Pacemaker configuration since it doesn't have - the concept of name spaces per type." - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc "Don't actually make changes" - defaultto false - end - - newproperty(:first) do - desc 'First Corosync primitive.' - end - - newproperty(:second) do - desc 'Second Corosync primitive.' - end - - newproperty(:score) do - desc 'The priority of this colocation. Primitives can be a part of - multiple colocation groups and so there is a way to control which - primitives get priority when forcing the move of other primitives. - This value can be an integer but is often defined as the string - INFINITY.' - - defaultto 'INFINITY' - - validate do |value| - break if %w(inf INFINITY -inf -INFINITY).include? value - break if value.to_i.to_s == value - raise 'Score parameter is invalid, should be +/- INFINITY(or inf) or Integer' - end - - munge do |value| - value.gsub 'inf', 'INFINITY' - end - - isrequired - end - - autorequire(:service) do - %w(corosync pacemaker) - end - - def autorequire_enabled? - pacemaker_options[:autorequire_primitives] - end - - autorequire(:pacemaker_resource) do - resources = [] - next resources unless autorequire_enabled? - next resources unless self[:ensure] == :present - resources << primitive_base_name(self[:first]) if self[:first] - resources << primitive_base_name(self[:second]) if self[:second] - debug "Autorequire pacemaker_resources: #{resources.join ', '}" if resources.any? - resources - end - - if respond_to? :autobefore - autobefore(:pacemaker_resource) do - resources = [] - next resources unless autorequire_enabled? - next resources unless self[:ensure] == :absent - resources << primitive_base_name(self[:first]) if self[:first] - resources << primitive_base_name(self[:second]) if self[:second] - debug "Autobefore pacemaker_resources: #{resources.join ', '}" if resources.any? - resources - end - end -end diff --git a/lib/puppet/type/pacemaker_location.rb b/lib/puppet/type/pacemaker_location.rb deleted file mode 100644 index e1c8cfea..00000000 --- a/lib/puppet/type/pacemaker_location.rb +++ /dev/null @@ -1,106 +0,0 @@ -require 'puppet/parameter/boolean' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_location) do - desc 'Type for manipulating corosync/pacemaker location. Location - is the set of rules defining the place where resource will be run. - More information on Corosync/Pacemaker location can be found here: - - * http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_ensuring_resources_run_on_the_same_host.html' - - include Pacemaker::Options - include Pacemaker::Type - - ensurable - - newparam(:name) do - desc "Identifier of the location entry. This value needs to be unique - across the entire Corosync/Pacemaker configuration since it doesn't have - the concept of name spaces per type." 
- - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc "Don't actually make changes" - defaultto false - end - - newproperty(:primitive) do - desc 'Corosync primitive being managed.' - end - - newproperty(:score) do - desc 'The score for the node' - - validate do |value| - break if %w(inf INFINITY -inf -INFINITY).include? value - break if value.to_i.to_s == value - raise 'Score parameter is invalid, should be +/- INFINITY(or inf) or Integer' - end - - munge do |value| - value.gsub 'inf', 'INFINITY' - end - end - - newproperty(:rules, array_matching: :all) do - desc 'Specify rules for location' - - munge do |rule| - resource.stringify_data rule - if @rule_number - @rule_number += 1 - else - @rule_number = 0 - end - resource.munge_rule rule, @rule_number, @resource[:name] - end - - def insync?(is) - resource.insync_debug is, should, 'rules' - super - end - - def is_to_s(is) - resource.inspect_to_s is - end - - def should_to_s(should) - resource.inspect_to_s should - end - end - - newproperty(:node) do - desc 'The node for which to apply node score' - end - - autorequire(:service) do - %w(corosync pacemaker) - end - - def autorequire_enabled? - pacemaker_options[:autorequire_primitives] - end - - autorequire(:pacemaker_resource) do - resources = [] - next resources unless autorequire_enabled? - next resources unless self[:ensure] == :present - resources << primitive_base_name(self[:primitive]) if self[:primitive] - debug "Autorequire pacemaker_resources: #{resources.join ', '}" if resources.any? - resources - end - - if respond_to? :autobefore - autobefore(:pacemaker_resource) do - resources = [] - next resources unless autorequire_enabled? - next resources unless self[:ensure] == :absent - resources << primitive_base_name(self[:primitive]) if self[:primitive] - debug "Autobefore pacemaker_resources: #{resources.join ', '}" if resources.any? - resources - end - end -end diff --git a/lib/puppet/type/pacemaker_nodes.rb b/lib/puppet/type/pacemaker_nodes.rb deleted file mode 100644 index fbc8e164..00000000 --- a/lib/puppet/type/pacemaker_nodes.rb +++ /dev/null @@ -1,87 +0,0 @@ -require 'puppet/parameter/boolean' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_nodes) do - desc <<-eof -Add and remove cluster nodes using the "corosync-cmapctl" -tool without restart of the Pacemaker and Corosync services. - eof - - include Pacemaker::Options - include Pacemaker::Type - - newparam(:name) do - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc <<-eof -Don't actually make changes - -Default: false -eof - defaultto false - end - - newparam(:nodes) do - desc <<-eos -Nodes data structure: - ``` - { - '1' => { - name: 'node1', - id: '1', - ring0: '192.168.0.1', - ring1: '172.16.0.1', - }, - '2' => { - name: 'node2', - id: '2', - ring0: '192.168.0.2', - ring1: '172.16.0.2', - }, - '3' => { - name: 'node3', - id: '3', - ring0: '192.168.0.3', - ring1: '172.16.0.3', - } - } - ``` -This structure may be generated by the "corosync_nodes" -function with "raw" output type from the several input formats. 
- eos - - def filter_nodes_structure(nodes) - filtered_nodes = {} - node_attributes = %w(id name ring0 ring1) - nodes.each do |node_id, node_hash| - node = {} - node_attributes.each do |node_attribute| - node[node_attribute] = node_hash[node_attribute] if node_hash[node_attribute] - end - filtered_nodes[node_id] = node - end - filtered_nodes - end - - def insync?(should) - debug "#{is.inspect} vs #{should.inspect}" - filter_nodes_structure(is) == filter_nodes_structure(should) - end - - def validate(value) - fail 'Nodes should be a non-empty hash!' unless value.is_a? Hash and value.any? - end - end - - newparam(:remove_pacemaker_nodes) do - defaultto true - end - - autorequire(:service) do - %w(corosync pacemaker pcsd) - end - -end diff --git a/lib/puppet/type/pacemaker_online.rb b/lib/puppet/type/pacemaker_online.rb deleted file mode 100644 index f2db738a..00000000 --- a/lib/puppet/type/pacemaker_online.rb +++ /dev/null @@ -1,20 +0,0 @@ -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_online) do - desc 'Wait for pacemaker to become online' - - newparam(:name) do - isnamevar - end - - newproperty(:status) do - desc 'Should we wait for online or offline status' - defaultto :online - newvalues :online, :offline - end - - autorequire(:service) do - %w(corosync pacemaker) - end -end diff --git a/lib/puppet/type/pacemaker_operation_default.rb b/lib/puppet/type/pacemaker_operation_default.rb deleted file mode 100644 index 52546a44..00000000 --- a/lib/puppet/type/pacemaker_operation_default.rb +++ /dev/null @@ -1,48 +0,0 @@ -require 'puppet/parameter/boolean' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_operation_default) do - desc 'Type for manipulating corosync/pacemaker configuration op_defaults. - Besides the configuration file that is managed by the module the contains - all these related Corosync types and providers, there is a set of cluster - op_defaults that can be set and saved inside the CIB (A CIB being a set of - configuration that is synced across the cluster, it can be exported as XML - for processing and backup). The type is pretty simple interface for - setting key/value pairs or removing them completely. Removing them will - result in them taking on their default value. - - More information on cluster properties can be found here: - - * http://clusterlabs.org/doc/en-US/Pacemaker/1.1-plugin/html/Clusters_from_Scratch/ch05s03s02.html' - - include Pacemaker::Options - include Pacemaker::Type - - ensurable - - newparam(:name) do - desc 'Name identifier of this op_defaults. Simply the name of the cluster - op_defaults. Happily most of these are unique.' - - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc "Don't actually make any changes" - - defaultto false - end - - newproperty(:value) do - desc "Value of the op_defaults. It is expected that this will be a single - value but we aren't validating string vs. integer vs. boolean because - cluster op_operations can range the gambit." 
- - isrequired - end - - autorequire(:service) do - %w(corosync pacemaker) - end -end diff --git a/lib/puppet/type/pacemaker_order.rb b/lib/puppet/type/pacemaker_order.rb deleted file mode 100644 index cbfed147..00000000 --- a/lib/puppet/type/pacemaker_order.rb +++ /dev/null @@ -1,165 +0,0 @@ -require 'puppet/parameter/boolean' -require 'puppet/property/boolean' - -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_order) do - desc <<-eof -Type for manipulating Corosync/Pacemkaer ordering entries. Order -entries are another type of constraint that can be put on sets of -primitives but unlike colocation, order does matter. These designate -the order at which you need specific primitives to come into a desired -state before starting up a related primitive. - -More information can be found at the following link: - -* http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_controlling_resource_start_stop_ordering.html' - eof - - include Pacemaker::Options - include Pacemaker::Type - - ensurable - - newparam(:name) do - desc <<-eof -Name identifier of this ordering entry. This value needs to be unique -across the entire Corosync/Pacemaker configuration since it doesn't have -the concept of name spaces per type." - eof - - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc <<-eof -Don't actually make changes. - -Default: false - eof - - defaultto false - end - - newproperty(:first) do - desc <<-eof -(Mandatory) -Name of the resource that the then resource depends on. - eof - end - - newproperty(:second) do - desc <<-eof -(Mandatory) -Name of the dependent resource - eof - end - - newproperty(:score) do - desc <<-eof -The priority of the this ordered grouping. Primitives can be a part -of multiple order groups and so there is a way to control which -primitives get priority when forcing the order of state changes on -other primitives. This value can be an integer but is often defined -as the string INFINITY. - -Default: undef -eof - - validate do |value| - next if %w(inf INFINITY -inf -INFINITY).include? value - next if value.to_i.to_s == value - raise 'Score parameter is invalid, should be +/- INFINITY(or inf) or Integer' - end - - munge do |value| - value.gsub 'inf', 'INFINITY' - end - end - - newproperty(:first_action) do - desc <<-eof -The action that the first resource must complete before the second action can be initiated for the then resource. -Allowed values: start, stop, promote, demote. - -Default: undef (means start) - eof - - newvalues(:start, :stop, :promote, :demote) - end - - newproperty(:second_action) do - desc <<-eof -The action that the then resource can execute only after the first action on the first resource has completed. -Allowed values: start, stop, promote, demote. - -Default: undef (means the value of the first action) - eof - - newvalues(:start, :stop, :promote, :demote) - end - - newproperty(:kind) do - desc <<-eof -How to enforce the constraint. Allowed values: - -* optional: Just a suggestion. Only applies if both resources are executing the specified actions. - Any change in state by the first resource will have no effect on the then resource. -* mandatory: Always. If first does not perform first-action, then will not be allowed to performed then-action. - If first is restarted, then (if running) will be stopped beforehand and started afterward. 
-* serialize: Ensure that no two stop/start actions occur concurrently for the resources. - First and then can start in either order, but one must complete starting before the other can be started. - A typical use case is when resource start-up puts a high load on the host. - eof - - newvalues(:optional, :mandatory, :serialize) - end - - newproperty(:symmetrical, boolean: true, parent: Puppet::Property::Boolean) do - desc <<-eof -If true, the reverse of the constraint applies for the opposite action -(for example, if B starts after A starts, then B stops before A stops). - -Default: undef (means true) - eof - end - - newproperty(:require_all, boolean: true, parent: Puppet::Property::Boolean) do - desc <<-eof -Whether all members of the set must be active before continuing. - -Default: undef (means true) - eof - end - - autorequire(:service) do - %w(corosync pacemaker pcsd) - end - - def autorequire_enabled? - pacemaker_options[:autorequire_primitives] - end - - autorequire(:pacemaker_resource) do - resources = [] - next resources unless autorequire_enabled? - next resources unless self[:ensure] == :present - resources << primitive_base_name(self[:first]) if self[:first] - resources << primitive_base_name(self[:second]) if self[:second] - debug "Autorequire pacemaker_resources: #{resources.join ', '}" if resources.any? - resources - end - - if respond_to? :autobefore - autobefore(:pacemaker_resource) do - resources = [] - next resources unless autorequire_enabled? - next resources unless self[:ensure] == :absent - resources << primitive_base_name(self[:first]) if self[:first] - resources << primitive_base_name(self[:second]) if self[:second] - debug "Autobefore pacemaker_resources: #{resources.join ', '}" if resources.any? - resources - end - end -end diff --git a/lib/puppet/type/pacemaker_pcsd_auth.rb b/lib/puppet/type/pacemaker_pcsd_auth.rb deleted file mode 100644 index 9d1eb083..00000000 --- a/lib/puppet/type/pacemaker_pcsd_auth.rb +++ /dev/null @@ -1,100 +0,0 @@ -require 'puppet/parameter/boolean' -require 'puppet/property/boolean' - -Puppet::Type.newtype(:pacemaker_pcsd_auth) do - desc <<-eof -Use the "pcs" command to authenticate nodes in each other's "pcsd" daemon so the cluster can be managed. -This is a "singleton" type, usually there is no need to have several instances of it in a single catalog. - -But, if you have serious reasons to, you can have many instances of this type with different sets -of cluster nodes or other parameters. - eof - - newparam(:name) do - isnamevar - end - - newproperty(:success, boolean: true, parent: Puppet::Property::Boolean) do - desc <<-eof -Should the auth succeed? The value of this property should be true. -Setting it to false will disable the retry loop that waits for the -auth success. - eof - - isrequired - defaultto true - end - - newparam(:nodes, array_matching: :all) do - desc <<-eof -The list of cluster nodes to authenticate. -The retrieved values would be the list of successfully authenticated nodes. -Order of the nodes list does not matter. - eof - - isrequired - defaultto [] - end - - newparam(:username) do - desc <<-eof -Use this user to access the nodes -Default: hacluster - eof - - isrequired - defaultto 'hacluster' - end - - newparam(:password) do - desc <<-eof -Use this user to access the nodes - eof - - isrequired - end - - newparam(:force, boolean: true, parent: Puppet::Parameter::Boolean) do - desc <<-eof -Re-authenticate nodes on each run even if they are already authenticated. 
- eof - - isrequired - defaultto false - end - - newparam(:local, boolean: true, parent: Puppet::Parameter::Boolean) do - desc <<-eof -Don't authenticate all the nodes to each other. Authenticate only the local node. -It may be helpful if the other cluster nodes are not online. -Requires the 'whole' parameter to be set to false. - eof - - isrequired - defaultto false - end - - newparam(:whole, boolean: true, parent: Puppet::Parameter::Boolean) do - desc <<-eof -Consider authentication successful only if the whole cluster have been authenticated, -or the local node alone is enough to continue the deployment. Other nodes may come online later. - eof - - isrequired - defaultto true - end - - autorequire(:service) do - %w(pcsd) - end - - # if this resource receives the notify event - # (likely from a User type or from something that have set the user password) - # force the re-authentication of the cluster nodes - def refresh - debug 'Forcing the re-authentication of the cluster nodes' - self[:force] = true - provider.success = self[:success] unless provider.success - end - -end diff --git a/lib/puppet/type/pacemaker_property.rb b/lib/puppet/type/pacemaker_property.rb deleted file mode 100644 index 0c59ef07..00000000 --- a/lib/puppet/type/pacemaker_property.rb +++ /dev/null @@ -1,52 +0,0 @@ -require 'puppet/parameter/boolean' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_property) do - desc "Type for manipulating corosync/pacemaker configuration properties. - Besides the configuration file that is managed by the module the contains - all these related Corosync types and providers, there is a set of cluster - properties that can be set and saved inside the CIB (A CIB being a set of - configuration that is synced across the cluster, it can be exported as XML - for processing and backup). The type is pretty simple interface for - setting key/value pairs or removing them completely. Removing them will - result in them taking on their default value. - - More information on cluster properties can be found here: - - * http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/_cluster_options.html - - P.S Looked at generating a type dynamically from the cluster's property - meta-data that would result in a single type with puppet type properties - of every cluster property... may still do so in a later iteration." - - include Pacemaker::Options - include Pacemaker::Type - - ensurable - - newparam(:name) do - desc 'Name identifier of this property. Simply the name of the cluster - property. Happily most of these are unique.' - - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc "Don't actually make changes" - - defaultto false - end - - newproperty(:value) do - desc "Value of the property. It is expected that this will be a single - value but we aren't validating string vs. integer vs. boolean because - cluster properties can range the gambit." 
- - isrequired - end - - autorequire(:service) do - %w(corosync pacemaker) - end -end diff --git a/lib/puppet/type/pacemaker_resource.rb b/lib/puppet/type/pacemaker_resource.rb deleted file mode 100644 index 7385bb2b..00000000 --- a/lib/puppet/type/pacemaker_resource.rb +++ /dev/null @@ -1,235 +0,0 @@ -require 'puppet/parameter/boolean' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_resource) do - desc <<-eof -Type for manipulating Corosync/Pacemaker primitives. Primitives -are probably the most important building block when creating highly -available clusters using Corosync and Pacemaker. Each primitive defines -an application, ip address, or similar to monitor and maintain. These -managed primitives are maintained using what is called a resource agent. -These resource agents have a concept of class, type, and subsystem that -provides the functionality. Regretibly these pieces of vocabulary -clash with those used in Puppet so to overcome the name clashing the -property and parameter names have been qualified a bit for clarity. - -More information on primitive definitions can be found at the following link: - -* http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/_adding_a_resource.html - eof - - include Pacemaker::Options - include Pacemaker::Type - - ensurable - - newparam(:name) do - desc <<-eof -Name identifier of primitive. This value needs to be unique -across the entire Corosync/Pacemaker configuration since it doesn't have -the concept of name spaces per type. - eof - - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc <<-eof -Don't actually make changes - eof - - defaultto false - end - - newproperty(:primitive_class) do - desc <<-eof -Corosync class of the primitive. Examples of classes are lsb or ocf. -Lsb funtions a lot like the init provider in Puppet for services, an init -script is ran periodically on each host to identify status, or to start -and stop a particular application. Ocf of the other hand is a script with -meta-data and stucture that is specific to Corosync and Pacemaker. - eof - - isrequired - end - - newproperty(:primitive_type) do - desc <<-eof -Corosync primitive type. Type generally matches to the specific -'thing' your managing, i.e. ip address or vhost. Though, they can be -completely arbitrarily named and manage any number of underlying -applications or resources. - eof - - isrequired - end - - newproperty(:primitive_provider) do - desc <<-eof -Corosync primitive provider. All resource agents used in a primitve -have something that provides them to the system, be it the Pacemaker or -redhat plugins... they're not always obvious though so currently you're -left to understand Corosync enough to figure it out. Usually, if it isn't -obvious it is because there is only one provider for the resource agent. - -To find the list of providers for a resource agent run the following -from the command line has Corosync installed: - -* `crm configure ra providers ` - -Required for OCF primitives and may not be needed for other classes. - eof - end - - # Our parameters and operations properties must be hashes. - newproperty(:parameters) do - desc <<-eof -A hash of params for the primitive. Parameters in a primitive are -used by the underlying resource agent, each class using them slightly -differently. In ocf scripts they are exported and pulled into the -script as variables to be used. 
Since the list of these parameters -are completely arbitrary and validity not enforced we simply defer -defining a model and just accept a hash. - eof - - validate do |value| - raise 'Parameters property must be a hash' unless value.is_a? Hash - end - - def is_to_s(is) - resource.inspect_to_s is - end - - def should_to_s(should) - resource.inspect_to_s should - end - - def insync?(is) - resource.insync_debug is, should, 'parameters' - super - end - - munge do |value| - resource.stringify_data value - end - end - - newproperty(:operations, array_matching: :all) do - desc <<-eof -A hash of operations for the primitive. Operations defined in a -primitive are little more predictable as they are commonly things like -monitor or start and their values are in seconds. Since each resource -agent can define its own set of operations we are going to defer again -and just accept a hash. There maybe room to model this one but it -would require a review of all resource agents to see if each operation -is valid. - eof - - validate do |value| - raise "Operations property must be an Hash. Got: #{value.inspect}" unless value.is_a? Hash - end - - # Puppet calls this for individual operations inside the Array - munge do |value| - raise "expected to munge a single operation" if value.is_a? Array - value = resource.stringify_data value - resource.munge_operation(value) - end - - def should=(value) - munged = resource.munge_operations_array(value) - super(munged) - # @shouldorig is supposed to hold the original value, but super will - # stored munged, not the original it didn't receive. - @shouldorig = value - end - - def is_to_s(is) - resource.inspect_to_s is - end - - def should_to_s(should) - resource.inspect_to_s should - end - - def insync?(is) - resource.insync_debug is, should, 'operations' - resource.compare_operations is, should - end - end - - newproperty(:metadata) do - desc <<-eof -A hash of metadata for the primitive. A primitive can have a set of -metadata that doesn't affect the underlying Corosync type/provider but -affect that concept of a resource. This metadata is similar to Puppet's -resources resource and some meta-parameters, they change resource -behavior but have no affect of the data that is synced or manipulated. - eof - - validate do |value| - raise 'Metadata property must be a hash' unless value.is_a? Hash - end - - munge do |value| - value = resource.stringify_data value - resource.munge_meta_attributes value - end - - def is_to_s(is) - resource.inspect_to_s is - end - - def should_to_s(should) - resource.inspect_to_s should - end - - def insync?(is) - resource.insync_debug is, should, 'metadata' - resource.compare_meta_attributes is, should - end - end - - newproperty(:complex_metadata) do - desc <<-eof -A hash of metadata for the complex primitives - eof - - validate do |value| - raise 'Complex_metadata property must be a hash' unless value.is_a? Hash - end - - munge do |value| - value = resource.stringify_data value - resource.munge_meta_attributes value - end - - def is_to_s(is) - resource.inspect_to_s is - end - - def should_to_s(should) - resource.inspect_to_s should - end - - def insync?(is) - resource.insync_debug is, should, 'complex_metadata' - resource.compare_meta_attributes is, should - end - end - - newproperty(:complex_type) do - desc <<-eof -Which complex type this resource should be? Supported: clone, master, simple. -The "simple" is the default value and means a non-complex resource. 
- eof - - newvalues 'clone', 'master', 'simple' - defaultto 'simple' - end - - autorequire(:service) do - %w(corosync pacemaker) - end -end diff --git a/lib/puppet/type/pacemaker_resource_default.rb b/lib/puppet/type/pacemaker_resource_default.rb deleted file mode 100644 index 72a6219a..00000000 --- a/lib/puppet/type/pacemaker_resource_default.rb +++ /dev/null @@ -1,56 +0,0 @@ -require 'puppet/parameter/boolean' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/type' - -Puppet::Type.newtype(:pacemaker_resource_default) do - desc <<-eof -Type for manipulating corosync/pacemaker configuration rsc_defaults. -Besides the configuration file that is managed by the module the contains -all these related Corosync types and providers, there is a set of cluster -rsc_defaults that can be set and saved inside the CIB (A CIB being a set of -configuration that is synced across the cluster, it can be exported as XML -for processing and backup). The type is pretty simple interface for -setting key/value pairs or removing them completely. Removing them will -result in them taking on their default value. - -More information on cluster properties can be found here: - -* http://clusterlabs.org/doc/en-US/Pacemaker/1.1-plugin/html/Clusters_from_Scratch/ch05s03s02.html' - eof - - include Pacemaker::Options - include Pacemaker::Type - - ensurable - - newparam(:name) do - desc <<-eof -Name identifier of this rsc_defaults. Simply the name of the cluster -rsc_defaults. Happily most of these are unique. - eof - - isnamevar - end - - newparam(:debug, boolean: true, parent: Puppet::Parameter::Boolean) do - desc <<-eof -Don't actually make any changes - eof - - defaultto false - end - - newproperty(:value) do - desc <<-eof -Value of the rsc_defaults. It is expected that this will be a single -value but we aren't validating string vs. integer vs. boolean because -cluster rsc_resources can range the gambit. - eof - - isrequired - end - - autorequire(:service) do - %w(corosync pacemaker) - end -end diff --git a/lib/puppet/type/pcmk_bundle.rb b/lib/puppet/type/pcmk_bundle.rb deleted file mode 100644 index c96d00ca..00000000 --- a/lib/puppet/type/pcmk_bundle.rb +++ /dev/null @@ -1,180 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_bundle) do - @doc = "Resource definition for a pacemaker resource bundle" - - ensurable - - newparam(:name) do - desc "A unique name for the resource" - end - - newparam(:post_success_sleep) do - desc "The time to sleep after successful pcs action. The reason to set - this is to avoid immediate back-to-back 'pcs resource create' calls - when creating multiple resources. Defaults to '0'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "post_success_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "post_success_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." 
- - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newparam(:verify_on_create, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to verify pcs resource creation with an additional - call to 'pcs resource config' rather than just relying on the exit - status of 'pcs resource create'. When true, $try_sleep - determines how long to wait to verify and $post_success_sleep is - ignored. Defaults to `false`." - - defaultto false - end - - newparam(:force, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Wheter to use --force with pcs" - - defaultto false - end - - newproperty(:image) do - desc "docker image" - end - newproperty(:container_options) do - desc "options to pcs container argument" - end - newproperty(:replicas) do - desc "number of replicas" - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - end - - newproperty(:masters) do - desc "number of masters" - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - end - - newproperty(:promoted_max) do - desc "number of clones promotable to master" - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "promoted_max must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "promoted_max must be an integer >= 1" if value < 1 - value - end - end - - newproperty(:options) do - desc "docker options" - end - newproperty(:run_command) do - desc "dock run command" - end - newproperty(:storage_maps) do - desc "storage maps" - end - newproperty(:network) do - desc "network options" - end - newproperty(:location_rule) do - desc "A location rule constraint hash" - end - - newparam(:deep_compare, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to enable deep comparing of resource - When set to true a resource will be compared in full (options, meta parameters,..) - to the existing one and in case of difference it will be repushed to the CIB - Defaults to `false`." - - defaultto false - end - - newparam(:update_settle_secs) do - desc "The time in seconds to wait for the cluster to settle after resource has been updated - when :deep_compare kicked in. Defaults to '600'." 
- - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "update_settle_secs must be a number" - end - value = Float(value) - end - raise ArgumentError, "update_settle_secs cannot be a negative number" if value < 0 - value - end - - defaultto 600 - end - newproperty(:container_backend) do - desc "Container backend" - defaultto "docker" - end -end diff --git a/lib/puppet/type/pcmk_constraint.rb b/lib/puppet/type/pcmk_constraint.rb deleted file mode 100644 index 73d5c873..00000000 --- a/lib/puppet/type/pcmk_constraint.rb +++ /dev/null @@ -1,80 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_constraint) do - @doc = "Base constraint definition for a pacemaker constraint" - - ensurable - - newparam(:name) do - desc "A unique name for the constraint" - end - - newparam(:constraint_type) do - desc "the pacemaker type to create" - newvalues(:location, :colocation, :order) - end - newparam(:resource) do - desc "resource list" - newvalues(/.+/) - end - newparam(:location) do - desc "location" - newvalues(/.+/) - end - newparam(:score) do - desc "Score" - end - newparam(:first_resource) do - desc "First resource in ordering constraint" - end - newparam(:second_resource) do - desc "Second resource in ordering constraint" - end - newparam(:first_action) do - desc "First action in ordering constraint" - end - newparam(:second_action) do - desc "Second action in ordering constraint" - end - newparam(:constraint_params) do - desc "Constraint parameters in ordering constraint" - end - newparam(:master_slave, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Enable master/slave support with multistage" - end - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end -end diff --git a/lib/puppet/type/pcmk_property.rb b/lib/puppet/type/pcmk_property.rb deleted file mode 100644 index 54c852b5..00000000 --- a/lib/puppet/type/pcmk_property.rb +++ /dev/null @@ -1,62 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_property) do - @doc = "Base resource definition for a pacemaker property" - - ensurable - newparam(:name) do - desc "A unique name for the resource" - end - - newparam(:property) do - desc "A unique name for the property" - end - newparam(:value) do - desc "the value for the pacemaker property" - end - newparam(:node) do - desc "Optional specific node to set the property on" - end - - newparam(:force, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Wheter to use --force with pcs" - - defaultto false - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." 
- - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end -end diff --git a/lib/puppet/type/pcmk_remote.rb b/lib/puppet/type/pcmk_remote.rb deleted file mode 100644 index 50b08903..00000000 --- a/lib/puppet/type/pcmk_remote.rb +++ /dev/null @@ -1,137 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_remote) do - @doc = "Remote resource definition for a pacemaker" - - ensurable - - newparam(:name) do - desc "A unique name for the resource" - end - - newparam(:pcs_user) do - desc "Pcs user to use when authenticating a remote node" - defaultto '' - end - - newparam(:pcs_password) do - desc "Pcs password to use when authenticating a remote node" - defaultto '' - end - - newparam(:post_success_sleep) do - desc "The time to sleep after successful pcs action. The reason to set - this is to avoid immediate back-to-back 'pcs resource create' calls - when creating multiple resources. Defaults to '0'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "post_success_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "post_success_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newparam(:force, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Wheter to use --force with pcs" - - defaultto false - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." 
- - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newproperty(:op_params) do - desc "op parameters" - end - newproperty(:meta_params) do - desc "meta parameters" - end - newproperty(:resource_params) do - desc "resource parameters" - end - newproperty(:remote_address) do - desc "Address for remote resources" - end - newproperty(:reconnect_interval) do - desc "reconnection interval for remote resources" - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "reconnect_interval must be a number" - end - value = Float(value) - end - raise ArgumentError, "reconnect_interval cannot be a negative number" if value < 0 - value - end - - defaultto 60 - end - - newparam(:deep_compare, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to enable deep comparing of resource - When set to true a resource will be compared in full (options, meta parameters,..) - to the existing one and in case of difference it will be repushed to the CIB - Defaults to `false`." - - defaultto false - end - - newparam(:update_settle_secs) do - desc "The time in seconds to wait for the cluster to settle after resource has been updated - when :deep_compare kicked in. Defaults to '600'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "update_settle_secs must be a number" - end - value = Float(value) - end - raise ArgumentError, "update_settle_secs cannot be a negative number" if value < 0 - value - end - - defaultto 600 - end -end diff --git a/lib/puppet/type/pcmk_resource.rb b/lib/puppet/type/pcmk_resource.rb deleted file mode 100644 index 2cc847d2..00000000 --- a/lib/puppet/type/pcmk_resource.rb +++ /dev/null @@ -1,155 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_resource) do - @doc = "Base resource definition for a pacemaker resource" - - ensurable - - newparam(:name) do - desc "A unique name for the resource" - end - newparam(:resource_type) do - desc "the pacemaker type to create" - end - - newparam(:post_success_sleep) do - desc "The time to sleep after successful pcs action. The reason to set - this is to avoid immediate back-to-back 'pcs resource create' calls - when creating multiple resources. Defaults to '0'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "post_success_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "post_success_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." 
- - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newparam(:verify_on_create, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to verify pcs resource creation with an additional - call to 'pcs resource config' rather than just relying on the exit - status of 'pcs resource create'. When true, $try_sleep - determines how long to wait to verify and $post_success_sleep is - ignored. Defaults to `false`." - - defaultto false - end - - newparam(:force, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Wheter to use --force with pcs" - - defaultto false - end - - newproperty(:op_params) do - desc "op parameters" - end - newproperty(:meta_params) do - desc "meta parameters" - end - newproperty(:resource_params) do - desc "resource parameters" - end - newproperty(:clone_params) do - desc "clone params" - end - newproperty(:group_params) do - desc "A resource group to put the resource in" - end - newproperty(:master_params) do - desc "set if this is a cloned resource" - end - newproperty(:bundle) do - desc "set to bundle id if part of a bundle" - end - newproperty(:location_rule) do - desc "A location rule constraint hash" - end - newproperty(:remote_address) do - desc "Address for remote resources" - end - newproperty(:reconnect_interval) do - desc "reconnection interval for remote resources" - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "reconnect_interval must be a number" - end - value = Float(value) - end - raise ArgumentError, "reconnect_interval cannot be a negative number" if value < 0 - value - end - - defaultto 60 - end - - newparam(:deep_compare, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to enable deep comparing of resource - When set to true a resource will be compared in full (options, meta parameters,..) - to the existing one and in case of difference it will be repushed to the CIB - Defaults to `false`." - - defaultto false - end - - newparam(:update_settle_secs) do - desc "The time in seconds to wait for the cluster to settle after resource has been updated - when :deep_compare kicked in. Defaults to '600'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "update_settle_secs must be a number" - end - value = Float(value) - end - raise ArgumentError, "update_settle_secs cannot be a negative number" if value < 0 - value - end - - defaultto 600 - end -end diff --git a/lib/puppet/type/pcmk_resource_default.rb b/lib/puppet/type/pcmk_resource_default.rb deleted file mode 100644 index c59c5694..00000000 --- a/lib/puppet/type/pcmk_resource_default.rb +++ /dev/null @@ -1,81 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_resource_default) do - @doc = "A default value for pacemaker resource options" - - ensurable - - newparam(:name) do - desc "A unique name of the option" - end - - newparam(:value) do - desc "A default value for the option" - end - - newparam(:post_success_sleep) do - desc "The time to sleep after successful pcs action. The reason to set - this is to avoid immediate back-to-back 'pcs resource create' calls - when creating multiple resources. Defaults to '0'." 
- - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "post_success_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "post_success_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newparam(:verify_on_create, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to verify pcs resource creation with an additional - call to 'pcs resource config' rather than just relying on the exit - status of 'pcs resource create'. When true, $try_sleep - determines how long to wait to verify and $post_success_sleep is - ignored. Defaults to `false`." - - defaultto false - end - -end diff --git a/lib/puppet/type/pcmk_resource_op_default.rb b/lib/puppet/type/pcmk_resource_op_default.rb deleted file mode 100644 index ed1935bf..00000000 --- a/lib/puppet/type/pcmk_resource_op_default.rb +++ /dev/null @@ -1,81 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_resource_op_default) do - @doc = "A default value for pacemaker resource op options" - - ensurable - - newparam(:name) do - desc "A unique name of the option" - end - - newparam(:value) do - desc "A default value for the option" - end - - newparam(:post_success_sleep) do - desc "The time to sleep after successful pcs action. The reason to set - this is to avoid immediate back-to-back 'pcs resource create' calls - when creating multiple resources. Defaults to '0'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "post_success_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "post_success_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newparam(:verify_on_create, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to verify pcs resource creation with an additional - call to 'pcs resource config' rather than just relying on the exit - status of 'pcs resource create'. 
When true, $try_sleep - determines how long to wait to verify and $post_success_sleep is - ignored. Defaults to `false`." - - defaultto false - end - -end diff --git a/lib/puppet/type/pcmk_stonith.rb b/lib/puppet/type/pcmk_stonith.rb deleted file mode 100644 index 8caff568..00000000 --- a/lib/puppet/type/pcmk_stonith.rb +++ /dev/null @@ -1,116 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_stonith) do - @doc = "Base resource definition for a pacemaker stonith resource" - - ensurable - - newparam(:name) do - desc "A unique name for the stonith resource." - end - - newparam(:stonith_type) do - desc "The pacemaker stonith type to create." - end - - newparam(:pcmk_host_list) do - desc "The pcmk_host_list parameter for pcs. When set to the empty - string '', the parameter will not be used when calling pcs." - end - - newparam(:pcs_param_string) do - desc "The pacemaker pcs string to use." - end - - newparam(:deep_compare, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to enable deep comparing of resource - When set to true a resource will be compared in full (options, meta parameters,..) - to the existing one and in case of difference it will be repushed to the CIB - Defaults to `false`." - - defaultto false - end - - newparam(:update_settle_secs) do - desc "The time in seconds to wait for the cluster to settle after resource has been updated - when :deep_compare kicked in. Defaults to '600'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "update_settle_secs must be a number" - end - value = Float(value) - end - raise ArgumentError, "update_settle_secs cannot be a negative number" if value < 0 - value - end - - defaultto 600 - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newparam(:verify_on_create, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to verify pcs resource creation with an additional - call to 'pcs resource config' rather than just relying on the exit - status of 'pcs resource create'. When true, $try_sleep - determines how long to wait to verify and $post_success_sleep is - ignored. Defaults to `false`." - - defaultto false - end - - newparam(:post_success_sleep) do - desc "The time to sleep after successful pcs action. The reason to set - this is to avoid immediate back-to-back 'pcs resource create' calls - when creating multiple resources. Defaults to '0'." 
- - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "post_success_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "post_success_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end -end diff --git a/lib/puppet/type/pcmk_stonith_level.rb b/lib/puppet/type/pcmk_stonith_level.rb deleted file mode 100644 index 89d46016..00000000 --- a/lib/puppet/type/pcmk_stonith_level.rb +++ /dev/null @@ -1,99 +0,0 @@ -require 'puppet/parameter/boolean' - -Puppet::Type.newtype(:pcmk_stonith_level) do - @doc = "Base resource definition for a pacemaker stonith level resource" - - ensurable - - newparam(:name) do - desc "A unique name for the stonith level" - end - - newparam(:level) do - desc "The stonith level" - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "The stonith level must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Level must be an integer >= 1" if value < 1 - value - end - end - - newparam(:target) do - desc "The pacemaker stonith target to apply the level to" - end - - newparam(:stonith_resources) do - desc "The array containing the list of stonith devices" - # FIXME: check for an array of strings - end - - ## borrowed from exec.rb - newparam(:tries) do - desc "The number of times to attempt to create a pcs resource. - Defaults to '1'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[\d]+$/ - raise ArgumentError, "Tries must be an integer" - end - value = Integer(value) - end - raise ArgumentError, "Tries must be an integer >= 1" if value < 1 - value - end - - defaultto 1 - end - - newparam(:try_sleep) do - desc "The time to sleep in seconds between 'tries'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "try_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "try_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end - - newparam(:verify_on_create, :boolean => true, :parent => Puppet::Parameter::Boolean) do - desc "Whether to verify pcs resource creation with an additional - call to 'pcs resource config' rather than just relying on the exit - status of 'pcs resource create'. When true, $try_sleep - determines how long to wait to verify and $post_success_sleep is - ignored. Defaults to `false`." - - defaultto false - end - - newparam(:post_success_sleep) do - desc "The time to sleep after successful pcs action. The reason to set - this is to avoid immediate back-to-back 'pcs resource create' calls - when creating multiple resources. Defaults to '0'." - - munge do |value| - if value.is_a?(String) - unless value =~ /^[-\d.]+$/ - raise ArgumentError, "post_success_sleep must be a number" - end - value = Float(value) - end - raise ArgumentError, "post_success_sleep cannot be a negative number" if value < 0 - value - end - - defaultto 0 - end -end diff --git a/lib/serverspec/type/pacemaker_colocation.rb b/lib/serverspec/type/pacemaker_colocation.rb deleted file mode 100644 index 0deae195..00000000 --- a/lib/serverspec/type/pacemaker_colocation.rb +++ /dev/null @@ -1,57 +0,0 @@ -require_relative 'pacemaker_xml' - -module Serverspec::Type - # This Serverspec type can do the check on the Pacemaker colocation object - class Pacemaker_colocation < PacemakerXML - # Check if this object is present - # @return [true,false] - def present? - !instance.nil? - end - - alias exists? present? 
- - # The data object from the library or nil if there is no object - # @return [Hash,nil] - def instance - constraint_colocations[@name] - end - - # The name of the resource which is - # running on the same node as the first one - # @return [Strong,nil] - def rsc - return unless instance - instance['rsc'] - end - - alias second rsc - - # The name of the first resource - # @return [Strong,nil] - def with_rsc - return unless instance - instance['with-rsc'] - end - - alias first with_rsc - - # The priority score value - # @return [String,nil] - def score - return unless instance - instance['score'] - end - - # Test representation - def to_s - "Pacemaker_colocation #{@name}" - end - end -end - -# Define the object creation function -def pacemaker_colocation(*args) - name = args.first - Serverspec::Type::Pacemaker_colocation.new(name) -end diff --git a/lib/serverspec/type/pacemaker_location.rb b/lib/serverspec/type/pacemaker_location.rb deleted file mode 100644 index a3209f19..00000000 --- a/lib/serverspec/type/pacemaker_location.rb +++ /dev/null @@ -1,63 +0,0 @@ -require_relative 'pacemaker_xml' - -module Serverspec::Type - # This Serverspec type can do the check on the Pacemaker location object - class Pacemaker_location < PacemakerXML - # Check if this object is present - # @return [true,false] - def present? - !instance.nil? - end - - alias exists? present? - - # The data object from the library or nil if there is no object - # @return [Hash,nil] - def instance - constraint_locations[@name] - end - - # The name of the resource this location is related to - # @return [String,nil] - def rsc - return unless instance - instance['rsc'] - end - - alias primitive rsc - alias resource rsc - - # The priority score value - # Used for node based locations - # @return [String,nil] - def score - return unless instance - instance['score'] - end - - # The name of the node of this resource - # It's used for a node based locations - def node - return unless instance - instance['node'] - end - - # The structure with the location rules - # Used for rule based locations - def rules - return unless instance - instance['rules'] - end - - # Test representation - def to_s - "Pacemaker_location #{@name}" - end - end -end - -# Define the object creation function -def pacemaker_location(*args) - name = args.first - Serverspec::Type::Pacemaker_location.new(name) -end diff --git a/lib/serverspec/type/pacemaker_operation_default.rb b/lib/serverspec/type/pacemaker_operation_default.rb deleted file mode 100644 index cbce793b..00000000 --- a/lib/serverspec/type/pacemaker_operation_default.rb +++ /dev/null @@ -1,38 +0,0 @@ -require_relative 'pacemaker_xml' - -module Serverspec::Type - # This Serverspec type can do the check on the Pacemaker operation default object - class Pacemaker_operation_default < PacemakerXML - # Check if this object is present - # @return [true,false] - def present? - !instance.nil? - end - - alias exists? present? 
- - # The data object from the library or nil if there is no object - # @return [Hash,nil] - def instance - operation_defaults[@name] - end - - # The value of this object - # @return [String,nil] - def value - return unless instance - instance['value'] - end - - # Test representation - def to_s - "Pacemaker_operation_default #{@name}" - end - end -end - -# Define the object creation function -def pacemaker_operation_default(*args) - name = args.first - Serverspec::Type::Pacemaker_operation_default.new(name) -end diff --git a/lib/serverspec/type/pacemaker_order.rb b/lib/serverspec/type/pacemaker_order.rb deleted file mode 100644 index 57b5219d..00000000 --- a/lib/serverspec/type/pacemaker_order.rb +++ /dev/null @@ -1,85 +0,0 @@ -require_relative 'pacemaker_xml' - -module Serverspec::Type - # This Serverspec type can do the check on the Pacemaker order object - class Pacemaker_order < PacemakerXML - # Check if this object is present - # @return [true,false] - def present? - !instance.nil? - end - - alias exists? present? - - # The data object from the library or nil if there is no object - # @return [Hash,nil] - def instance - constraint_orders[@name] - end - - # The resource which should start first - def first - return unless instance - instance['first'] - end - - # The resource that should start after the first one - def second - return unless instance - instance['then'] - end - - alias then second - - # The priority score value - # @return [String,nil] - def score - return unless instance - instance['score'] - end - - # The action of the first resource that triggers the constraint - def first_action - return unless instance - instance['first-action'] - end - - # The action of the second resource that triggers the constraint - def second_action - return unless instance - instance['then-action'] - end - - alias then_action second_action - - # the enforcement type of the constraint - def kind - return unless instance - return unless instance['kind'] - instance['kind'].downcase - end - - # The symmetrical setting of the constraint - def symmetrical - return unless instance - instance['symmetrical'] - end - - # The require_all setting of the constraint - def require_all - return unless instance - instance['require-all'] - end - - # Test representation - def to_s - "Pacemaker_order #{@name}" - end - end -end - -# Define the object creation function -def pacemaker_order(*args) - name = args.first - Serverspec::Type::Pacemaker_order.new(name) -end diff --git a/lib/serverspec/type/pacemaker_property.rb b/lib/serverspec/type/pacemaker_property.rb deleted file mode 100644 index 53f6ed70..00000000 --- a/lib/serverspec/type/pacemaker_property.rb +++ /dev/null @@ -1,38 +0,0 @@ -require_relative 'pacemaker_xml' - -module Serverspec::Type - # This Serverspec type can do the check on the Pacemaker property object - class Pacemaker_property < PacemakerXML - # Check if this object is present - # @return [true,false] - def present? - !instance.nil? - end - - alias exists? present? 
- - # The data object from the library or nil if there is no object - # @return [Hash,nil] - def instance - cluster_properties[@name] - end - - # The value of this object - # @return [String,nil] - def value - return unless instance - instance['value'] - end - - # Test representation - def to_s - "Pacemaker_property #{@name}" - end - end -end - -# Define the object creation function -def pacemaker_property(*args) - name = args.first - Serverspec::Type::Pacemaker_property.new(name) -end diff --git a/lib/serverspec/type/pacemaker_resource.rb b/lib/serverspec/type/pacemaker_resource.rb deleted file mode 100644 index c48245de..00000000 --- a/lib/serverspec/type/pacemaker_resource.rb +++ /dev/null @@ -1,201 +0,0 @@ -require_relative 'pacemaker_xml' - -module Serverspec::Type - # This Serverspec type can do the check on the Pacemaker resource object - class Pacemaker_resource < PacemakerXML - # Check if this object is present - # @return [true,false] - def present? - primitive_exists? @name - end - - # The full name of the resource - # It will have a prefix is the resource is complex - # @return [String] - def full_name - primitive_full_name @name - end - - alias exists? present? - - # The data object from the library or nil if there is no object - # @return [Hash,nil] - def instance - primitives[@name] - end - - # The base class of this resource (i.e. ocf, lsb) - # @return [String,nil] - def res_class - primitive_class @name - end - - # The provider of this resource (i.e. heartbeat, pacemaker) - # @return [String,nil] - def res_provider - primitive_provider @name - end - - # The actual resource type (i.e. Dummy, IPaddr) - # @return [String,nil] - def res_type - primitive_type @name - end - - # The hash of instance attribute (configuration parameters) - # @return [Hash,nil] - def instance_attributes - return unless instance - import_attributes_structure instance['instance_attributes'] - end - - alias parameters instance_attributes - - # The hash of this resource's meta attributes (i.e. resource-stickiness) - # @return [Hash,nil] - def meta_attributes - return unless instance - import_attributes_structure instance['meta_attributes'] - end - - alias metadata meta_attributes - - # Is this resource complex or simple? (master, clone, simple) - # @return [String,nil] - def complex_type - complex_type = primitive_complex_type @name - complex_type = complex_type.to_s if complex_type.is_a? Symbol - complex_type - end - - # The hash of complex resource related metadata values - # @return [Hash,nil] - def complex_meta_attributes - return unless instance - import_attributes_structure instance.fetch('complex', {}).fetch('meta_attributes', nil) - end - - alias complex_metadata complex_meta_attributes - - # Check if this resource is complex - # nil if not exists - # @return [true,false,nil] - def complex? - primitive_is_complex? @name - end - - # Check if this resource is simple - # nil if not exists - # @return [true,false,nil] - def simple? - primitive_is_simple? @name - end - - # Check if this resource is clone - # nil if not exists - # @return [true,false,nil] - def clone? - primitive_is_clone? @name - end - - # Check if this resource is master - # nil if not exists - # @return [true,false,nil] - def master? - primitive_is_master? @name - end - - alias multi_state? master? - - # Check if this resource is in group - # nil if not exists - # @return [true,false,nil] - def group? - primitive_in_group? 
@name - end - - # Get the name of the resource group if - # this resource belongs to one - # nil if not exists - # @return [String,nil] - def group_name - primitive_group @name - end - - # Check if this resource have Pacemaker management enabled - # nil if not exists - # @return [true,false,nil] - def managed? - primitive_is_managed? @name - end - - # Check if this resource has Started target state - # It doesn't mean that it's actually running, and constraints - # may prevent it from being run at all. - # nil if not exists - # @return [true,false,nil] - def started? - primitive_is_started? @name - end - - # Get the current actual status of the resource. - # It can be start, stop, master and nil(unknown) - # @return [String,nil] - def status(node=nil) - primitive_status @name, node - end - - # Check if this resource is currently running - # nil if not exists - # @return [true,false,nil] - def running? - primitive_is_running? @name - end - - # Check if this resource have been failed - # nil if not exists - # @return [true,false,nil] - def failed? - primitive_has_failures? @name - end - - # Check if this resource is running is the master mode - # nil if not exists - # @return [true,false,nil] - def master_running? - primitive_has_master_running? @name - end - - # check if the resource has the - # service location on this node - # @return [true,false,nil] - def has_location_on?(node) - return unless exists? - service_location_exists? full_name, node - end - - # The array of this resource operations - # @return [Array] - def operations - return unless instance - return unless instance['operations'] - operations_data = [] - sort_data(instance['operations']).each do |operation| - operation.delete 'id' - operations_data << operation - end - operations_data - end - - # Test representation - def to_s - "Pacemaker_resource #{@name}" - end - end -end - -# Define the object creation function -def pacemaker_resource(*args) - name = args.first - Serverspec::Type::Pacemaker_resource.new(name) -end diff --git a/lib/serverspec/type/pacemaker_resource_default.rb b/lib/serverspec/type/pacemaker_resource_default.rb deleted file mode 100644 index d035fe74..00000000 --- a/lib/serverspec/type/pacemaker_resource_default.rb +++ /dev/null @@ -1,38 +0,0 @@ -require_relative 'pacemaker_xml' - -module Serverspec::Type - # This Serverspec type can do the check on the Pacemaker resource defaults object - class Pacemaker_resource_default < PacemakerXML - # Check if this object is present - # @return [true,false] - def present? - !instance.nil? - end - - alias exists? present? 
- - # The data object from the library or nil if there is no object - # @return [Hash,nil] - def instance - resource_defaults[@name] - end - - # The value of this object - # @return [String,nil] - def value - return unless instance - instance['value'] - end - - # Test representation - def to_s - "Pacemaker_resource_default #{@name}" - end - end -end - -# Define the object creation function -def pacemaker_resource_default(*args) - name = args.first - Serverspec::Type::Pacemaker_resource_default.new(name) -end diff --git a/lib/serverspec/type/pacemaker_xml.rb b/lib/serverspec/type/pacemaker_xml.rb deleted file mode 100644 index df42138e..00000000 --- a/lib/serverspec/type/pacemaker_xml.rb +++ /dev/null @@ -1,64 +0,0 @@ -require 'rubygems' - -require 'rexml/document' -require 'rexml/formatters/pretty' -require 'timeout' -require 'yaml' - -require_relative '../../pacemaker/xml/cib' -require_relative '../../pacemaker/xml/constraints' -require_relative '../../pacemaker/xml/constraint_colocations' -require_relative '../../pacemaker/xml/constraint_locations' -require_relative '../../pacemaker/xml/constraint_orders' -require_relative '../../pacemaker/xml/helpers' -require_relative '../../pacemaker/xml/nodes' -require_relative '../../pacemaker/xml/primitives' -require_relative '../../pacemaker/xml/properties' -require_relative '../../pacemaker/xml/resource_default' -require_relative '../../pacemaker/xml/operation_default' -require_relative '../../pacemaker/xml/status' -require_relative '../../pacemaker/xml/debug' -require_relative '../../pacemaker/options' -require_relative '../../pacemaker/wait' -require_relative '../../pacemaker/xml/xml' -require_relative '../../pacemaker/type' - -# Serverspec Type collection module -module Serverspec::Type - # This class in the basic abstract type for all Pacemaker Serverspec types - # It includes the parts of the pacemaker library and real types - # should just inherit from it to use the library functions. - class PacemakerXML < Base - include Pacemaker::Cib - include Pacemaker::Constraints - include Pacemaker::ConstraintOrders - include Pacemaker::ConstraintLocations - include Pacemaker::ConstraintColocations - include Pacemaker::Helpers - include Pacemaker::Nodes - include Pacemaker::Options - include Pacemaker::Primitives - include Pacemaker::Properties - include Pacemaker::Debug - include Pacemaker::ResourceDefault - include Pacemaker::OperationDefault - include Pacemaker::Status - include Pacemaker::Wait - include Pacemaker::Xml - include Pacemaker::Type - - [:cibadmin, :crm_attribute, :crm_node, :crm_resource, :crm_attribute].each do |tool| - define_method(tool) do |*args| - command = [tool.to_s] + args - @runner.run_command(command).stdout - end - end - - # override the debug method - def debug(msg) - puts msg - end - - alias info debug - end -end diff --git a/lib/tools/console.rb b/lib/tools/console.rb deleted file mode 100644 index 853645b1..00000000 --- a/lib/tools/console.rb +++ /dev/null @@ -1,18 +0,0 @@ -require_relative 'provider' -require 'pry' - -# This console can be used to debug the pacemaker library -# and its methods or for the manual control over the cluster. -# -# It requires 'pry' gem to be installed. -# -# You can give it a dumped cib XML file for the first argument -# id you want to debug the code without Pacemaker running. 
- -common = Puppet::Provider::PacemakerXML.new -if $ARGV[0] && File.exist?($ARGV[0]) - xml = File.read $ARGV[0] - common.cib = xml -end - -common.pry diff --git a/lib/tools/provider.rb b/lib/tools/provider.rb deleted file mode 100644 index e66a7c09..00000000 --- a/lib/tools/provider.rb +++ /dev/null @@ -1,24 +0,0 @@ -require 'rubygems' -require 'puppet' - -require_relative '../puppet/provider/pacemaker_xml' - -class Puppet::Provider::PacemakerXML - [:cibadmin, :crm_attribute, :crm_node, :crm_resource, :crm_attribute].each do |tool| - define_method(tool) do |*args| - command = [tool.to_s] + args - if Puppet::Util::Execution.respond_to? :execute - Puppet::Util::Execution.execute command - else - Puppet::Util.execute command - end - end - end - - # override the debug method - def debug(msg) - puts msg - end - - alias info debug -end diff --git a/lib/tools/status.rb b/lib/tools/status.rb deleted file mode 100644 index 3349b5bc..00000000 --- a/lib/tools/status.rb +++ /dev/null @@ -1,17 +0,0 @@ -require_relative 'provider' - -# This tool is like 'pcs status'. You can use it to view -# the status of the cluster as this library sees it -# using the debug output function. -# -# You can give it a dumped cib XML file for the first argument -# id you want to debug the code without Pacemaker running. - -common = Puppet::Provider::PacemakerXML.new -if $ARGV[0] && File.exist?($ARGV[0]) - xml = File.read $ARGV[0] - common.cib = xml -end - -common.cib -puts common.cluster_debug_report diff --git a/manifests/constraint/base.pp b/manifests/constraint/base.pp deleted file mode 100644 index 3cd8c9f7..00000000 --- a/manifests/constraint/base.pp +++ /dev/null @@ -1,173 +0,0 @@ -# == Define: pacemaker::constraint::base -# -# A generic constraint class. Deprecated. Use defined types that match the -# desired constraint instead. -# -# === Parameters: -# -# [*constraint_type*] -# (required) Must be one of: colocation, order, location -# -# [*constraint_params*] -# (optional) Any additional parameters needed by pcs for the constraint to be -# properly configured -# Defaults to undef -# -# [*first_resource*] -# (optional) First resource to be constrained -# Defaults to undef -# -# [*second_resource*] -# (optional) Second resource to be constrained -# Defaults to undef -# -# [*first_action*] -# (optional) Only used for order constraints, action to take on first resource -# Defaults to undef -# -# [*second_action*] -# (optional) Only used for order constraints, action to take on second resource -# Defaults to undef -# -# [*location*] -# (optional) Specific location to place a resource, used only with location -# constraint_type -# Defaults to undef -# -# [*score*] -# (optional) Numeric score to weight the importance of the constraint -# Defaults to undef -# -# [*ensure*] -# (optional) Whether to make sure the constraint is present or absent -# Defaults to present -# -# [*tries*] -# (optional) How many times to attempt to create the constraint -# Defaults to 1 -# -# [*try_sleep*] -# (optional) How long to wait between tries, in seconds -# Defaults to 10 -# -# === Dependencies -# -# None -# -# === Authors -# -# Crag Wolfe -# Jason Guiditta -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
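As a usage sketch for the deprecated generic pacemaker::constraint::base define documented above (the dedicated colocation, order, and location defines are the recommended replacements), an order-type constraint could have been declared as follows; the resource names are hypothetical.

  pacemaker::constraint::base { 'order-haproxy-vip':
    constraint_type => 'order',
    first_resource  => 'haproxy-clone',
    second_resource => 'ip-192.168.0.10',
    first_action    => 'start',
    second_action   => 'start',
  }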
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -define pacemaker::constraint::base ( - $constraint_type, - $constraint_params = undef, - $first_resource = undef, - $second_resource = undef, - $first_action = undef, - $second_action = undef, - $location = undef, - $score = undef, - $ensure = present, - $tries = 1, - $try_sleep = 10, -) { - - validate_re($constraint_type, ['colocation', 'order', 'location']) - - if($constraint_type == 'order' and ($first_action == undef or $second_action == undef)) { - fail('Must provide actions when constraint type is order') - } - - if($constraint_type == 'location' and $location == undef) { - fail('Must provide location when constraint type is location') - } - - if($constraint_type == 'location' and $score == undef) { - fail('Must provide score when constraint type is location') - } - - if $constraint_params != undef { - $_constraint_params = $constraint_params - } else { - $_constraint_params = '' - } - - $first_resource_cleaned = regsubst($first_resource, '(:)', '.', 'G') - $second_resource_cleaned = regsubst($second_resource, '(:)', '.', 'G') - - # We do not want to require Exec['wait-for-settle'] when we run this - # from a pacemaker remote node - Exec<| title == 'wait-for-settle' |> -> Exec<| tag == 'pacemaker_constraint' |> - if($ensure == absent) { - if($constraint_type == 'location') { - $name_cleaned = regsubst($name, '(:)', '.', 'G') - exec { "Removing location constraint ${name}": - command => "/usr/sbin/pcs constraint location remove ${name_cleaned}", - onlyif => "/usr/sbin/pcs constraint location show --full | grep ${name_cleaned}", - tries => $tries, - try_sleep => $try_sleep, - tag => [ 'pacemaker', 'pacemaker_constraint'], - } - } else { - exec { "Removing ${constraint_type} constraint ${name}": - command => "/usr/sbin/pcs constraint ${constraint_type} remove ${first_resource_cleaned} ${second_resource_cleaned}", - onlyif => "/usr/sbin/pcs constraint ${constraint_type} show | grep ${first_resource_cleaned} | grep ${second_resource_cleaned}", - tries => $tries, - try_sleep => $try_sleep, - tag => [ 'pacemaker', 'pacemaker_constraint'], - } - } - } else { - case $constraint_type { - 'colocation': { - fail('Deprecated use pacemaker::constraint::colocation') - exec { "Creating colocation constraint ${name}": - command => "/usr/sbin/pcs constraint colocation add ${first_resource_cleaned} ${second_resource_cleaned} ${score}", - unless => "/usr/sbin/pcs constraint colocation show | grep ${first_resource_cleaned} | grep ${second_resource_cleaned} > /dev/null 2>&1", - tries => $tries, - try_sleep => $try_sleep, - tag => [ 'pacemaker', 'pacemaker_constraint'], - } - } - 'order': { - exec { "Creating order constraint ${name}": - command => "/usr/sbin/pcs constraint order ${first_action} ${first_resource_cleaned} then ${second_action} ${second_resource_cleaned} ${_constraint_params}", - unless => "/usr/sbin/pcs constraint order show | grep ${first_resource_cleaned} | grep ${second_resource_cleaned} > /dev/null 2>&1", - tries => $tries, - try_sleep => $try_sleep, - tag => [ 'pacemaker', 'pacemaker_constraint'], - } - } - 'location': { - fail('Deprecated use 
pacemaker::constraint::location') - exec { "Creating location constraint ${name}": - command => "/usr/sbin/pcs constraint location add ${name} ${first_resource_cleaned} ${location} ${score}", - unless => "/usr/sbin/pcs constraint location show | grep ${first_resource_cleaned} > /dev/null 2>&1", - tries => $tries, - try_sleep => $try_sleep, - tag => [ 'pacemaker', 'pacemaker_constraint'], - } - } - default: { - fail('A constraint_type mst be provided') - } - } - } -} diff --git a/manifests/constraint/colocation.pp b/manifests/constraint/colocation.pp deleted file mode 100644 index e976200c..00000000 --- a/manifests/constraint/colocation.pp +++ /dev/null @@ -1,77 +0,0 @@ -# == Define: pacemaker::constraint::colocation -# -# Creates Colocation constraints for resources that need to reside together. -# -# === Parameters: -# -# [*source*] -# (required) First resource to be grouped together -# -# [*target*] -# (required) Second (target) resource to be grouped together -# -# [*score*] -# (required) Numberic weighting of priority of colocation -# -# [*master_slave*] -# (optional) Whether to set a resource with one node as master -# Defaults to false -# -# [*tries*] -# (optional) How many times to attempt to create the constraint -# Defaults to 1 -# -# [*try_sleep*] -# (optional) How long to wait between tries, in seconds -# Defaults to 0 -# -# [*ensure*] -# (optional) Whether to make sure the constraint is present or absent -# Defaults to present -# -# === Dependencies -# -# None -# -# === Authors -# -# Crag Wolfe -# Jason Guiditta -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
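A minimal usage sketch for the pacemaker::constraint::colocation define documented above; the resource names and score are hypothetical.

  pacemaker::constraint::colocation { 'vip-with-haproxy':
    source => 'ip-192.168.0.10',
    target => 'haproxy-clone',
    score  => 'INFINITY',
  }

As the define body below shows, this simply wraps a pcmk_constraint resource named colo-<source>-<target>.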
-# -define pacemaker::constraint::colocation ( - $source, - $target, - $score, - $master_slave = false, - $ensure = present, - $tries = 1, - $try_sleep = 0, -) { - pcmk_constraint {"colo-${source}-${target}": - ensure => $ensure, - constraint_type => colocation, - resource => $source, - location => $target, - score => $score, - master_slave => $master_slave, - tries => $tries, - try_sleep => $try_sleep, - } -} - diff --git a/manifests/constraint/location.pp b/manifests/constraint/location.pp deleted file mode 100644 index c7ce42bf..00000000 --- a/manifests/constraint/location.pp +++ /dev/null @@ -1,70 +0,0 @@ -# == Define: pacemaker::constraint::location -# -# Manages constraint on location of a resource -# -# === Parameters: -# -# [*resource*] -# (required) The name of the resource to be constrained -# -# [*location*] -# (required) Specific location to place a resource -# -# [*score*] -# (required) Numeric score to weight the importance of the constraint -# -# [*tries*] -# (optional) How many times to attempt to create the constraint -# Defaults to 1 -# -# [*try_sleep*] -# (optional) How long to wait between tries, in seconds -# Defaults to 0 -# -# [*ensure*] -# (optional) Whether to make sure the constraint is present or absent -# Defaults to present -# -# === Dependencies -# -# None -# -# === Authors -# -# Crag Wolfe -# Jason Guiditta -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -define pacemaker::constraint::location ( - $resource, - $location, - $score, - $ensure = 'present', - $tries = 1, - $try_sleep = 0, -) { - pcmk_constraint {"loc-${resource}-${location}": - ensure => $ensure, - constraint_type => location, - resource => $resource, - location => $location, - score => $score, - tries => $tries, - try_sleep => $try_sleep, - } -} diff --git a/manifests/constraint/order.pp b/manifests/constraint/order.pp deleted file mode 100644 index dffb2f29..00000000 --- a/manifests/constraint/order.pp +++ /dev/null @@ -1,88 +0,0 @@ -# == Define: pacemaker::constraint::order -# -# Creates Order constraints for resources that need specific ordering. 
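To illustrate the pacemaker::constraint::location define shown above, a minimal declaration could look like this; the resource and node names are hypothetical.

  pacemaker::constraint::location { 'galera-on-controller-0':
    resource => 'galera-bundle',
    location => 'controller-0',
    score    => '100',
  }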
-# -# === Parameters: -# -# [*first_resource*] -# (optional) First resource to be constrained -# Defaults to undef -# -# [*second_resource*] -# (optional) Second resource to be constrained -# Defaults to undef -# -# [*first_action*] -# (optional) Only used for order constraints, action to take on first resource -# Defaults to undef -# -# [*second_action*] -# (optional) Only used for order constraints, action to take on second resource -# Defaults to undef -# -# [*tries*] -# (optional) How many times to attempt to create the constraint -# Defaults to 1 -# -# [*try_sleep*] -# (optional) How long to wait between tries, in seconds -# Defaults to 0 -# -# [*ensure*] -# (optional) Whether to make sure the constraint is present or absent -# Defaults to present -# -# [*constraint_params*] -# (optional) Any additional parameters needed by pcs for the constraint to be -# properly configured -# Defaults to undef -# -# === Dependencies -# -# None -# -# === Authors -# -# Michele Baldessari -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -define pacemaker::constraint::order ( - $first_resource, - $second_resource, - $first_action, - $second_action, - $ensure = present, - $constraint_params = undef, - $tries = 1, - $try_sleep = 0, -) { - $first_resource_cleaned = regsubst($first_resource, '(:)', '.', 'G') - $second_resource_cleaned = regsubst($second_resource, '(:)', '.', 'G') - - pcmk_constraint {"order-${first_resource}-${second_resource}": - ensure => $ensure, - constraint_type => order, - first_resource => $first_resource_cleaned, - second_resource => $second_resource_cleaned, - first_action => $first_action, - second_action => $second_action, - constraint_params => $constraint_params, - tries => $tries, - try_sleep => $try_sleep, - } -} diff --git a/manifests/contain.pp b/manifests/contain.pp deleted file mode 100644 index 0da4c8ee..00000000 --- a/manifests/contain.pp +++ /dev/null @@ -1,17 +0,0 @@ -# == Class: pacemaker::contain -# -# Work around this bug https://github.com/puppetlabs/puppet/pull/2633 -# in puppet where contains cannot have a absolute class as a -# parameter. It has not been fixed for 3.6 which is tested in the CI. -# -# === Parameters -# -# [*class_name*] -# The relative name of the class to contain. 
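A corresponding usage sketch for the pacemaker::constraint::order define documented above; the resource names and actions are hypothetical.

  pacemaker::constraint::order { 'haproxy-before-vip':
    first_resource  => 'haproxy-clone',
    second_resource => 'ip-192.168.0.10',
    first_action    => 'start',
    second_action   => 'start',
  }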
-# Default to $name -define pacemaker::contain ($class_name='') { - $k = pick($class_name, $name) - validate_re($k, '^[^:][^:]', "The class name must be relative not ${k}") - include "::${k}" - contain $k -} diff --git a/manifests/corosync.pp b/manifests/corosync.pp deleted file mode 100644 index 9946b58e..00000000 --- a/manifests/corosync.pp +++ /dev/null @@ -1,420 +0,0 @@ -# == Class: pacemaker::corosync -# -# A class to setup a pacemaker cluster -# -# === Parameters -# [*cluster_members*] -# (required) A space-separted list of cluster IP's or names to run the -# authentication against -# -# [*cluster_members_addr*] -# (optional) An array of arrays containing the node addresses to be used -# As ring addresses in corosync (as required by corosync3+knet). For example: -# [[10.0.0.1], [10.0.0.10], [10.0.0.20,10.0.0.21]] with nodes [n1, n2, n3] -# will create the cluster as follows: -# pcs cluster setup clustername n1 addr=10.0.0.1 n2 addr=10.0.0.10 \ -# n3 addr=10.0.0.20 addr=10.0.0.21 -# Defaults to [] -# -# [*cluster_members_rrp*] -# (optional) A space-separated list of cluster IP's or names pair where each -# component represent a resource on respectively ring0 and ring1 -# Defaults to undef -# -# [*cluster_name*] -# (optional) The name of the cluster (no whitespace) -# Defaults to 'clustername' -# -# [*cluster_setup_extras*] -# (optional) Hash additional configuration when pcs cluster setup is run -# Example : {'--token' => '10000', '--ipv6' => '', '--join' => '100' } -# Defaults to {} -# -# [*cluster_start_timeout*] -# (optional) Timeout to wait for cluster start. -# Defaults to 300 -# -# [*cluster_start_tries*] -# (optional) Number of tries for cluster start. -# Defaults to 50 -# -# [*cluster_start_try_sleep*] -# (optional) Time to sleep after each cluster start try. -# Defaults to 20 -# -# [*manage_fw*] -# (optional) Manage or not IPtables rules. -# Defaults to true -# -# [*remote_authkey*] -# (optional) Value of /etc/pacemaker/authkey. Useful for pacemaker_remote. -# Defaults to undef -# -# [*settle_timeout*] -# (optional) Timeout to wait for settle. -# Defaults to 3600 -# -# [*settle_tries*] -# (optional) Number of tries for settle. -# Defaults to 360 -# -# [*settle_try_sleep*] -# (optional) Time to sleep after each seetle try. -# Defaults to 10 -# -# [*setup_cluster*] -# (optional) If your cluster includes pcsd, this should be set to true for -# just one node in cluster. Else set to true for all nodes. -# Defaults to true -# -# [*enable_sbd*] -# (optional) Controls whether to enable sbd or not -# Defaults to false -# -# [*sbd_watchdog_timeout*] -# (optional) Controls SBD_WATCHDOG_TIMEOUT value. -# Defaults to 10(s) -# -# [*pcsd_debug*] -# (optional) Enable pcsd debugging -# Defaults to false -# -# [*pcsd_bind_addr*] -# (optional) List of IP addresses pcsd should bind to -# Defaults to undef -# -# [*tls_priorities*] -# (optional) Sets PCMK_tls_priorities in /etc/sysconfig/pacemaker when set -# Defaults to undef -# -# [*enable_scaleup*] -# (optional) Enables the scaleup logic of the cluster nodes (i.e. we do not add a -# node via pcs if we detect a new node compared to the existing cluster) -# Defaults to true -# -# [*force_authkey*] -# (optional) Forces the use of the autkey parameter even when we're using pcs 0.10 -# Default to false -# -# === Dependencies -# -# None -# -# === Authors -# -# Crag Wolfe -# Jason Guiditta -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. 
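As a usage sketch for the pacemaker::corosync class whose parameters are documented above: the node names are hypothetical placeholders and only a few of the available parameters are shown.

  class { 'pacemaker::corosync':
    cluster_members => 'controller-0 controller-1 controller-2',
    cluster_name    => 'clustername',
    setup_cluster   => true,
    manage_fw       => true,
  }

When pcsd is in use, setup_cluster would typically be true on a single bootstrap node only, as noted in the parameter documentation.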
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -class pacemaker::corosync( - $cluster_members, - $cluster_members_addr = [], - $cluster_members_rrp = undef, - $cluster_name = 'clustername', - $cluster_setup_extras = {}, - $cluster_start_timeout = '300', - $cluster_start_tries = '50', - $cluster_start_try_sleep = '20', - $manage_fw = true, - $remote_authkey = undef, - $force_authkey = undef, - $settle_timeout = '3600', - $settle_tries = '360', - $settle_try_sleep = '10', - $setup_cluster = true, - $enable_sbd = false, - $sbd_watchdog_timeout = '10', - $pcsd_debug = false, - $pcsd_bind_addr = undef, - $tls_priorities = undef, - $enable_scaleup = true, -) inherits pacemaker { - include pacemaker::params - if ! $cluster_members_rrp { - if $::pacemaker::pcs_010 { - $cluster_members_rrp_real = pcmk_cluster_setup($cluster_members, $cluster_members_addr, '0.10') - } else { - $cluster_members_rrp_real = pcmk_cluster_setup($cluster_members, $cluster_members_addr, '0.9') - } - } else { - $cluster_members_rrp_real = $cluster_members_rrp - } - $cluster_setup_extras_real = inline_template('<%= @cluster_setup_extras.flatten.join(" ") %>') - - if $manage_fw { - firewall { '001 corosync mcast': - proto => 'udp', - dport => ['5404', '5405'], - action => 'accept', - } - firewall { '001 corosync mcast ipv6': - proto => 'udp', - dport => ['5404', '5405'], - action => 'accept', - provider => 'ip6tables', - } - } - - if $pacemaker::pcsd_mode { - if $manage_fw { - firewall { '001 pcsd': - proto => 'tcp', - dport => ['2224'], - action => 'accept', - } - firewall { '001 pcsd ipv6': - proto => 'tcp', - dport => ['2224'], - action => 'accept', - provider => 'ip6tables', - } - } - - $pcsd_debug_str = bool2str($pcsd_debug) - file_line { 'pcsd_debug_ini': - path => $::pacemaker::pcsd_sysconfig, - line => "PCSD_DEBUG=${pcsd_debug_str}", - match => '^PCSD_DEBUG=', - require => Class['pacemaker::install'], - before => Service['pcsd'], - notify => Service['pcsd'], - } - - if $pcsd_bind_addr != undef { - file_line { 'pcsd_bind_addr': - path => $::pacemaker::pcsd_sysconfig, - line => "PCSD_BIND_ADDR='${pcsd_bind_addr}'", - match => '^PCSD_BIND_ADDR=', - require => Class['pacemaker::install'], - before => Service['pcsd'], - notify => Service['pcsd'], - } - } - else { - file_line { 'pcsd_bind_addr': - ensure => absent, - path => $::pacemaker::pcsd_sysconfig, - match => '^PCSD_BIND_ADDR=*', - require => Class['pacemaker::install'], - before => Service['pcsd'], - notify => Service['pcsd'], - match_for_absence => true, - } - } - - if $tls_priorities != undef { - file_line { 'tls_priorities': - path => $::pacemaker::pcmk_sysconfig, - line => "PCMK_tls_priorities=${tls_priorities}", - match => '^PCMK_tls_priorities=', - require => Class['pacemaker::install'], - before => Service['pcsd'], - } - } - - user { 'hacluster': - password => pw_hash($::pacemaker::hacluster_pwd, 'SHA-512', fqdn_rand_string(10)), - groups => 'haclient', - require => Class['pacemaker::install'], - before => Service['pcsd'], - notify => 
Exec['reauthenticate-across-all-nodes'], - } - - # If we fail the local authentication via pcs, let's try and reauthenticate - # This might happen if /var/lib/pcsd/tokens got corrupt or if we upgraded pcs versions - # and authentication is not working even though the hacluster user has not changed - exec { 'check-for-local-authentication': - command => "/bin/echo 'local pcsd auth failed, triggering a reauthentication'", - onlyif => "${::pacemaker::pcs_bin} status pcsd ${::hostname} 2>&1 | grep 'Unable to authenticate'", - tag => 'pacemaker-auth', - notify => Exec['reauthenticate-across-all-nodes'], - } - - # pcs-0.10.x has different commands to set up the cluster - if $::pacemaker::pcs_010 { - $cluster_setup_cmd = "${::pacemaker::pcs_bin} cluster setup ${cluster_name} ${cluster_members_rrp_real} ${cluster_setup_extras_real}" - $cluster_reauthenticate_cmd = "${::pacemaker::pcs_bin} host auth ${cluster_members} -u hacluster -p ${::pacemaker::hacluster_pwd}" - $cluster_authenticate_cmd = "${::pacemaker::pcs_bin} host auth ${cluster_members} -u hacluster -p ${::pacemaker::hacluster_pwd}" - $cluster_authenticate_unless = "${::pacemaker::pcs_bin} host auth ${cluster_members} -u hacluster -p ${::pacemaker::hacluster_pwd} | grep 'Already authorized'" - } else { - $cluster_setup_cmd = "${::pacemaker::pcs_bin} cluster setup --wait --name ${cluster_name} ${cluster_members_rrp_real} ${cluster_setup_extras_real}" - $cluster_reauthenticate_cmd = "${::pacemaker::pcs_bin} cluster auth ${cluster_members} -u hacluster -p ${::pacemaker::hacluster_pwd} --force" - $cluster_authenticate_cmd = "${::pacemaker::pcs_bin} cluster auth ${cluster_members} -u hacluster -p ${::pacemaker::hacluster_pwd}" - $cluster_authenticate_unless = "${::pacemaker::pcs_bin} cluster auth ${cluster_members} -u hacluster -p ${::pacemaker::hacluster_pwd} | grep 'Already authorized'" - } - - - exec { 'reauthenticate-across-all-nodes': - command => $cluster_reauthenticate_cmd, - refreshonly => true, - timeout => $settle_timeout, - tries => $settle_tries, - try_sleep => $settle_try_sleep, - require => Service['pcsd'], - tag => 'pacemaker-auth', - } - - exec { 'auth-successful-across-all-nodes': - command => $cluster_authenticate_cmd, - refreshonly => true, - timeout => $settle_timeout, - tries => $settle_tries, - try_sleep => $settle_try_sleep, - require => [Service['pcsd'], User['hacluster']], - unless => $cluster_authenticate_unless, - tag => 'pacemaker-auth', - } - - Exec <|tag == 'pacemaker-auth'|> -> Exec['wait-for-settle'] - } - - if $setup_cluster { - # Detect if we are trying to add some nodes by comparing - # $cluster_members and the actual running nodes in the cluster - if $::pacemaker::pcs_010 { - $nodes_added = pcmk_nodes_added($cluster_members, $cluster_members_addr, '0.10') - $node_add_start_part = '--start' - } else { - $nodes_added = pcmk_nodes_added($cluster_members, $cluster_members_addr, '0.9') - $node_add_start_part = '' - } - if $enable_scaleup { - # If we're rerunning this puppet manifest and $cluster_members - # contains more nodes than the currently running cluster - if count($nodes_added) > 0 { - $nodes_added.each |$node_to_add| { - $node_name = split($node_to_add, ' ')[0] - if $::pacemaker::pcs_010 { - exec {"Authenticating new cluster node: ${node_to_add}": - command => "${::pacemaker::pcs_bin} host auth ${node_name} -u hacluster -p ${::pacemaker::hacluster_pwd}", - timeout => $cluster_start_timeout, - tries => $cluster_start_tries, - try_sleep => $cluster_start_try_sleep, - require => [Service['pcsd'], 
User['hacluster']], - tag => 'pacemaker-auth', - } - } - exec {"Adding Cluster node: ${node_to_add} to Cluster ${cluster_name}": - unless => "${::pacemaker::pcs_bin} status 2>&1 | grep -we \"Online:.* ${node_name} .*\"", - command => "${::pacemaker::pcs_bin} cluster node add ${node_to_add} ${node_add_start_part} --wait", - timeout => $cluster_start_timeout, - tries => $cluster_start_tries, - try_sleep => $cluster_start_try_sleep, - notify => Exec["node-cluster-start-${node_name}"], - tag => 'pacemaker-scaleup', - } - exec {"node-cluster-start-${node_name}": - unless => "${::pacemaker::pcs_bin} status 2>&1 | grep -we \"Online:.* ${node_name} .*\"", - command => "${::pacemaker::pcs_bin} cluster start ${node_name} --wait", - timeout => $cluster_start_timeout, - tries => $cluster_start_tries, - try_sleep => $cluster_start_try_sleep, - refreshonly => true, - tag => 'pacemaker-scaleup', - } - } - Exec <|tag == 'pacemaker-auth'|> -> Exec <|tag == 'pacemaker-scaleup'|> - } - } - - Exec <|tag == 'pacemaker-auth'|> - -> - exec {"Create Cluster ${cluster_name}": - creates => '/etc/cluster/cluster.conf', - command => $cluster_setup_cmd, - timeout => $cluster_start_timeout, - tries => $cluster_start_tries, - try_sleep => $cluster_start_try_sleep, - unless => '/usr/bin/test -f /etc/corosync/corosync.conf', - require => Class['pacemaker::install'], - } - -> - exec {"Start Cluster ${cluster_name}": - unless => "${::pacemaker::pcs_bin} status >/dev/null 2>&1", - command => "${::pacemaker::pcs_bin} cluster start --all", - timeout => $cluster_start_timeout, - tries => $cluster_start_tries, - try_sleep => $cluster_start_try_sleep, - require => Exec["Create Cluster ${cluster_name}"], - } - if $enable_sbd { - ensure_packages('sbd', { ensure => present }) - exec {'Enable SBD': - unless => "${::pacemaker::pcs_bin} status | grep -q 'sbd: active/enabled' > /dev/null 2>&1", - command => "${::pacemaker::pcs_bin} stonith sbd enable SBD_WATCHDOG_TIMEOUT=${sbd_watchdog_timeout}", - } - Package<| title == 'sbd' |> - -> Exec<| title == "Create Cluster ${cluster_name}" |> - -> Exec<| title == 'Enable SBD' |> - -> Exec<| title == "Start Cluster ${cluster_name}" |> - } - - if $pacemaker::pcsd_mode { - Exec['auth-successful-across-all-nodes'] -> - Exec["Create Cluster ${cluster_name}"] - } - Exec["Start Cluster ${cluster_name}"] - -> - Service <| tag == 'pcsd-cluster-service' |> - -> - Exec['wait-for-settle'] - } - - # pcs 0.10/pcmk 2.0 take care of the authkey internally by themselves - # unless force_authkey is true in which case we forcefully use remote_authkey - if $remote_authkey and (!$::pacemaker::pcs_010 or $force_authkey) { - file { 'etc-pacemaker': - ensure => directory, - path => '/etc/pacemaker', - owner => 'hacluster', - group => 'haclient', - mode => '0750', - } -> - file { 'etc-pacemaker-authkey': - path => '/etc/pacemaker/authkey', - owner => 'hacluster', - group => 'haclient', - mode => '0640', - content => $remote_authkey, - } - # On the bootstrap node we want to make sure that authkey is imposed - # after we create the cluster (because cluster create destroys it and regenerates a new one - # but before we start. 
On non bootstrap nodes we just let it before pcsd - if $setup_cluster { - Exec<| title == "Create Cluster ${cluster_name}" |> -> File<| title == 'etc-pacemaker-authkey' |> - File<| title == 'etc-pacemaker-authkey' |> -> Exec<| title == "Start Cluster ${cluster_name}" |> - } else { - File['etc-pacemaker-authkey'] -> Service['pcsd'] - } - - } - - exec {'wait-for-settle': - timeout => $settle_timeout, - tries => $settle_tries, - try_sleep => $settle_try_sleep, - command => "${::pacemaker::pcs_bin} status | grep -q 'partition with quorum' > /dev/null 2>&1", - unless => "${::pacemaker::pcs_bin} status | grep -q 'partition with quorum' > /dev/null 2>&1", - } - Exec<| title == 'wait-for-settle' |> -> Pcmk_constraint<||> - Exec<| title == 'wait-for-settle' |> -> Pcmk_resource<||> - Exec<| title == 'wait-for-settle' |> -> Pcmk_property<||> - Exec<| title == 'wait-for-settle' |> -> Pcmk_bundle<||> - Exec<| title == 'wait-for-settle' |> -> Pcmk_remote<||> - Exec<| title == 'wait-for-settle' |> -> Pcmk_resource_default<||> -} diff --git a/manifests/init.pp b/manifests/init.pp deleted file mode 100644 index 835da61d..00000000 --- a/manifests/init.pp +++ /dev/null @@ -1,55 +0,0 @@ -# == Class: pacemaker -# -# base class for pacemaker -# -# === Parameters -# -# [*pacemaker::params::hacluster_pwd*] -# String, used as the default for the pacemaker hacluster_pwd variable -# Default: CHANGEME -# -# === Variables -# -# [*hacluster_pwd*] -# used to set the password for the hacluster user on the nodes -# this user will be used in future pacemaker releases for pcsd to -# communicate between nodes. -# Default: $pacemaker::params::hacluster_pwd -# -# === Dependencies -# -# None -# -# === Examples -# -# see pacemaker::corosync -# -# === Authors -# -# Dan Radez -# Crag Wolfe -# Jason Guiditta -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -class pacemaker( - $hacluster_pwd = $pacemaker::params::hacluster_pwd -) inherits pacemaker::params { - include pacemaker::params - include pacemaker::install - include pacemaker::service -} diff --git a/manifests/install.pp b/manifests/install.pp deleted file mode 100644 index 6fca7c51..00000000 --- a/manifests/install.pp +++ /dev/null @@ -1,43 +0,0 @@ -# == Class: pacemaker::install -# -# Installs needed packages for pacemaker -# -# === Parameters: -# -# [*ensure*] -# (optional) Whether to make sure packages are present or absent -# Defaults to present -# -# === Dependencies -# -# None -# -# === Authors -# -# Crag Wolfe -# Jason Guiditta -# -# === Copyright -# -# Copyright (C) 2016 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
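A minimal sketch of declaring the base pacemaker class defined above while overriding the hacluster password; the value shown is only the documented placeholder default.

  class { 'pacemaker':
    hacluster_pwd => 'CHANGEME',
  }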
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -class pacemaker::install ( - $ensure = present, -) { - include pacemaker::params - ensure_packages($::pacemaker::params::package_list, { - ensure => $ensure - }) -} diff --git a/manifests/new.pp b/manifests/new.pp deleted file mode 100644 index 0626c586..00000000 --- a/manifests/new.pp +++ /dev/null @@ -1,120 +0,0 @@ -class pacemaker::new ( - $firewall_ipv6_manage = $::pacemaker::new::params::firewall_ipv6_manage, - $firewall_corosync_manage = $::pacemaker::new::params::firewall_corosync_manage, - $firewall_corosync_ensure = $::pacemaker::new::params::firewall_corosync_ensure, - $firewall_corosync_dport = $::pacemaker::new::params::firewall_corosync_dport, - $firewall_corosync_proto = $::pacemaker::new::params::firewall_corosync_proto, - $firewall_corosync_action = $::pacemaker::new::params::firewall_corosync_action, - $firewall_pcsd_manage = $::pacemaker::new::params::firewall_pcsd_manage, - $firewall_pcsd_ensure = $::pacemaker::new::params::firewall_pcsd_ensure, - $firewall_pcsd_dport = $::pacemaker::new::params::firewall_pcsd_dport, - $firewall_pcsd_action = $::pacemaker::new::params::firewall_pcsd_action, - - $package_manage = $::pacemaker::new::params::package_manage, - $package_list = $::pacemaker::new::params::package_list, - $package_ensure = $::pacemaker::new::params::package_ensure, - $package_provider = $::pacemaker::new::params::package_provider, - - $pcsd_mode = $::pacemaker::new::params::pcsd_mode, - $cluster_nodes = $::pacemaker::new::params::cluster_nodes, - $cluster_rrp_nodes = $::pacemaker::new::params::cluster_rrp_nodes, - $cluster_name = $::pacemaker::new::params::cluster_name, - $cluster_auth_key = $::pacemaker::new::params::cluster_auth_key, - $cluster_auth_enabled = $::pacemaker::new::params::cluster_auth_enabled, - $cluster_setup = $::pacemaker::new::params::cluster_setup, - $cluster_options = $::pacemaker::new::params::cluster_options, - $cluster_user = $::pacemaker::new::params::cluster_user, - $cluster_group = $::pacemaker::new::params::cluster_group, - $cluster_password = $::pacemaker::new::params::cluster_password, - $pcs_bin_path = $::pacemaker::new::params::pcs_bin_path, - $cluster_config_path = $::pacemaker::new::params::cluster_config_path, - $cluster_interfaces = $::pacemaker::new::params::cluster_interfaces, - $cluster_log_subsys = $::pacemaker::new::params::cluster_log_subsys, - $plugin_version = $::pacemaker::new::params::plugin_version, - $log_file_path = $::pacemaker::new::params::log_file_path, - - $pcsd_manage = $::pacemaker::new::params::pcsd_manage, - $pcsd_enable = $::pacemaker::new::params::pcsd_enable, - $pcsd_service = $::pacemaker::new::params::pcsd_service, - $pcsd_provider = $::pacemaker::new::params::pcsd_provider, - $corosync_manage = $::pacemaker::new::params::corosync_manage, - $corosync_enable = $::pacemaker::new::params::corosync_enable, - $corosync_service = $::pacemaker::new::params::corosync_service, - $corosync_provider = $::pacemaker::new::params::corosync_provider, - $pacemaker_manage = $::pacemaker::new::params::pacemaker_manage, - $pacemaker_enable = $::pacemaker::new::params::pacemaker_enable, - $pacemaker_service 
= $::pacemaker::new::params::pacemaker_service, - $pacemaker_provider = $::pacemaker::new::params::pacemaker_provider, -) inherits pacemaker::new::params { - - class { 'pacemaker::new::firewall' : - firewall_ipv6_manage => $firewall_ipv6_manage, - firewall_corosync_manage => $firewall_corosync_manage, - firewall_corosync_ensure => $firewall_corosync_ensure, - firewall_corosync_dport => $firewall_corosync_dport, - firewall_corosync_proto => $firewall_corosync_proto, - firewall_corosync_action => $firewall_corosync_action, - firewall_pcsd_manage => $firewall_pcsd_manage, - firewall_pcsd_ensure => $firewall_pcsd_ensure, - firewall_pcsd_dport => $firewall_pcsd_dport, - firewall_pcsd_action => $firewall_pcsd_action, - } - - class { 'pacemaker::new::install' : - package_manage => $package_manage, - package_list => $package_list, - package_ensure => $package_ensure, - package_provider => $package_provider, - } - - class { 'pacemaker::new::setup' : - pcsd_mode => $pcsd_mode, - # pcsd only - cluster_nodes => $cluster_nodes, - cluster_rrp_nodes => $cluster_rrp_nodes, - cluster_name => $cluster_name, - cluster_auth_key => $cluster_auth_key, - cluster_auth_enabled => $cluster_auth_enabled, - cluster_setup => $cluster_setup, - cluster_options => $cluster_options, - cluster_user => $cluster_user, - cluster_password => $cluster_password, - pcs_bin_path => $pcs_bin_path, - # config only - cluster_config_path => $cluster_config_path, - cluster_interfaces => $cluster_interfaces, - cluster_log_subsys => $cluster_log_subsys, - plugin_version => $plugin_version, - log_file_path => $log_file_path, - } - - class { 'pacemaker::new::service' : - pcsd_manage => $pcsd_manage, - pcsd_enable => $pcsd_enable, - pcsd_service => $pcsd_service, - pcsd_provider => $pcsd_provider, - corosync_manage => $corosync_manage, - corosync_enable => $corosync_enable, - corosync_service => $corosync_service, - corosync_provider => $corosync_provider, - pacemaker_manage => $pacemaker_manage, - pacemaker_enable => $pacemaker_enable, - pacemaker_service => $pacemaker_service, - pacemaker_provider => $pacemaker_provider, - } - - pacemaker::contain { 'pacemaker::new::firewall': } - pacemaker::contain { 'pacemaker::new::install': } - pacemaker::contain { 'pacemaker::new::setup': } - pacemaker::contain { 'pacemaker::new::service': } - - Class['pacemaker::new::firewall'] -> - Class['pacemaker::new::install'] - - Class['pacemaker::new::install'] -> - Class['pacemaker::new::service'] - - Class['pacemaker::new::install'] -> - Class['pacemaker::new::setup'] - -} diff --git a/manifests/new/firewall.pp b/manifests/new/firewall.pp deleted file mode 100644 index 3594fe30..00000000 --- a/manifests/new/firewall.pp +++ /dev/null @@ -1,108 +0,0 @@ -# == Class: pacemaker::new::firewall -# -# Managaes the Corosync and Pacemaker firewall rules -# -# [*firewall_ipv6_manage*] -# (boolean) Should the ipv6 rule be added? -# Default: true -# -# [*firewall_corosync_manage*] -# (boolean) Should the module manage Corosync firewall rules? -# Default: true -# -# [*firewall_corosync_ensure*] -# (present/absent) Should the rules be created or removed? -# Default: present -# -# [*firewall_corosync_dport*] -# The range of ports to open. -# Default: ['5404', '5405'] -# -# [*firewall_corosync_proto*] -# Which protocol is being used? -# Default: udp -# -# [*firewall_corosync_action*] -# What should the rule do with the packets? -# Default: accept -# -# [*firewall_pcsd_manage*] -# (boolean) Should the module manage PCSD firewall rules? 
-# Default: true -# -# [*firewall_pcsd_ensure*] -# (present/absent) Should the rules be created or removed? -# Default: present -# -# [*firewall_pcsd_dport*] -# The range of ports to open. -# Default: ['5404', '5405'] -# -# [*firewall_pcsd_action*] -# What should the rule do with the packets? -# Default: accept -# -class pacemaker::new::firewall ( - $firewall_ipv6_manage = $::pacemaker::new::params::firewall_ipv6_manage, - - $firewall_corosync_manage = $::pacemaker::new::params::firewall_corosync_manage, - $firewall_corosync_ensure = $::pacemaker::new::params::firewall_corosync_ensure, - $firewall_corosync_dport = $::pacemaker::new::params::firewall_corosync_dport, - $firewall_corosync_proto = $::pacemaker::new::params::firewall_corosync_proto, - $firewall_corosync_action = $::pacemaker::new::params::firewall_corosync_action, - - $firewall_pcsd_manage = $::pacemaker::new::params::firewall_pcsd_manage, - $firewall_pcsd_ensure = $::pacemaker::new::params::firewall_pcsd_ensure, - $firewall_pcsd_dport = $::pacemaker::new::params::firewall_pcsd_dport, - $firewall_pcsd_action = $::pacemaker::new::params::firewall_pcsd_action, -) inherits pacemaker::new::params { - validate_bool($firewall_ipv6_manage) - - validate_bool($firewall_corosync_manage) - validate_string($firewall_corosync_ensure) - validate_array($firewall_corosync_dport) - validate_string($firewall_corosync_proto) - validate_string($firewall_corosync_action) - - validate_bool($firewall_pcsd_manage) - validate_string($firewall_pcsd_ensure) - validate_array($firewall_pcsd_dport) - validate_string($firewall_pcsd_action) - - if $firewall_corosync_manage { - firewall { '001 corosync mcast' : - ensure => $firewall_corosync_ensure, - proto => $firewall_corosync_proto, - dport => $firewall_corosync_dport, - action => $firewall_corosync_action, - } - if $firewall_ipv6_manage { - firewall { '001 corosync mcast ipv6' : - ensure => $firewall_corosync_ensure, - proto => $firewall_corosync_proto, - dport => $firewall_corosync_dport, - action => $firewall_corosync_action, - provider => 'ip6tables', - } - } - } - - if $firewall_pcsd_manage { - firewall { '001 pcsd': - ensure => $firewall_pcsd_ensure, - proto => 'tcp', - dport => $firewall_pcsd_dport, - action => $firewall_pcsd_action, - } - if $firewall_ipv6_manage { - firewall { '001 pcsd ipv6': - ensure => $firewall_pcsd_ensure, - proto => 'tcp', - dport => $firewall_pcsd_dport, - action => $firewall_pcsd_action, - provider => 'ip6tables', - } - } - } - -} diff --git a/manifests/new/install.pp b/manifests/new/install.pp deleted file mode 100644 index 05e8648d..00000000 --- a/manifests/new/install.pp +++ /dev/null @@ -1,53 +0,0 @@ -# == Class: pacemaker::new::install -# -# Install the required Pacemaker and Corosync packages -# and the basic configuration folders. -# -# [*package_manage*] -# Should the packages be managed? 
-# Default: true -# -# [*package_list*] -# The list of packages names to install -# -# [*package_ensure*] -# Ensure parameter of the packages (present,installed,absent,purged) -# -# [*package_provider] -# Override the default package provider -# Default: undef -# -class pacemaker::new::install ( - $package_manage = $::pacemaker::new::params::package_manage, - $package_list = $::pacemaker::new::params::package_list, - $package_ensure = $::pacemaker::new::params::package_ensure, - $package_provider = $::pacemaker::new::params::package_provider, -) inherits pacemaker::new::params { - validate_bool($package_manage) - validate_array($package_list) - validate_string($package_ensure) - - file { 'corosync-config-dir' : - ensure => 'directory', - path => '/etc/corosync', - group => 'root', - owner => 'root', - mode => '0755', - } - - file { 'pacemaker-config-dir' : - ensure => 'directory', - path => '/etc/pacemaker', - group => 'root', - owner => 'root', - mode => '0755', - } - - if $package_manage { - package { $package_list : - ensure => $package_ensure, - provider => $package_provider, - } - } - -} diff --git a/manifests/new/params.pp b/manifests/new/params.pp deleted file mode 100644 index df1478de..00000000 --- a/manifests/new/params.pp +++ /dev/null @@ -1,74 +0,0 @@ -# == Class: pacemaker::new::params -# -# Common default parameter values for the Paceamker module -# -class pacemaker::new::params { - $release = split($::os['release']['full'], '[.]') - $major = $::os['release']['major'] - $minor = $::os['release']['minor'] - - if $::os['family'] == 'RedHat' { - $package_list = ['pacemaker', 'pcs', 'pacemaker-libs'] - $pcsd_mode = true - $cluster_user = 'hacluster' - $cluster_group = 'haclient' - $log_file_path = '/var/log/cluster/corosync.log' - } elsif $::os['family'] == 'Debian' { - $pcsd_mode = false - if ($::os['name'] == 'Ubuntu') and (versioncmp($::os['release']['full'], '16') >= 0) { - $package_list = ['dbus', 'pacemaker', 'corosync', 'pacemaker-cli-utils', 'resource-agents', 'crmsh'] - } else { - $package_list = ['pacemaker-mgmt', 'pacemaker', 'corosync', 'pacemaker-cli-utils', 'resource-agents', 'crmsh'] - } - $cluster_user = 'root' - $cluster_group = 'root' - $log_file_path = '/var/log/corosync/corosync.log' - } else { - fail("OS '${::os['name']}' is not supported!") - } - - $firewall_ipv6_manage = true - $firewall_corosync_manage = true - $firewall_corosync_ensure = 'present' - $firewall_corosync_dport = ['5404', '5405'] - $firewall_corosync_proto = 'udp' - $firewall_corosync_action = 'accept' - $firewall_pcsd_manage = $pcsd_mode - $firewall_pcsd_ensure = 'present' - $firewall_pcsd_dport = ['2224'] - $firewall_pcsd_action = 'accept' - - $cluster_nodes = ['localhost'] - $cluster_rrp_nodes = undef - $cluster_name = 'clustername' - $cluster_setup = true - $cluster_config_path = '/etc/corosync/corosync.conf' - $cluster_options = {} - $cluster_interfaces = [] - $cluster_log_subsys = [] - $plugin_version = '1' - - $cluster_auth_key = undef - $cluster_auth_enabled = false - - $pcs_bin_path = '/usr/sbin/pcs' - $cluster_password = 'CHANGEME' - - $package_manage = true - $package_ensure = 'installed' - $package_provider = undef - - $pcsd_manage = $pcsd_mode - $pcsd_service = 'pcsd' - $pcsd_enable = true - $pcsd_provider = undef - $corosync_manage = true - $corosync_service = 'corosync' - $corosync_enable = true - $corosync_provider = undef - $pacemaker_manage = true - $pacemaker_service = 'pacemaker' - $pacemaker_enable = true - $pacemaker_provider = undef - -} diff --git 
diff --git a/manifests/new/resource/filesystem.pp b/manifests/new/resource/filesystem.pp deleted file mode 100644 index 9e4d4de2..00000000 --- a/manifests/new/resource/filesystem.pp +++ /dev/null @@ -1,111 +0,0 @@ -# == Define: pacemaker::new::resource::filesystem -# -# A resource type to create a pacemaker Filesystem resource, provided -# for convenience. -# -# === Parameters -# -# [*ensure*] -# (optional) Whether to make the resource present or absent -# Defaults to present -# -# [*device*] -# (optional) The device which is being mounted -# For example: 192.168.200.100:/export/foo -# Default: undef -# -# [*directory*] -# (optional) Where to mount the device (the empty dir must already exist) -# Default: undef -# -# [*fstype*] -# (optional) As you would pass to mount, for example 'nfs' -# Default: undef -# -# [*fsoptions*] -# (optional) Filesystem options as you would pass to the mount command -# Default: undef -# -# [*additional*] -# (optional) Add any additional resource parameters as a hash. -# Default: {} -# -# [*metadata*] -# (optional) Additional meta parameters -# Default: {} -# -# [*operations*] -# (optional) Additional op parameters -# Default: {} -# -# [*complex_type*] -# Should a simple or complex primitive be created? (simple/clone/master) -# Default: simple -# -# [*complex_metadata*] -# Pass additional complex type attributes -# Default: {} -# -# [*primitive_class*] -# Default: 'ocf' -# -# [*primitive_provider*] -# Default: 'heartbeat' -# -# [*primitive_type*] -# Default: 'Filesystem' -# -define pacemaker::new::resource::filesystem ( - $ensure = 'present', - $device = undef, - $directory = undef, - $fstype = undef, - $fsoptions = undef, - $additional = { }, - - $metadata = { }, - $operations = { }, - $complex_type = 'simple', - $complex_metadata = { }, - - $primitive_class = 'ocf', - $primitive_provider = 'heartbeat', - $primitive_type = 'Filesystem', -) { - - validate_string($ensure) - validate_string($device) - validate_absolute_path($directory) - - validate_hash($metadata) - validate_hash($operations) - validate_string($complex_type) - validate_hash($complex_metadata) - - validate_string($primitive_class) - validate_string($primitive_provider) - validate_string($primitive_type) - - $resource_id = regsubst("fs${device}", '\/', '_', 'G') - - $parameters = pacemaker_resource_parameters( - 'device', $device, - 'directory', $directory, - 'fstype', $fstype, - 'fsoptions', $fsoptions, - $additional - ) - - pacemaker_resource { $resource_id : - ensure => $ensure, - primitive_type => $primitive_type, - primitive_provider => $primitive_provider, - primitive_class => $primitive_class, - parameters => $parameters, - metadata => $metadata, - operations => $operations, - complex_type => $complex_type, - complex_metadata => $complex_metadata, - } - -} diff --git a/manifests/new/resource/ip.pp b/manifests/new/resource/ip.pp deleted file mode 100644 index c2511fb9..00000000 --- a/manifests/new/resource/ip.pp +++ /dev/null @@ -1,91 +0,0 @@ -# == Define: pacemaker::new::resource::ip -# -# A resource type to create a pacemaker IPaddr2 resource, provided -# for convenience.
-# -# === Parameters -# -# [*ip_address*] -# The virtual IP address you want pacemaker to create and manage -# -# [*ensure*] -# (optional) Whether to make sure resource is created or removed -# Defaults to present -# -# [*cidr_netmask*] -# (optional) The netmask to use in the cidr= option in the -# "pcs resource create"command -# Defaults to '32' -# -# [*nic*] -# (optional) The nic to use in the nic= option in the "pcs resource create" -# command -# Defaults to undef -# -# [*additional*] -# (optional) Add any additional resource parameters as a hash. -# Default: {} -# -# [*metadata*] -# (optional) Additional meta parameters -# Default: {} -# -# [*operations*] -# (optional) Additional op parameters -# Default: {} -# -# [*primitive_class*] -# Default: 'ocf' -# -# [*primitive_provider*] -# Default: 'heartbeat' -# -# [*primitive_type*] -# Default: 'IPaddr2' -# -define pacemaker::new::resource::ip ( - $ensure = 'present', - $ip_address = undef, - $cidr_netmask = '32', - $nic = undef, - $additional = { }, - - $metadata = { }, - $operations = { }, - - $primitive_class = 'ocf', - $primitive_provider = 'heartbeat', - $primitive_type = 'IPaddr2', -) { - validate_string($ensure) - validate_ip_address($ip_address) - validate_integer($cidr_netmask) - validate_string($nic) - validate_hash($additional) - - validate_hash($metadata) - validate_hash($operations) - - validate_string($primitive_class) - validate_string($primitive_provider) - validate_string($primitive_type) - - $resource_name = regsubst($ip_address, '(:)', '.', 'G') - - $parameters = pacemaker_resource_parameters( - 'ip', $ip_address, - 'cidr_netmask', $cidr_netmask, - 'nic', $nic, - $additional - ) - - pacemaker_resource { "ip-${resource_name}": - ensure => $ensure, - primitive_type => $primitive_type, - primitive_provider => $primitive_provider, - primitive_class => $primitive_class, - parameters => $parameters, - metadata => $metadata, - operations => $operations, - } -} diff --git a/manifests/new/resource/route.pp b/manifests/new/resource/route.pp deleted file mode 100644 index a1ce1d6b..00000000 --- a/manifests/new/resource/route.pp +++ /dev/null @@ -1,101 +0,0 @@ -# == Define: pacemaker::new::resource::route -# -# A resource type to create a pacemaker Route resources, provided -# for convenience. -# -# === Parameters: -# -# [*ensure*] -# (optional) Whether to make sure the constraint is present or absent -# Defaults to present -# -# [*source*] -# (optional) Route source -# Default: undef -# -# [*destination*] -# (optional) Route destination -# Default: undef -# -# [*gateway*] -# (optional) Gateway to use -# Default: undef -# -# [*device*] -# (optional) Network interface to use -# Default: undef -# -# [*additional*] -# (optional) Add any additional resource parameters as a hash. -# Default: {} -# -# [*metadata*] -# (optional) Additional meta parameters -# Default: {} -# -# [*operations*] -# (optional) Additional op parameters -# Default: {} -# -# [*complex_type*] -# Should a simple of comple primitive be created? 
(simple/clone/master) -# Default: simple -# -# [*complex_metadata*] -# Path additional complex type attributes -# Default: {} -# -# [*primitive_class*] -# Default: 'ocf' -# -# [*primitive_provider*] -# Default: 'heartbeat' -# -# [*primitive_type*] -# Default: 'Filesystem' -# -define pacemaker::new::resource::route ( - $ensure = 'present', - $device = undef, - $source = undef, - $destination = undef, - $gateway = undef, - $additional = { }, - - $operations = { }, - $metadata = { }, - - $primitive_class = 'ocf', - $primitive_provider = 'heartbeat', - $primitive_type = 'Route', -) { - - validate_string($ensure) - - validate_hash($additional) - validate_hash($metadata) - validate_hash($operations) - - validate_string($primitive_class) - validate_string($primitive_provider) - validate_string($primitive_type) - - $parameters = pacemaker_resource_parameters( - 'device', $device, - 'source', $source, - 'destination', $destination, - 'gateway', $gateway, - $additional - ) - - pacemaker_resource { "route-${name}": - ensure => $ensure, - primitive_type => $primitive_type, - primitive_provider => $primitive_provider, - primitive_class => $primitive_class, - parameters => $parameters, - metadata => $metadata, - operations => $operations, - } - -} diff --git a/manifests/new/service.pp b/manifests/new/service.pp deleted file mode 100644 index 62291104..00000000 --- a/manifests/new/service.pp +++ /dev/null @@ -1,83 +0,0 @@ -# == Class: pacemaker::new::service -# -# Manages the Corosync, Pacemaker and Pcsd services -# -class pacemaker::new::service ( - $pcsd_manage = $::pacemaker::new::params::pcsd_enable, - $pcsd_enable = $::pacemaker::new::params::pcsd_enable, - $pcsd_service = $::pacemaker::new::params::pcsd_service, - $pcsd_provider = $::pacemaker::new::params::pcsd_provider, - - $corosync_manage = $::pacemaker::new::params::corosync_enable, - $corosync_enable = $::pacemaker::new::params::corosync_enable, - $corosync_service = $::pacemaker::new::params::corosync_service, - $corosync_provider = $::pacemaker::new::params::corosync_provider, - - $pacemaker_manage = $::pacemaker::new::params::pacemaker_enable, - $pacemaker_enable = $::pacemaker::new::params::pacemaker_enable, - $pacemaker_service = $::pacemaker::new::params::pacemaker_service, - $pacemaker_provider = $::pacemaker::new::params::pacemaker_provider, -) inherits pacemaker::new::params { - validate_bool($pcsd_manage) - validate_bool($pcsd_enable) - validate_string($pcsd_service) - - validate_bool($corosync_manage) - validate_bool($corosync_enable) - validate_string($corosync_service) - - validate_bool($pacemaker_manage) - validate_bool($pacemaker_enable) - validate_string($pacemaker_service) - - if $pcsd_enable { - $pcsd_ensure = 'running' - } else { - $pcsd_ensure = 'stopped' - } - - if $corosync_enable { - $corosync_ensure = 'running' - } else { - $corosync_ensure = 'stopped' - } - - if $pacemaker_enable { - $pacemaker_ensure = 'running' - } else { - $pacemaker_ensure = 'stopped' - } - - if $pcsd_manage { - service { 'pcsd' : - ensure => $pcsd_ensure, - enable => $pcsd_enable, - name => $pcsd_service, - provider => $pcsd_provider, - } - } - - if $corosync_manage { - service { 'corosync' : - ensure => $corosync_ensure, - enable => $corosync_enable, - name => $corosync_service, - provider => $corosync_provider, - tag => 'cluster-service', - } - } - - if $pacemaker_manage { - service { 'pacemaker' : - ensure => $pacemaker_ensure, - enable => $pacemaker_enable, - name => $pacemaker_service, - provider => $pacemaker_provider, - tag => 
'cluster-service', - } - } - - Service <| title == 'corosync' |> -> - Service <| title == 'pacemaker' |> - -} diff --git a/manifests/new/setup.pp b/manifests/new/setup.pp deleted file mode 100644 index c68bbcb3..00000000 --- a/manifests/new/setup.pp +++ /dev/null @@ -1,69 +0,0 @@ -# == Class: pacemaker::new::setup -# -# Sets up the cluster configuration -# either using the "pcsd" service or -# by creating the configuration file directly. -# -class pacemaker::new::setup ( - $pcsd_mode = $::pacemaker::new::params::pcsd_mode, - $cluster_nodes = $::pacemaker::new::params::cluster_nodes, - $cluster_rrp_nodes = $::pacemaker::new::params::cluster_rrp_nodes, - $cluster_name = $::pacemaker::new::params::cluster_name, - $cluster_auth_key = $::pacemaker::new::params::cluster_auth_key, - $cluster_auth_enabled = $::pacemaker::new::params::cluster_auth_enabled, - $cluster_setup = $::pacemaker::new::params::cluster_setup, - $cluster_options = $::pacemaker::new::params::cluster_options, - $cluster_user = $::pacemaker::new::params::cluster_user, - $cluster_group = $::pacemaker::new::params::cluster_group, - $cluster_password = $::pacemaker::new::params::cluster_password, - $pcs_bin_path = $::pacemaker::new::params::pcs_bin_path, - $cluster_config_path = $::pacemaker::new::params::cluster_config_path, - $cluster_interfaces = $::pacemaker::new::params::cluster_interfaces, - $cluster_log_subsys = $::pacemaker::new::params::cluster_log_subsys, - $plugin_version = $::pacemaker::new::params::plugin_version, - $log_file_path = $::pacemaker::new::params::log_file_path, -) inherits pacemaker::new::params { - if $::os['family'] == 'Debian' { - class { 'pacemaker::new::setup::debian' : - plugin_version => $plugin_version, - } - pacemaker::contain { 'pacemaker::new::setup::debian': } - } - - class { 'pacemaker::new::setup::auth_key' : - cluster_auth_enabled => $cluster_auth_enabled, - cluster_auth_key => $cluster_auth_key, - cluster_user => $cluster_user, - cluster_group => $cluster_group, - } - pacemaker::contain { 'pacemaker::new::setup::auth_key': } - - if $pcsd_mode { - class { 'pacemaker::new::setup::pcsd' : - cluster_nodes => $cluster_nodes, - cluster_rrp_nodes => $cluster_rrp_nodes, - cluster_name => $cluster_name, - cluster_setup => $cluster_setup, - cluster_options => $cluster_options, - cluster_user => $cluster_user, - cluster_group => $cluster_group, - cluster_password => $cluster_password, - pcs_bin_path => $pcs_bin_path, - } - pacemaker::contain { 'pacemaker::new::setup::pcsd': } - } else { - class { 'pacemaker::new::setup::config' : - cluster_nodes => $cluster_nodes, - cluster_rrp_nodes => $cluster_rrp_nodes, - cluster_name => $cluster_name, - cluster_auth_enabled => $cluster_auth_enabled, - cluster_setup => $cluster_setup, - cluster_options => $cluster_options, - cluster_config_path => $cluster_config_path, - cluster_interfaces => $cluster_interfaces, - cluster_log_subsys => $cluster_log_subsys, - log_file_path => $log_file_path, - } - pacemaker::contain { 'pacemaker::new::setup::config': } - } -}
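A minimal sketch of how the removed setup class was declared, assuming the pre-removal layout shown above; with pcsd_mode disabled it falls through to the config branch and renders corosync.conf directly (the cluster name and node names below are placeholders):
```
# Hypothetical declaration of the removed pacemaker::new::setup class.
# pcsd_mode => false selects the pacemaker::new::setup::config branch,
# which writes /etc/corosync/corosync.conf from a template.
class { 'pacemaker::new::setup':
  pcsd_mode     => false,
  cluster_name  => 'example-cluster',
  cluster_nodes => ['node1.example.com', 'node2.example.com', 'node3.example.com'],
}
```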
diff --git a/manifests/new/setup/auth_key.pp b/manifests/new/setup/auth_key.pp deleted file mode 100644 index 36747de7..00000000 --- a/manifests/new/setup/auth_key.pp +++ /dev/null @@ -1,70 +0,0 @@ -# == Class: pacemaker::new::setup::auth_key -# -# Install the cluster authentication key used to -# secure the Corosync internode communication -# if the key is provided and enabled. -# -# [*cluster_auth_enabled*] -# Enable or disable the use of Corosync auth keys. -# Enabling this will require *cluster_auth_key* to be set too. -# -# [*cluster_auth_key*] -# The string used to encrypt the Corosync inter-node communications. -# This should be a string generated by *corosync-keygen* or by any other -# means. It will be placed in the */etc/corosync/authkey* file -# and will be used to authenticate internode corosync communication. -# The *secauth* option will be enabled if this key is present. -# -# [*cluster_user*] -# The system user that owns the key files. -# -# [*cluster_group*] -# The system group of the key files. -# -class pacemaker::new::setup::auth_key ( - $cluster_auth_enabled = $::pacemaker::new::params::cluster_auth_enabled, - $cluster_auth_key = $::pacemaker::new::params::cluster_auth_key, - $cluster_user = $::pacemaker::new::params::cluster_user, - $cluster_group = $::pacemaker::new::params::cluster_group, -) inherits pacemaker::new::params { - validate_bool($cluster_auth_enabled) - validate_string($cluster_user) - validate_string($cluster_group) - - if $cluster_auth_enabled { - $key_ensure = 'present' - } else { - $key_ensure = 'absent' - } - - file { 'corosync-auth-key' : - ensure => $key_ensure, - path => '/etc/corosync/authkey', - content => $cluster_auth_key, - owner => $cluster_user, - group => $cluster_group, - mode => '0640', - } - - file { 'pacemaker-auth-key' : - ensure => $key_ensure, - path => '/etc/pacemaker/authkey', - target => '/etc/corosync/authkey', - owner => $cluster_user, - group => $cluster_group, - mode => '0640', - } - - # authkey should be placed before the cluster is created - File['pacemaker-auth-key'] -> - Exec <| title == 'create-cluster' |> - - File['corosync-auth-key'] -> - Exec <| title == 'create-cluster' |> - - File['pacemaker-auth-key'] ~> - Service <| tag == 'cluster-service' |> - - File['corosync-auth-key'] ~> - Service <| tag == 'cluster-service' |> -} diff --git a/manifests/new/setup/config.pp b/manifests/new/setup/config.pp deleted file mode 100644 index de4beff3..00000000 --- a/manifests/new/setup/config.pp +++ /dev/null @@ -1,151 +0,0 @@ -# == Class: pacemaker::new::setup::config -# -# Set the cluster up by directly generating -# the Corosync configuration file. -# -# [*cluster_nodes*] -# The cluster nodes structure. -# Can be provided in several formats. -# -# [*cluster_rrp_nodes*] -# Same as *cluster_nodes* and will override its value. -# -# [*cluster_name*] -# The name attribute of the cluster. -# Default: clustername -# -# [*cluster_auth_enabled*] -# Does this cluster have a Corosync auth key? -# Default: false -# -# [*cluster_setup*] -# Should this cluster be set up? -# Default: true -# -# [*cluster_options*] -# A hash of additional cluster options.
-# Totem section: -# * version -# * nodeid -# * clear_node_high_bit -# * secauth -# * crypto_cipher -# * crypto_hash -# * rrp_mode -# * netmtu -# * threads -# * vsftype -# * transport -# * token -# * token_retransmit -# * hold -# * token_retransmits_before_loss_const -# * join -# * send_join -# * consensus -# * merge -# * downcheck -# * fail_recv_const -# * seqno_unchanged_const -# * heartbeat_failures_allowed -# * max_network_delay -# * window_size -# * max_messages -# * miss_count_const -# * rrp_problem_count_timeout -# * rrp_problem_count_threshold -# * rrp_problem_count_mcast_threshold -# * rrp_token_expired_timeout -# * rrp_autorecovery_check_timeout -# Logging section: -# * timestamp -# * fileline -# * function_name -# * to_stderr -# * to_logfile -# * to_syslog -# * logfile -# * logfile_priority -# * syslog_facility -# * syslog_priority -# * debug -# * tags -# * subsys -# Quorum section: -# * provider -# * two_node -# * wait_for_all -# * last_man_standing -# * last_man_standing_window -# * auto_tie_breaker -# * auto_tie_breaker_node -# * allow_downscale -# * expected_votes -# * expected_votes_tracking -# * votes -# -# [*cluster_config_path*] -# Path to the cluster configuration file. -# Default: /etc/corosync/corosync.conf -# -# [*cluster_interfaces*] -# An array of hashes or a single hash with the cluster -# interface properties. This should be provided if the -# UDP multicast communications are being used to set -# the addresses: -# * ringnumber -# * bindnetaddr -# * nodeid -# * broadcast -# * mcastaddr -# * mcastport -# * ttl -# -# [*cluster_log_subsys*] -# An array of hashes or a single hash with the logger -# subsystem options. They will be added to the logging -# section and have the same parameters as the logging section -# itself with *subsys* name being mandatory. -# -# [*log_file_path*] -# Path to the cluster log file. 
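To make the option list above concrete, a sketch of what a cluster_options hash might have looked like; a flat name => value layout is assumed here (the corosync.conf.erb template that consumed it is not part of this excerpt), and all values are placeholders:
```
# Hypothetical cluster_options hash for the removed config class, using
# keys from the Totem, Logging and Quorum sections documented above.
$cluster_options = {
  'token'        => '10000',
  'rrp_mode'     => 'passive',
  'secauth'      => 'on',
  'to_syslog'    => 'yes',
  'two_node'     => '1',
  'wait_for_all' => '1',
}
```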
-# -class pacemaker::new::setup::config ( - $cluster_nodes = $::pacemaker::new::params::cluster_nodes, - $cluster_rrp_nodes = $::pacemaker::new::params::cluster_rrp_nodes, - $cluster_name = $::pacemaker::new::params::cluster_name, - $cluster_auth_enabled = $::pacemaker::new::params::cluster_auth_enabled, - $cluster_setup = $::pacemaker::new::params::cluster_setup, - $cluster_options = $::pacemaker::new::params::cluster_options, - $cluster_config_path = $::pacemaker::new::params::cluster_config_path, - $cluster_interfaces = $::pacemaker::new::params::cluster_interfaces, - $cluster_log_subsys = $::pacemaker::new::params::cluster_log_subsys, - $log_file_path = $::pacemaker::new::params::log_file_path, -) inherits pacemaker::new::params { - validate_absolute_path($log_file_path) - validate_absolute_path($cluster_config_path) - validate_hash($cluster_options) - validate_string($cluster_name) - validate_bool($cluster_auth_enabled) - validate_bool($cluster_setup) - - if $cluster_setup { - $cluster_nodes_real = pick($cluster_rrp_nodes, $cluster_nodes, []) - $cluster_nodes_data = pacemaker_cluster_nodes($cluster_nodes_real, 'hash') - - file { 'corosync-config' : - ensure => 'present', - path => '/etc/corosync/corosync.conf', - content => template('pacemaker/corosync.conf.erb'), - } - - File['corosync-config'] ~> - Service <| tag == 'cluster-service' |> - } - - pacemaker_online { 'setup' :} - - Service <| tag == 'cluster-service' |> -> - Pacemaker_online['setup'] - -} diff --git a/manifests/new/setup/debian.pp b/manifests/new/setup/debian.pp deleted file mode 100644 index f13d36c7..00000000 --- a/manifests/new/setup/debian.pp +++ /dev/null @@ -1,56 +0,0 @@ -# == Class: pacemaker::new::setup::debian -# -# Configure the debian/ubuntu defaults and support files -# for the cluster services. -# -class pacemaker::new::setup::debian ( - $plugin_version = $pacemaker::new::params::plugin_version, -) inherits pacemaker::new::params { - validate_integer($plugin_version) - - File { - ensure => 'present', - owner => 'root', - group => 'root', - mode => '0644', - } - - file { 'corosync-service-dir' : - ensure => 'directory', - path => '/etc/corosync/service.d', - purge => true, - recurse => true, - } - - file { 'corosync-service-pacemaker' : - path => '/etc/corosync/service.d/pacemaker', - content => template('pacemaker/debian/pacemaker_service.erb'), - } - - file { 'corosync-debian-default' : - path => '/etc/default/corosync', - content => template('pacemaker/debian/corosync_default.erb'), - } - - file { 'pacemaker-debian-default' : - path => '/etc/default/pacemaker', - content => template('pacemaker/debian/pacemaker_default.erb'), - } - - file { 'cman-debian-default' : - path => '/etc/default/cman', - content => template('pacemaker/debian/cman_default.erb'), - } - - File['corosync-service-pacemaker'] ~> - Service <| tag == 'cluster-service' |> - - File['corosync-debian-default'] ~> - Service <| tag == 'cluster-service' |> - - File['pacemaker-debian-default'] ~> - Service <| tag == 'cluster-service' |> - - File['cman-debian-default'] ~> - Service <| tag == 'cluster-service' |> -} diff --git a/manifests/new/setup/pcsd.pp b/manifests/new/setup/pcsd.pp deleted file mode 100644 index f794fc76..00000000 --- a/manifests/new/setup/pcsd.pp +++ /dev/null @@ -1,220 +0,0 @@ -# ## Class: pacemaker::new::setup::pcsd -# -# A class to setup a pacemaker cluster using -# the "pcsd" service. 
-# -# ### Parameters -# -# [*cluster_nodes*] -# (required) A list cluster nodes to be authenticated by the PCSD daemon and -# be used in the cluster creation. -# This data can be provided in several forms: -# -# * String: `'node1 node2 node3'` -# * Array: `['node1', 'node2', 'node3']` -# * Hash: -# ``` -# { -# 'node1' => { -# 'host' => 'my_node', -# }, -# 'node2' => { -# 'host' => 'other_node', -# 'ring0' => '192.168.0.1', -# }, -# 'node3' => {} -# } -# # Will be converted to: -# ['my_node', '192.168.0.1', 'node3'] -# ``` -# -# Elements in the hash are used in this priority: -# 1. *ring0, ring1, ...* have the highest priority. -# They can be given either IP addresses or hostnames. -# 2. *ip* will be used if there is no *ring0* -# 2. *host* will be used if there is no *ip* -# 3. Hash keys will be used if there is no *host* -# -# [*cluster_rrp_nodes*] -# (optional) A list of nodes that will be actually used to create the cluster. -# It will be equal to the *cluster_nodes* if not provided or can be set using -# the same ways as the *cluster_nodes* does. -# This can be used o either just override the list of cluster nodes and make -# it different from nodes used for **pcsd** authentication, or to make a -# Redundant Ring Protocol (RRP) enabled cluster. -# -# RRP nodes can be specified by providing all node's interfaces as a -# comma-separated list. For example, *node1* has interface *node1a* in the -# first ring and node1b in the second ring. Node2 has the same interfaces. -# In this case, RRP nodes can be provided like this: -# -# * String: `'node1a,node1b node2a,node2b'` -# * Array: `['node1a,node1b', 'node2a,node2b']` -# * Hash: -# ``` -# { -# 'node1' => { -# 'host' => 'my_node', -# 'ip' => '192.168.0.1', -# }, -# 'node2' => { -# 'host' => 'other_node', -# 'ring0' => '192.168.0.2', -# 'ring1' => '172.16.0.2', -# }, -# 'node3' => {} -# } -# # Will be converted to: -# ['192.168.0.1', '192.168.0.2,172.16.0.2', 'node3'] -# ``` -# -# [*cluster_name*] -# (optional) The name of the cluster (no whitespace) -# Default: clustername -# -# [*cluster_setup*] -# (optional) If your cluster includes **pcsd**, this should be set to true for -# just one node in cluster. Else set to true for all nodes. -# Default: true -# -# [*cluster_options*] -# (optional) Hash additional cluster configuration options. -# Can be specified like this: -# -# * String: `'--token 10000 --ipv6 --join 100` -# * Array: `['--token', '10000', '--ipv6', '', '--join', '100']` -# * Hash: -# ``` -# { -# '--token' => '10000', -# '--ipv6' => '', -# '--join' => '100', -# } -# # Or: -# { -# 'token' => '10000', -# 'ipv6' => '', -# 'join' => '100', -# } -# ``` -# -# Supported cluster options: -# * transport udpu|udp -# * rrpmode active|passive -# * addr0 -# * mcast0
-# * mcastport0 -# * ttl0 -# * broadcast0 -# * addr1 -# * mcast1
-# * mcastport1 -# * ttl1 -# * broadcast1 -# * wait_for_all=<0|1> -# * auto_tie_breaker=<0|1> -# * last_man_standing=<0|1> -# * last_man_standing_window=
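Pulling the node and option formats above together, a minimal sketch of declaring the removed pcsd-based setup class; hostnames, ring addresses, the cluster name and the password are illustrative placeholders only:
```
# Hypothetical declaration of the removed pacemaker::new::setup::pcsd class,
# combining the hash form of cluster_nodes (ring0/ring1 per node) with a
# small cluster_options hash. All values below are placeholders.
class { 'pacemaker::new::setup::pcsd':
  cluster_name     => 'example-cluster',
  cluster_password => 'not-a-real-password',
  cluster_nodes    => {
    'node1' => { 'ring0' => '192.168.0.1', 'ring1' => '172.16.0.1' },
    'node2' => { 'ring0' => '192.168.0.2', 'ring1' => '172.16.0.2' },
  },
  cluster_options  => {
    'transport' => 'udpu',
    'token'     => '10000',
  },
}
```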