From 6a9a83d0d9da73a094bd29e5629b4c4b93d151bd Mon Sep 17 00:00:00 2001 From: iberezovskiy Date: Tue, 28 Jul 2015 13:00:54 +0300 Subject: [PATCH] Add Redis puppet module Add upstream Redis puppet module which will be used as base module for Ceilometer-Redis plugin: https://github.com/fsalum/puppet-redis Partially implements: blueprint ceilometer-central-agent-ha Change-Id: Icbd879623790bf96e54b6dad7435594f107cb650 --- .../puppet/modules/redis/.travis.yml | 21 + .../puppet/modules/redis/CHANGELOG | 99 + .../puppet/modules/redis/Gemfile | 24 + .../puppet/modules/redis/LICENSE | 201 ++ .../puppet/modules/redis/Modulefile | 13 + .../puppet/modules/redis/README.md | 108 + .../puppet/modules/redis/Rakefile | 11 + .../puppet/modules/redis/Vagrantfile | 43 + .../modules/redis/lib/facter/redis_version.rb | 71 + .../puppet/modules/redis/manifests/init.pp | 211 ++ .../puppet/modules/redis/manifests/params.pp | 37 + .../modules/redis/manifests/sentinel.pp | 152 + .../redis/manifests/sentinel_params.pp | 42 + .../puppet/modules/redis/metadata.json | 51 + .../puppet/modules/redis/spec/spec_helper.rb | 17 + .../modules/redis/templates/logrotate.erb | 9 + .../modules/redis/templates/redis.conf.erb | 2790 +++++++++++++++++ .../redis/templates/sentinel-init.conf.erb | 14 + .../modules/redis/templates/sentinel.conf.erb | 178 ++ .../puppet/modules/redis/tests/Puppetfile | 2 + .../puppet/modules/redis/tests/init.pp | 97 + .../puppet/modules/redis/tests/sentinel.pp | 38 + 22 files changed, 4229 insertions(+) create mode 100644 deployment_scripts/puppet/modules/redis/.travis.yml create mode 100644 deployment_scripts/puppet/modules/redis/CHANGELOG create mode 100644 deployment_scripts/puppet/modules/redis/Gemfile create mode 100644 deployment_scripts/puppet/modules/redis/LICENSE create mode 100644 deployment_scripts/puppet/modules/redis/Modulefile create mode 100644 deployment_scripts/puppet/modules/redis/README.md create mode 100644 deployment_scripts/puppet/modules/redis/Rakefile create mode 100644 deployment_scripts/puppet/modules/redis/Vagrantfile create mode 100644 deployment_scripts/puppet/modules/redis/lib/facter/redis_version.rb create mode 100644 deployment_scripts/puppet/modules/redis/manifests/init.pp create mode 100644 deployment_scripts/puppet/modules/redis/manifests/params.pp create mode 100644 deployment_scripts/puppet/modules/redis/manifests/sentinel.pp create mode 100644 deployment_scripts/puppet/modules/redis/manifests/sentinel_params.pp create mode 100644 deployment_scripts/puppet/modules/redis/metadata.json create mode 100644 deployment_scripts/puppet/modules/redis/spec/spec_helper.rb create mode 100644 deployment_scripts/puppet/modules/redis/templates/logrotate.erb create mode 100644 deployment_scripts/puppet/modules/redis/templates/redis.conf.erb create mode 100644 deployment_scripts/puppet/modules/redis/templates/sentinel-init.conf.erb create mode 100644 deployment_scripts/puppet/modules/redis/templates/sentinel.conf.erb create mode 100644 deployment_scripts/puppet/modules/redis/tests/Puppetfile create mode 100644 deployment_scripts/puppet/modules/redis/tests/init.pp create mode 100644 deployment_scripts/puppet/modules/redis/tests/sentinel.pp diff --git a/deployment_scripts/puppet/modules/redis/.travis.yml b/deployment_scripts/puppet/modules/redis/.travis.yml new file mode 100644 index 0000000..53b9381 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/.travis.yml @@ -0,0 +1,21 @@ +--- +language: ruby +bundler_args: --without development +script: "bundle exec rake validate && bundle 
exec rake lint && bundle exec rake spec SPEC_OPTS='--color --format documentation'" +matrix: + fast_finish: true + include: + - rvm: 1.8.7 + env: PUPPET_GEM_VERSION="~> 2.7.0" FACTER_GEM_VERSION="~> 1.6.0" + - rvm: 1.8.7 + env: PUPPET_GEM_VERSION="~> 2.7.0" FACTER_GEM_VERSION="~> 1.7.0" + - rvm: 1.8.7 + env: PUPPET_GEM_VERSION="~> 3.0" + - rvm: 1.9.3 + env: PUPPET_GEM_VERSION="~> 3.0" + - rvm: 2.0.0 + env: PUPPET_GEM_VERSION="~> 3.0" + - rvm: 2.1.6 + env: PUPPET_GEM_VERSION="~> 4.0" +notifications: + email: false diff --git a/deployment_scripts/puppet/modules/redis/CHANGELOG b/deployment_scripts/puppet/modules/redis/CHANGELOG new file mode 100644 index 0000000..976c243 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/CHANGELOG @@ -0,0 +1,99 @@ +## fsalum-redis changelog + +Release notes for the fsalum-redis module. + +------------------------------------------ + +#### 1.0.3 - 2014-11-25 + +* Version lookup not working (#41) +* Remove from facter warning when redis is not installed #42 (@pmoranga) + +#### 1.0.2 - 2014-10-25 + +* Fixed Travis integration and tests +* Added metadata.json +* Making it compatible to be 'puppet approved' :) + +#### 1.0.1 - 2014-09-16 + +* Allowing redis package name as a param. #35 (@nprimmer) +* add $redis_version_override #37 (@tmclaugh) + +#### 1.0.0 - 2014-06-08 + +Many CHANGES to this version, complete rewrite of redis.conf template +to support Redis 2.2 to 2.8. + +Make sure to test the module and parameters before upgrading in production. + +Thanks to @zxjinn and @zeroecco for their hard work on this release. + +* some parameters were removed, added and/or default values changed +* update redis.conf to the latest version available #32 (@zxjinn) +* Logic for redis config file to support 2.2.x through 2.8.x #31 (@zeroecco) +* Unixsocketoptions #33 (@nbeernink) +* Changed operating system check to validate osfamily #29 (@george-b) + +#### 0.0.12 - 2014-03-21 + +* Unset cleanup #27 (@trlinkin) +* toggle ability to notify service to restart when config file changes #28 (@tmclaugh) + +#### 0.0.11 - 2014-02-19 + +* system_sysctl parameter for redis class (@tehmaspc) + +#### 0.0.10 - 2014-02-19 + +* Allow conf_bind to be unset (@stevelacey) +* Changing default of glueoutputbuf to UNSET (@tehmaspc) + +#### 0.0.9 - 2014-01-09 + +* Add quotes around all instances of UNSET (@charlesdunbar) + +#### 0.0.8 - 2013-12-20 + +* Setting mode permission for conf_dir (@KlavsKlavsen) + +#### 0.0.7 - 2013-12-11 + +* Add glueoutputbuf config option (@kryptx) +* Add support for Amazon Linux AMI (@mattboston) + +#### 0.0.6 - 2013-08-01 + +* Install package before poking config (@doismellburning) + +#### 0.0.5 - 2013-06-06 + +* Fix Puppet 3.2.1 deprecation warnings (@ripienaar) +* Fix duplicate entry for logrotate on Debian/RHEL using different paths (@arthurfurlan) +* Add $conf_nosave parameter (@fsalum) +* Minor changes to params variables (@fsalum) +* Update CHANGELOG format + +------------------------------------------ + +#### 0.0.4 - 2013-04-18 + +* Creates conf_dir directory if it doesn't exist + +------------------------------------------ + +#### 0.0.3 - 2013-02-25 + +* Fixing redis.conf less options on Debian + +------------------------------------------ + +#### 0.0.2 - 2013-02-25 + +* Fixing redis.conf location for Debian + +------------------------------------------ + +#### 0.0.1 - 2013-02-25 + +* Initial Forge release diff --git a/deployment_scripts/puppet/modules/redis/Gemfile b/deployment_scripts/puppet/modules/redis/Gemfile new file mode 100644 index 
0000000..02d5334 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/Gemfile @@ -0,0 +1,24 @@ +source ENV['GEM_SOURCE'] || "https://rubygems.org" + +group :development, :test do + gem 'rake', :require => false + gem 'rspec-puppet', :require => false + gem 'puppetlabs_spec_helper', :require => false + gem 'puppet-lint', :require => false + gem 'puppet_facts', :require => false + gem 'metadata-json-lint', :require => false +end + +if facterversion = ENV['FACTER_GEM_VERSION'] + gem 'facter', facterversion, :require => false +else + gem 'facter', :require => false +end + +if puppetversion = ENV['PUPPET_GEM_VERSION'] + gem 'puppet', puppetversion, :require => false +else + gem 'puppet', :require => false +end + +# vim:ft=ruby diff --git a/deployment_scripts/puppet/modules/redis/LICENSE b/deployment_scripts/puppet/modules/redis/LICENSE new file mode 100644 index 0000000..8ab6801 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {2014} {Felipe Salum} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deployment_scripts/puppet/modules/redis/Modulefile b/deployment_scripts/puppet/modules/redis/Modulefile new file mode 100644 index 0000000..c58f5cf --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/Modulefile @@ -0,0 +1,13 @@ +name 'fsalum-redis' +version '1.0.3' +source 'git://github.com/fsalum/puppet-redis.git' +author 'Felipe Salum' +license 'Apache License, Version 2.0' +summary 'Puppet module for Redis Server' +description 'Module to install and configure a Redis server' +project_page 'https://github.com/fsalum/puppet-redis' + +## Add dependencies, if any: + +# https://forge.puppetlabs.com/thias/sysctl +dependency 'thias/sysctl', '>= 0.3.0' diff --git a/deployment_scripts/puppet/modules/redis/README.md b/deployment_scripts/puppet/modules/redis/README.md new file mode 100644 index 0000000..54d9898 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/README.md @@ -0,0 +1,108 @@ +#Redis Module for Puppet +[![puppet-redis](https://img.shields.io/puppetforge/v/fsalum/redis.svg)](https://forge.puppetlabs.com/fsalum/redis) [![Build Status](https://travis-ci.org/fsalum/puppet-redis.svg?branch=master)](https://travis-ci.org/fsalum/puppet-redis) + +This module installs and manages a Redis server. All redis.conf options are +accepted in the parameterized class. + +##Important + +If you are upgrading this module from 0.x to 1.0+, please test it carefully +outside production as it is not fully backwards compatible. + +Some class parameters were added, removed or had their default values changed. + +The redis.conf template has been completely rewritten to support Redis 2.2+ to 2.8+. + +##Operating System + +Tested on CentOS 6.5, Ubuntu Saucy/Trusty/Precise, Debian 7.4 +redis.conf options compatible with Redis 2.2, 2.4, 2.6, 2.8 + +##Quick Start + +Use the default parameters: + + class { 'redis': } + +To change the port and listening network interface: + + class { 'redis': + conf_port => '6379', + conf_bind => '0.0.0.0', + } + +##Parameters + +Check the [init.pp](https://github.com/fsalum/puppet-redis/blob/master/manifests/init.pp) file for a complete list of parameters accepted. 
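+
+For example, a fuller declaration tuning persistence and memory limits might
+look like this (the values are illustrative only; most `conf_*` parameters
+correspond to the redis.conf directive of the same name, with underscores in
+place of dashes):
+
+    class { 'redis':
+      conf_port => '6379',
+      conf_bind => '127.0.0.1',
+      conf_maxmemory => '256mb',
+      conf_maxmemory_policy => 'allkeys-lru',
+      conf_appendonly => 'yes',
+      conf_save => { '900' => '1', '300' => '10' },
+    }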
+ +* custom sysctl + +To enable and set important Linux kernel sysctl parameters as described in the [Redis Admin Guide](http://redis.io/topics/admin) - use the following configuration option: + + class { 'redis': + system_sysctl => true + } + +By default, this sysctl parameter will not be enabled. Furthermore, you will need the sysctl module defined in the [Modulefile](https://github.com/fsalum/puppet-redis/blob/master/Modulefile) file. + +* service restart + +If you need to execute a controlled restart of redis after changes due master/slave relationships to avoid that both are restarted at the same time use the parameter below. + + class { 'redis': + service_restart => false + } + +By default service restart is true. + +#Sentinel + +This module supports Redis Sentinel that comes with Redis 2.8+ with all the configuration parameters. + +It manages upstart scripts (can be deactivated with parameter manage_upstart_scripts = false). + +##Operating System + +Tested on Ubuntu 14.04 with Redis 2.8 + +##Quick Start + +Example: + + class { redis::sentinel: + conf_port => '26379', + sentinel_confs => { + 'mymaster' => { + 'monitor' => '127.0.0.1 6379 2', + 'down-after-milliseconds' => '60000', + 'failover-timeout' => 180000, + 'notification-script' => '/etc/redis/scripts/thescript.py', + 'parallel-syncs' => '3', + }, + 'resque' => { + 'monitor' => '127.0.0.1 6379 4', + 'down-after-milliseconds' => '10000', + 'failover-timeout' => 180000, + 'notification-script' => '/etc/redis/scripts/thescript.py', + 'parallel-syncs' => '5', + } + } + } + +##Copyright and License + +Copyright (C) 2014 Felipe Salum + +Felipe Salum can be contacted at: fsalum@gmail.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deployment_scripts/puppet/modules/redis/Rakefile b/deployment_scripts/puppet/modules/redis/Rakefile new file mode 100644 index 0000000..43e1b83 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/Rakefile @@ -0,0 +1,11 @@ +require 'puppetlabs_spec_helper/rake_tasks' +require 'puppet-lint/tasks/puppet-lint' + +PuppetLint.configuration.fail_on_warnings +PuppetLint.configuration.send('relative') +PuppetLint.configuration.send('disable_80chars') +PuppetLint.configuration.send('disable_class_inherits_from_params_class') +PuppetLint.configuration.send('disable_class_parameter_defaults') +PuppetLint.configuration.send('disable_documentation') +PuppetLint.configuration.send('disable_single_quote_string_with_variables') +PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"] diff --git a/deployment_scripts/puppet/modules/redis/Vagrantfile b/deployment_scripts/puppet/modules/redis/Vagrantfile new file mode 100644 index 0000000..5b6a302 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/Vagrantfile @@ -0,0 +1,43 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +# + +if ENV['VAGRANT_HOME'].nil? 
+ ENV['VAGRANT_HOME'] = './' +end + +redis = { + :'centos5' => { :memory => '120', :ip => '10.1.1.10', :box => 'puppetlabs/centos-5.11-64-puppet', :domain => 'redis.local' }, + :'centos65' => { :memory => '120', :ip => '10.1.1.11', :box => 'puppetlabs/centos-6.5-64-puppet', :domain => 'redis.local' }, + :'precise' => { :memory => '120', :ip => '10.1.1.20', :box => 'puppetlabs/ubuntu-12.04-64-puppet', :domain => 'redis.local' }, + :'saucy' => { :memory => '120', :ip => '10.1.1.21', :box => 'puppetlabs/ubuntu-13.10-64-puppet', :domain => 'redis.local' }, + :'trusty' => { :memory => '240', :ip => '10.1.1.22', :box => 'puppetlabs/ubuntu-14.04-64-puppet', :domain => 'redis.local' }, + :'squeeze' => { :memory => '120', :ip => '10.1.1.30', :box => 'puppetlabs/debian-6.0.9-64-puppet', :domain => 'redis.local' }, + :'wheezy' => { :memory => '120', :ip => '10.1.1.31', :box => 'puppetlabs/debian-7.6-64-puppet', :domain => 'redis.local' }, +} + +Vagrant::Config.run("2") do |config| + config.vbguest.auto_update = true + config.hostmanager.enabled = false + + redis.each_pair do |name, opts| + config.vm.define name do |n| + config.vm.provider :virtualbox do |vb| + vb.customize ["modifyvm", :id, "--memory", opts[:memory] ] + end + n.vm.network "private_network", ip: opts[:ip] + n.vm.box = opts[:box] + n.vm.host_name = "#{name}" + "." + opts[:domain] + n.vm.synced_folder "#{ENV['VAGRANT_HOME']}","/etc/puppet/modules/redis" + n.vm.provision :shell, :inline => "gem install puppet facter --no-ri --no-rdoc" if name == "trusty" + n.vm.provision :shell, :inline => "puppet module install thias-sysctl --force" + n.vm.provision :puppet do |puppet| + puppet.manifests_path = "tests" + puppet.manifest_file = "init.pp" + #puppet.manifest_file = "sentinel.pp" + puppet.module_path = "./" + end + end + end + +end diff --git a/deployment_scripts/puppet/modules/redis/lib/facter/redis_version.rb b/deployment_scripts/puppet/modules/redis/lib/facter/redis_version.rb new file mode 100644 index 0000000..1aa7606 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/lib/facter/redis_version.rb @@ -0,0 +1,71 @@ +require 'facter' + +Facter.add("redis_version", :timeout => 120) do + confine :osfamily => "Debian" + + setcode do + + dpkg = `which apt-cache 2> /dev/null`.chomp + if dpkg == '' + dpkg = '/usr/bin/apt-cache' + end + + redis_version = Facter::Util::Resolution.exec('/usr/bin/redis-server --version') + if redis_version.nil? + redis_version = Facter::Util::Resolution.exec(dpkg+" show redis-server 2> /dev/null | /bin/grep -i 'version:' | /usr/bin/awk '{printf(\"%s\",$2)}' | sort -nr | head -1") + end + + case redis_version + when /2\.8\.[0-9]/ + #set version to 2.8 + redis_version = '2.8.x' + when /2\.6\.[0-9]/ + #set version to 2.6 + redis_version = '2.6.x' + when /2\.4\.[0-9]/ + #set version to 2.4 + redis_version = '2.4.x' + when /2\.2\.[0-9]/ + #set version to 2.2 + redis_version = '2.2.x' + else + redis_version = 'nil' + end + redis_version + end +end + +Facter.add("redis_version", :timeout => 120) do + confine :osfamily => "RedHat" + + setcode do + + yum = `which yum 2> /dev/null`.chomp + if yum == '' + yum = '/usr/bin/yum' + end + + redis_version = Facter::Util::Resolution.exec('/usr/sbin/redis-server --version') + if redis_version.nil? 
+ redis_version = Facter::Util::Resolution.exec(yum+" info redis 2> /dev/null | /bin/grep '^Version' | /bin/awk -F ':' '{printf(\"%s\",$2)}' | sort -nr | head -1") + end + + case redis_version + when /2\.8\.[0-9]/ + #set version to 2.8 + redis_version = '2.8.x' + when /2\.6\.[0-9]/ + #set version to 2.6 + redis_version = '2.6.x' + when /2\.4\.[0-9]/ + #set version to 2.4 + redis_version = '2.4.x' + when /2\.2\.[0-9]/ + #set version to 2.2 + redis_version = '2.2.x' + else + redis_version = 'nil' + end + redis_version + end +end diff --git a/deployment_scripts/puppet/modules/redis/manifests/init.pp b/deployment_scripts/puppet/modules/redis/manifests/init.pp new file mode 100644 index 0000000..d2d3814 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/manifests/init.pp @@ -0,0 +1,211 @@ +# == Class: redis +# +# Install and configure a Redis server +# +# === Parameters +# +# All the redis.conf parameters can be passed to the class. +# See below for a complete list of parameters accepted. +# +# Check the README.md file for any further information about parameters for this class. +# +# === Examples +# +# class { redis: +# conf_port => '6380', +# conf_bind => '0.0.0.0', +# } +# +# === Authors +# +# Felipe Salum +# +# === Copyright +# +# Copyright 2013 Felipe Salum, unless otherwise noted. +# +class redis ( + $conf_activerehashing = 'yes', + $conf_aof_rewrite_incremental_fsync = 'yes', # 2.6+ + $conf_append = {}, # hash of custom variables+values + $conf_appendfilename = undef, # default appendonly.aof + $conf_appendfsync = 'everysec', + $conf_appendonly = 'no', + $conf_auto_aof_rewrite_min_size = '64mb', + $conf_auto_aof_rewrite_percentage = '100', + $conf_bind = '0.0.0.0', + $conf_client_output_buffer_limit_normal = '0 0 0', # 2.6+ + $conf_client_output_buffer_limit_pubsub = '32mb 8mb 60', # 2.6+ + $conf_client_output_buffer_limit_slave = '256mb 64mb 60', # 2.6+ + $conf_daemonize = 'yes', + $conf_databases = '16', + $conf_dbfilename = 'dump.rdb', + $conf_dir = '/var/lib/redis/', + $conf_glueoutputbuf = undef, + $conf_hash_max_zipmap_entries = '512', + $conf_hash_max_zipmap_value = '64', + $conf_hll_sparse_max_bytes = undef, # default 3000, 2.8.5?+ + $conf_hz = '10', # 2.6+ + $conf_include = [], # array of custom include files + $conf_list_max_ziplist_entries = '512', + $conf_list_max_ziplist_value = '64', + $conf_logfile = undef, #default "" + $conf_loglevel = 'notice', + $conf_lua_time_limit = '5000', # 2.6+ + $conf_masterauth = undef, + $conf_maxclients = undef, # default 10000 in 2.6+ + $conf_maxmemory = undef, + $conf_maxmemory_policy = undef, + $conf_maxmemory_samples = undef, + $conf_min_slaves_max_lag = undef, # default 10, 2.8+ + $conf_min_slaves_to_write = undef, # 2.8+ + $conf_no_appendfsync_on_rewrite = 'no', + $conf_nosave = undef, + $conf_notify_keyspace_events = undef, # 2.8+ + $conf_pidfile = undef, + $conf_port = '6379', + $conf_rdbchecksum = 'yes', # 2.6+ + $conf_rdbcompression = 'yes', + $conf_repl_backlog_size = '1mb', # 2,8+ + $conf_repl_backlog_ttl = '3600', # 2.8+ + $conf_repl_disable_tcp_nodelay = 'no', # 2,6+ + $conf_repl_ping_slave_period = '10', # 2.4+ + $conf_repl_timeout = '60', # 2.4+ + $conf_requirepass = undef, + $conf_save = {'900' =>'1', '300' => '10', '60' => '10000'}, + $conf_set_max_intset_entries = '512', + $conf_slave_priority = undef, # 2.6+ + $conf_slave_read_only = 'yes', # 2.6+ + $conf_slave_serve_stale_data = 'yes', + $conf_slaveof = undef, + $conf_slowlog_log_slower_than = '10000', + $conf_slowlog_max_len = '128', + 
$conf_stop_writes_on_bgsave_error = 'yes', # 2.6+ + $conf_syslog_enabled = undef, + $conf_syslog_facility = undef, + $conf_syslog_ident = undef, + $conf_tcp_backlog = undef, # default is 511, 2.8.5+ + $conf_tcp_keepalive = '0', # 2.6+ + $conf_timeout = '0', + $conf_unixsocket = '/tmp/redis.sock', # 2.2+ + $conf_unixsocketperm = '755', # 2.4+ + $conf_vm_enabled = 'no', # deprecated in 2.4+ + $conf_vm_max_memory = '0', # deprecated in 2.4+ + $conf_vm_max_threads = '4', # deprecated in 2.4+ + $conf_vm_page_size = '32', # deprecated in 2.4+ + $conf_vm_pages = '134217728', # deprecated in 2.4+ + $conf_vm_swap_file = '/tmp/redis.swap', # deprecated in 2.4+ + $conf_zset_max_ziplist_entries = '128', # 2.4+ + $conf_zset_max_ziplist_value = '64', # 2.4+ + $package_ensure = 'present', + $package_name = undef, + $redis_version_override = undef, + $service_enable = true, + $service_ensure = 'running', + $service_restart = true, + $system_sysctl = false, +) { + + include redis::params + + $conf_redis = $redis::params::conf + $conf_logrotate = $redis::params::conf_logrotate + $service = $redis::params::service + + if $redis_version_override { + $redis_version_real = $redis_version_override + } else { + $redis_version_real = $package_ensure ? { + /2\.2\..*/ => '2.2.x', + /2\.4\..*/ => '2.4.x', + /2\.6\..*/ => '2.6.x', + /2\.8\..*/ => '2.8.x', + default => $::redis_version + } + } + + if $package_name { + $package = $package_name + }else{ + $package = $redis::params::package + } + + if $conf_pidfile { + $conf_pidfile_real = $conf_pidfile + }else{ + $conf_pidfile_real = $::redis::params::pidfile + } + + if $conf_logfile { + $conf_logfile_real = $conf_logfile + }else{ + $conf_logfile_real = $::redis::params::logfile + } + + package { 'redis': + ensure => $package_ensure, + name => $package, + } + + service { 'redis': + ensure => $service_ensure, + name => $service, + enable => $service_enable, + hasrestart => true, + hasstatus => true, + require => [ Package['redis'], + Exec[$conf_dir], + File[$conf_redis] ], + } + + file { $conf_redis: + path => $conf_redis, + content => template('redis/redis.conf.erb'), + owner => root, + group => root, + mode => '0644', + require => Package['redis'], + } + + file { $conf_logrotate: + path => $conf_logrotate, + content => template('redis/logrotate.erb'), + owner => root, + group => root, + mode => '0644', + } + + exec { $conf_dir: + path => '/bin:/usr/bin:/sbin:/usr/sbin', + command => "mkdir -p ${conf_dir}", + user => root, + group => root, + creates => $conf_dir, + before => Service['redis'], + require => Package['redis'], + } + + file { $conf_dir: + ensure => directory, + owner => redis, + group => redis, + mode => '0755', + before => Service['redis'], + require => Exec[$conf_dir], + } + + if ( $system_sysctl == true ) { + # add necessary kernel parameters + # see the redis admin guide here: http://redis.io/topics/admin + sysctl { 'vm.overcommit_memory': + value => '1', + } + } + + if $service_restart == true { + # https://github.com/fsalum/puppet-redis/pull/28 + Exec[$conf_dir] ~> Service['redis'] + File[$conf_redis] ~> Service['redis'] + } + +} diff --git a/deployment_scripts/puppet/modules/redis/manifests/params.pp b/deployment_scripts/puppet/modules/redis/manifests/params.pp new file mode 100644 index 0000000..1373b4a --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/manifests/params.pp @@ -0,0 +1,37 @@ +# Class: redis::params +# +# This class configures parameters for the puppet-redis module. 
+# +# Parameters: +# +# Actions: +# +# Requires: +# +# Sample Usage: +# +class redis::params { + + case $::osfamily { + 'redhat': { + $package = 'redis' + $service = 'redis' + $conf = '/etc/redis.conf' + $conf_logrotate = '/etc/logrotate.d/redis' + $pidfile = '/var/run/redis/redis.pid' + $logfile = '/var/log/redis/redis.log' + } + 'debian': { + $package = 'redis-server' + $service = 'redis-server' + $conf = '/etc/redis/redis.conf' + $conf_logrotate = '/etc/logrotate.d/redis-server' + $pidfile = '/var/run/redis/redis-server.pid' + $logfile = '/var/log/redis/redis-server.log' + } + default: { + fail("Unsupported osfamily: ${::osfamily}, module ${module_name} only support osfamily RedHat and Debian") + } + } + +} diff --git a/deployment_scripts/puppet/modules/redis/manifests/sentinel.pp b/deployment_scripts/puppet/modules/redis/manifests/sentinel.pp new file mode 100644 index 0000000..fa9c3ed --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/manifests/sentinel.pp @@ -0,0 +1,152 @@ +# == Class: redis::sentinel +# +# Install and configure a Redis Sentinel +# +# === Parameters +# +# All the sentinel.conf parameters can be passed to the class. +# See below for a complete list of parameters accepted. +# +# Check the README.md file for any further information about parameters for this class. +# +# === Examples +# +# class { redis::sentinel: +# conf_port => '26379', +# sentinel_confs => { +# 'mymaster' => { +# 'monitor' => '127.0.0.1 6379 2', +# 'down-after-milliseconds' => '60000', +# 'notification-script' => '/etc/redis/scripts/thescript.py', +# 'parallel-syncs' => '3', +# } +# 'resque' => { +# 'monitor' => 'resque 6379 4', +# 'down-after-milliseconds' => '10000', +# 'failover-timeout' => 180000, +# 'notification-script' => '/etc/redis/scripts/thescript.py', +# 'parallel-syncs' => '5', +# } +# } +# } +# +# === Authors +# +# Victor Garcia +# +# === Copyright +# +# Copyright 2013 Felipe Salum, unless otherwise noted. 
+# +class redis::sentinel ( + $conf_port = '26379', + $conf_daemonize = 'yes', + $sentinel_confs = [], + $service_enable = true, + $service_ensure = 'running', + $service_restart = true, + $manage_upstart_scripts = true, + $package_name = undef, +) { + + include redis::sentinel_params + + $conf_sentinel = $redis::sentinel_params::conf + $conf_sentinel_orig = "${conf_sentinel}.puppet" + $conf_logrotate = $redis::sentinel_params::conf_logrotate + $service = $redis::sentinel_params::service + $upstart_script = $redis::sentinel_params::upstart_script + + if $package_name { + $package = $package_name + }else{ + $package = $redis::sentinel_params::package + } + + if $conf_pidfile { + $conf_pidfile_real = $conf_pidfile + }else{ + $conf_pidfile_real = $::redis::sentinel_params::pidfile + } + if $conf_logfile { + $conf_logfile_real = $conf_logfile + }else{ + $conf_logfile_real = $::redis::sentinel_params::logfile + } + + package { 'redis': + ensure => $package_ensure, + name => $package, + } + + if $manage_upstart_scripts == true { + service { 'sentinel': + ensure => $service_ensure, + name => $service, + hasrestart => true, + hasstatus => true, + require => [ File[$conf_sentinel_orig], + File[$upstart_script] ], + provider => 'upstart' + } + } else { + service { 'sentinel': + ensure => $service_ensure, + name => $service, + enable => $service_enable, + hasrestart => true, + hasstatus => true, + require => [ Package['redis'], + File[$conf_sentinel_orig] ], + } + } + + # Sentinel rewrites the config file so, to avoid overriding it + # with the original content everytime puppet runs, this manages the + # "notify" event on an original file that triggers a copy to the good one + # only if it changed. + file { $conf_sentinel_orig: + content => template('redis/sentinel.conf.erb'), + owner => redis, + group => redis, + mode => '0644', + require => Package['redis'], + notify => Exec["cp ${conf_sentinel_orig} ${conf_sentinel}"], + } + + file { $conf_sentinel: + owner => redis, + group => redis, + require => Package['redis'], + } + + exec { "cp ${conf_sentinel_orig} ${conf_sentinel}": + path => '/bin:/usr/bin:/sbin:/usr/sbin', + refreshonly => true, + user => redis, + group => redis, + notify => Service['sentinel'], + require => File[$conf_sentinel], + } + + file { $conf_logrotate: + path => $conf_logrotate, + content => template('redis/logrotate.erb'), + owner => root, + group => root, + mode => '0644', + } + + if $service_restart == true { + # https://github.com/fsalum/puppet-redis/pull/28 + File[$conf_sentinel_orig] ~> Service['sentinel'] + } + + if $manage_upstart_scripts == true { + file { $upstart_script: + ensure => present, + content => template('redis/sentinel-init.conf.erb'), + } + } + +} diff --git a/deployment_scripts/puppet/modules/redis/manifests/sentinel_params.pp b/deployment_scripts/puppet/modules/redis/manifests/sentinel_params.pp new file mode 100644 index 0000000..5fc8bf7 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/manifests/sentinel_params.pp @@ -0,0 +1,42 @@ +# Class: redis::sentinel_params +# +# This class configures sentinel parameters for the puppet-redis module. 
+# +# Parameters: +# +# Actions: +# +# Requires: +# +# Sample Usage: +# +class redis::sentinel_params { + + case $::osfamily { + # TODO: add redhat support + #'redhat': { + # $package = 'redis' + # $service = 'redis-sentinel' + # $conf = '/etc/sentinel.conf' + # $conf_dir = undef + # $conf_logrotate = '/etc/logrotate.d/sentinel' + # $pidfile = '/var/run/redis/sentinel.pid' + # $logfile = '/var/log/redis/sentinel.log' + # $upstart_script = '/etc/init/redis-sentinel.conf' + #} + 'debian': { + $package = 'redis-server' + $service = 'redis-sentinel' + $conf_dir = '/etc/redis' + $conf = '/etc/redis/sentinel.conf' + $conf_logrotate = '/etc/logrotate.d/redis-sentinel' + $pidfile = '/var/run/redis/redis-sentinel.pid' + $logfile = '/var/log/redis/redis-sentinel.log' + $upstart_script = '/etc/init/redis-sentinel.conf' + } + default: { + fail("Unsupported osfamily: ${::osfamily}, module ${module_name} only support osfamily RedHat and Debian") + } + } + +} diff --git a/deployment_scripts/puppet/modules/redis/metadata.json b/deployment_scripts/puppet/modules/redis/metadata.json new file mode 100644 index 0000000..917416f --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/metadata.json @@ -0,0 +1,51 @@ +{ + "name": "fsalum-redis", + "version": "1.0.3", + "author": "Felipe Salum", + "summary": "Puppet module for Redis Server", + "license": "Apache-2.0", + "source": "git://github.com/fsalum/puppet-redis.git", + "project_page": "https://github.com/fsalum/puppet-redis", + "issues_url": "https://github.com/fsalum/puppet-redis/issues", + "tags": ["redis", "memcached", "cache", "nosql"], + "operatingsystem_support": [ + { + "operatingsystem": "Centos", + "operatingsystemrelease": [ + "5", + "6" + ] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": [ + "12.04", + "13.10", + "14.04" + ] + }, + { + "operatingsystem":"Debian", + "operatingsystemrelease": [ + "6", + "7" + ] + } + ], + "requirements": [ + { + "name": "pe", + "version_requirement": "3.x" + }, + { + "name": "puppet", + "version_requirement": "3.x" + } + ], + "dependencies": [ + { + "name": "thias/sysctl", + "version_requirement": ">= 0.3.0" + } + ] +} diff --git a/deployment_scripts/puppet/modules/redis/spec/spec_helper.rb b/deployment_scripts/puppet/modules/redis/spec/spec_helper.rb new file mode 100644 index 0000000..5fda588 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/spec/spec_helper.rb @@ -0,0 +1,17 @@ +dir = File.expand_path(File.dirname(__FILE__)) +$LOAD_PATH.unshift File.join(dir, 'lib') + +require 'mocha' +require 'puppet' +require 'rspec' +require 'spec/autorun' + +Spec::Runner.configure do |config| + config.mock_with :mocha +end + +# We need this because the RAL uses 'should' as a method. This +# allows us the same behaviour but with a different method name. 
+class Object + alias :must :should +end diff --git a/deployment_scripts/puppet/modules/redis/templates/logrotate.erb b/deployment_scripts/puppet/modules/redis/templates/logrotate.erb new file mode 100644 index 0000000..b9ba724 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/templates/logrotate.erb @@ -0,0 +1,9 @@ +<%= @conf_logfile_real %> { + weekly + rotate 10 + copytruncate + delaycompress + compress + notifempty + missingok +} diff --git a/deployment_scripts/puppet/modules/redis/templates/redis.conf.erb b/deployment_scripts/puppet/modules/redis/templates/redis.conf.erb new file mode 100644 index 0000000..aec4179 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/templates/redis.conf.erb @@ -0,0 +1,2790 @@ +<%- if @redis_version_real == "2.2.x" -%> +# MANAGED BY PUPPET # +# +# Redis 2.2 configuration file example + +# Note on units: when memory size is needed, it is possible to specifiy +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +<%- if @conf_daemonize -%> +daemonize <%= @conf_daemonize %> +<%- end -%> + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile <%= @conf_pidfile_real %> + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +<%- if @conf_port -%> +port <%= @conf_port %> +<%- end -%> + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +# bind 127.0.0.1 +<%- if @conf_bind -%> +bind <%= @conf_bind %> +<%- end -%> + +# Specify the path for the unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +<%- if @conf_unixsocket -%> +unixsocket <%= @conf_unixsocket %> +<%- end -%> + +# Close the connection after a client is idle for N seconds (0 to disable) +<%- if @conf_timeout -%> +timeout <%= @conf_timeout %> +<%- end -%> + +# Set server verbosity to 'debug' +# it can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +<%- if @conf_loglevel -%> +loglevel <%= @conf_loglevel %> +<%- end -%> + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile <%= @conf_logfile_real %> + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no +<%- if @conf_syslog_enabled -%> +syslog-enabled <%= @conf_syslog_enabled %> +<%- end -%> + +# Specify the syslog identity. +# syslog-ident redis +<%- if @conf_syslog_ident -%> +syslog-ident <%= @conf_syslog_ident %> +<%- end -%> + +# Specify the syslog facility. 
Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 +<%- if @conf_syslog_facility -%> +syslog-facility <%= @conf_syslog_facility %> +<%- end -%> + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +<%- if @conf_databases -%> +databases <%= @conf_databases %> +<%- end -%> + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +<%- if @conf_nosave -%> +# do not persist to disk +<%- else -%> +<%- @conf_save.sort.each do |seconds, changes| -%> +save <%= seconds -%> <%= changes -%> <%= "\n" -%> +<%- end -%> +<%- end -%> + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +<%- if @conf_rdbcompression -%> +rdbcompression <%= @conf_rdbcompression %> +<%- end -%> + +# The filename where to dump the DB +<%- if @conf_dbfilename -%> +dbfilename <%= @conf_dbfilename %> +<%- end -%> + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# Also the Append Only File will be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +<%- if @conf_dir -%> +dir <%= @conf_dir %> +<%- end -%> + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +<%- if @conf_slaveof -%> +slaveof <%= @conf_slaveof %> +<%- end -%> + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth +<%- if @conf_masterauth -%> +masterauth <%= @conf_masterauth %> +<%- end -%> + +# When a slave lost the connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of data data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. 
+# +<%- if @conf_slave_serve_stale_data -%> +slave-serve-stale-data <%= @conf_slave_serve_stale_data %> +<%- end -%> + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared +<%- if @conf_requirepass -%> +requirepass <%= @conf_requirepass %> +<%- end -%> + +# Command renaming. +# +# It is possilbe to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# of hard to guess so that it will be still available for internal-use +# tools but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possilbe to completely kill a command renaming it into +# an empty string: +# +# rename-command CONFIG "" + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default there +# is no limit, and it's up to the number of file descriptors the Redis process +# is able to open. The special value '0' means no limits. +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 128 +<%- if @conf_maxclients -%> +maxclients <%= @conf_maxclients %> +<%- end -%> + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys with an +# EXPIRE set. It will try to start freeing keys that are going to expire +# in little time and preserve keys with a longer time to live. +# Redis will also try to remove objects from free lists if possible. +# +# If all this fails, Redis will start to reply with errors to commands +# that will use more memory, like SET, LPUSH, and so on, and will continue +# to reply to most read-only commands like GET. +# +# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a +# 'state' server or cache, not as a real DB. When Redis is used as a real +# database the memory usage will grow over the weeks, it will be obvious if +# it is going to use too much memory in the long run, and you'll have the time +# to upgrade. With maxmemory after the limit is reached you'll start to get +# errors for write operations, and this may even lead to DB inconsistency. +# +# maxmemory +<%- if @conf_maxmemory -%> +maxmemory <%= @conf_maxmemory %> +<%- end -%> + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached? 
You can select among five behavior: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys->random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with all the kind of policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. +# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru +<%- if @conf_maxmemory_policy -%> +maxmemory-policy <%= @conf_maxmemory_policy %> +<%- end -%> + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 +<%- if @conf_maxmemory_samples -%> +maxmemory-samples <%= @conf_maxmemory_samples %> +<%- end -%> + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. If you can live +# with the idea that the latest records will be lost if something like a crash +# happens this is the preferred way to run Redis. If instead you care a lot +# about your data and don't want to that a single record can get lost you should +# enable the append only mode: when this mode is enabled Redis will append +# every write operation received in the file appendonly.aof. This file will +# be read on startup in order to rebuild the full dataset in memory. +# +# Note that you can have both the async dumps and the append only file if you +# like (you have to comment the "save" statements above to disable the dumps). +# Still if append only mode is enabled Redis will load the data from the +# log file at startup ignoring the dump.rdb file. +# +# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append +# log file in background when it gets too big. + +<%- if @conf_appendonly -%> +appendonly <%= @conf_appendonly %> +<%- end -%> + +# The name of the append only file (default: "appendonly.aof") +# appendfilename appendonly.aof +<%- if @conf_appendfilename -%> +appendfilename <%= @conf_appendfilename %> +<%- end -%> + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only if one second passed since the last fsync. Compromise. +# +# The default is "everysec" that's usually the right compromise between +# speed and data safety. 
It's up to you to understand if you can relax this to +# "no" that will will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# If unsure, use "everysec". + +# appendfsync always +<%- if @conf_appendfsync -%> +appendfsync <%= @conf_appendfsync %> +<%- end -%> +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving the durability of Redis is +# the same as "appendfsync none", that in pratical terms means that it is +# possible to lost up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +<%- if @conf_no_appendfsync_on_rewrite -%> +no-appendfsync-on-rewrite <%= @conf_no_appendfsync_on_rewrite %> +<%- end -%> + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +<%- if @conf_slowlog_log_slower_than -%> +slowlog-log-slower-than <%= @conf_slowlog_log_slower_than %> +<%- end -%> + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +<%- if @conf_slowlog_max_len -%> +slowlog-max-len <%= @conf_slowlog_max_len %> +<%- end -%> + +################################ VIRTUAL MEMORY ############################### + +### WARNING! Virtual Memory is deprecated in Redis 2.4 +### The use of Virtual Memory is strongly discouraged. + +# Virtual Memory allows Redis to work with datasets bigger than the actual +# amount of RAM needed to hold the whole dataset in memory. 
+# In order to do so very used keys are taken in memory while the other keys +# are swapped into a swap file, similarly to what operating systems do +# with memory pages. +# +# To enable VM just set 'vm-enabled' to yes, and set the following three +# VM parameters accordingly to your needs. + +<%- if @conf_vm_enabled -%> +vm-enabled <%= @conf_vm_enabled %> +<%- end -%> +# vm-enabled yes + +# This is the path of the Redis swap file. As you can guess, swap files +# can't be shared by different Redis instances, so make sure to use a swap +# file for every redis process you are running. Redis will complain if the +# swap file is already in use. +# +# The best kind of storage for the Redis swap file (that's accessed at random) +# is a Solid State Disk (SSD). +# +# *** WARNING *** if you are using a shared hosting the default of putting +# the swap file under /tmp is not secure. Create a dir with access granted +# only to Redis user and configure Redis to create the swap file there. +<%- if @conf_vm_swap_file -%> +vm-swap-file <%= @conf_vm_swap_file %> +<%- end -%> + +# vm-max-memory configures the VM to use at max the specified amount of +# RAM. Everything that deos not fit will be swapped on disk *if* possible, that +# is, if there is still enough contiguous space in the swap file. +# +# With vm-max-memory 0 the system will swap everything it can. Not a good +# default, just specify the max amount of RAM you can in bytes, but it's +# better to leave some margin. For instance specify an amount of RAM +# that's more or less between 60 and 80% of your free RAM. +<%- if @conf_vm_max_memory -%> +vm-max-memory <%= @conf_vm_max_memory %> +<%- end -%> + +# Redis swap files is split into pages. An object can be saved using multiple +# contiguous pages, but pages can't be shared between different objects. +# So if your page is too big, small objects swapped out on disk will waste +# a lot of space. If you page is too small, there is less space in the swap +# file (assuming you configured the same number of total swap file pages). +# +# If you use a lot of small objects, use a page size of 64 or 32 bytes. +# If you use a lot of big objects, use a bigger page size. +# If unsure, use the default :) +<%- if @conf_vm_page_size -%> +vm-page-size <%= @conf_vm_page_size %> +<%- end -%> + +# Number of total memory pages in the swap file. +# Given that the page table (a bitmap of free/used pages) is taken in memory, +# every 8 pages on disk will consume 1 byte of RAM. +# +# The total swap size is vm-page-size * vm-pages +# +# With the default of 32-bytes memory pages and 134217728 pages Redis will +# use a 4 GB swap file, that will use 16 MB of RAM for the page table. +# +# It's better to use the smallest acceptable value for your application, +# but the default is large in order to work in most conditions. +<%- if @conf_vm_pages -%> +vm-pages <%= @conf_vm_pages %> +<%- end -%> + +# Max number of VM I/O threads running at the same time. +# This threads are used to read/write data from/to swap file, since they +# also encode and decode objects from disk to memory or the reverse, a bigger +# number of threads can help with big objects even if they can't help with +# I/O itself as the physical device may not be able to couple with many +# reads/writes operations at the same time. +# +# The special value of 0 turn off threaded I/O and enables the blocking +# Virtual Memory implementation. 
+<%- if @conf_vm_max_threads -%> +vm-max-threads <%= @conf_vm_max_threads %> +<%- end -%> + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded in a special way (much more memory efficient) when they +# have at max a given numer of elements, and the biggest element does not +# exceed a given threshold. You can configure this limits with the following +# configuration directives. +<%- if @conf_hash_max_zipmap_entries -%> +hash-max-zipmap-entries <%= @conf_hash_max_zipmap_entries %> +<%- end -%> +<%- if @conf_hash_max_zipmap_value -%> +hash-max-zipmap-value <%= @conf_hash_max_zipmap_value %> +<%- end -%> + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +<%- if @conf_list_max_ziplist_entries -%> +list-max-ziplist-entries <%= @conf_list_max_ziplist_entries %> +<%- end -%> +<%- if @conf_list_max_ziplist_value -%> +list-max-ziplist-value <%= @conf_list_max_ziplist_value %> +<%- end -%> + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happens to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +<%- if @conf_set_max_intset_entries -%> +set-max-intset-entries <%= @conf_set_max_intset_entries %> +<%- end -%> + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into an hash table +# that is rhashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# active rehashing the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply form time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +<%- if @conf_activerehashing -%> +activerehashing <%= @conf_activerehashing %> +<%- end -%> + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. 
+# +# include /path/to/local.conf +# include /path/to/other.conf +<%- if @conf_include -%> +<%- @conf_include.each do |include| -%> +include <%= include %><%= "\n" -%> +<%- end -%> +<%- end -%> +<%- end -%> +<%- if @redis_version_real == "2.4.x" -%> +# MANAGED BY PUPPET # +# +# Redis 2.4 configuration file example + +# Note on units: when memory size is needed, it is possible to specifiy +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +<%- if @conf_daemonize -%> +daemonize <%= @conf_daemonize %> +<%- end -%> + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile <%= @conf_pidfile_real %> + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +<%- if @conf_port -%> +port <%= @conf_port %> +<%- end -%> + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +# bind 127.0.0.1 +<%- if @conf_bind -%> +bind <%= @conf_bind %> +<%- end -%> + +# Specify the path for the unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +<%- if @conf_unixsocket -%> +unixsocket <%= @conf_unixsocket %> +<%- end -%> +# unixsocketperm 755 +<%- if @conf_unixsocketperm -%> +unixsocketperm <%= @conf_unixsocketperm %> +<%- end -%> + +# Close the connection after a client is idle for N seconds (0 to disable) +<%- if @conf_timeout -%> +timeout <%= @conf_timeout %> +<%- end -%> + +# Set server verbosity to 'debug' +# it can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +<%- if @conf_loglevel -%> +loglevel <%= @conf_loglevel %> +<%- end -%> + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile <%= @conf_logfile_real %> + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no +<%- if @conf_syslog_enabled -%> +syslog-enabled <%= @conf_syslog_enabled %> +<%- end -%> + +# Specify the syslog identity. +# syslog-ident redis +<%- if @conf_syslog_ident -%> +syslog-ident <%= @conf_syslog_ident %> +<%- end -%> + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 +<%- if @conf_syslog_facility -%> +syslog-facility <%= @conf_syslog_facility %> +<%- end -%> + +# Set the number of databases. 
The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +<%- if @conf_databases -%> +databases <%= @conf_databases %> +<%- end -%> + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +<%- if @conf_nosave -%> +# do not persist to disk +<%- else -%> +<%- @conf_save.sort.each do |seconds, changes| -%> +save <%= seconds -%> <%= changes -%> <%= "\n" -%> +<%- end -%> +<%- end -%> + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +<%- if @conf_rdbcompression -%> +rdbcompression <%= @conf_rdbcompression %> +<%- end -%> + +# The filename where to dump the DB +<%- if @conf_dbfilename -%> +dbfilename <%= @conf_dbfilename %> +<%- end -%> + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# Also the Append Only File will be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +<%- if @conf_dir -%> +dir <%= @conf_dir %> +<%- end -%> + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +<%- if @conf_slaveof -%> +slaveof <%= @conf_slaveof %> +<%- end -%> + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth +<%- if @conf_masterauth -%> +masterauth <%= @conf_masterauth %> +<%- end -%> + +# When a slave lost the connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of data data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +<%- if @conf_slave_serve_stale_data -%> +slave-serve-stale-data <%= @conf_slave_serve_stale_data %> +<%- end -%> + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 +<%- if @conf_repl_ping_slave_period -%> +repl-ping-slave-period <%= @conf_repl_ping_slave_period %> +<%- end -%> + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 +<%- if @conf_repl_timeout -%> +repl-timeout <%= @conf_repl_timeout %> +<%- end -%> + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +<%- if @conf_slave_priority -%> +slave-priority <%= @conf_slave_priority %> +<%- end -%> + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared +<%- if @conf_requirepass -%> +requirepass <%= @conf_requirepass %> +<%- end -%> + +# Command renaming. +# +# It is possilbe to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# of hard to guess so that it will be still available for internal-use +# tools but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possilbe to completely kill a command renaming it into +# an empty string: +# +# rename-command CONFIG "" + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default there +# is no limit, and it's up to the number of file descriptors the Redis process +# is able to open. The special value '0' means no limits. +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 128 +<%- if @conf_maxclients -%> +maxclients <%= @conf_maxclients %> +<%- end -%> + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). 
+# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# an hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory +<%- if @conf_maxmemory -%> +maxmemory <%= @conf_maxmemory %> +<%- end -%> + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached? You can select among five behavior: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys->random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with all the kind of policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. +# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru +<%- if @conf_maxmemory_policy -%> +maxmemory-policy <%= @conf_maxmemory_policy %> +<%- end -%> + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 +<%- if @conf_maxmemory_samples -%> +maxmemory-samples <%= @conf_maxmemory_samples %> +<%- end -%> + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. If you can live +# with the idea that the latest records will be lost if something like a crash +# happens this is the preferred way to run Redis. If instead you care a lot +# about your data and don't want to that a single record can get lost you should +# enable the append only mode: when this mode is enabled Redis will append +# every write operation received in the file appendonly.aof. This file will +# be read on startup in order to rebuild the full dataset in memory. 
+# +# Note that you can have both the async dumps and the append only file if you +# like (you have to comment the "save" statements above to disable the dumps). +# Still if append only mode is enabled Redis will load the data from the +# log file at startup ignoring the dump.rdb file. +# +# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append +# log file in background when it gets too big. + +<%- if @conf_appendonly -%> +appendonly <%= @conf_appendonly %> +<%- end -%> + +# The name of the append only file (default: "appendonly.aof") +# appendfilename appendonly.aof +<%- if @conf_appendfilename -%> +appendfilename <%= @conf_appendfilename %> +<%- end -%> + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only if one second passed since the last fsync. Compromise. +# +# The default is "everysec" that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# If unsure, use "everysec". + +# appendfsync always +<%- if @conf_appendfsync -%> +appendfsync <%= @conf_appendfsync %> +<%- end -%> +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving the durability of Redis is +# the same as "appendfsync none", that in pratical terms means that it is +# possible to lost up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +<%- if @conf_no_appendfsync_on_rewrite -%> +no-appendfsync-on-rewrite <%= @conf_no_appendfsync_on_rewrite %> +<%- end -%> + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size will growth by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (or if no rewrite happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. 
Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a precentage of zero in order to disable the automatic AOF +# rewrite feature. + +<%- if @conf_auto_aof_rewrite_percentage -%> +auto-aof-rewrite-percentage <%= @conf_auto_aof_rewrite_percentage %> +<%- end -%> +<%- if @conf_auto_aof_rewrite_min_size -%> +auto-aof-rewrite-min-size <%= @conf_auto_aof_rewrite_min_size %> +<%- end -%> + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +<%- if @conf_slowlog_log_slower_than -%> +slowlog-log-slower-than <%= @conf_slowlog_log_slower_than %> +<%- end -%> + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +<%- if @conf_slowlog_max_len -%> +slowlog-max-len <%= @conf_slowlog_max_len %> +<%- end -%> + +################################ VIRTUAL MEMORY ############################### + +### WARNING! Virtual Memory is deprecated in Redis 2.4 +### The use of Virtual Memory is strongly discouraged. + +# Virtual Memory allows Redis to work with datasets bigger than the actual +# amount of RAM needed to hold the whole dataset in memory. +# In order to do so very used keys are taken in memory while the other keys +# are swapped into a swap file, similarly to what operating systems do +# with memory pages. +# +# To enable VM just set 'vm-enabled' to yes, and set the following three +# VM parameters accordingly to your needs. + +<%- if @conf_vm_enabled -%> +vm-enabled <%= @conf_vm_enabled %> +<%- end -%> +# vm-enabled yes + +# This is the path of the Redis swap file. As you can guess, swap files +# can't be shared by different Redis instances, so make sure to use a swap +# file for every redis process you are running. Redis will complain if the +# swap file is already in use. +# +# The best kind of storage for the Redis swap file (that's accessed at random) +# is a Solid State Disk (SSD). +# +# *** WARNING *** if you are using a shared hosting the default of putting +# the swap file under /tmp is not secure. Create a dir with access granted +# only to Redis user and configure Redis to create the swap file there. +<%- if @conf_vm_swap_file -%> +vm-swap-file <%= @conf_vm_swap_file %> +<%- end -%> + +# vm-max-memory configures the VM to use at max the specified amount of +# RAM. 
Everything that deos not fit will be swapped on disk *if* possible, that +# is, if there is still enough contiguous space in the swap file. +# +# With vm-max-memory 0 the system will swap everything it can. Not a good +# default, just specify the max amount of RAM you can in bytes, but it's +# better to leave some margin. For instance specify an amount of RAM +# that's more or less between 60 and 80% of your free RAM. +<%- if @conf_vm_max_memory -%> +vm-max-memory <%= @conf_vm_max_memory %> +<%- end -%> + +# Redis swap files is split into pages. An object can be saved using multiple +# contiguous pages, but pages can't be shared between different objects. +# So if your page is too big, small objects swapped out on disk will waste +# a lot of space. If you page is too small, there is less space in the swap +# file (assuming you configured the same number of total swap file pages). +# +# If you use a lot of small objects, use a page size of 64 or 32 bytes. +# If you use a lot of big objects, use a bigger page size. +# If unsure, use the default :) +<%- if @conf_vm_page_size -%> +vm-page-size <%= @conf_vm_page_size %> +<%- end -%> + +# Number of total memory pages in the swap file. +# Given that the page table (a bitmap of free/used pages) is taken in memory, +# every 8 pages on disk will consume 1 byte of RAM. +# +# The total swap size is vm-page-size * vm-pages +# +# With the default of 32-bytes memory pages and 134217728 pages Redis will +# use a 4 GB swap file, that will use 16 MB of RAM for the page table. +# +# It's better to use the smallest acceptable value for your application, +# but the default is large in order to work in most conditions. +<%- if @conf_vm_pages -%> +vm-pages <%= @conf_vm_pages %> +<%- end -%> + +# Max number of VM I/O threads running at the same time. +# This threads are used to read/write data from/to swap file, since they +# also encode and decode objects from disk to memory or the reverse, a bigger +# number of threads can help with big objects even if they can't help with +# I/O itself as the physical device may not be able to couple with many +# reads/writes operations at the same time. +# +# The special value of 0 turn off threaded I/O and enables the blocking +# Virtual Memory implementation. +<%- if @conf_vm_max_threads -%> +vm-max-threads <%= @conf_vm_max_threads %> +<%- end -%> + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded in a special way (much more memory efficient) when they +# have at max a given numer of elements, and the biggest element does not +# exceed a given threshold. You can configure this limits with the following +# configuration directives. +<%- if @conf_hash_max_zipmap_entries -%> +hash-max-zipmap-entries <%= @conf_hash_max_zipmap_entries %> +<%- end -%> +<%- if @conf_hash_max_zipmap_value -%> +hash-max-zipmap-value <%= @conf_hash_max_zipmap_value %> +<%- end -%> + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +<%- if @conf_list_max_ziplist_entries -%> +list-max-ziplist-entries <%= @conf_list_max_ziplist_entries %> +<%- end -%> +<%- if @conf_list_max_ziplist_value -%> +list-max-ziplist-value <%= @conf_list_max_ziplist_value %> +<%- end -%> + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happens to be integers in radix 10 in the range +# of 64 bit signed integers. 
+# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +<%- if @conf_set_max_intset_entries -%> +set-max-intset-entries <%= @conf_set_max_intset_entries %> +<%- end -%> + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +<%- if @conf_zset_max_ziplist_entries -%> +zset-max-ziplist-entries <%= @conf_zset_max_ziplist_entries %> +<%- end -%> +<%- if @conf_zset_max_ziplist_value -%> +zset-max-ziplist-value <%= @conf_zset_max_ziplist_value %> +<%- end -%> + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into an hash table +# that is rhashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# active rehashing the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply form time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +<%- if @conf_activerehashing -%> +activerehashing <%= @conf_activerehashing %> +<%- end -%> + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf +<%- if @conf_include -%> +<%- @conf_include.each do |include| -%> +include <%= include %><%= "\n" -%> +<%- end -%> +<%- end -%> +<%- end -%> +<%- if @redis_version_real == "2.6.x" -%> +# MANAGED BY PUPPET # +# +# Redis 2.6 configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +<%- if @conf_daemonize -%> +daemonize <%= @conf_daemonize %> +<%- end -%> + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile <%= @conf_pidfile_real %> + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +<%- if @conf_port -%> +port <%= @conf_port %> +<%- end -%> + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. 
+# +# bind 127.0.0.1 +<%- if @conf_bind -%> +bind <%= @conf_bind %> +<%- end -%> + +# Specify the path for the unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +<%- if @conf_unixsocket -%> +unixsocket <%= @conf_unixsocket %> +<%- end -%> +# unixsocketperm 755 +<%- if @conf_unixsocketperm -%> +unixsocketperm <%= @conf_unixsocketperm %> +<%- end -%> + +# Close the connection after a client is idle for N seconds (0 to disable) +<%- if @conf_timeout -%> +timeout <%= @conf_timeout %> +<%- end -%> + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +<%- if @conf_tcp_keepalive -%> +tcp-keepalive <%= @conf_tcp_keepalive %> +<%- end -%> + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +<%- if @conf_loglevel -%> +loglevel <%= @conf_loglevel %> +<%- end -%> + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile <%= @conf_logfile_real %> + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no +<%- if @conf_syslog_enabled -%> +syslog-enabled <%= @conf_syslog_enabled %> +<%- end -%> + +# Specify the syslog identity. +# syslog-ident redis +<%- if @conf_syslog_ident -%> +syslog-ident <%= @conf_syslog_ident %> +<%- end -%> + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 +<%- if @conf_syslog_facility -%> +syslog-facility <%= @conf_syslog_facility %> +<%- end -%> + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +<%- if @conf_databases -%> +databases <%= @conf_databases %> +<%- end -%> + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. 
+# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" +<%- if @conf_nosave -%> +# do not persist to disk +<%- else -%> +<%- @conf_save.sort.each do |seconds, changes| -%> +save <%= seconds -%> <%= changes -%> <%= "\n" -%> +<%- end -%> +<%- end -%> + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in an hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# distater will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usually even if there are problems with disk, +# permissions, and so forth. +<%- if @conf_stop_writes_on_bgsave_error -%> +stop-writes-on-bgsave-error <%= @conf_stop_writes_on_bgsave_error %> +<%- end -%> + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +<%- if @conf_rdbcompression -%> +rdbcompression <%= @conf_rdbcompression %> +<%- end -%> + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +<%- if @conf_rdbchecksum -%> +rdbchecksum <%= @conf_rdbchecksum %> +<%- end -%> + +# The filename where to dump the DB +<%- if @conf_dbfilename -%> +dbfilename <%= @conf_dbfilename %> +<%- end -%> + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +<%- if @conf_dir -%> +dir <%= @conf_dir %> +<%- end -%> + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +<%- if @conf_slaveof -%> +slaveof <%= @conf_slaveof %> +<%- end -%> + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. 
+# +# masterauth +<%- if @conf_masterauth -%> +masterauth <%= @conf_masterauth %> +<%- end -%> + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +<%- if @conf_slave_serve_stale_data -%> +slave-serve-stale-data <%= @conf_slave_serve_stale_data %> +<%- end -%> + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +<%- if @conf_slave_read_only -%> +slave-read-only <%= @conf_slave_read_only %> +<%- end -%> + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 +<%- if @conf_repl_ping_slave_period -%> +repl-ping-slave-period <%= @conf_repl_ping_slave_period %> +<%- end -%> + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 +<%- if @conf_repl_timeout -%> +repl-timeout <%= @conf_repl_timeout %> +<%- end -%> + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +<%- if @conf_repl_disable_tcp_nodelay %> +repl-disable-tcp-nodelay <%= @conf_repl_disable_tcp_nodelay %> +<%- end -%> + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. 
+# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one wtih priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +<%- if @conf_slave_priority -%> +slave-priority <%= @conf_slave_priority %> +<%- end -%> + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared +<%- if @conf_requirepass -%> +requirepass <%= @conf_requirepass %> +<%- end -%> + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 +<%- if @conf_maxclients -%> +maxclients <%= @conf_maxclients %> +<%- end -%> + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# an hard memory limit for an instance (using the 'noeviction' policy). 
+# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory +<%- if @conf_maxmemory -%> +maxmemory <%= @conf_maxmemory %> +<%- end -%> + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. +# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru +<%- if @conf_maxmemory_policy -%> +maxmemory-policy <%= @conf_maxmemory_policy %> +<%- end -%> + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 +<%- if @conf_maxmemory_samples -%> +maxmemory-samples <%= @conf_maxmemory_samples %> +<%- end -%> + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. 
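+#
+# Illustrative note (not part of the upstream file): the @conf_* variables
+# used below are assumed to be exposed as parameters of the module's redis
+# class (see manifests/init.pp). Under that assumption, a minimal Puppet
+# sketch for enabling AOF persistence could look like:
+#
+#   class { 'redis':
+#     conf_appendonly  => 'yes',
+#     conf_appendfsync => 'everysec',
+#   }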
+ +<%- if @conf_appendonly -%> +appendonly <%= @conf_appendonly %> +<%- end -%> + +# The name of the append only file (default: "appendonly.aof") +# appendfilename appendonly.aof +<%- if @conf_appendfilename -%> +appendfilename <%= @conf_appendfilename %> +<%- end -%> + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +<%- if @conf_appendfsync -%> +appendfsync <%= @conf_appendfsync %> +<%- end -%> +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +<%- if @conf_no_appendfsync_on_rewrite -%> +no-appendfsync-on-rewrite <%= @conf_no_appendfsync_on_rewrite %> +<%- end -%> + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. 
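+#
+# Illustrative note (not part of the upstream file): assuming the same
+# conf_* class parameters back these template variables, the automatic AOF
+# rewrite thresholds could be tuned from Puppet roughly like this (values
+# are examples only, not module defaults):
+#
+#   class { 'redis':
+#     conf_auto_aof_rewrite_percentage => '100',
+#     conf_auto_aof_rewrite_min_size   => '64mb',
+#   }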
+
+<%- if @conf_auto_aof_rewrite_percentage -%>
+auto-aof-rewrite-percentage <%= @conf_auto_aof_rewrite_percentage %>
+<%- end -%>
+<%- if @conf_auto_aof_rewrite_min_size -%>
+auto-aof-rewrite-min-size <%= @conf_auto_aof_rewrite_min_size %>
+<%- end -%>
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that has not yet called any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the
+# natural termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+<%- if @conf_lua_time_limit -%>
+lua-time-limit <%= @conf_lua_time_limit %>
+<%- end -%>
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and cannot serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+<%- if @conf_slowlog_log_slower_than -%>
+slowlog-log-slower-than <%= @conf_slowlog_log_slower_than %>
+<%- end -%>
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+<%- if @conf_slowlog_max_len -%>
+slowlog-max-len <%= @conf_slowlog_max_len %>
+<%- end -%>
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+<%- if @conf_hash_max_zipmap_entries -%>
+hash-max-ziplist-entries <%= @conf_hash_max_zipmap_entries %>
+<%- end -%>
+<%- if @conf_hash_max_zipmap_value -%>
+hash-max-ziplist-value <%= @conf_hash_max_zipmap_value %>
+<%- end -%>
+
+# Similarly to hashes, small lists are also encoded in a special way in order
+# to save a lot of space.
The special representation is only used when
+# you are under the following limits:
+<%- if @conf_list_max_ziplist_entries -%>
+list-max-ziplist-entries <%= @conf_list_max_ziplist_entries %>
+<%- end -%>
+<%- if @conf_list_max_ziplist_value -%>
+list-max-ziplist-value <%= @conf_list_max_ziplist_value %>
+<%- end -%>
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happens to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+<%- if @conf_set_max_intset_entries -%>
+set-max-intset-entries <%= @conf_set_max_intset_entries %>
+<%- end -%>
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+<%- if @conf_zset_max_ziplist_entries -%>
+zset-max-ziplist-entries <%= @conf_zset_max_ziplist_entries %>
+<%- end -%>
+<%- if @conf_zset_max_ziplist_value -%>
+zset-max-ziplist-value <%= @conf_zset_max_ziplist_value %>
+<%- end -%>
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# active rehashing the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+<%- if @conf_activerehashing -%>
+activerehashing <%= @conf_activerehashing %>
+<%- end -%>
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave -> slave clients and MONITOR clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +<%- if @conf_client_output_buffer_limit_normal -%> +client-output-buffer-limit normal <%= @conf_client_output_buffer_limit_normal %> +<%- end -%> +<%- if @conf_client_output_buffer_limit_slave -%> +client-output-buffer-limit slave <%= @conf_client_output_buffer_limit_slave %> +<%- end -%> +<%- if @conf_client_output_buffer_limit_pubsub -%> +client-output-buffer-limit pubsub <%= @conf_client_output_buffer_limit_pubsub %> +<%- end -%> + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeot, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are perforemd with the same frequency, but Redis checks for +# tasks to perform accordingly to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +<%- if @conf_hz -%> +hz <%= @conf_hz %> +<%- end -%> + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +<%- if @conf_aof_rewrite_incremental_fsync -%> +aof-rewrite-incremental-fsync <%= @conf_aof_rewrite_incremental_fsync %> +<%- end -%> + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf +<%- if @conf_include -%> +<%- @conf_include.each do |include| -%> +include <%= include %><%= "\n" -%> +<%- end -%> +<%- end -%> +<%- end -%> +<%- if @redis_version_real == "2.8.x" -%> +# MANAGED BY PUPPET # +# +# Redis 2.8 configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. 
+# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf +<%- if @conf_include -%> +<%- @conf_include.each do |include| -%> +include <%= include %><%= "\n" -%> +<%- end -%> +<%- end -%> + +################################ GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +<%- if @conf_daemonize -%> +daemonize <%= @conf_daemonize %> +<%- end -%> + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile <%= @conf_pidfile_real %> + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +<%- if @conf_port -%> +port <%= @conf_port %> +<%- end -%> + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +<%- if @conf_tcp_backlog -%> +tcp-backlog <%= @conf_tcp_backlog %> +<%- end -%> + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 +<%- if @conf_bind -%> +bind <%= @conf_bind %> +<%- end -%> + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +<%- if @conf_unixsocket -%> +unixsocket <%= @conf_unixsocket %> +<%- end -%> +# unixsocketperm 755 +<%- if @conf_unixsocketperm -%> +unixsocketperm <%= @conf_unixsocketperm %> +<%- end -%> + +# Close the connection after a client is idle for N seconds (0 to disable) +<%- if @conf_timeout -%> +timeout <%= @conf_timeout %> +<%- end -%> + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +<%- if @conf_tcp_keepalive -%> +tcp-keepalive <%= @conf_tcp_keepalive %> +<%- end -%> + +# Specify the server verbosity level. 
+# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +<%- if @conf_loglevel -%> +loglevel <%= @conf_loglevel %> +<%- end -%> + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile <%= @conf_logfile_real %> + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no +<%- if @conf_syslog_enabled -%> +syslog-enabled <%= @conf_syslog_enabled %> +<%- end -%> + +# Specify the syslog identity. +# syslog-ident redis +<%- if @conf_syslog_ident -%> +syslog-ident <%= @conf_syslog_ident %> +<%- end -%> + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 +<%- if @conf_syslog_facility -%> +syslog-facility <%= @conf_syslog_facility %> +<%- end -%> + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +<%- if @conf_databases -%> +databases <%= @conf_databases %> +<%- end -%> + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" +<%- if @conf_nosave -%> +# do not persist to disk +<%- else -%> +<%- @conf_save.sort.each do |seconds, changes| -%> +save <%= seconds -%> <%= changes -%> <%= "\n" -%> +<%- end -%> +<%- end -%> + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +<%- if @conf_stop_writes_on_bgsave_error -%> +stop-writes-on-bgsave-error <%= @conf_stop_writes_on_bgsave_error %> +<%- end -%> + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. 
+<%- if @conf_rdbcompression -%> +rdbcompression <%= @conf_rdbcompression %> +<%- end -%> + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +<%- if @conf_rdbchecksum -%> +rdbchecksum <%= @conf_rdbchecksum %> +<%- end -%> + +# The filename where to dump the DB +<%- if @conf_dbfilename -%> +dbfilename <%= @conf_dbfilename %> +<%- end -%> + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +<%- if @conf_dir -%> +dir <%= @conf_dir %> +<%- end -%> + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof +<%- if @conf_slaveof -%> +slaveof <%= @conf_slaveof %> +<%- end -%> + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth +<%- if @conf_masterauth -%> +masterauth <%= @conf_masterauth %> +<%- end -%> + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +<%- if @conf_slave_serve_stale_data -%> +slave-serve-stale-data <%= @conf_slave_serve_stale_data %> +<%- end -%> + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +<%- if @conf_slave_read_only -%> +slave-read-only <%= @conf_slave_read_only %> +<%- end -%> + +# Slaves send PINGs to server in a predefined interval. 
It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 +<%- if @conf_repl_ping_slave_period -%> +repl-ping-slave-period <%= @conf_repl_ping_slave_period %> +<%- end -%> + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 +<%- if @conf_repl_timeout -%> +repl-timeout <%= @conf_repl_timeout %> +<%- end -%> + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +<%- if @conf_repl_disable_tcp_nodelay %> +repl-disable-tcp-nodelay <%= @conf_repl_disable_tcp_nodelay %> +<%- end -%> + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The biggest the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb +<%- if @conf_repl_backlog_size %> +repl-backlog-size <%= @conf_repl_backlog_size %> +<%- end -%> + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 +<%- if @conf_repl_backlog_ttl %> +repl-backlog-ttl <%= @conf_repl_backlog_ttl %> +<%- end -%> + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. 
+<%- if @conf_slave_priority -%> +slave-priority <%= @conf_slave_priority %> +<%- end -%> + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEES that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. +<%- if @conf_min_slaves_to_write -%> +min-slaves-to-write <%= @conf_min_slaves_to_write %> +<%- end -%> +<%- if @conf_min_slaves_max_lag -%> +min-slaves-max-lag <%= @conf_min_slaves_max_lag %> +<%- end -%> + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared +<%- if @conf_requirepass -%> +requirepass <%= @conf_requirepass %> +<%- end -%> + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 +<%- if @conf_maxclients -%> +maxclients <%= @conf_maxclients %> +<%- end -%> + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). 
+# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory +<%- if @conf_maxmemory -%> +maxmemory <%= @conf_maxmemory %> +<%- end -%> + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. +# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru +<%- if @conf_maxmemory_policy -%> +maxmemory-policy <%= @conf_maxmemory_policy %> +<%- end -%> + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 +<%- if @conf_maxmemory_samples -%> +maxmemory-samples <%= @conf_maxmemory_samples %> +<%- end -%> + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. 
For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +<%- if @conf_appendonly -%> +appendonly <%= @conf_appendonly %> +<%- end -%> + +# The name of the append only file (default: "appendonly.aof") + +<%- if @conf_appendfilename -%> +appendfilename <%= @conf_appendfilename %> +<%- end -%> + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +<%- if @conf_appendfsync -%> +appendfsync <%= @conf_appendfsync %> +<%- end -%> +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +<%- if @conf_no_appendfsync_on_rewrite -%> +no-appendfsync-on-rewrite <%= @conf_no_appendfsync_on_rewrite %> +<%- end -%> + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. 
+# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +<%- if @conf_auto_aof_rewrite_percentage -%> +auto-aof-rewrite-percentage <%= @conf_auto_aof_rewrite_percentage %> +<%- end -%> +<%- if @conf_auto_aof_rewrite_min_size -%> +auto-aof-rewrite-min-size <%= @conf_auto_aof_rewrite_min_size %> +<%- end -%> + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +<%- if @conf_lua_time_limit -%> +lua-time-limit <%= @conf_lua_time_limit %> +<%- end -%> + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +<%- if @conf_slowlog_log_slower_than -%> +slowlog-log-slower-than <%= @conf_slowlog_log_slower_than %> +<%- end -%> + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +<%- if @conf_slowlog_max_len -%> +slowlog-max-len <%= @conf_slowlog_max_len %> +<%- end -%> + +############################# Event notification ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. 
+# This feature is documented at http://redis.io/topics/keyspace-events +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# by zero or multiple characters. The empty string means that notifications +# are disabled at all. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +<%- if @conf_notify_keyspace_events -%> +notify-keyspace-events <%= @conf_notify_keyspace_events %> +<%- end -%> + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +<%- if @conf_hash_max_zipmap_entries -%> +hash-max-ziplist-entries <%= @conf_hash_max_zipmap_entries %> +<%- end -%> +<%- if @conf_hash_max_zipmap_value -%> +hash-max-ziplist-value <%= @conf_hash_max_zipmap_value %> +<%- end -%> + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +<%- if @conf_list_max_ziplist_entries -%> +list-max-ziplist-entries <%= @conf_list_max_ziplist_entries %> +<%- end -%> +<%- if @conf_list_max_ziplist_value -%> +list-max-ziplist-value <%= @conf_list_max_ziplist_value %> +<%- end -%> + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happens to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +<%- if @conf_set_max_intset_entries -%> +set-max-intset-entries <%= @conf_set_max_intset_entries %> +<%- end -%> + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. 
This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+<%- if @conf_zset_max_ziplist_entries -%>
+zset-max-ziplist-entries <%= @conf_zset_max_ziplist_entries %>
+<%- end -%>
+<%- if @conf_zset_max_ziplist_value -%>
+zset-max-ziplist-value <%= @conf_zset_max_ziplist_value %>
+<%- end -%>
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When an HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+<%- if @conf_hll_sparse_max_bytes -%>
+hll-sparse-max-bytes <%= @conf_hll_sparse_max_bytes %>
+<%- end -%>
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# active rehashing the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+<%- if @conf_activerehashing -%>
+activerehashing <%= @conf_activerehashing %>
+<%- end -%>
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave -> slave clients and MONITOR clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +<%- if @conf_client_output_buffer_limit_normal -%> +client-output-buffer-limit normal <%= @conf_client_output_buffer_limit_normal %> +<%- end -%> +<%- if @conf_client_output_buffer_limit_slave -%> +client-output-buffer-limit slave <%= @conf_client_output_buffer_limit_slave %> +<%- end -%> +<%- if @conf_client_output_buffer_limit_pubsub -%> +client-output-buffer-limit pubsub <%= @conf_client_output_buffer_limit_pubsub %> +<%- end -%> + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform accordingly to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +<%- if @conf_hz -%> +hz <%= @conf_hz %> +<%- end -%> + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +<%- if @conf_aof_rewrite_incremental_fsync -%> +aof-rewrite-incremental-fsync <%= @conf_aof_rewrite_incremental_fsync %> +<%- end -%> +<%- end -%> + +<%- if @conf_append -%> +<%- @conf_append.sort.each do |variable, value| -%> +<%= variable %> <%= value %><%= "\n" -%> +<%- end -%> +<%- end -%> diff --git a/deployment_scripts/puppet/modules/redis/templates/sentinel-init.conf.erb b/deployment_scripts/puppet/modules/redis/templates/sentinel-init.conf.erb new file mode 100644 index 0000000..4f056b2 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/templates/sentinel-init.conf.erb @@ -0,0 +1,14 @@ +# redis-sentinel - Redis Datastore Server +# +# Redis is a key value in memory persistent datastore + +start on (local-filesystems and runlevel [2345]) +stop on runlevel [016] +respawn +expect fork +limit nofile 20000 65000 +pre-start script +mkdir -p /var/run/redis-sentinel +chown redis:redis /var/run/redis-sentinel +end script +exec start-stop-daemon --start --chuid redis:redis --pidfile <%= @conf_pidfile_real %> --umask 007 --exec /usr/bin/redis-sentinel -- <%= @conf_sentinel %> diff --git a/deployment_scripts/puppet/modules/redis/templates/sentinel.conf.erb b/deployment_scripts/puppet/modules/redis/templates/sentinel.conf.erb new file mode 100644 index 0000000..a7f3d73 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/templates/sentinel.conf.erb @@ -0,0 +1,178 @@ +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 
+<%- if @conf_daemonize -%> +daemonize <%= @conf_daemonize %> +<%- end -%> + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile <%= @conf_pidfile_real %> + +# port +# The port that this sentinel instance will run on +<%- if @conf_port -%> +port <%= @conf_port %> +<%- end -%> + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile <%= @conf_logfile_real %> + +<%- if @sentinel_confs -%> + <%- @sentinel_confs.keys.sort.each do |master| -%> +# Sentinel configuration for <%= master %> +sentinel monitor <%= master %> <%= @sentinel_confs[master]['monitor'] %> +<%- @sentinel_confs[master].keys.sort.each do |key| -%> +<%- if key != 'monitor' -%> +sentinel <%= key %> <%= master %> <%= @sentinel_confs[master][key] %> +<%- end -%> +<%- end -%> +<%- end -%> +<%- end -%> + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +#sentinel monitor mymaster 127.0.0.1 6379 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +#sentinel down-after-milliseconds mymaster 30000 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +#sentinel parallel-syncs mymaster 1 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. 
It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +#sentinel failover-timeout mymaster 180000 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. 
+# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh + diff --git a/deployment_scripts/puppet/modules/redis/tests/Puppetfile b/deployment_scripts/puppet/modules/redis/tests/Puppetfile new file mode 100644 index 0000000..22d91f0 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/tests/Puppetfile @@ -0,0 +1,2 @@ +mod "redis", + :git => "git://github.com/fsalum/puppet-redis.git" diff --git a/deployment_scripts/puppet/modules/redis/tests/init.pp b/deployment_scripts/puppet/modules/redis/tests/init.pp new file mode 100644 index 0000000..aca371c --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/tests/init.pp @@ -0,0 +1,97 @@ +node default { + + case $::osfamily { + 'RedHat': { + package { 'epel-release': + ensure => present, + source => 'http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm', + provider => rpm, + before => Class['redis'], + } + } + 'Debian': { + # redis is on repository + } + default: { + fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem}, module ${module_name} only support osfamily RedHat and Debian") + } + } + + class { 'redis': + conf_activerehashing => 'yes', + conf_aof_rewrite_incremental_fsync => 'yes', # 2.6+ + conf_appendfilename => 'appendonly.aof', + conf_appendfsync => 'everysec', + conf_appendonly => 'no', + conf_auto_aof_rewrite_min_size => '64mb', + conf_auto_aof_rewrite_percentage => '100', + conf_bind => '0.0.0.0', + conf_client_output_buffer_limit_normal => '0 0 0', # 2.6+ + conf_client_output_buffer_limit_pubsub => '32mb 8mb 60', # 2.6+ + conf_client_output_buffer_limit_slave => '256mb 64mb 60', # 2.6+ + conf_daemonize => 'yes', + conf_databases => '16', + conf_dbfilename => 'dump.rdb', + conf_dir => '/var/lib/redis/', + conf_glueoutputbuf => undef, + conf_hash_max_zipmap_entries => '512', + conf_hash_max_zipmap_value => '64', + conf_hll_sparse_max_bytes => undef, # default 3000 in 2.8+ + conf_hz => '10', # 2.6+ + conf_include => undef, + conf_list_max_ziplist_entries => '512', + conf_list_max_ziplist_value => '64', + conf_logfile => '', + conf_loglevel => 'notice', + conf_lua_time_limit => '5000', # 2.6+ + conf_masterauth => undef, + conf_maxclients => '128', # default 10000 in 2.6+ + conf_maxmemory => undef, + conf_maxmemory_policy => undef, + conf_maxmemory_samples => undef, + conf_min_slaves_max_lag => '10', # default 10, 2.8+ + conf_min_slaves_to_write => undef, # 2.8+ + conf_no_appendfsync_on_rewrite => 'no', + conf_nosave => undef, + conf_notify_keyspace_events => undef, # 2.8+ + conf_pidfile => undef, + conf_port => '6379', + conf_rdbchecksum => 'yes', # 2.6+ + conf_rdbcompression => 'yes', + conf_repl_backlog_size => '1mb', # 2,8+ + conf_repl_backlog_ttl => '3600', # 2.8+ + conf_repl_disable_tcp_nodelay => 'no', # 2,6+ + conf_repl_ping_slave_period => '10', # 2.4+ + conf_repl_timeout => '60', # 2.4+ + conf_requirepass => undef, + conf_save => {"900" =>"1", "300" => "10", "60" => "10000"}, + conf_set_max_intset_entries => '512', + conf_slave_priority => undef, # 2.6+ + conf_slave_read_only => 'yes', # 2.6+ + conf_slave_serve_stale_data => 'yes', + conf_slaveof => undef, + conf_slowlog_log_slower_than => '10000', + conf_slowlog_max_len => '128', + conf_stop_writes_on_bgsave_error => 'yes', # 2.6+ + conf_syslog_enabled => undef, + conf_syslog_facility => undef, + conf_syslog_ident => undef, + conf_tcp_backlog => undef, # 2.8.5+ + conf_tcp_keepalive => '0', # 2.6+ + conf_timeout => '0', + conf_vm_enabled => 'no', # deprecated in 2.4+ + conf_vm_max_memory => 
'0', # deprecated in 2.4+ + conf_vm_max_threads => '4', # deprecated in 2.4+ + conf_vm_page_size => '32', # deprecated in 2.4+ + conf_vm_pages => '134217728', # deprecated in 2.4+ + conf_vm_swap_file => '/tmp/redis.swap', # deprecated in 2.4+ + conf_zset_max_ziplist_entries => '128', # 2.4+ + conf_zset_max_ziplist_value => '64', # 2.4+ + package_ensure => 'present', + service_enable => true, + service_ensure => 'running', + service_restart => true, + system_sysctl => true, + } + +} diff --git a/deployment_scripts/puppet/modules/redis/tests/sentinel.pp b/deployment_scripts/puppet/modules/redis/tests/sentinel.pp new file mode 100644 index 0000000..8f4ee53 --- /dev/null +++ b/deployment_scripts/puppet/modules/redis/tests/sentinel.pp @@ -0,0 +1,38 @@ +node default { + + case $::osfamily { + #'RedHat': { + # package { 'epel-release': + # ensure => present, + # source => 'http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm', + # provider => rpm, + # before => Class['redis'], + # } + #} + 'Debian': { + # redis is on repository + } + default: { + fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem}, module ${module_name} only support osfamily RedHat and Debian") + } + } + + class { 'redis::sentinel': + conf_port => '26379', + sentinel_confs => { + 'mymaster' => { + 'monitor' => '127.0.0.1 6379 2', + 'down-after-milliseconds' => '60000', + 'failover-timeout' => 180000, + 'parallel-syncs' => '3', + }, + 'resque' => { + 'monitor' => '127.0.0.1 6379 4', + 'down-after-milliseconds' => '10000', + 'failover-timeout' => 180000, + 'parallel-syncs' => '5', + } + } + } + +}
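
The test manifests above exercise every parameter of the classes this patch adds, which can obscure the minimal wiring. The following is a small sketch of a replicated pair plus a Sentinel watcher built from the redis and redis::sentinel classes: the node names and 192.0.2.x addresses are illustrative placeholders, the omitted parameters fall back to the module defaults, and the manifest is a sketch rather than a tested deployment.

node 'redis-master.example.com' {
  # Master: listen on all interfaces on the default port.
  class { 'redis':
    conf_bind => '0.0.0.0',
    conf_port => '6379',
  }
}

node 'redis-slave.example.com' {
  # Slave: same class, pointed at the master via conf_slaveof.
  class { 'redis':
    conf_bind    => '0.0.0.0',
    conf_port    => '6379',
    conf_slaveof => '192.0.2.10 6379',  # placeholder address of the master
  }
}

node 'redis-sentinel.example.com' {
  # Sentinel: a quorum of 2 assumes at least two Sentinels are deployed;
  # slaves are auto-discovered, so only the master is declared.
  class { 'redis::sentinel':
    conf_port      => '26379',
    sentinel_confs => {
      'mymaster' => {
        'monitor'                 => '192.0.2.10 6379 2',
        'down-after-milliseconds' => '60000',
        'parallel-syncs'          => '1',
      },
    },
  }
}

Given that hash, sentinel.conf.erb renders the mymaster block as:

# Sentinel configuration for mymaster
sentinel monitor mymaster 192.0.2.10 6379 2
sentinel down-after-milliseconds mymaster 60000
sentinel parallel-syncs mymaster 1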