diff --git a/README.md b/README.md index 1ad9223..1e1fc5c 100644 --- a/README.md +++ b/README.md @@ -57,3 +57,4 @@ Features * [Apache 2](https://github.com/computerminds/parrot/wiki/Apache-2) * [PHP](https://github.com/computerminds/parrot/wiki/PHP) using PHP-FPM for extra cool points. * [XDebug](https://github.com/computerminds/parrot/wiki/PHP-XDebug) +* Redis / Memcache (optional) diff --git a/Vagrantfile b/Vagrantfile index 9ccc242..22d1831 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -15,6 +15,8 @@ def parse_config( 'mysql_version' => '5.5', 'box_name' => 'Parrot', 'varnish_enabled' => false, + 'memcache_enabled' => false, + 'redis_enabled' => false, 'local_user_uid' => Process.uid, 'local_user_gid' => Process.gid, } @@ -101,6 +103,17 @@ Vagrant.configure('2') do |config| # Dovecot - IMAP config.vm.network :forwarded_port, :guest => 143, :host => 1143 + if (custom_config['memcache_enabled']) + # Memcache + config.vm.network :forwarded_port, :guest => 11211, :host => 11211 + end + + if (custom_config['redis_enabled']) + # Redis + config.vm.network :forwarded_port, :guest => 6379, :host => 6379 + end + + # Share an additional folder to the guest VM. The first argument is # an identifier, the second is the path on the guest to mount the # folder, and the third is the path on the host to the actual folder. 
@@ -133,6 +146,8 @@ Vagrant.configure('2') do |config| "parrot_mysql_version" => custom_config['mysql_version'], "apache_vhost_webroot_subdir" => custom_config['webroot_subdir'], "parrot_varnish_enabled" => custom_config['varnish_enabled'], + "parrot_memcache_enabled" => custom_config['memcache_enabled'], + "parrot_redis_enabled" => custom_config['redis_enabled'], "vagrant_host_user_uid" => custom_config['local_user_uid'], "vagrant_host_user_gid" => custom_config['local_user_gid'], } diff --git a/manifests/parrot.pp b/manifests/parrot.pp index 5384885..3e1ab3f 100644 --- a/manifests/parrot.pp +++ b/manifests/parrot.pp @@ -16,6 +16,18 @@ } } class { mailcollect: } + case $parrot_redis_enabled { + 'true', true: { + include redis + } + } + case $parrot_memcache_enabled { + 'true', true: { + class { 'memcached': + max_memory => 1024 + } + } + } package { 'vim': } package { 'vim-puppet': } diff --git a/modules/gcc/CHANGELOG b/modules/gcc/CHANGELOG new file mode 100644 index 0000000..27f6761 --- /dev/null +++ b/modules/gcc/CHANGELOG @@ -0,0 +1,21 @@ +2014-06-03 - Release 0.2.0 + +Summary: + +Just a few small tweaks. + +Features: +- Add g++ for RHEL. +- Fail on unsupported distributions. + +Fixes: + +2013-08-14 - Release 0.1.0 + +Features: +- Support osfamily instead of using `$operatingsystem`. Note that +Amazon Linux is RedHat osfamily on facter version 1.7 + +2011-06-03 - Dan Bode - 0.0.3 +* committed source to git +* added tests diff --git a/modules/gcc/LICENSE b/modules/gcc/LICENSE new file mode 100644 index 0000000..297f85c --- /dev/null +++ b/modules/gcc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013 Puppet Labs + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/modules/gcc/Modulefile b/modules/gcc/Modulefile new file mode 100644 index 0000000..15eeb0e --- /dev/null +++ b/modules/gcc/Modulefile @@ -0,0 +1,10 @@ +name 'puppetlabs-gcc' +version '0.2.0' +source 'git://github.com/puppetlabs/puppetlabs-gcc.git' +author 'puppetlabs' +license 'Apache 2.0' +summary 'module for installing gcc build utils' +description 'module for installing gcc build utils' +project_page 'https://github.com/puppetlabs/puppetlabs-gcc/' + + diff --git a/modules/gcc/checksums.json b/modules/gcc/checksums.json new file mode 100644 index 0000000..c70ae18 --- /dev/null +++ b/modules/gcc/checksums.json @@ -0,0 +1,9 @@ +{ + "CHANGELOG": "901e55ff61a47eb4078ba02a09f2f352", + "LICENSE": "6089b6bd1f0d807edb8bdfd76da0b038", + "Modulefile": "7b285361190d4be4abacecb088e43201", + "manifests/init.pp": "dabde84aa12aac6ea519ddffdc25feeb", + "manifests/params.pp": "52ed64183f18aa2e7c2f8edfbbdf3b56", + "tests/init.pp": "d97b158981e1e2b710a0dec5ce8d40d0", + "tests/params.pp": "34196cd17ebe3b0665a909625dc384f7" +} \ No newline at end of file diff --git a/modules/gcc/manifests/init.pp b/modules/gcc/manifests/init.pp new file mode 100644 index 0000000..5427779 --- /dev/null +++ b/modules/gcc/manifests/init.pp @@ -0,0 +1,20 @@ +# Class: gcc +# +# This class installs gcc +# +# Parameters: +# +# Actions: +# - Install the gcc package +# +# Requires: +# +# Sample Usage: +# +class gcc( + $gcc_package = $gcc::params::gcc_package, +) inherits gcc::params { + package { $gcc_package: + ensure => installed + } +} diff --git a/modules/gcc/manifests/params.pp b/modules/gcc/manifests/params.pp new file mode 100644 index 0000000..85801ce --- /dev/null +++ b/modules/gcc/manifests/params.pp @@ -0,0 +1,25 @@ +# Class: gcc::params +# +# This class manages parameters for the gcc module +# +# Parameters: +# +# Actions: +# +# Requires: +# +# Sample Usage: +# +class gcc::params { + case $::osfamily { + 'RedHat': { + $gcc_package = [ 'gcc', 'gcc-c++' ] + } + 'Debian': { + 
$gcc_package = [ 'gcc', 'build-essential' ] + } + default: { + fail("Class['gcc::params']: Unsupported osfamily: ${::osfamily}") + } + } +} diff --git a/modules/gcc/metadata.json b/modules/gcc/metadata.json new file mode 100644 index 0000000..2409bb1 --- /dev/null +++ b/modules/gcc/metadata.json @@ -0,0 +1,14 @@ +{ + "name": "puppetlabs-gcc", + "version": "0.2.0", + "author": "puppetlabs", + "summary": "module for installing gcc build utils", + "license": "Apache 2.0", + "source": "git://github.com/puppetlabs/puppetlabs-gcc.git", + "project_page": "https://github.com/puppetlabs/puppetlabs-gcc/", + "issues_url": "https://github.com/puppetlabs/puppetlabs-gcc/issues", + "description": "module for installing gcc build utils", + "dependencies": [ + + ] +} diff --git a/modules/gcc/tests/init.pp b/modules/gcc/tests/init.pp new file mode 100644 index 0000000..0331860 --- /dev/null +++ b/modules/gcc/tests/init.pp @@ -0,0 +1 @@ +include gcc diff --git a/modules/gcc/tests/params.pp b/modules/gcc/tests/params.pp new file mode 100644 index 0000000..1472ddb --- /dev/null +++ b/modules/gcc/tests/params.pp @@ -0,0 +1 @@ +include gcc::params diff --git a/modules/memcached/.fixtures.yml b/modules/memcached/.fixtures.yml new file mode 100755 index 0000000..21aa9eb --- /dev/null +++ b/modules/memcached/.fixtures.yml @@ -0,0 +1,10 @@ +fixtures: + repositories: + 'firewall': + repo: 'git://github.com/puppetlabs/puppetlabs-firewall.git' + ref: '0.1.0' + 'stdlib': + repo: 'git://github.com/puppetlabs/puppetlabs-stdlib.git' + ref: '3.2.0' + symlinks: + memcached: "#{source_dir}" diff --git a/modules/memcached/.gitattributes b/modules/memcached/.gitattributes new file mode 100755 index 0000000..78640e1 --- /dev/null +++ b/modules/memcached/.gitattributes @@ -0,0 +1,2 @@ +# Enforce Linux line-endings for template files on every operating system. 
+*.erb text eol=lf diff --git a/modules/memcached/.gitignore b/modules/memcached/.gitignore new file mode 100755 index 0000000..6c68bc7 --- /dev/null +++ b/modules/memcached/.gitignore @@ -0,0 +1,3 @@ +pkg/ +*.swp +.forge-releng/ diff --git a/modules/memcached/.travis.yml b/modules/memcached/.travis.yml new file mode 100755 index 0000000..8e14c54 --- /dev/null +++ b/modules/memcached/.travis.yml @@ -0,0 +1,38 @@ +--- +branches: + only: + - master +language: ruby +bundler_args: --without development +script: 'bundle exec rake validate && bundle exec rake lint && SPEC_OPTS="--format documentation" bundle exec rake spec' +after_success: + - git clone -q git://github.com/puppetlabs/ghpublisher.git .forge-releng + - .forge-releng/publish +rvm: + - 1.8.7 + - 1.9.3 +env: + matrix: + - PUPPET_GEM_VERSION="~> 2.7.0" + - PUPPET_GEM_VERSION="~> 3.0.0" + - PUPPET_GEM_VERSION="~> 3.1.0" + - PUPPET_GEM_VERSION="~> 3.2.0" + - PUPPET_GEM_VERSION="~> 3.3.0" + - PUPPET_GEM_VERSION="~> 3.4.0" + global: + - PUBLISHER_LOGIN=saz + - secure: |- + KHycFEf0ALVjITczYG0pcfk912muQkbJiGzKa5yyC8C9ppDW+dTYgDQu8AO1KXFHzds + NUASY2XNjrJNv27w7A2eMp88qU1ID1s8CWALph4fuxGcM/HoPw9q8sldJ9/sHGlY9Ye + DEeIvgt9qkwKtG/kb7dN7la42nv5fffWE95OU= +matrix: + include: + - rvm: 2.0.0 + env: PUPPET_GEM_VERSION="~> 3.2.0" + - rvm: 2.0.0 + env: PUPPET_GEM_VERSION="~> 3.3.0" + - rvm: 1.8.7 + env: PUPPET_GEM_VERSION="~> 2.6.0" +notifications: + email: false +gemfile: Gemfile diff --git a/modules/memcached/Gemfile b/modules/memcached/Gemfile new file mode 100755 index 0000000..0a648da --- /dev/null +++ b/modules/memcached/Gemfile @@ -0,0 +1,7 @@ +source "https://rubygems.org" + +puppetversion = ENV.key?('PUPPET_VERSION') ? 
"= #{ENV['PUPPET_VERSION']}" : ['>= 3.3'] +gem 'puppet', puppetversion +gem 'puppetlabs_spec_helper', '>= 0.1.0' +gem 'puppet-lint', '>= 0.3.2' +gem 'facter', '>= 1.7.0', "< 1.8.0" diff --git a/modules/memcached/LICENSE b/modules/memcached/LICENSE new file mode 100755 index 0000000..7c66189 --- /dev/null +++ b/modules/memcached/LICENSE @@ -0,0 +1,14 @@ + Copyright 2011 Steffen Zieger + Copyright 2014 Garrett Honeycutt + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/memcached/README-DEVELOPER b/modules/memcached/README-DEVELOPER new file mode 100755 index 0000000..e6c4dab --- /dev/null +++ b/modules/memcached/README-DEVELOPER @@ -0,0 +1,9 @@ +In order to run tests: + - puppet and facter must be installed and available in Ruby's LOADPATH + - the latest revision of rspec-puppet must be installed + - rake, and rspec2 must be install + + - the name of the module directory needs to be memcached + +to run all tests: + rake spec diff --git a/modules/memcached/README.md b/modules/memcached/README.md new file mode 100755 index 0000000..b552f76 --- /dev/null +++ b/modules/memcached/README.md @@ -0,0 +1,50 @@ +# puppet-memcached [![Build Status](https://secure.travis-ci.org/saz/puppet-memcached.png)](http://travis-ci.org/saz/puppet-memcached) + +Manage memcached via Puppet + +## Show some love +If you find this module useful, send some bitcoins to 1Na3YFUmdxKxJLiuRXQYJU2kiNqA3KY2j9 + +## How to use + +### Use roughly 90% of memory + +```ruby + class { 'memcached': } +``` 
+ +### Set a fixed memory limit in MB + +```ruby + class { 'memcached': + max_memory => 2048 + } +``` + +### Use 12% of available memory + +```ruby + class { 'memcached': + max_memory => '12%' + } +``` + +### Other class parameters + +* $package_ensure = 'present' +* $logfile = '/var/log/memcached.log' +* $max_memory = false +* $item_size = false +* $lock_memory = false (WARNING: good if used intelligently, google for -k key) +* $listen_ip = '0.0.0.0' +* $tcp_port = 11211 +* $udp_port = 11211 +* $manage_firewall = false +* $user = '' (OS specific setting, see params.pp) +* $max_connections = 8192 +* $verbosity = undef +* $unix_socket = undef +* $install_dev = false (TRUE if 'libmemcached-dev' package should be installed) +* $processorcount = $::processorcount +* $service_restart = true (restart service after configuration changes, false to prevent restarts) +* $use_sasl = false (start memcached with SASL support) diff --git a/modules/memcached/Rakefile b/modules/memcached/Rakefile new file mode 100755 index 0000000..06f1ab4 --- /dev/null +++ b/modules/memcached/Rakefile @@ -0,0 +1,19 @@ +require 'rubygems' +require 'puppetlabs_spec_helper/rake_tasks' +require 'puppet-lint/tasks/puppet-lint' +PuppetLint.configuration.send('disable_80chars') +PuppetLint.configuration.relative = true +PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"] + +desc "Run puppet in noop mode and check for syntax errors." 
+task :validate do + Dir['manifests/**/*.pp'].each do |manifest| + sh "puppet parser validate --noop #{manifest}" + end + Dir['spec/**/*.rb','lib/**/*.rb'].each do |ruby_file| + sh "ruby -c #{ruby_file}" unless ruby_file =~ /spec\/fixtures/ + end + Dir['templates/**/*.erb'].each do |template| + sh "erb -P -x -T '-' #{template} | ruby -c" + end +end diff --git a/modules/memcached/lib/puppet/parser/functions/memcached_max_memory.rb b/modules/memcached/lib/puppet/parser/functions/memcached_max_memory.rb new file mode 100755 index 0000000..fac25fd --- /dev/null +++ b/modules/memcached/lib/puppet/parser/functions/memcached_max_memory.rb @@ -0,0 +1,38 @@ +module Puppet::Parser::Functions + newfunction(:memcached_max_memory, :type => :rvalue, :doc => <<-EOS + Calculate max_memory size from fact 'memsize' and passed argument. + EOS + ) do |arguments| + + raise(Puppet::ParseError, "memcached_max_memory(): " + + "Wrong number of arguments given " + + "(#{arguments.size} for 1)") if arguments.size != 1 + + arg = arguments[0] + memsize = lookupvar('memorysize') + + if arg and !arg.to_s.end_with?('%') + result_in_mb = arg.to_i + else + max_memory_percent = arg ? 
arg : '95%' + + # Taken from puppetlabs-stdlib to_bytes() function + value,prefix = */([0-9.e+-]*)\s*([^bB]?)/.match(memsize)[1,2] + + value = value.to_f + case prefix + when '' then value = value + when 'k' then value *= (1<<10) + when 'M' then value *= (1<<20) + when 'G' then value *= (1<<30) + when 'T' then value *= (1<<40) + when 'E' then value *= (1<<50) + else raise Puppet::ParseError, "memcached_max_memory(): Unknown prefix #{prefix}" + end + value = value.to_i + result_in_mb = ( (value / (1 << 20) ) * (max_memory_percent.to_f / 100.0) ).floor + end + + return result_in_mb + end +end diff --git a/modules/memcached/manifests/init.pp b/modules/memcached/manifests/init.pp new file mode 100755 index 0000000..19f825f --- /dev/null +++ b/modules/memcached/manifests/init.pp @@ -0,0 +1,89 @@ +# == Class: memcached +# +# Manage memcached +# +class memcached ( + $package_ensure = 'present', + $logfile = '/var/log/memcached.log', + $manage_firewall = false, + $max_memory = false, + $item_size = false, + $lock_memory = false, + $listen_ip = '0.0.0.0', + $tcp_port = 11211, + $udp_port = 11211, + $user = $::memcached::params::user, + $max_connections = '8192', + $verbosity = undef, + $unix_socket = undef, + $install_dev = false, + $processorcount = $::processorcount, + $service_restart = true, + $auto_removal = false, + $use_sasl = false +) inherits memcached::params { + + # validate type and convert string to boolean if necessary + if is_string($manage_firewall) { + $manage_firewall_bool = str2bool($manage_firewall) + } else { + $manage_firewall_bool = $manage_firewall + } + validate_bool($manage_firewall_bool) + validate_bool($service_restart) + + if $package_ensure == 'absent' { + $service_ensure = 'stopped' + $service_enable = false + } else { + $service_ensure = 'running' + $service_enable = true + } + + package { $memcached::params::package_name: + ensure => $package_ensure, + } + + if $install_dev { + package { $memcached::params::dev_package_name: + ensure => 
$package_ensure, + require => Package[$memcached::params::package_name] + } + } + + if $manage_firewall_bool == true { + firewall { "100_tcp_${tcp_port}_for_memcached": + port => $tcp_port, + proto => 'tcp', + action => 'accept', + } + + firewall { "100_udp_${udp_port}_for_memcached": + port => $udp_port, + proto => 'udp', + action => 'accept', + } + } + + if $service_restart { + $service_notify_real = Service[$memcached::params::service_name] + } else { + $service_notify_real = undef + } + + file { $memcached::params::config_file: + owner => 'root', + group => 'root', + mode => '0644', + content => template($memcached::params::config_tmpl), + require => Package[$memcached::params::package_name], + notify => $service_notify_real, + } + + service { $memcached::params::service_name: + ensure => $service_ensure, + enable => $service_enable, + hasrestart => true, + hasstatus => $memcached::params::service_hasstatus, + } +} diff --git a/modules/memcached/manifests/params.pp b/modules/memcached/manifests/params.pp new file mode 100755 index 0000000..20cb1ca --- /dev/null +++ b/modules/memcached/manifests/params.pp @@ -0,0 +1,40 @@ +# == Class: memcached::params +# +class memcached::params { + case $::osfamily { + 'Debian': { + $package_name = 'memcached' + $service_name = 'memcached' + $service_hasstatus = false + $dev_package_name = 'libmemcached-dev' + $config_file = '/etc/memcached.conf' + $config_tmpl = "${module_name}/memcached.conf.erb" + $user = 'nobody' + } + /RedHat|Suse/: { + $package_name = 'memcached' + $service_name = 'memcached' + $service_hasstatus = true + $dev_package_name = 'libmemcached-devel' + $config_file = '/etc/sysconfig/memcached' + $config_tmpl = "${module_name}/memcached_sysconfig.erb" + $user = 'memcached' + } + default: { + case $::operatingsystem { + 'Amazon': { + $package_name = 'memcached' + $service_name = 'memcached' + $service_hasstatus = true + $dev_package_name = 'libmemcached-devel' + $config_file = '/etc/sysconfig/memcached' + 
$config_tmpl = "${module_name}/memcached_sysconfig.erb" + $user = 'memcached' + } + default: { + fail("Unsupported platform: ${::osfamily}/${::operatingsystem}") + } + } + } + } +} diff --git a/modules/memcached/metadata.json b/modules/memcached/metadata.json new file mode 100755 index 0000000..5ee9067 --- /dev/null +++ b/modules/memcached/metadata.json @@ -0,0 +1,45 @@ +{ + "operatingsystem_support": [ + { + "operatingsystem": "RedHat" + }, + { + "operatingsystem": "CentOS" + }, + { + "operatingsystem": "OracleLinux" + }, + { + "operatingsystem": "Scientific" + }, + { + "operatingsystem": "Debian" + }, + { + "operatingsystem": "Ubuntu" + } + ], + "requirements": [ + { + "name": "pe", + "version_requirement": ">= 3.2.0 < 3.4.0" + }, + { + "name": "puppet", + "version_requirement": "3.x" + } + ], + "name": "saz-memcached", + "version": "2.6.0", + "author": "saz", + "summary": "UNKNOWN", + "license": "Apache License, Version 2.0", + "source": "git://github.com/saz/puppet-memcached.git", + "project_page": "https://github.com/saz/puppet-memcached", + "issues_url": "https://github.com/saz/puppet-memcached/issues", + "description": "Manage memcached via Puppet", + "dependencies": [ + {"name":"puppetlabs/stdlib","version_requirement":">= 3.2.0"}, + {"name":"puppetlabs/firewall","version_requirement":">= 0.1.0"} + ] +} diff --git a/modules/memcached/spec/classes/memcached_spec.rb b/modules/memcached/spec/classes/memcached_spec.rb new file mode 100755 index 0000000..b40599f --- /dev/null +++ b/modules/memcached/spec/classes/memcached_spec.rb @@ -0,0 +1,207 @@ +require 'spec_helper' +describe 'memcached' do + + describe 'with manage_firewall parameter' do + ['Debian','RedHat'].each do |osfam| + context "on osfamily #{osfam}" do + let(:facts) do + { :osfamily => osfam, + :memorysize => '1000 MB', + :processorcount => '1', + } + end + + ['true',true].each do |value| + context "set to #{value}" do + let(:params) { { :manage_firewall => value } } + + it { should 
contain_class('memcached') } + + it { should contain_firewall('100_tcp_11211_for_memcached') } + it { should contain_firewall('100_udp_11211_for_memcached') } + end + end + + ['false',false].each do |value| + context "set to #{value}" do + let(:params) { { :manage_firewall => value } } + + it { should contain_class('memcached') } + + it { should_not contain_firewall('100_tcp_11211_for_memcached') } + it { should_not contain_firewall('100_udp_11211_for_memcached') } + end + end + + context 'set to an invalid type (array)' do + let(:params) { { :manage_firewall => ['invalid','type'] } } + + it do + expect { + should contain_class('memcached') + }.to raise_error(Puppet::Error) + end + end + end + end + end + + let :default_params do + { + :package_ensure => 'present', + :logfile => '/var/log/memcached.log', + :max_memory => false, + :item_size => false, + :lock_memory => false, + :listen_ip => '0.0.0.0', + :tcp_port => '11211', + :udp_port => '11211', + :user => 'nobody', + :max_connections => '8192', + :install_dev => false, + :processorcount => 1, + :use_sasl => false + } + end + + [ {}, + { + :package_ensure => 'latest', + :logfile => '/var/log/memcached.log', + :max_memory => '2', + :item_size => false, + :lock_memory => true, + :listen_ip => '127.0.0.1', + :tcp_port => '11212', + :udp_port => '11213', + :user => 'somebdy', + :max_connections => '8193', + :verbosity => 'vvv', + :processorcount => 3, + :use_sasl => true + }, + { + :package_ensure => 'present', + :logfile => '/var/log/memcached.log', + :max_memory => '20%', + :lock_memory => false, + :listen_ip => '127.0.0.1', + :tcp_port => '11212', + :udp_port => '11213', + :user => 'somebdy', + :max_connections => '8193', + :verbosity => 'vvv', + :install_dev => true, + :processorcount => 1 + }, + { + :package_ensure => 'absent', + :install_dev => true + } + ].each do |param_set| + describe "when #{param_set == {} ? 
"using default" : "specifying"} class parameters" do + + let :param_hash do + default_params.merge(param_set) + end + + let :params do + param_set + end + + ['Debian'].each do |osfamily| + + let :facts do + { + :osfamily => osfamily, + :memorysize => '1000 MB', + :processorcount => '1', + } + end + + describe "on supported osfamily: #{osfamily}" do + + it { should contain_class("memcached::params") } + + it { should contain_package("memcached").with_ensure(param_hash[:package_ensure]) } + + it { should_not contain_firewall('100_tcp_11211_for_memcached') } + it { should_not contain_firewall('100_udp_11211_for_memcached') } + + it { + if param_hash[:install_dev] + should contain_package("libmemcached-dev").with_ensure(param_hash[:package_ensure]) + end + } + + it { should contain_file("/etc/memcached.conf").with( + 'owner' => 'root', + 'group' => 'root' + )} + + it { + if param_hash[:package_ensure] == 'absent' + should contain_service("memcached").with( + 'ensure' => 'stopped', + 'enable' => false + ) + else + should contain_service("memcached").with( + 'ensure' => 'running', + 'enable' => true, + 'hasrestart' => true, + 'hasstatus' => false + ) + end + } + + it 'should compile the template based on the class parameters' do + content = param_value( + subject, + 'file', + '/etc/memcached.conf', + 'content' + ) + expected_lines = [ + "logfile #{param_hash[:logfile]}", + "-l #{param_hash[:listen_ip]}", + "-p #{param_hash[:tcp_port]}", + "-U #{param_hash[:udp_port]}", + "-u #{param_hash[:user]}", + "-c #{param_hash[:max_connections]}", + "-t #{param_hash[:processorcount]}" + ] + if(param_hash[:max_memory]) + if(param_hash[:max_memory].end_with?('%')) + expected_lines.push("-m 200") + else + expected_lines.push("-m #{param_hash[:max_memory]}") + end + else + expected_lines.push("-m 950") + end + if(param_hash[:lock_memory]) + expected_lines.push("-k") + end + if(param_hash[:verbosity]) + expected_lines.push("-vvv") + end + if(param_hash[:use_sasl]) + 
expected_lines.push("-S") + end + (content.split("\n") & expected_lines).should =~ expected_lines + end + end + end + ['Redhat'].each do |osfamily| + describe 'on supported platform' do + it 'should fail' do + + end + end + end + end + end +end + +# vim: expandtab shiftwidth=2 softtabstop=2 diff --git a/modules/memcached/spec/spec.opts b/modules/memcached/spec/spec.opts new file mode 100755 index 0000000..91cd642 --- /dev/null +++ b/modules/memcached/spec/spec.opts @@ -0,0 +1,6 @@ +--format +s +--colour +--loadby +mtime +--backtrace diff --git a/modules/memcached/spec/spec_helper.rb b/modules/memcached/spec/spec_helper.rb new file mode 100755 index 0000000..dc7e9f4 --- /dev/null +++ b/modules/memcached/spec/spec_helper.rb @@ -0,0 +1,2 @@ +require 'rubygems' +require 'puppetlabs_spec_helper/module_spec_helper' diff --git a/modules/memcached/templates/memcached.conf.erb b/modules/memcached/templates/memcached.conf.erb new file mode 100755 index 0000000..ede73d0 --- /dev/null +++ b/modules/memcached/templates/memcached.conf.erb @@ -0,0 +1,62 @@ +# File managed by puppet + +# Run memcached as a daemon. +-d + +# pidfile +-P /var/run/memcached.pid + +# Log memcached's output +logfile <%= @logfile -%> + +<% if @verbosity -%> +# Verbosity +-<%= @verbosity %> +<% end -%> + +# Use MB memory max to use for object storage. +<% Puppet::Parser::Functions.function('memcached_max_memory') -%> +-m <%= scope.function_memcached_max_memory([@max_memory]) %> + +<% if @lock_memory -%> +# Lock down all paged memory. There is a limit on how much memory you may lock. +-k +<% end -%> + +<% if @use_sasl -%> +# Start with SASL support +-S +<% end -%> + +<% if @unix_socket -%> +# UNIX socket path to listen on +-s <%= @unix_socket %> +<% else -%> +# IP to listen on +-l <%= @listen_ip %> + +# TCP port to listen on +-p <%= @tcp_port %> + +# UDP port to listen on +-U <%= @udp_port %> +<% end -%> + +# Run daemon as user +-u <%= @user %> + +# Limit the number of simultaneous incoming connections. 
+-c <%= @max_connections %> + +# Number of threads to use to process incoming requests. +-t <%= @processorcount %> + +<% if @item_size -%> +# Override the default size of each slab page +-I <%= @item_size %> +<% end -%> + +<% if @auto_removal -%> +# Disable automatic removal of items from the cache when out of memory +-M +<% end -%> \ No newline at end of file diff --git a/modules/memcached/templates/memcached_sysconfig.erb b/modules/memcached/templates/memcached_sysconfig.erb new file mode 100755 index 0000000..f9f38f9 --- /dev/null +++ b/modules/memcached/templates/memcached_sysconfig.erb @@ -0,0 +1,52 @@ +<%- +result = [] +if @verbosity + result << '-' + @verbosity.to_s +end +if @lock_memory + result << '-k' +end +if @listen_ip + result << '-l ' + @listen_ip +end +if @udp_port + result << '-U ' + @udp_port.to_s +end +if @item_size + result << '-I ' + @item_size.to_s +end +result << '-t ' + @processorcount +if @logfile + result << '>> ' + @logfile + ' 2>&1' +end +-%> +<%- if scope['osfamily'] != 'Suse' -%> +PORT="<%= @tcp_port %>" +USER="<%= @user %>" +MAXCONN="<%= @max_connections %>" +<% Puppet::Parser::Functions.function('memcached_max_memory') -%> +CACHESIZE="<%= scope.function_memcached_max_memory([@max_memory]) %>" +OPTIONS="<%= result.join(' ') %>" +<%- else -%> +MEMCACHED_PARAMS="<%= result.join(' ') %>" + +## Path: Network/WWW/Memcached +## Description: username memcached should run as +## Type: string +## Default: "memcached" +## Config: memcached +# +# username memcached should run as +# +MEMCACHED_USER="<%= @user %>" + +## Path: Network/WWW/Memcached +## Description: group memcached should be run as +## Type: string +## Default: "memcached" +## Config: memcached +# +# group memcached should be run as +# +MEMCACHED_GROUP="<%= @user %>" +<%- end -%> diff --git a/modules/memcached/tests/init.pp b/modules/memcached/tests/init.pp new file mode 100755 index 0000000..22eecc3 --- /dev/null +++ b/modules/memcached/tests/init.pp @@ -0,0 +1 @@ +include memcached 
diff --git a/modules/parrot_php/manifests/init.pp b/modules/parrot_php/manifests/init.pp index b836f87..56bbd21 100644 --- a/modules/parrot_php/manifests/init.pp +++ b/modules/parrot_php/manifests/init.pp @@ -25,6 +25,15 @@ require => Class["parrot_repos"], } + case $parrot_memcache_enabled { + 'true', true: { + package { 'php5-memcached': + ensure => 'latest', + require => [Class["parrot_repos"], Package['php5']], + } + } + } + # We don't use xhprof from the ubuntu package any more. package { 'php5-xhprof': ensure => 'purged', diff --git a/modules/redis/CHANGES.md b/modules/redis/CHANGES.md new file mode 100644 index 0000000..f3982c9 --- /dev/null +++ b/modules/redis/CHANGES.md @@ -0,0 +1,54 @@ +redis module for puppet +======================= + +0.10.0 +------ +Default version to install is 2.8.12. +Expose new parameters to redis class. - @brucem +Add parameters to configure snapshotting. - @kjoconnor +Fix deprecation warning in template. - @aadamovich +Add support for redis 2.8. - @rickard-von-essen +Add versions to module dependencies. - @yjpa7145 +Updated download URL. - @coffeejunk +Adds user and group configuration parameters. - @yjpa7145 +Fix rubygems deprecation warning. - @yjpa7145 +Disable some puppet lint warnings. +Add chkconfig to init file. - @antonbabenko +Add redis::instance define to allow multiple instances. - @evanstachowiak + +0.0.9 +----- +Use maestrodev/wget and puppetlabs/gcc to replace some common package dependencies. - @garethr + +0.0.8 +----- +Fix init script when redis_bind_address is not defined (the default). + +0.0.7 +----- +Add support for parameterized listening port and bind address. + +0.0.6 +----- +Add support for installing any available version. + +0.0.5 +----- +Add option to install 2.6. +Add spec tests. + +0.0.4 +----- +It's possible to configure a password to redis setup. + +0.0.3 +----- +Fix init script. + +0.0.2 +----- +Change the name to redis so that module name and class name are in sync. 
+ +0.0.1 +----- +First release! diff --git a/modules/redis/LICENSE b/modules/redis/LICENSE new file mode 100644 index 0000000..0eab6f7 --- /dev/null +++ b/modules/redis/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014, Thomas Van Doren +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/modules/redis/README.md b/modules/redis/README.md new file mode 100644 index 0000000..545a02c --- /dev/null +++ b/modules/redis/README.md @@ -0,0 +1,58 @@ +redis puppet module +=================== + +[![Build Status](https://secure.travis-ci.org/thomasvandoren/puppet-redis.png)](http://travis-ci.org/thomasvandoren/puppet-redis) + +Install and configure redis. + +Usage +----- +Installs redis server and client with reasonable defaults. 
+ +```puppet +include redis +``` + +Installs redis server and client with version 2.6.5. + +```puppet +class { 'redis': + version => '2.6.5', +} +``` + +Installs version 2.4.17, listens on default port 6379 with default settings. +Sets up 2nd instance on port 6900, binds to address 10.1.2.3 (instead of all +available interfaces), sets max memory to 1 gigabyte, and sets a password from +hiera. + +```puppet +class { 'redis': + version => '2.4.17', +} +redis::instance { 'redis-6900': + redis_port => '6900', + redis_bind_address => '10.1.2.3', + redis_password => hiera('redis_password'), + redis_max_memory => '1gb', +} +``` + +Development +----------- + +To run the linter and spec tests locally: + +```bash +bundle install --gemfile .gemfile +rake lint +rake spec +``` + +Authors +------- +Thomas Van Doren + +License +------- +BSD diff --git a/modules/redis/Rakefile b/modules/redis/Rakefile new file mode 100644 index 0000000..b715cbf --- /dev/null +++ b/modules/redis/Rakefile @@ -0,0 +1,8 @@ +require 'puppetlabs_spec_helper/rake_tasks' + +# This ensures that PuppetLint is available for configuration. 
+require 'puppet-lint/tasks/puppet-lint' + +PuppetLint.configuration.fail_on_warnings = true +PuppetLint.configuration.send('disable_80chars') +PuppetLint.configuration.send('disable_class_inherits_from_params_class') diff --git a/modules/redis/checksums.json b/modules/redis/checksums.json new file mode 100644 index 0000000..139aea4 --- /dev/null +++ b/modules/redis/checksums.json @@ -0,0 +1,17 @@ +{ + "CHANGES.md": "93923f51342ff0e345b0a9ce22d4aacc", + "LICENSE": "adb87d44dabfe739f1bb86bc2119c8eb", + "README.md": "791e77840ce63120ef30fe87e321f68a", + "Rakefile": "400ac17906e1c17ac34d0d2af719eabc", + "files/redis.conf": "403d36824791900fce32f2fd3635cd23", + "manifests/init.pp": "647b9d4baad0052e5749643a2eede327", + "manifests/instance.pp": "4262c72f94794d8e44357ad0fc5c3f4c", + "manifests/params.pp": "2d84a3571fb3dd2fda84dca38174d968", + "metadata.json": "267cd52b02c3690294638cbdbc982eac", + "spec/classes/redis_spec.rb": "6cfae0e8dc10001c2abb1d889db1f7db", + "spec/defines/instance_spec.rb": "5fd77d6cab51c6108992e2530f14f1b3", + "spec/spec_helper.rb": "3ea886dd135e120afa31e0aab12e85b0", + "templates/redis.init.erb": "a5ea0b1317b236d92d7689523a11bc9a", + "templates/redis_port.conf.erb": "57e1257a9fc86449ac1b54e2690e1aad", + "tests/init.pp": "7df9826add5de77d910d515c7f4f0993" +} \ No newline at end of file diff --git a/modules/redis/files/redis.conf b/modules/redis/files/redis.conf new file mode 100644 index 0000000..1c9233f --- /dev/null +++ b/modules/redis/files/redis.conf @@ -0,0 +1,492 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specifiy +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +# By default Redis does not run as a daemon. Use 'yes' if you need it. 
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /var/run/redis.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +# bind 127.0.0.1 + +# Specify the path for the unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 755 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# Set server verbosity to 'debug' +# it can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. 
The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. + +save 900 1 +save 300 10 +save 60 10000 + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# Also the Append Only File will be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. 
+# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave lost the connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of data data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets a timeout for both Bulk transfer I/O timeout and +# master data or ping response timeout. The default value is 60 seconds. +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). 
+# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possilbe to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# of hard to guess so that it will be still available for internal-use +# tools but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possilbe to completely kill a command renaming it into +# an empty string: +# +# rename-command CONFIG "" + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default there +# is no limit, and it's up to the number of file descriptors the Redis process +# is able to open. The special value '0' means no limits. +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 128 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# accordingly to the eviction policy selected (see maxmemmory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# an hard memory limit for an instance (using the 'noeviction' policy). 
+# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached? You can select among five behavior: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys->random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with all the kind of policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. +# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. 
For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. If you can live +# with the idea that the latest records will be lost if something like a crash +# happens this is the preferred way to run Redis. If instead you care a lot +# about your data and don't want to that a single record can get lost you should +# enable the append only mode: when this mode is enabled Redis will append +# every write operation received in the file appendonly.aof. This file will +# be read on startup in order to rebuild the full dataset in memory. +# +# Note that you can have both the async dumps and the append only file if you +# like (you have to comment the "save" statements above to disable the dumps). +# Still if append only mode is enabled Redis will load the data from the +# log file at startup ignoring the dump.rdb file. +# +# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append +# log file in background when it gets too big. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") +# appendfilename appendonly.aof + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only if one second passed since the last fsync. Compromise. +# +# The default is "everysec" that's usually the right compromise between +# speed and data safety. 
It's up to you to understand if you can relax this to +# "no" that will will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving the durability of Redis is +# the same as "appendfsync none", that in pratical terms means that it is +# possible to lost up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size will growth by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (or if no rewrite happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. 
If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a precentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 1024 + +################################ VIRTUAL MEMORY ############################### + +### WARNING! Virtual Memory is deprecated in Redis 2.4 +### The use of Virtual Memory is strongly discouraged. 
+ +# Virtual Memory allows Redis to work with datasets bigger than the actual +# amount of RAM needed to hold the whole dataset in memory. +# In order to do so very used keys are taken in memory while the other keys +# are swapped into a swap file, similarly to what operating systems do +# with memory pages. +# +# To enable VM just set 'vm-enabled' to yes, and set the following three +# VM parameters accordingly to your needs. + +vm-enabled no +# vm-enabled yes + +# This is the path of the Redis swap file. As you can guess, swap files +# can't be shared by different Redis instances, so make sure to use a swap +# file for every redis process you are running. Redis will complain if the +# swap file is already in use. +# +# The best kind of storage for the Redis swap file (that's accessed at random) +# is a Solid State Disk (SSD). +# +# *** WARNING *** if you are using a shared hosting the default of putting +# the swap file under /tmp is not secure. Create a dir with access granted +# only to Redis user and configure Redis to create the swap file there. +vm-swap-file /tmp/redis.swap + +# vm-max-memory configures the VM to use at max the specified amount of +# RAM. Everything that deos not fit will be swapped on disk *if* possible, that +# is, if there is still enough contiguous space in the swap file. +# +# With vm-max-memory 0 the system will swap everything it can. Not a good +# default, just specify the max amount of RAM you can in bytes, but it's +# better to leave some margin. For instance specify an amount of RAM +# that's more or less between 60 and 80% of your free RAM. +vm-max-memory 0 + +# Redis swap files is split into pages. An object can be saved using multiple +# contiguous pages, but pages can't be shared between different objects. +# So if your page is too big, small objects swapped out on disk will waste +# a lot of space. 
If you page is too small, there is less space in the swap +# file (assuming you configured the same number of total swap file pages). +# +# If you use a lot of small objects, use a page size of 64 or 32 bytes. +# If you use a lot of big objects, use a bigger page size. +# If unsure, use the default :) +vm-page-size 32 + +# Number of total memory pages in the swap file. +# Given that the page table (a bitmap of free/used pages) is taken in memory, +# every 8 pages on disk will consume 1 byte of RAM. +# +# The total swap size is vm-page-size * vm-pages +# +# With the default of 32-bytes memory pages and 134217728 pages Redis will +# use a 4 GB swap file, that will use 16 MB of RAM for the page table. +# +# It's better to use the smallest acceptable value for your application, +# but the default is large in order to work in most conditions. +vm-pages 134217728 + +# Max number of VM I/O threads running at the same time. +# This threads are used to read/write data from/to swap file, since they +# also encode and decode objects from disk to memory or the reverse, a bigger +# number of threads can help with big objects even if they can't help with +# I/O itself as the physical device may not be able to couple with many +# reads/writes operations at the same time. +# +# The special value of 0 turn off threaded I/O and enables the blocking +# Virtual Memory implementation. +vm-max-threads 4 + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded in a special way (much more memory efficient) when they +# have at max a given numer of elements, and the biggest element does not +# exceed a given threshold. You can configure this limits with the following +# configuration directives. +hash-max-zipmap-entries 512 +hash-max-zipmap-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. 
The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happens to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into an hash table +# that is rhashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# active rehashing the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply form time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +################################## INCLUDES ################################### + +# Include one or more other config files here. 
This is useful if you +# have a standard template that goes to all redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf diff --git a/modules/redis/manifests/init.pp b/modules/redis/manifests/init.pp new file mode 100644 index 0000000..edde41c --- /dev/null +++ b/modules/redis/manifests/init.pp @@ -0,0 +1,166 @@ +# == Class: redis +# +# Install redis. +# +# === Parameters +# +# [*version*] +# Version to install. +# Default: 2.8.12 +# +# [*redis_src_dir*] +# Location to unpack source code before building and installing it. +# Default: /opt/redis-src +# +# [*redis_bin_dir*] +# Location to install redis binaries. +# Default: /opt/redis +# +# [*redis_port*] +# Accept redis connections on this port. +# Default: 6379 +# +# [*redis_bind_address*] +# Address to bind to. +# Default: false, which binds to all interfaces +# +# [*redis_max_memory*] +# Max memory usage configuration. +# Default: 4gb +# +# [*redis_max_clients*] +# Set the redis config value maxclients. If no value provided, it is +# not included in the configuration for 2.6+ and set to 0 (unlimited) +# for 2.4. +# Default: 0 (2.4) +# Default: nil (2.6+) +# +# [*redis_timeout*] +# Set the redis config value timeout (seconds). +# Default: 300 +# +# [*redis_loglevel*] +# Set the redis config value loglevel. Valid values are debug, +# verbose, notice, and warning. +# Default: notice +# +# [*redis_databases*] +# Set the redis config value databases. +# Default: 16 +# +# [*redis_slowlog_log_slower_than*] +# Set the redis config value slowlog-log-slower-than (microseconds). +# Default: 10000 +# +# [*redis_slowlog_max_len*] +# Set the redis config value slowlog-max-len. +# Default: 1024 +# +# [*redis_password*] +# Password used by AUTH command. Will be set if it is not nil. +# Default: nil +# +# [*redis_saves*] +# Redis snapshotting parameters. Set to false for no snapshots.
+# Default: ['save 900 1', 'save 300 10', 'save 60 10000'] +# +# === Examples +# +# include redis +# +# class { 'redis': +# version => '2.8', +# redis_src_dir => '/fake/path/redis-src', +# redis_bin_dir => '/fake/path/redis', +# } +# +# === Authors +# +# Thomas Van Doren +# +# === Copyright +# +# Copyright 2012 Thomas Van Doren, unless otherwise noted. +# +class redis ( + $version = $redis::params::version, + $redis_src_dir = $redis::params::redis_src_dir, + $redis_bin_dir = $redis::params::redis_bin_dir, + $redis_user = $redis::params::redis_user, + $redis_group = $redis::params::redis_group, + $redis_port = $redis::params::redis_port, + $redis_bind_address = $redis::params::redis_bind_address, + $redis_max_memory = $redis::params::redis_max_memory, + $redis_max_clients = $redis::params::redis_max_clients, + $redis_timeout = $redis::params::redis_timeout, + $redis_loglevel = $redis::params::redis_loglevel, + $redis_databases = $redis::params::redis_databases, + $redis_slowlog_log_slower_than = $redis::params::redis_slowlog_log_slower_than, + $redis_slowlog_max_len = $redis::params::redis_slowlog_max_len, + $redis_password = $redis::params::redis_password, + $redis_saves = $redis::params::redis_saves +) inherits redis::params { + + include wget + include gcc + + $redis_pkg_name = "redis-${version}.tar.gz" + $redis_pkg = "${redis_src_dir}/${redis_pkg_name}" + + # Install default instance + redis::instance { 'redis-default': + redis_port => $redis_port, + redis_bind_address => $redis_bind_address, + redis_max_memory => $redis_max_memory, + redis_max_clients => $redis_max_clients, + redis_timeout => $redis_timeout, + redis_loglevel => $redis_loglevel, + redis_databases => $redis_databases, + redis_slowlog_log_slower_than => $redis_slowlog_log_slower_than, + redis_slowlog_max_len => $redis_slowlog_max_len, + redis_password => $redis_password, + redis_saves => $redis_saves, + } + + File { + owner => $redis_user, + group => $redis_group + } + file { $redis_src_dir: + 
ensure => directory, + } + file { '/etc/redis': + ensure => directory, + } + file { 'redis-lib': + ensure => directory, + path => '/var/lib/redis', + } + + exec { 'get-redis-pkg': + command => "/usr/bin/wget --output-document ${redis_pkg} http://download.redis.io/releases/${redis_pkg_name}", + unless => "/usr/bin/test -f ${redis_pkg}", + require => File[$redis_src_dir], + } + + file { 'redis-cli-link': + ensure => link, + path => '/usr/local/bin/redis-cli', + target => "${redis_bin_dir}/bin/redis-cli", + } + exec { 'unpack-redis': + command => "tar --strip-components 1 -xzf ${redis_pkg}", + cwd => $redis_src_dir, + path => '/bin:/usr/bin', + unless => "test -f ${redis_src_dir}/Makefile", + require => Exec['get-redis-pkg'], + } + exec { 'install-redis': + command => "make && make install PREFIX=${redis_bin_dir}", + cwd => $redis_src_dir, + path => '/bin:/usr/bin', + unless => "test $(${redis_bin_dir}/bin/redis-server --version | cut -d ' ' -f 1) = 'Redis'", + require => [ Exec['unpack-redis'], Class['gcc'] ], + } + +} diff --git a/modules/redis/manifests/instance.pp b/modules/redis/manifests/instance.pp new file mode 100644 index 0000000..d7bdbcc --- /dev/null +++ b/modules/redis/manifests/instance.pp @@ -0,0 +1,134 @@ +# == Define: redis::instance +# +# Configure redis instance on an arbitrary port. +# +# === Parameters +# +# [*redis_port*] +# Accept redis connections on this port. +# Default: 6379 +# +# [*redis_bind_address*] +# Address to bind to. +# Default: false, which binds to all interfaces +# +# [*redis_max_memory*] +# Max memory usage configuration. +# Default: 4gb +# +# [*redis_max_clients*] +# Set the redis config value maxclients. If no value provided, it is +# not included in the configuration for 2.6+ and set to 0 (unlimited) +# for 2.4. +# Default: 0 (2.4) +# Default: nil (2.6+) +# +# [*redis_timeout*] +# Set the redis config value timeout (seconds). +# Default: 300 +# +# [*redis_loglevel*] +# Set the redis config value loglevel. 
Valid values are debug, +# verbose, notice, and warning. +# Default: notice +# +# [*redis_databases*] +# Set the redis config value databases. +# Default: 16 +# +# [*redis_slowlog_log_slower_than*] +# Set the redis config value slowlog-log-slower-than (microseconds). +# Default: 10000 +# +# [*redis_showlog_max_len*] +# Set the redis config value slowlog-max-len. +# Default: 1024 +# +# [*redis_password*] +# Password used by AUTH command. Will be setted if it is not nil. +# Default: nil +# +# [*redis_saves*] +# Redis snapshotting parameters. Set to false for no snapshots. +# Default: ['save 900 1', 'save 300 10', 'save 60 10000'] +# +# === Examples +# +# redis::instance { 'redis-6900': +# redis_port => '6900', +# redis_max_memory => '64gb', +# } +# +# === Authors +# +# Thomas Van Doren +# +# === Copyright +# +# Copyright 2012 Thomas Van Doren, unless otherwise noted. +# +define redis::instance ( + $redis_port = $redis::params::redis_port, + $redis_bind_address = $redis::params::redis_bind_address, + $redis_max_memory = $redis::params::redis_max_memory, + $redis_max_clients = $redis::params::redis_max_clients, + $redis_timeout = $redis::params::redis_timeout, + $redis_loglevel = $redis::params::redis_loglevel, + $redis_databases = $redis::params::redis_databases, + $redis_slowlog_log_slower_than = $redis::params::redis_slowlog_log_slower_than, + $redis_slowlog_max_len = $redis::params::redis_slowlog_max_len, + $redis_password = $redis::params::redis_password, + $redis_saves = $redis::params::redis_saves + ) { + + # Using Exec as a dependency here to avoid dependency cyclying when doing + # Class['redis'] -> Redis::Instance[$name] + Exec['install-redis'] -> Redis::Instance[$name] + include redis + + $version = $redis::version + + case $version { + /^2\.4\.\d+$/: { + if ($redis_max_clients == false) { + $real_redis_max_clients = 0 + } + else { + $real_redis_max_clients = $redis_max_clients + } + } + /^2\.[68]\.\d+$/: { + $real_redis_max_clients = $redis_max_clients + } 
+ default: { + fail("Invalid redis version, ${version}. It must match 2.4.\\d+ or 2.[68].\\d+.") + } + } + + file { "redis-lib-port-${redis_port}": + ensure => directory, + path => "/var/lib/redis/${redis_port}", + } + + file { "redis-init-${redis_port}": + ensure => present, + path => "/etc/init.d/redis_${redis_port}", + mode => '0755', + content => template('redis/redis.init.erb'), + notify => Service["redis-${redis_port}"], + } + file { "redis_port_${redis_port}.conf": + ensure => present, + path => "/etc/redis/${redis_port}.conf", + mode => '0644', + content => template('redis/redis_port.conf.erb'), + } + + service { "redis-${redis_port}": + ensure => running, + name => "redis_${redis_port}", + enable => true, + require => [ File["redis_port_${redis_port}.conf"], File["redis-init-${redis_port}"], File["redis-lib-port-${redis_port}"] ], + subscribe => File["redis_port_${redis_port}.conf"], + } +} diff --git a/modules/redis/manifests/params.pp b/modules/redis/manifests/params.pp new file mode 100644 index 0000000..d3fb9ed --- /dev/null +++ b/modules/redis/manifests/params.pp @@ -0,0 +1,34 @@ +# == Class: redis::params +# +# Redis params. +# +# === Parameters +# +# === Authors +# +# Thomas Van Doren +# +# === Copyright +# +# Copyright 2012 Thomas Van Doren, unless otherwise noted. 
+# +class redis::params { + + $redis_port = '6379' + $redis_bind_address = false + $version = '2.8.12' + $redis_src_dir = '/opt/redis-src' + $redis_bin_dir = '/opt/redis' + $redis_max_memory = '4gb' + $redis_max_clients = false + $redis_timeout = 300 # 0 = disabled + $redis_loglevel = 'notice' + $redis_databases = 16 + $redis_slowlog_log_slower_than = 10000 # microseconds + $redis_slowlog_max_len = 1024 + $redis_password = false + $redis_saves = ['save 900 1', 'save 300 10', 'save 60 10000'] + $redis_user = 'root' + $redis_group = 'root' + +} diff --git a/modules/redis/metadata.json b/modules/redis/metadata.json new file mode 100644 index 0000000..4460a1c --- /dev/null +++ b/modules/redis/metadata.json @@ -0,0 +1,21 @@ +{ + "name": "thomasvandoren-redis", + "version": "0.10.0", + "author": "Thomas Van Doren", + "summary": "Install and configure redis.", + "license": "BSD", + "source": "git://github.com/thomasvandoren/puppet-redis.git", + "project_page": "https://github.com/thomasvandoren/puppet-redis", + "issues_url": "https://github.com/thomasvandoren/puppet-redis/issues", + "description": "Install and configure redis.", + "dependencies": [ + { + "name": "maestrodev/wget", + "version_requirement": ">= 1.1.0" + }, + { + "name": "puppetlabs/gcc", + "version_requirement": ">= 0.0.3" + } + ] +} diff --git a/modules/redis/spec/classes/redis_spec.rb b/modules/redis/spec/classes/redis_spec.rb new file mode 100644 index 0000000..c5760b1 --- /dev/null +++ b/modules/redis/spec/classes/redis_spec.rb @@ -0,0 +1,206 @@ +require 'spec_helper' + +describe 'redis', :type => 'class' do + + let :facts do + { + :osfamily => 'Debian' + } + end # let + + context "On a Debian OS with default params" do + it do + should compile.with_all_deps + + should contain_class('gcc') + should contain_class('wget') + + should contain_file('/opt/redis-src').with(:ensure => 'directory', + :owner => 'root', + :group => 'root') + should contain_file('/etc/redis').with(:ensure => 'directory', + :owner 
=> 'root', + :group => 'root') + should contain_file('redis-lib').with(:ensure => 'directory', + :path => '/var/lib/redis', + :owner => 'root', + :group => 'root') + should contain_file("redis-lib-port-6379").with(:ensure => 'directory', + :path => '/var/lib/redis/6379', + :owner => 'root', + :group => 'root') + should contain_exec('get-redis-pkg').with_command(/http:\/\/download\.redis\.io\/releases\/redis-2\.8\.12\.tar\.gz/) + should contain_file('redis-cli-link').with(:ensure => 'link', + :path => '/usr/local/bin/redis-cli', + :target => '/opt/redis/bin/redis-cli') + + should contain_exec('unpack-redis').with(:cwd => '/opt/redis-src', + :path => '/bin:/usr/bin') + should contain_exec('install-redis').with(:cwd => '/opt/redis-src', + :path => '/bin:/usr/bin') + + should contain_service('redis-6379').with(:ensure => 'running', + :name => 'redis_6379', + :enable => true) + + should contain_file('redis-init-6379').with(:ensure => 'present', + :path => '/etc/init.d/redis_6379', + :mode => '0755', + :owner => 'root', + :group => 'root') + should contain_file('redis-init-6379').with_content(/^REDIS_BIND_ADDRESS="127.0.0.1"$/) + should contain_file('redis-init-6379').with_content(/^CLIEXEC="\/opt\/redis\/bin\/redis-cli -h \$REDIS_BIND_ADDRESS -p \$REDIS_PORT/) + + # These values were changed in 2.6. + should_not contain_file('redis_port_6379.conf').with_content(/maxclients 0/) + should_not contain_file('redis_port_6379.conf').with_content(/hash-max-zipmap-entries 512/) + should_not contain_file('redis_port_6379.conf').with_content(/hash-max-zipmap-value 64/) + should contain_file('redis_port_6379.conf').with_content(/hash-max-ziplist-entries 512/) + should contain_file('redis_port_6379.conf').with_content(/hash-max-ziplist-value 64/) + + # The bind config should not be present by default. 
+ should_not contain_file('redis_port_6379.conf').with_content(/bind \d+\.\d+\.\d+\.\d+/) + end # it + end # context + + context "On a Debian OS with non-default src and bin locations" do + let :params do + { + :redis_src_dir => '/fake/path/to/redis-src', + :redis_bin_dir => '/fake/path/to/redis' + } + end # let + + it do + should compile.with_all_deps + + should contain_class('gcc') + should contain_class('wget') + + should contain_file('/fake/path/to/redis-src').with(:ensure => 'directory') + should contain_file('/etc/redis').with(:ensure => 'directory') + should contain_file('redis-lib').with(:ensure => 'directory', + :path => '/var/lib/redis') + should contain_file('redis-lib-port-6379').with(:ensure => 'directory', + :path => '/var/lib/redis/6379') + should contain_file('redis-init-6379').with(:ensure => 'present', + :path => '/etc/init.d/redis_6379', + :mode => '0755') + should contain_file('redis-cli-link').with(:ensure => 'link', + :path => '/usr/local/bin/redis-cli', + :target => '/fake/path/to/redis/bin/redis-cli') + + should contain_exec('unpack-redis').with(:cwd => '/fake/path/to/redis-src', + :path => '/bin:/usr/bin') + should contain_exec('install-redis').with(:cwd => '/fake/path/to/redis-src', + :path => '/bin:/usr/bin') + + should contain_service('redis-6379').with(:ensure => 'running', + :name => 'redis_6379', + :enable => true) + end # it + end # context + + context "On a Debian OS with version 2.6 param" do + let :params do + { + :version => '2.6.4' + } + end # let + + it do + should compile.with_all_deps + + should_not contain_file('redis-pkg') + should contain_exec('get-redis-pkg').with_command(/http:\/\/download\.redis\.io\/releases\/redis-2\.6\.4\.tar\.gz/) + + # Maxclients is left out for 2.6 unless it is explicitly set. + should_not contain_file('redis_port_6379.conf').with_content(/maxclients 0/) + + # These params were renamed b/w 2.4 and 2.6. 
+ should contain_file('redis_port_6379.conf').with_content(/hash-max-ziplist-entries 512/) + should contain_file('redis_port_6379.conf').with_content(/hash-max-ziplist-value 64/) + should_not contain_file('redis_port_6379.conf').with_content(/hash-max-zipmap-entries 512/) + should_not contain_file('redis_port_6379.conf').with_content(/hash-max-zipmap-value 64/) + end # it + end # context + + context "With an invalid version param." do + let :params do + { + :version => 'bad version' + } + end # let + + it do + expect { should raise_error(Puppet::Error) } + end # it + end # context + + context "On a Debian system with a non-default user specified" do + let :params do + { :redis_user => 'my_user' } + end + + it do + should compile.with_all_deps + should contain_file('/opt/redis-src').with(:owner => 'my_user') + should contain_file('/etc/redis').with(:owner => 'my_user') + should contain_file('redis-lib').with(:owner => 'my_user') + should contain_file('redis-lib-port-6379').with(:owner => 'my_user') + should contain_file('redis-init-6379').with(:owner => 'my_user') + should_not contain_file('redis-pkg') + end # it + end # context + + context "On a Debian system with a non-default group specified" do + let :params do + { :redis_group => 'my_group' } + end + + it do + should compile.with_all_deps + should contain_file('/opt/redis-src').with(:group => 'my_group') + should contain_file('/etc/redis').with(:group => 'my_group') + should contain_file('redis-lib').with(:group => 'my_group') + should contain_file('redis-lib-port-6379').with(:group => 'my_group') + should contain_file('redis-init-6379').with(:group => 'my_group') + should_not contain_file('redis-pkg') + end # it + end # context + + context "On a Debian system with instance parameters specified" do + let :params do + { + :redis_port => '8000', + :redis_bind_address => '10.1.2.3', + :redis_max_memory => '64gb', + :redis_max_clients => '10000', + :redis_timeout => '15', + :redis_loglevel => 'warning', + 
:redis_databases => '64', + :redis_slowlog_log_slower_than => '5000', + :redis_slowlog_max_len => '4096', + :redis_password => 'sekrit', + :redis_saves => ['save 17 42', 'save 1 2'] + } + end # let + + it do + should compile.with_all_deps + should contain_file('redis_port_8000.conf').with_ensure('present') + should contain_file('redis_port_8000.conf').with_content(/^port 8000$/) + should contain_file('redis_port_8000.conf').with_content(/^bind 10\.1\.2\.3$/) + should contain_file('redis_port_8000.conf').with_content(/^maxmemory 64gb$/) + should contain_file('redis_port_8000.conf').with_content(/^maxclients 10000$/) + should contain_file('redis_port_8000.conf').with_content(/^timeout 15$/) + should contain_file('redis_port_8000.conf').with_content(/^loglevel warning$/) + should contain_file('redis_port_8000.conf').with_content(/^databases 64$/) + should contain_file('redis_port_8000.conf').with_content(/^slowlog-log-slower-than 5000$/) + should contain_file('redis_port_8000.conf').with_content(/^slowlog-max-len 4096$/) + should contain_file('redis_port_8000.conf').with_content(/^requirepass sekrit$/) + should contain_file('redis_port_8000.conf').with_content(/^save 17 42$/) + should contain_file('redis_port_8000.conf').with_content(/^save 1 2$/) + end # it + end # context +end # describe diff --git a/modules/redis/spec/defines/instance_spec.rb b/modules/redis/spec/defines/instance_spec.rb new file mode 100644 index 0000000..5c86d61 --- /dev/null +++ b/modules/redis/spec/defines/instance_spec.rb @@ -0,0 +1,115 @@ +require 'spec_helper' + +describe 'redis::instance', :type => 'define' do + let(:title) { 'redis-instance' } + + let :facts do + { + :osfamily => 'RedHat' + } + end # let + + context "On Debian systems with default parameters" do + it do + should contain_file('redis_port_6379.conf').with_content(/^port 6379$/) + should contain_file('redis_port_6379.conf').with_content(/^save 900 1$/) + should contain_file('redis_port_6379.conf').with_content(/^save 300 
10$/) + should contain_file('redis_port_6379.conf').with_content(/^save 60 10000$/) + end # it + end # context + + context "On Debian systems with no password parameter" do + let :params do + { + :redis_password => false + } + end # let + + it do + should contain_file('redis_port_6379.conf').without_content(/^requirepass/) + end # it + end # context + + context "On Debian systems with password parameter" do + let :params do + { + :redis_port => '6900', + :redis_password => 'ThisIsAReallyBigSecret' + } + end # let + + it do + should compile.with_all_deps + + should contain_file('redis_port_6900.conf').with_content(/^requirepass ThisIsAReallyBigSecret/) + should contain_file('redis-init-6900').with_content(/^CLIEXEC="[\w\/]+redis-cli -h \$REDIS_BIND_ADDRESS -p \$REDIS_PORT -a ThisIsAReallyBigSecret/) + end # it + end # context + + context "With a non-default port parameter" do + let :params do + { + :redis_port => '6900' + } + end # let + + it do + should compile.with_all_deps + + should contain_file('redis_port_6900.conf').with_content(/^port 6900$/) + should contain_file('redis_port_6900.conf').with_content(/^pidfile \/var\/run\/redis_6900\.pid$/) + should contain_file('redis_port_6900.conf').with_content(/^logfile \/var\/log\/redis_6900\.log$/) + should contain_file('redis_port_6900.conf').with_content(/^dir \/var\/lib\/redis\/6900$/) + should contain_file('redis-init-6900').with_content(/^REDIS_PORT="6900"$/) + end # it + end # context + + context "With a non default bind address" do + let :params do + { + :redis_port => '6900', + :redis_bind_address => '10.1.2.3' + } + end # let + + it do + should compile.with_all_deps + + should contain_file('redis_port_6900.conf').with_content(/^bind 10\.1\.2\.3$/) + should contain_file('redis-init-6900').with_content(/^REDIS_BIND_ADDRESS="10.1.2.3"$/) + end # it + end # context + + context "On Debian systems with no saves" do + let :params do + { + :redis_port => '6380', + :redis_saves => false + } + end + + it do + should 
contain_file('redis_port_6380.conf').with_ensure('present') + should contain_file('redis_port_6380.conf').without_content(/^requirepass/) + should contain_file('redis_port_6380.conf').without_content(/^save 900 1$/) + end # it + end # context + + context "On Debian systems with saves set to \['save 3600 1000000'\]" do + let :params do + { + :redis_port => '7000', + :redis_saves => ['save 3600 1000000', 'save 17 42'] + } + end # let + + it do + should contain_file('redis_port_7000.conf').with_ensure('present') + should contain_file('redis_port_7000.conf').without_content(/^requirepass/) + should contain_file('redis_port_7000.conf').with_content(/^port 7000$/) + should contain_file('redis_port_7000.conf').with_content(/^save 3600 1000000$/) + should contain_file('redis_port_7000.conf').with_content(/^save 17 42$/) + should contain_file('redis_port_7000.conf').without_content(/^save 900 1$/) + end # it + end # context +end # describe diff --git a/modules/redis/spec/spec_helper.rb b/modules/redis/spec/spec_helper.rb new file mode 100644 index 0000000..dc7e9f4 --- /dev/null +++ b/modules/redis/spec/spec_helper.rb @@ -0,0 +1,2 @@ +require 'rubygems' +require 'puppetlabs_spec_helper/module_spec_helper' diff --git a/modules/redis/templates/redis.init.erb b/modules/redis/templates/redis.init.erb new file mode 100644 index 0000000..541d08d --- /dev/null +++ b/modules/redis/templates/redis.init.erb @@ -0,0 +1,97 @@ +#!/bin/sh +#Configurations injected by install_server below.... + +REDIS_PORT="<%= @redis_port %>" +REDIS_BIND_ADDRESS="<%= @redis_bind_address ? @redis_bind_address : '127.0.0.1' %>" + +EXEC=<%= scope.lookupvar('redis::redis_bin_dir') %>/bin/redis-server +CLIEXEC="<%= scope.lookupvar('redis::redis_bin_dir') %>/bin/redis-cli -h $REDIS_BIND_ADDRESS -p $REDIS_PORT <%= @redis_password ? 
'-a ' + @redis_password : '' %>" +PIDFILE="/var/run/redis_${REDIS_PORT}.pid" +CONF="/etc/redis/${REDIS_PORT}.conf" + +############### + +# chkconfig: 2345 95 20 +# description: redis_<%= @redis_port %> is the redis daemon. +### BEGIN INIT INFO +# Provides: redis_<%= @redis_port %> +# Required-Start: +# Required-Stop: +# Should-Start: +# Should-Stop: +# Short-Description: start and stop redis_<%= @redis_port %> +# Description: Redis daemon +### END INIT INFO + +set -e + +start() +{ + if [ -x $PIDFILE ] + then + echo "$PIDFILE exists, process is already running or crashed" + else + echo "Starting Redis server..." + $EXEC $CONF + fi +} + +stop() +{ + if [ ! -f $PIDFILE ] + then + echo "$PIDFILE does not exist, process is not running" + else + PID=$(cat $PIDFILE) + echo "Stopping ..." + $CLIEXEC shutdown || /bin/true + while [ -x /proc/${PID} ] + do + echo "Waiting for Redis to shutdown ..." + sleep 1 + done + echo "Redis stopped" + fi +} + +restart() +{ + stop + echo "Sleeping for 3 seconds..." + sleep 3 + start +} + +status() +{ + if [ ! -f $PIDFILE ] + then + echo "$PIDFILE does not exist, redis is not running" + exit 3 + elif [ ! 
-x /proc/$(cat $PIDFILE) ] + then + echo "$PIDFILE exists, process is not running though" + exit 1 + else + echo "redis is running with PID $(cat $PIDFILE)" + exit 0 + fi +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart) + restart + ;; + status) + status + ;; + *) + echo "Usage: $SCRIPTNAME {start|stop|restart|status}" + ;; +esac diff --git a/modules/redis/templates/redis_port.conf.erb b/modules/redis/templates/redis_port.conf.erb new file mode 100644 index 0000000..17ecfd6 --- /dev/null +++ b/modules/redis/templates/redis_port.conf.erb @@ -0,0 +1,412 @@ +# Note on units: when memory size is needed, it is possible to specifiy +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize yes + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /var/run/redis_<%= @redis_port %>.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port <%= @redis_port %> + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +<% if @redis_bind_address %> +bind <%= @redis_bind_address %> +<% end %> + +# Specify the path for the unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. 
+# +# unixsocket /tmp/redis.sock + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout <%= @redis_timeout %> + +# Set server verbosity to 'debug' +# it can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel <%= @redis_loglevel %> + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile /var/log/redis_<%= @redis_port %>.log + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases <%= @redis_databases %> + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the save lines. + +<% if @redis_saves %> +<%= @redis_saves.join("\n") %> +<% end -%> + +# Compress string objects using LZF when dump .rdb databases? 
+# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# Also the Append Only File will be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir /var/lib/redis/<%= @redis_port %> + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof + +# If the master is password protected (using the requirepass configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave lost the connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of data data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale data is set to 'no' the slave will reply with +# an error SYNC with master in progress to all the kind of commands +# but to INFO and SLAVEOF. 
+# +slave-serve-stale-data yes + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +<% if @redis_password %> +requirepass <%= @redis_password %> +<% end %> + +# Command renaming. +# +# It is possilbe to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# of hard to guess so that it will be still available for internal-use +# tools but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possilbe to completely kill a command renaming it into +# an empty string: +# +# rename-command CONFIG + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default there +# is no limit, and it's up to the number of file descriptors the Redis process +# is able to open. The special value '0' means no limits. +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 128 +<% if @real_redis_max_clients %> +maxclients <%= @real_redis_max_clients %> +<% end %> + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys with an +# EXPIRE set. 
It will try to start freeing keys that are going to expire +# in little time and preserve keys with a longer time to live. +# Redis will also try to remove objects from free lists if possible. +# +# If all this fails, Redis will start to reply with errors to commands +# that will use more memory, like SET, LPUSH, and so on, and will continue +# to reply to most read-only commands like GET. +# +# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a +# 'state' server or cache, not as a real DB. When Redis is used as a real +# database the memory usage will grow over the weeks, it will be obvious if +# it is going to use too much memory in the long run, and you'll have the time +# to upgrade. With maxmemory after the limit is reached you'll start to get +# errors for write operations, and this may even lead to DB inconsistency. +# +# maxmemory +maxmemory <%= @redis_max_memory %> + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached? You can select among five behavior: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys->random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with all the kind of policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. 
+# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. If you can live +# with the idea that the latest records will be lost if something like a crash +# happens this is the preferred way to run Redis. If instead you care a lot +# about your data and don't want to that a single record can get lost you should +# enable the append only mode: when this mode is enabled Redis will append +# every write operation received in the file appendonly.aof. This file will +# be read on startup in order to rebuild the full dataset in memory. +# +# Note that you can have both the async dumps and the append only file if you +# like (you have to comment the save statements above to disable the dumps). +# Still if append only mode is enabled Redis will load the data from the +# log file at startup ignoring the dump.rdb file. +# +# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append +# log file in background when it gets too big. 
+ +appendonly no + +# The name of the append only file (default: appendonly.aof) +# appendfilename appendonly.aof + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only if one second passed since the last fsync. Compromise. +# +# The default is everysec that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# no that will will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use always that's very slow but a bit safer than +# everysec. +# +# If unsure, use everysec. + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. 
+# +# This means that while another child is saving the durability of Redis is +# the same as appendfsync none, that in pratical terms means that it is +# possible to lost up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to yes. Otherwise leave it as +# no that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size will growth by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (or if no rewrite happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a precentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# This prevents that a programming error generating an infinite loop will block +# your server forever. Set it to 0 or a negative value for unlimited execution. +#lua-time-limit 60000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. 
The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than <%= @redis_slowlog_log_slower_than %> + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len <%= @redis_slowlog_max_len %> + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded in a special way (much more memory efficient) when they +# have at max a given numer of elements, and the biggest element does not +# exceed a given threshold. You can configure this limits with the following +# configuration directives. +<% if @version =~ /^2\.4\.\d+$/ %> +hash-max-zipmap-entries 512 +hash-max-zipmap-value 64 +<% elsif @version =~ /^2\.[68]\.\d+$/ %> +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 +<% end %> + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. 
The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happens to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into an hash table +# that is rhashing, the more rehashing steps are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# active rehashing the main dictionaries, freeing memory when possible. +# +# If unsure: +# use activerehashing no if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply form time to time +# to queries with 2 milliseconds delay. +# +# use activerehashing yes if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +################################## INCLUDES ################################### + +# Include one or more other config files here. 
This is useful if you +# have a standard template that goes to all redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf diff --git a/modules/redis/tests/init.pp b/modules/redis/tests/init.pp new file mode 100644 index 0000000..8cd7ceb --- /dev/null +++ b/modules/redis/tests/init.pp @@ -0,0 +1,4 @@ +# +# Smoke test. +# +include redis diff --git a/modules/wget/Gemfile b/modules/wget/Gemfile new file mode 100644 index 0000000..6cab033 --- /dev/null +++ b/modules/wget/Gemfile @@ -0,0 +1,12 @@ +source "https://rubygems.org" + +group :rake do + gem 'puppet', '>=2.7.17', :require => false + gem 'rspec-puppet', '>=1.0.0', :require => false + gem 'rake', '>=0.9.2.2', :require => false + gem 'puppet-lint', '>=0.1.12', :require => false + gem 'puppetlabs_spec_helper', :require => false + gem 'puppet-blacksmith', '>=1.0.5', :require => false + gem 'rspec-system-puppet', :require => false + gem 'rspec-system-serverspec', :require => false +end diff --git a/modules/wget/Gemfile.lock b/modules/wget/Gemfile.lock new file mode 100644 index 0000000..5c01487 --- /dev/null +++ b/modules/wget/Gemfile.lock @@ -0,0 +1,102 @@ +GEM + remote: https://rubygems.org/ + specs: + builder (3.2.2) + diff-lcs (1.2.5) + excon (0.31.0) + facter (1.7.5) + fog (1.19.0) + builder + excon (~> 0.31.0) + formatador (~> 0.2.0) + mime-types + multi_json (~> 1.0) + net-scp (~> 1.1) + net-ssh (>= 2.1.3) + nokogiri (~> 1.5) + ruby-hmac + formatador (0.2.4) + hiera (1.3.2) + json_pure + highline (1.6.21) + json_pure (1.8.1) + kwalify (0.7.2) + metaclass (0.0.4) + mime-types (1.25.1) + mocha (1.0.0) + metaclass (~> 0.0.1) + multi_json (1.9.2) + net-scp (1.1.2) + net-ssh (>= 2.6.5) + net-ssh (2.8.0) + nokogiri (1.5.11) + puppet (3.4.3) + facter (~> 1.6) + hiera (~> 1.0) + rgen (~> 0.6.5) + puppet-blacksmith (2.0.2) + nokogiri + puppet (>= 2.7.16) + rest-client + 
puppet-lint (0.3.2) + puppetlabs_spec_helper (0.4.1) + mocha (>= 0.10.5) + rake + rspec (>= 2.9.0) + rspec-puppet (>= 0.1.1) + rake (10.2.2) + rbvmomi (1.8.1) + builder + nokogiri (>= 1.4.1) + trollop + rest-client (1.6.7) + mime-types (>= 1.16) + rgen (0.6.6) + rspec (2.14.1) + rspec-core (~> 2.14.0) + rspec-expectations (~> 2.14.0) + rspec-mocks (~> 2.14.0) + rspec-core (2.14.8) + rspec-expectations (2.14.5) + diff-lcs (>= 1.1.3, < 2.0) + rspec-mocks (2.14.6) + rspec-puppet (1.0.1) + rspec + rspec-system (2.8.0) + fog (~> 1.18) + kwalify (~> 0.7.2) + mime-types (~> 1.16) + net-scp (~> 1.1) + net-ssh (~> 2.7) + nokogiri (~> 1.5.10) + rbvmomi (~> 1.6) + rspec (~> 2.14) + systemu (~> 2.5) + rspec-system-puppet (2.2.1) + rspec-system (~> 2.0) + rspec-system-serverspec (2.0.1) + rspec-system (~> 2.0) + serverspec (~> 0.0) + specinfra (~> 0.0) + ruby-hmac (0.4.0) + serverspec (0.15.4) + highline + net-ssh + rspec (>= 2.13.0) + specinfra (>= 0.7.1) + specinfra (0.7.1) + systemu (2.6.0) + trollop (2.0) + +PLATFORMS + ruby + +DEPENDENCIES + puppet (>= 2.7.17) + puppet-blacksmith (>= 1.0.5) + puppet-lint (>= 0.1.12) + puppetlabs_spec_helper + rake (>= 0.9.2.2) + rspec-puppet (>= 1.0.0) + rspec-system-puppet + rspec-system-serverspec diff --git a/modules/wget/LICENSE b/modules/wget/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/modules/wget/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/modules/wget/Modulefile b/modules/wget/Modulefile new file mode 100644 index 0000000..130f019 --- /dev/null +++ b/modules/wget/Modulefile @@ -0,0 +1,8 @@ +name 'maestrodev-wget' +version '1.4.4' +source 'http://github.com/maestrodev/puppet-wget.git' +author 'maestrodev' +license 'Apache License, Version 2.0' +summary 'Download files with wget' +description 'A module for wget that allows downloading of files, supporting authentication' +project_page 'http://github.com/maestrodev/puppet-wget' diff --git a/modules/wget/README.md b/modules/wget/README.md new file mode 100644 index 0000000..22d4884 --- /dev/null +++ b/modules/wget/README.md @@ -0,0 +1,90 @@ +[![Build Status](https://maestro.maestrodev.com/api/v1/projects/27/compositions/106/badge/icon)](https://maestro.maestrodev.com/projects/27/compositions/106) + + + +A Puppet module to download files with wget, supporting authentication. + +# Example + +install wget: + +```puppet + include wget +``` + +```puppet + wget::fetch { "download Google's index": + source => 'http://www.google.com/index.html', + destination => '/tmp/index.html', + timeout => 0, + verbose => false, + } +``` +or alternatively: + +```puppet + wget::fetch { 'http://www.google.com/index.html': + destination => '/tmp/index.html', + timeout => 0, + verbose => false, + } +``` +This fetches a document which requires authentication: + +```puppet + wget::fetch { 'Fetch secret PDF': + source => 'https://confidential.example.com/secret.pdf', + destination => '/tmp/secret.pdf', + user => 'user', + password => 'p$ssw0rd', + timeout => 0, + verbose => false, + } +``` + +This caches the downloaded file in an intermediate directory to avoid +repeatedly downloading it. This uses the timestamping (-N) and prefix (-P) +wget options to only re-download if the source file has been updated. 
+ +```puppet + wget::fetch { 'https://tool.com/downloads/tool-1.0.tgz': + destination => '/tmp/tool-1.0.tgz', + cache_dir => '/var/cache/wget', + } +``` + +It's assumed that the cached file will be named after the source's URL +basename but this assumption can be broken if wget follows some redirects. In +this case you must inform the correct filename in the cache like this: + +```puppet + wget::fetch { 'https://tool.com/downloads/tool-latest.tgz': + destination => '/tmp/tool-1.0.tgz', + cache_dir => '/var/cache/wget', + cache_file => 'tool-1.1.tgz', + } +``` + +# Testing + +`rake` will run the rspec-puppet specs + +`rake spec:system` will run the rspec-system specs with vagrant + +`RS_DESTROY=no rake spec:system` to avoid destroying the vm after running the tests + +# License + +Copyright 2011-2013 MaestroDev + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/modules/wget/Rakefile b/modules/wget/Rakefile new file mode 100644 index 0000000..387fbbb --- /dev/null +++ b/modules/wget/Rakefile @@ -0,0 +1,14 @@ +require 'bundler' +Bundler.require(:rake) +require 'rake/clean' + +CLEAN.include('spec/fixtures/manifests/', 'spec/fixtures/modules/', 'doc', 'pkg') +CLOBBER.include('.tmp', '.librarian') + +require 'puppetlabs_spec_helper/rake_tasks' +require 'puppet_blacksmith/rake_tasks' +require 'rspec-system/rake_task' + +task :spec_system => :clean + +task :default => [:clean, :spec] diff --git a/modules/wget/manifests/authfetch.pp b/modules/wget/manifests/authfetch.pp new file mode 100644 index 0000000..bde7469 --- /dev/null +++ b/modules/wget/manifests/authfetch.pp @@ -0,0 +1,35 @@ +################################################################################ +# Definition: wget::authfetch +# +# This class will download files from the internet. You may define a web proxy +# using $::http_proxy if necessary. Username must be provided. And the user's +# password must be stored in the password variable within the .wgetrc file. 
+# +################################################################################ +define wget::authfetch ( + $destination, + $user, + $source = $title, + $password = '', + $timeout = '0', + $verbose = false, + $redownload = false, + $nocheckcertificate = false, + $execuser = undef, +) { + + notice("wget::authfetch is deprecated, use wget::fetch with user/password params") + + wget::fetch { $title: + destination => $destination, + source => $source, + timeout => $timeout, + verbose => $verbose, + redownload => $redownload, + nocheckcertificate => $nocheckcertificate, + execuser => $execuser, + user => $user, + password => $password, + } + +} diff --git a/modules/wget/manifests/fetch.pp b/modules/wget/manifests/fetch.pp new file mode 100644 index 0000000..5bd02f1 --- /dev/null +++ b/modules/wget/manifests/fetch.pp @@ -0,0 +1,105 @@ +################################################################################ +# Definition: wget::fetch +# +# This class will download files from the internet. You may define a web proxy +# using $http_proxy if necessary. +# +################################################################################ +define wget::fetch ( + $destination, + $source = $title, + $timeout = '0', + $verbose = false, + $redownload = false, + $nocheckcertificate = false, + $execuser = undef, + $user = undef, + $password = undef, + $cache_dir = undef, + $cache_file = undef, +) { + + include wget + + $http_proxy_env = $::http_proxy ? { + undef => [], + default => [ "HTTP_PROXY=${::http_proxy}", "http_proxy=${::http_proxy}" ], + } + $https_proxy_env = $::https_proxy ? { + undef => [], + default => [ "HTTPS_PROXY=${::https_proxy}", "https_proxy=${::https_proxy}" ], + } + $password_env = $user ? 
{ + undef => [], + default => [ "WGETRC=${destination}.wgetrc" ], + } + + # not using stdlib.concat to avoid extra dependency + $environment = split(inline_template("<%= (@http_proxy_env+@https_proxy_env+@password_env).join(',') %>"),',') + + $verbose_option = $verbose ? { + true => '--verbose', + false => '--no-verbose' + } + + $unless_test = $redownload ? { + true => 'test', + false => "test -s ${destination}" + } + + $nocheckcert_option = $nocheckcertificate ? { + true => ' --no-check-certificate', + false => '' + } + + $user_option = $user ? { + undef => '', + default => " --user=${user}", + } + + if $user != undef { + $wgetrc_content = $::operatingsystem ? { + # This is to work around an issue with macports wget and out of date CA cert bundle. This requires + # installing the curl-ca-bundle package like so: + # + # sudo port install curl-ca-bundle + 'Darwin' => "password=${password}\nCA_CERTIFICATE=/opt/local/share/curl/curl-ca-bundle.crt\n", + default => "password=${password}", + } + + file { "${destination}.wgetrc": + owner => $execuser, + mode => '0600', + content => $wgetrc_content, + before => Exec["wget-${name}"], + } + } + + $output_option = $cache_dir ? { + undef => " --output-document='${destination}'", + default => " -N -P '${cache_dir}'", + } + + exec { "wget-${name}": + command => "wget ${verbose_option}${nocheckcert_option}${user_option}${output_option} '${source}'", + timeout => $timeout, + unless => $unless_test, + environment => $environment, + user => $cache_dir ? { undef => $execuser, default => undef }, + path => '/usr/bin:/usr/sbin:/bin:/usr/local/bin:/opt/local/bin', + require => Class['wget'], + } + + if $cache_dir != undef { + $cache = $cache_file ? 
{ + undef => inline_template("<%= require 'uri'; File.basename(URI::parse(@source).path) %>"), + default => $cache_file, + } + file { $destination: + ensure => file, + source => "${cache_dir}/${cache}", + owner => $execuser, + require => Exec["wget-${name}"], + } + } +} diff --git a/modules/wget/manifests/init.pp b/modules/wget/manifests/init.pp new file mode 100644 index 0000000..11d9c58 --- /dev/null +++ b/modules/wget/manifests/init.pp @@ -0,0 +1,22 @@ +################################################################################ +# Class: wget +# +# This class will install wget - a tool used to download content from the web. +# +################################################################################ +class wget ( + $version = present, +) { + + if $::kernel == 'Linux' { + if ! defined(Package['wget']) { + package { 'wget': ensure => $version } + } + } + + if $::kernel == 'FreeBSD' { + if ! defined(Package['ftp/wget']) { + package { 'ftp/wget': ensure => $version } + } + } +} diff --git a/modules/wget/metadata.json b/modules/wget/metadata.json new file mode 100644 index 0000000..399927f --- /dev/null +++ b/modules/wget/metadata.json @@ -0,0 +1,33 @@ +{ + "name": "maestrodev-wget", + "version": "1.4.4", + "source": "http://github.com/maestrodev/puppet-wget.git", + "author": "maestrodev", + "license": "Apache License, Version 2.0", + "summary": "Download files with wget", + "description": "A module for wget that allows downloading of files, supporting authentication", + "project_page": "http://github.com/maestrodev/puppet-wget", + "dependencies": [ + + ], + "types": [ + + ], + "checksums": { + "Gemfile": "bc9d7feebb9c7a41500404b7df7ade6b", + "Gemfile.lock": "37f44c0d18cab72b85c583ec1dcdaad5", + "LICENSE": "3b83ef96387f14655fc854ddc3c6bd57", + "Modulefile": "0a9679b300d8f5fbcf7b7062ae29b660", + "README.md": "dd15db096e342b4aca02835fad8c949b", + "Rakefile": "cd85ca2ed96d27eae84e7781b92b9b2f", + "manifests/authfetch.pp": "4e02d11bba3d72b1ea09b854fa272f27", 
+ "manifests/fetch.pp": "ad2c9c4a8678e6a955351e0ed3315206", + "manifests/init.pp": "d186945bf91813070e769f8367f7026f", + "spec/classes/init_spec.rb": "7f8b9b8cdbe43ee2ae237f4f2e8564df", + "spec/defines/authfetch_spec.rb": "f227ee172638327899734969e93c25f8", + "spec/defines/fetch_spec.rb": "5b3c484e4cc4bd236783342dd723a018", + "spec/spec_helper.rb": "eeba023ffd9eb797e44508ea2f99f90e", + "spec/spec_helper_system.rb": "deaf51d85121ccd2edd4db144941b2ce", + "spec/system/wget_system_spec.rb": "13f5696b3b9a9ba89a1c6f0da60e171b" + } +} \ No newline at end of file diff --git a/modules/wget/spec/classes/init_spec.rb b/modules/wget/spec/classes/init_spec.rb new file mode 100644 index 0000000..03a3ea2 --- /dev/null +++ b/modules/wget/spec/classes/init_spec.rb @@ -0,0 +1,32 @@ +require 'spec_helper' + +describe 'wget' do + + context 'no version specified', :compile do + it { should contain_package('wget').with_ensure('present') } + end + + context 'version is 1.2.3', :compile do + let(:params) { {:version => '1.2.3'} } + + it { should contain_package('wget').with_ensure('1.2.3') } + end + + context 'running on OS X', :compile do + let(:facts) { { + :operatingsystem => 'Darwin', + :kernel => 'Darwin' + } } + + it { should_not contain_package('wget') } + end + + context 'running on FreeBSD', :compile do + let(:facts) { { + :operatingsystem => 'FreeBSD', + :kernel => 'FreeBSD' + } } + + it { should contain_package('ftp/wget') } + end +end diff --git a/modules/wget/spec/defines/authfetch_spec.rb b/modules/wget/spec/defines/authfetch_spec.rb new file mode 100644 index 0000000..97cdfbc --- /dev/null +++ b/modules/wget/spec/defines/authfetch_spec.rb @@ -0,0 +1,33 @@ +require 'spec_helper' + +describe 'wget::authfetch' do + let(:title) { 'authtest' } + let(:params) {{ + :source => 'http://localhost/source', + :destination => destination, + :user => 'myuser', + :password => 'mypassword', + }} + + let(:destination) { "/tmp/dest" } + + context "with default params", :compile do + it { 
should contain_exec('wget-authtest').with({ + 'command' => "wget --no-verbose --user=myuser --output-document='#{destination}' 'http://localhost/source'", + 'environment' => "WGETRC=#{destination}.wgetrc" + }) + } + it { should contain_file("#{destination}.wgetrc").with_content('password=mypassword') } + end + + context "with user", :compile do + let(:params) { super().merge({ + :execuser => 'testuser', + })} + + it { should contain_exec('wget-authtest').with({ + 'command' => "wget --no-verbose --user=myuser --output-document='#{destination}' 'http://localhost/source'", + 'user' => 'testuser' + }) } + end +end diff --git a/modules/wget/spec/defines/fetch_spec.rb b/modules/wget/spec/defines/fetch_spec.rb new file mode 100644 index 0000000..807a98a --- /dev/null +++ b/modules/wget/spec/defines/fetch_spec.rb @@ -0,0 +1,111 @@ +require 'spec_helper' + +describe 'wget::fetch' do + let(:title) { 'test' } + let(:facts) {{}} + + let(:params) {{ + :source => 'http://localhost/source', + :destination => destination, + }} + + let(:destination) { "/tmp/dest" } + + context "with default params", :compile do + it { should contain_exec('wget-test').with({ + 'command' => "wget --no-verbose --output-document='#{destination}' 'http://localhost/source'", + 'environment' => [] + }) } + end + + context "with user", :compile do + let(:params) { super().merge({ + :execuser => 'testuser', + })} + + it { should contain_exec('wget-test').with({ + 'command' => "wget --no-verbose --output-document='#{destination}' 'http://localhost/source'", + 'user' => 'testuser', + 'environment' => [] + }) } + end + + context "with authentication", :compile do + let(:params) { super().merge({ + :user => 'myuser', + :password => 'mypassword' + })} + + context "with default params" do + it { should contain_exec('wget-test').with({ + 'command' => "wget --no-verbose --user=myuser --output-document='#{destination}' 'http://localhost/source'", + 'environment' => "WGETRC=#{destination}.wgetrc" + }) + } + it { 
should contain_file("#{destination}.wgetrc").with_content('password=mypassword') } + end + + context "with user", :compile do + let(:params) { super().merge({ + :execuser => 'testuser', + })} + + it { should contain_exec('wget-test').with({ + 'command' => "wget --no-verbose --user=myuser --output-document='#{destination}' 'http://localhost/source'", + 'user' => 'testuser', + 'environment' => "WGETRC=#{destination}.wgetrc" + }) } + end + + context "using proxy", :compile do + let(:facts) { super().merge({ + :http_proxy => 'http://proxy:1000', + :https_proxy => 'http://proxy:1000' + }) } + it { should contain_exec('wget-test').with({ + 'command' => "wget --no-verbose --user=myuser --output-document='#{destination}' 'http://localhost/source'", + 'environment' => ["HTTP_PROXY=http://proxy:1000", "http_proxy=http://proxy:1000", "HTTPS_PROXY=http://proxy:1000", "https_proxy=http://proxy:1000", "WGETRC=#{destination}.wgetrc"] + }) + } + it { should contain_file("#{destination}.wgetrc").with_content('password=mypassword') } + end + end + + context "with cache", :compile do + let(:params) { super().merge({ + :cache_dir => '/tmp/cache', + :execuser => 'testuser', + })} + + it { should contain_exec('wget-test').with({ + 'command' => "wget --no-verbose -N -P '/tmp/cache' 'http://localhost/source'", + 'environment' => [] + }) } + + it { should contain_file("#{destination}").with({ + 'ensure' => "file", + 'source' => "/tmp/cache/source", + 'owner' => "testuser", + }) } + end + + context "with cache file", :compile do + let(:params) { super().merge({ + :cache_dir => '/tmp/cache', + :cache_file => 'newsource', + :execuser => 'testuser', + })} + + it { should contain_exec('wget-test').with({ + 'command' => "wget --no-verbose -N -P '/tmp/cache' 'http://localhost/source'", + 'environment' => [] + }) } + + it { should contain_file("#{destination}").with({ + 'ensure' => "file", + 'source' => "/tmp/cache/newsource", + 'owner' => "testuser", + }) } + end + +end diff --git 
a/modules/wget/spec/spec_helper.rb b/modules/wget/spec/spec_helper.rb new file mode 100644 index 0000000..7512713 --- /dev/null +++ b/modules/wget/spec/spec_helper.rb @@ -0,0 +1,26 @@ +require 'puppetlabs_spec_helper/module_spec_helper' + +RSpec.configure do |c| + c.mock_framework = :mocha + c.default_facts = { + :operatingsystem => 'CentOS', + :kernel => 'Linux' + } + c.treat_symbols_as_metadata_keys_with_true_values = true + + c.before(:each) do + # work around https://tickets.puppetlabs.com/browse/PUP-1547 + # ensure that there's at least one provider available by emulating that any command exists + require 'puppet/confine/exists' + Puppet::Confine::Exists.any_instance.stubs(:which => '') + # avoid "Only root can execute commands as other users" + Puppet.features.stubs(:root? => true) + + Puppet::Util::Log.level = :warning + Puppet::Util::Log.newdestination(:console) + end +end + +shared_examples :compile, :compile => true do + it { should compile.with_all_deps } +end diff --git a/modules/wget/spec/spec_helper_system.rb b/modules/wget/spec/spec_helper_system.rb new file mode 100644 index 0000000..5acf39e --- /dev/null +++ b/modules/wget/spec/spec_helper_system.rb @@ -0,0 +1,31 @@ +require 'puppet' +require 'rspec-system/spec_helper' +require 'rspec-system-puppet/helpers' +require 'rspec-system-serverspec/helpers' + +include RSpecSystemPuppet::Helpers +include Serverspec::Helper::RSpecSystem + +RSpec.configure do |c| + # Enable color in Jenkins + c.tty = true + + c.before(:each) do + Puppet::Util::Log.level = :warning + Puppet::Util::Log.newdestination(:console) + end + + c.before :suite do + #Install puppet + puppet_install + puppet_module_install source: proj_dir, module_name: 'wget' + end +end + +def fixture_rcp(src, dest) + rcp sp: "#{proj_dir}/spec/fixtures/#{src}", dp: dest +end + +def proj_dir + File.absolute_path File.join File.dirname(__FILE__), '..' 
+end diff --git a/modules/wget/spec/system/wget_system_spec.rb b/modules/wget/spec/system/wget_system_spec.rb new file mode 100644 index 0000000..e3f736d --- /dev/null +++ b/modules/wget/spec/system/wget_system_spec.rb @@ -0,0 +1,41 @@ +require 'spec_helper_system' + +describe 'wget' do + before(:all) do + puppet_apply(%Q( + class { 'wget': } + )).exit_code.should be_zero + end + + before do + shell "rm -f /tmp/index*" + end + + it 'should be idempotent' do + pp = %Q( + wget::fetch { "download Google's index": + source => 'http://www.google.com/index.html', + destination => '/tmp/index.html', + timeout => 0, + verbose => false, + } + ) + expect(puppet_apply(pp).exit_code).to eq(2) + expect(puppet_apply(pp).exit_code).to be_zero + end + + context 'when running as user' do + let(:pp) { %Q( + wget::fetch { 'download Google index': + source => 'http://www.google.com/index.html', + destination => '/tmp/index-vagrant.html', + timeout => 0, + verbose => false, + } + ) } + subject { shell "cat << EOF | su - vagrant -c 'puppet apply --verbose --modulepath=/etc/puppet/modules'\n#{pp}" } + its(:exit_code) { should be_zero } + its(:stdout) { should =~ %r{Wget::Fetch\[download Google index\].*returns: executed successfully} } + end + +end