swift updates for roles and services

* update role to match our chef repo
* allow platform service names
* improved logging
* use memcache list from common
* remove old unneeded templates and service providers
* add ring gzone and region attributes
* cleanup deprecated get_secret calls
* add templates for sync, expirer and recon
* added more tests

Sorry for the long patch, but this cookbook has been difficult
to deal with.

Change-Id: I2373d65b02c56ada1be8703be0492fab3597be26
Implements: blueprint object-storage-cleanup
This commit is contained in:
Mark Vanderwiel 2015-03-05 15:15:53 -06:00
parent 3d5bb7f7a4
commit 1fff96638a
23 changed files with 499 additions and 292 deletions

View File

@ -69,55 +69,6 @@ client
Attributes
==========
* ```default[:swift][:authmode]``` - "swauth" or "keystone" (default "keystone").
* ```default[:swift][:swauth_source]``` - "git" or "package" (default). Selects between installing python-swauth from git or system package
* ```default[:swift][:swauth_repository]``` - Specifies git repo. Default "https://github.com/gholt/swauth.git"
* ```default[:swift][:swauth_version]``` - Specifies git repo tagged branch. Default "1.0.8"
* ```default[:swift][:swift_hash]``` - swift_hash_path_suffix in /etc/swift/swift.conf (defaults to 107c0568ea84)
* ```default[:swift][:audit_hour]``` - Hour to run swift_auditor on storage nodes (defaults to 5)
* ```default[:swift][:disk_enum_expr]``` - Eval-able expression that lists
candidate disk nodes for disk probing. The result should be a hash
with keys being the device name (without the leading "/dev/") and a
hash block of any extra info associated with the device. For
example: { "sdc" => { "model": "Hitachi 7K3000" }}. Largely,
though, if you are going to make a list of valid devices, you
probably know all the valid devices, and don't need to pass any
metadata about them, so { "sdc" => {}} is probably enough. Example
expression: Hash[('a'..'f').to_a.collect{|x| [ "sd#{x}", {} ]}]
* ```default[:swift][:ring][:part_power]``` - controls the size of the ring (defaults to 18)
* ```default[:swift][:ring][:min_part_hours]``` - the minimum number of hours before swift is allowed to migrate a partition (defaults to 1)
* ```default[:swift][:ring][:replicas]``` - how many replicas swift should retain (defaults to 3)
* ```default[:swift][:disk_test_filter]``` - an array of expressions that must
all be true in order a block device to be considered for
formatting and inclusion in the cluster. Each rule gets evaluated
with "candidate" set to the device name (without the leading
"/dev/") and info set to the node hash value. Default rules:
* "candidate =~ /sd[^a]/ or candidate =~ /hd[^a]/ or candidate =~
/vd[^a]/"
* "File.exists?('/dev/' + candidate)"
* "not system('/sbin/sfdisk -V /dev/' + candidate + '>/dev/null 2>&1')"
* "info['removable'] == 0"
* ```default[:swift][:expected_disks]``` - an array of device names that the
operator expects to be identified by the previous two values. This
acts as a second-check on discovered disks. If this array doesn't
match the found disks, then chef processing will be stopped.
Example: ("b".."f").collect{|x| "sd#{x}"}. Default: none.
There are other attributes that must be set depending on authmode.
For "swauth", the following attributes are used:

View File

@ -26,8 +26,8 @@ default['openstack']['object-storage']['service_role'] = 'admin'
default['openstack']['object-storage']['user'] = 'swift'
# Default swift group
default['openstack']['object-storage']['group'] = 'swift'
default['openstack']['compute']['region'] = node['openstack']['region']
# Default region
default['openstack']['object-storage']['region'] = node['openstack']['region']
# Set to some text value if you want templated config files
# to contain a custom banner at the top of the written file
@ -41,10 +41,24 @@ default['openstack']['object-storage']['custom_template_banner'] = "
#--------------------
default['openstack']['object-storage']['state'] = {}
# Hour to run swift_auditor on storage nodes
default['openstack']['object-storage']['audit_hour'] = '5'
# Eval-able expression that lists
# candidate disk nodes for disk probing. The result should be a hash
# with keys being the device name (without the leading "/dev/") and a
# hash block of any extra info associated with the device. For
# example: { "sdc" => { "model": "Hitachi 7K3000" }}. Largely,
# though, if you are going to make a list of valid devices, you
# probably know all the valid devices, and don't need to pass any
# metadata about them, so { "sdc" => {}} is probably enough. Example
# expression: Hash[('a'..'f').to_a.collect{|x| [ "sd#{x}", {} ]}]
default['openstack']['object-storage']['disk_enum_expr'] = 'node[:block_device]'
# Flag to rebuild rings
default['openstack']['object-storage']['auto_rebuild_rings'] = false
# ip for git builder
default['openstack']['object-storage']['git_builder_ip'] = '127.0.0.1'
# How many server changes are required before a rebalance is done
default['openstack']['object-storage']['wait_for'] = 1
# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
# the hashing algorithm when determining data placement in the cluster.
@ -127,12 +141,12 @@ default['openstack']['object-storage']['swift_secret_databag_name'] = nil
# roles
#--------------------
default['openstack']['object-storage']['setup_chef_role'] = 'swift-setup'
default['openstack']['object-storage']['management_server_chef_role'] = 'swift-management-server'
default['openstack']['object-storage']['proxy_server_chef_role'] = 'swift-proxy-server'
default['openstack']['object-storage']['object_server_chef_role'] = 'swift-object-server'
default['openstack']['object-storage']['account_server_chef_role'] = 'swift-account-server'
default['openstack']['object-storage']['container_server_chef_role'] = 'swift-container-server'
default['openstack']['object-storage']['setup_chef_role'] = 'os-object-storage-setup'
default['openstack']['object-storage']['management_server_chef_role'] = 'os-object-storage-management'
default['openstack']['object-storage']['proxy_server_chef_role'] = 'os-object-storage-proxy'
default['openstack']['object-storage']['object_server_chef_role'] = 'os-object-storage-object'
default['openstack']['object-storage']['account_server_chef_role'] = 'os-object-storage-account'
default['openstack']['object-storage']['container_server_chef_role'] = 'os-object-storage-container'
#--------------------
# authentication
@ -160,8 +174,14 @@ default['openstack']['object-storage']['dispersion']['auth_key'] = nil
# a safe setting for testing but part_power should be set to
# 26 in production to allow a swift cluster with 50,000 spindles
default['openstack']['object-storage']['ring']['part_power'] = 18
# The minimum number of hours before swift is allowed to migrate a partition
default['openstack']['object-storage']['ring']['min_part_hours'] = 1
# How many replicas swift should retain
default['openstack']['object-storage']['ring']['replicas'] = 3
# Zone, will look like "z1" on swift-ring-builder command line
default['openstack']['object-storage']['ring']['zone'] = '1'
# Region, will look like "r1" on swift-ring-builder command line
default['openstack']['object-storage']['ring']['region'] = '1'
#------------------
# statistics
@ -252,6 +272,8 @@ default['openstack']['sysctl']['net.ipv4.tcp_syncookies'] = 0
# Each predicate is evaluated in turn, and a false from the predicate
# will result in the disk not being considered as a candidate for
# formatting.
# Each rule gets evaluated with "candidate" set to the device name
# (without the leading "/dev/") and info set to the node hash value.
default['openstack']['object-storage']['disk_test_filter'] = [
'candidate =~ /(sd|hd|xvd|vd)(?!a$)[a-z]+/',
"File.exist?('/dev/' + candidate)",
@ -468,7 +490,6 @@ when 'rhel'
'service_suffix' => '',
'git_dir' => '/var/lib/git',
'git_service' => 'git',
'service_provider' => Chef::Provider::Service::Redhat,
'override_options' => '',
'swift_statsd_publish' => '/usr/bin/swift-statsd-publish.py'
}
@ -488,7 +509,6 @@ when 'debian'
'service_suffix' => '',
'git_dir' => '/var/cache/git',
'git_service' => 'git-daemon',
'service_provider' => Chef::Provider::Service::Upstart,
'override_options' => "-o Dpkg::Options:='--force-confold' -o Dpkg::Option:='--force-confdef'",
'swift_statsd_publish' => '/usr/local/bin/swift-statsd-publish.py'
}

View File

@ -0,0 +1,26 @@
# encoding: UTF-8
#
# Cookbook Name:: openstack-object-storage
# Library:: service_utils
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Related Utilities
module ServiceUtils
# Build platform specific service name
def svc_name(service_name)
platform_options = node['openstack']['object-storage']['platform']
platform_options['service_prefix'] + service_name + platform_options['service_suffix']
end
end

View File

@ -34,14 +34,14 @@ def load_current_resource
sfdisk_get_size(dev_name)
end
Chef::Log.info('About to print partition table')
Chef::Log.info("About to print partition table for #{dev_name}")
s = <<EOF
current state for dev #{dev_name}
Size in 1K blocks: #{@current.blocks}
EOF
Chef::Log.info('Printing partition table')
Chef::Log.info("Printing partition table for #{dev_name}")
num = 0
parts.each do |p|
@ -59,14 +59,14 @@ end
# /dev/sdb: 261 cylinders, 255 heads, 63 sectors/track
def sfdisk_get_size(dev_name)
out = Mixlib::ShellOut.new("sfdisk #{dev_name} -s").run_command.stdout
Chef::Log.info("updating geo using sfdisk: #{out}")
Chef::Log.info("size 1k blocks: #{out.to_i} for #{dev_name}")
# sfdisk sees the world as 1k blocks
@current.blocks(out.to_i)
end
def parted_partition_parse(dev_name)
Chef::Log.debug("reading partition table for #{dev_name}")
Chef::Log.info("reading partition table for #{dev_name}")
# Run parted to get basic info about the disk
# sample output:
@ -80,7 +80,7 @@ def parted_partition_parse(dev_name)
end
def parted_parse_results(input)
Chef::Log.debug('read:' + input.inspect)
Chef::Log.debug('partition table: ' + input.inspect)
input = input.to_a
part_tab = []
catch :parse_error do
@ -155,7 +155,7 @@ action :ensure_exists do
idx = 0
current_block = 0
Chef::Log.info("Checking partition #{idx}")
Chef::Log.info("Checking partition #{idx} for #{dev_name}")
req.each do |params|
if cur[idx].nil?
@ -177,13 +177,13 @@ action :ensure_exists do
recreate = true unless (cur_size > cur_min) && (cur_size < cur_max)
current_block += cur[idx][:size]
Chef::Log.info("partition #{idx} #{(recreate ? 'differs' : 'is same')}: #{cur_size}/#{req_size}")
Chef::Log.info("partition #{idx} #{(recreate ? 'differs' : 'is same')}: #{cur_size}/#{req_size} for #{dev_name}")
idx += 1
end
end
if !recreate
Chef::Log.info('partition table matches - not recreating')
Chef::Log.info("partition table matches for #{dev_name} - not recreating")
else
### make sure to ensure that there are no mounted
### filesystems on the device
@ -195,14 +195,14 @@ action :ensure_exists do
mounted << md[1]
end
mounted.each do |m|
Chef::Log.info("unmounting #{m}")
Chef::Log.info("unmounting #{m} for #{dev_name}")
shell_out!("umount #{m}")
end
# Nuke current partition table.
execute 'create new partition table' do
command "parted -s -m #{dev_name} mktable gpt"
end
Chef::Log.info("Creating partition table for #{dev_name}")
cmd = Mixlib::ShellOut.new("parted -s -m #{dev_name} mktable gpt").run_command
Chef::Log.info("Created partition table for #{dev_name} out:#{cmd.stdout.strip} err:#{cmd.stderr.strip}")
# create new partitions
idx = 0
@ -217,14 +217,11 @@ action :ensure_exists do
requested_size = "#{params[:size]}M"
end
s = "parted -m -s #{dev_name} "
s << "mkpart #{idx} #{start_block} #{requested_size}" # #{params[:type]}
Chef::Log.info("creating new partition #{idx + 1} with:" + s)
execute "creating partition #{idx}" do
command s
end
idx += 1
Chef::Log.info("Creating partition #{idx + 1} for #{dev_name}")
cmd = Mixlib::ShellOut.new("parted -m -s #{dev_name} mkpart #{idx} #{start_block} #{requested_size}").run_command
Chef::Log.info("Created partition #{idx + 1} for #{dev_name} out:#{cmd.stdout.strip} err:#{cmd.stderr.strip}")
idx += 1
end
update = true
end
@ -233,25 +230,22 @@ action :ensure_exists do
idx = 1
req.each do |params|
device = "#{dev_name}#{idx}"
Chef::Log.info("Checking #{device}")
Chef::Log.info("Testing file system on #{device} for type #{params[:type]}")
if ::File.exist?(device)
# FIXME: check the format on the file system. This should be
# handled by a disk format provider. Maybe the xfs/btrfs/etc
# providers?
Chef::Log.info("Testing file system on #{device} for type #{params[:type]}")
case params[:type]
when 'xfs'
unless Mixlib::ShellOut.new("xfs_admin -l #{device}").run_command.exitstatus
Mixlib::ShellOut.new("mkfs.xfs -f -i size=512 #{device}").run_command
update = true
end
when 'ext4'
unless Mixlib::ShellOut.new("tune2fs -l #{device} | grep \"Filesystem volume name:\" | awk \'{print $4}\' | grep -v \"<none>\"").run_command.exitstatus
Mixlib::ShellOut.new("mkfs.ext4 #{device}").run_command
update = true
end
case params[:type]
when 'xfs'
if Mixlib::ShellOut.new("xfs_admin -l #{device}").run_command.error?
Chef::Log.info("Creating file system on #{device} for type #{params[:type]}")
cmd = Mixlib::ShellOut.new("mkfs.xfs -L swift -f -i size=512 #{device}").run_command
Chef::Log.info("Created file system on #{device} for type #{params[:type]} out:#{cmd.stdout.strip} err:#{cmd.stderr.strip}")
update = true
end
when 'ext4'
unless Mixlib::ShellOut.new("tune2fs -l #{device} | awk \'/Filesystem volume name:/{print $4}\' | grep -v \"<none>\"").run_command.exitstatus
Chef::Log.info("Creating file system on #{device} for type #{params[:type]}")
cmd = Mixlib::ShellOut.new("mkfs.ext4 -L swift #{device}").run_command
Chef::Log.info("Created file system on #{device} for type #{params[:type]} out:#{cmd.stdout.strip} err:#{cmd.stderr.strip}")
update = true
end
end
end

View File

@ -21,6 +21,7 @@
#
require 'chef/util/file_edit'
require 'pp'
action :ensure_exists do
proposed_devices = @new_resource.devices
@ -69,15 +70,15 @@ action :ensure_exists do
inverted_mounts = dev_info.reduce({}) { |hsh, (k, v)| hsh.merge(v['mountpoint'] => v.merge('uuid' => k)) }
fstabs = ::File.readlines('/etc/fstab').reduce({}) do |hash, line|
line = line.split('#')[0].split
Chef::Log.info("#{line[0]} ... #{line[1]}")
Chef::Log.debug("#{line[0]} ... #{line[1]}")
hash.merge(line[1] => line[0])
end
fstabs.reject! { |k, v| !k || !v || !k.length || !v.length }
Chef::Log.info("Mounts: #{mounts}")
Chef::Log.info("Valid Mounts: #{valid_mounts}")
Chef::Log.info("Mountpoints: #{mountpoints}")
Chef::Log.info("Fstabs: #{fstabs}")
Chef::Log.info("Mounts: #{PP.pp(mounts, '')}")
Chef::Log.info("Valid Mounts: #{PP.pp(valid_mounts, '')}")
Chef::Log.info("Mountpoints: #{PP.pp(mountpoints, '')}")
Chef::Log.info("Fstabs: #{PP.pp(fstabs, '')}")
# mounts in /srv/node that shouldn't be there
(mounts.keys.select { |x| x && x[/^#{path}/] } - valid_mounts).each do |dev|
@ -167,5 +168,6 @@ action :ensure_exists do
ip: v['ip']
}
end
Chef::Log.info("State: #{PP.pp(node['openstack']['object-storage']['state']['devs'], '')}")
end
end

View File

@ -23,9 +23,11 @@
require 'pp'
# rubocop:disable PerlBackrefs, CyclomaticComplexity, MethodLength
def generate_script
def generate_script # rubocop:disable Metrics/AbcSize
# need to load and parse the existing rings.
ports = { 'object' => '6000', 'container' => '6001', 'account' => '6002' }
ports = { 'object' => node['openstack']['object-storage']['network']['object-bind-port'],
'container' => node['openstack']['object-storage']['network']['container-bind-port'],
'account' => node['openstack']['object-storage']['network']['account-bind-port'] }
must_rebalance = false
ring_path = @new_resource.ring_path
@ -39,13 +41,14 @@ def generate_script
if ::File.exist?("#{ring_path}/#{which}.builder")
IO.popen("su swift -c 'swift-ring-builder #{ring_path}/#{which}.builder'") do |pipe|
ring_data[:raw][which] = pipe.readlines
# Chef::Log.debug("#{ which.capitalize } Ring data: #{ring_data[:raw][which]}")
Chef::Log.debug("#{which} Ring data raw:\n#{PP.pp(ring_data[:raw][which], '')}")
ring_data[:parsed][which] = parse_ring_output(ring_data[:raw][which])
Chef::Log.info("#{which} Ring data parsed:\n#{PP.pp(ring_data[:parsed][which], '')}")
node.set['openstack']['object-storage']['state']['ring'][which] = ring_data[:parsed][which]
end
else
Chef::Log.info("#{which.capitalize} ring builder files do not exist!")
Chef::Log.info("#{which} ring builder files do not exist!")
end
# collect all the ring data, and note what disks are in use. All I really
@ -60,31 +63,28 @@ def generate_script
end
end
Chef::Log.debug("#{which.capitalize} Ring - In use: #{PP.pp(ring_data[:in_use][which], '')}")
Chef::Log.info("#{which} Ring - In use: #{PP.pp(ring_data[:in_use][which], '')}")
# figure out what's present in the cluster
disk_data[which] = {}
role = node['openstack']['object-storage']["#{which}_server_chef_role"]
disk_state, _, _ = Chef::Search::Query.new.search(:node, "chef_environment:#{node.chef_environment} AND roles:#{role}")
Chef::Log.info("#{which} node count: #{disk_state.count} for role: #{role}")
# for a running track of available disks
disk_data[:available] ||= {}
disk_data[:available][which] ||= {}
disk_state.each do |swiftnode|
if swiftnode['openstack']['object-storage']['state'] && swiftnode['openstack']['object-storage']['state']['devs']
Chef::Log.info("#{which} node: #{swiftnode[:hostname]} state:\n#{PP.pp(swiftnode['openstack']['object-storage']['state'], '')}")
if swiftnode['openstack']['object-storage']['state']['devs']
swiftnode['openstack']['object-storage']['state']['devs'].each do |k, v|
disk_data[which][v[:ip]] = disk_data[which][v[:ip]] || {}
disk_data[which][v[:ip]][k] = {}
v.keys.each { |x| disk_data[which][v[:ip]][k].store(x, v[x]) }
if swiftnode['openstack']['object-storage'].key?("#{which}-zone")
disk_data[which][v[:ip]][k]['zone'] = swiftnode['openstack']['object-storage']["#{which}-zone"]
elsif swiftnode['openstack']['object-storage'].key?('zone')
disk_data[which][v[:ip]][k]['zone'] = swiftnode['openstack']['object-storage']['zone']
else
fail "Node #{swiftnode[:hostname]} has no zone assigned"
end
disk_data[which][v[:ip]][k]['region'] = swiftnode['openstack']['object-storage']['ring']['region']
disk_data[which][v[:ip]][k]['zone'] = swiftnode['openstack']['object-storage']['ring']['zone']
disk_data[:available][which][v[:mountpoint]] = v[:ip]
@ -94,7 +94,7 @@ def generate_script
end
end
end
Chef::Log.debug("#{which.capitalize} Ring - Avail: #{PP.pp(disk_data[:available][which], '')}")
Chef::Log.info("#{which} Ring - Avail:\n#{PP.pp(disk_data[:available][which], '')}")
end
# Have the raw data, now bump it together and drop the script
@ -102,6 +102,7 @@ def generate_script
s = "#!/bin/bash\n\n# This script is automatically generated.\n"
s << "# Running it will likely blow up your system if you don't review it carefully.\n"
s << "# You have been warned.\n\n"
s << "set -x\n"
unless node['openstack']['object-storage']['auto_rebuild_rings']
s << "if [ \"$1\" != \"--force\" ]; then\n"
s << " echo \"Auto rebuild rings is disabled, so you must use --force to generate rings\"\n"
@ -109,7 +110,7 @@ def generate_script
s << "fi\n\n"
end
# Chef::Log.debug("#{PP.pp(disk_data, dump='')}")
Chef::Log.info("Disk data: #{PP.pp(disk_data, '')}")
new_disks = {}
missing_disks = {}
@ -122,8 +123,8 @@ def generate_script
# find all in-ring disks that are not in the cluster
missing_disks[which] = ring_data[:in_use][which].reject { |k, v| disk_data[:available][which].key?(k) }
Chef::Log.debug("#{which.capitalize} Ring - Missing: #{PP.pp(missing_disks[which], '')}")
Chef::Log.debug("#{which.capitalize} Ring - New: #{PP.pp(new_disks[which], '')}")
Chef::Log.info("#{which} Ring - Missing:\n#{PP.pp(missing_disks[which], '')}")
Chef::Log.info("#{which} Ring - New:\n#{PP.pp(new_disks[which], '')}")
s << "\n# -- #{which.capitalize} Servers --\n\n"
disk_data[which].keys.sort.each do |ip|
@ -150,7 +151,7 @@ def generate_script
disk_data[which][ip].keys.sort.each do |uuid|
v = disk_data[which][ip][uuid]
if new_disks[which].key?(v['mountpoint'])
s << "swift-ring-builder #{ring_path}/#{which}.builder add z#{v['zone']}-#{v['ip']}:#{ports[which]}/#{v['mountpoint']} #{v['size']}\n"
s << "swift-ring-builder #{ring_path}/#{which}.builder add r#{v['region']}z#{v['zone']}-#{v['ip']}:#{ports[which]}/#{v['device']} #{v['size']}\n"
must_rebalance = true
end
end
@ -159,7 +160,7 @@ def generate_script
# remove the disks -- sort to ensure consistent order
missing_disks[which].keys.sort.each do |mountpoint|
diskinfo = ring_data[:parsed][which][:hosts].select { |k, v| v.key?(mountpoint) }.map { |_, v| v[mountpoint] }[0]
Chef::Log.debug("Missing diskinfo: #{PP.pp(diskinfo, '')}")
Chef::Log.info("#{which} Missing diskinfo:\n#{PP.pp(diskinfo, '')}")
description = Hash[diskinfo.select { |k, v| [:zone, :ip, :device].include?(k) }].map { |k, v| "#{k}: #{v}" }.join(', ')
s << "# #{description}\n"
s << "swift-ring-builder #{ring_path}/#{which}.builder remove d#{missing_disks[which][mountpoint]}\n"
@ -169,18 +170,16 @@ def generate_script
s << "\n"
if must_rebalance
s << "swift-ring-builder #{ring_path}/#{which}.builder rebalance\n\n\n"
# we'll only rebalance if we meet the minimums for new adds
if node['openstack']['object-storage']['wait_for'] > new_servers.count
Chef::Log.info("#{which} New servers, but not enough to force a rebalance")
must_rebalance = false
else
s << "swift-ring-builder #{ring_path}/#{which}.builder rebalance\n\n\n"
end
else
s << "# #{which.capitalize} ring has no outstanding changes!\n\n"
end
# we'll only rebalance if we meet the minimums for new adds
if node['openstack']['object-storage'].key?('wait_for')
if node['openstack']['object-storage']['wait_for'] > new_servers.count
Chef::Log.debug('New servers, but not enough to force a rebalance')
must_rebalance = false
end
end
end
[s, must_rebalance]
end
@ -266,7 +265,7 @@ def parse_ring_output(ring_data)
end
action :ensure_exists do
Chef::Log.debug("Ensuring #{new_resource.name}")
Chef::Log.info("Ensuring #{new_resource.name}")
new_resource.updated_by_last_action(false)
s, must_update = generate_script

View File

@ -22,6 +22,10 @@ include_recipe 'openstack-object-storage::common'
include_recipe 'openstack-object-storage::storage-common'
include_recipe 'openstack-object-storage::disks'
class Chef::Recipe # rubocop:disable Documentation
include ServiceUtils
end
platform_options = node['openstack']['object-storage']['platform']
platform_options['account_packages'].each do |pkg|
@ -31,11 +35,13 @@ platform_options['account_packages'].each.each do |pkg|
end
end
svc_names = {}
%w{swift-account swift-account-auditor swift-account-reaper swift-account-replicator}.each do |svc|
service_name = platform_options['service_prefix'] + svc + platform_options['service_suffix']
svc_names[svc] = svc_name(svc)
end
svc_names.values.each do |svc|
service svc do
service_name service_name
provider platform_options['service_provider']
supports status: true, restart: true
action [:enable, :start]
only_if '[ -e /etc/swift/account-server.conf ] && [ -e /etc/swift/account.ring.gz ]'
@ -53,8 +59,8 @@ template '/etc/swift/account-server.conf' do
'bind_port' => node['openstack']['object-storage']['network']['account-bind-port']
)
notifies :restart, 'service[swift-account]', :immediately
notifies :restart, 'service[swift-account-auditor]', :immediately
notifies :restart, 'service[swift-account-reaper]', :immediately
notifies :restart, 'service[swift-account-replicator]', :immediately
notifies :restart, "service[#{svc_names['swift-account']}]", :immediately
notifies :restart, "service[#{svc_names['swift-account-auditor']}]", :immediately
notifies :restart, "service[#{svc_names['swift-account-reaper']}]", :immediately
notifies :restart, "service[#{svc_names['swift-account-replicator']}]", :immediately
end

View File

@ -79,9 +79,9 @@ end
# determine hash
if node['openstack']['object-storage']['swift_secret_databag_name'].nil?
swift_hash_path_prefix = node['openstack']['object-storage']['swift_hash_path_prefix']
swift_hash_path_prefix = get_secret 'swift_hash_path_prefix' if swift_hash_path_prefix.nil?
swift_hash_path_prefix = get_password 'token', 'swift_hash_path_prefix' if swift_hash_path_prefix.nil?
swift_hash_path_suffix = node['openstack']['object-storage']['swift_hash_path_suffix']
swift_hash_path_suffix = get_secret 'swift_hash_path_suffix' if swift_hash_path_suffix.nil?
swift_hash_path_suffix = get_password 'token', 'swift_hash_path_suffix' if swift_hash_path_suffix.nil?
else
# Deprecated, else case to be removed.
swift_secrets = Chef::EncryptedDataBagItem.load 'secrets', node['openstack']['object-storage']['swift_secret_databag_name']

View File

@ -22,6 +22,10 @@ include_recipe 'openstack-object-storage::common'
include_recipe 'openstack-object-storage::storage-common'
include_recipe 'openstack-object-storage::disks'
class Chef::Recipe # rubocop:disable Documentation
include ServiceUtils
end
platform_options = node['openstack']['object-storage']['platform']
platform_options['container_packages'].each do |pkg|
@ -31,18 +35,31 @@ platform_options['container_packages'].each do |pkg|
end
end
svc_names = {}
%w{swift-container swift-container-auditor swift-container-replicator swift-container-updater}.each do |svc|
service_name = platform_options['service_prefix'] + svc + platform_options['service_suffix']
svc_names[svc] = svc_name(svc)
end
svc_names.values.each do |svc|
service svc do
service_name service_name
provider platform_options['service_provider']
supports status: true, restart: true
action [:enable, :start]
only_if '[ -e /etc/swift/container-server.conf ] && [ -e /etc/swift/container.ring.gz ]'
end
end
memcache_servers = memcached_servers.join ','
template '/etc/swift/container-reconciler.conf' do
source 'container-reconciler.conf.erb'
owner node['openstack']['object-storage']['user']
group node['openstack']['object-storage']['group']
mode 00600
variables(
'memcache_servers' => memcache_servers
)
end
template '/etc/swift/container-server.conf' do
source 'container-server.conf.erb'
owner node['openstack']['object-storage']['user']
@ -53,17 +70,24 @@ template '/etc/swift/container-server.conf' do
'bind_port' => node['openstack']['object-storage']['network']['container-bind-port']
)
notifies :restart, 'service[swift-container]', :immediately
notifies :restart, 'service[swift-container-replicator]', :immediately
notifies :restart, 'service[swift-container-updater]', :immediately
notifies :restart, 'service[swift-container-auditor]', :immediately
notifies :restart, "service[#{svc_names['swift-container']}]", :immediately
notifies :restart, "service[#{svc_names['swift-container-replicator']}]", :immediately
notifies :restart, "service[#{svc_names['swift-container-updater']}]", :immediately
notifies :restart, "service[#{svc_names['swift-container-auditor']}]", :immediately
end
service_name = platform_options['service_prefix'] + 'swift-container-sync' + platform_options['service_suffix']
unless node['openstack']['object-storage']['container-server']['allowed_sync_hosts'] == []
service 'swift-container-sync' do
service_name service_name
provider platform_options['service_provider']
service_name = svc_name('swift-container-sync')
template '/etc/swift/container-sync-realms.conf' do
source 'container-sync-realms.conf.erb'
owner node['openstack']['object-storage']['user']
group node['openstack']['object-storage']['group']
mode 00600
notifies :restart, "service[#{service_name}]", :immediately
end
service service_name do
supports status: false, restart: true
action [:enable, :start]
only_if '[ -e /etc/swift/container-server.conf ] && [ -e /etc/swift/container.ring.gz ]'

View File

@ -29,7 +29,7 @@ end
identity_admin_endpoint = admin_endpoint 'identity-admin'
token = get_secret 'openstack_identity_bootstrap_token'
token = get_password 'token', 'openstack_identity_bootstrap_token'
auth_url = ::URI.decode identity_admin_endpoint.to_s
admin_api_endpoint = admin_endpoint 'object-storage-api'

View File

@ -57,10 +57,10 @@ end
# determine where to find dispersion login information
if node['openstack']['object-storage']['swift_secret_databag_name'].nil?
auth_user = node['openstack']['object-storage']['dispersion']['auth_user']
auth_user = get_secret 'dispersion_auth_user' if auth_user.nil?
auth_user = get_password 'token', 'dispersion_auth_user' if auth_user.nil?
auth_key = node['openstack']['object-storage']['dispersion']['auth_key']
auth_key = get_secret 'dispersion_auth_key' if auth_key.nil?
auth_key = get_password 'token', 'dispersion_auth_key' if auth_key.nil?
else
# Deprecated, else case to be removed.
swift_secrets = Chef::EncryptedDataBagItem.load 'secrets', node['openstack']['object-storage']['swift_secret_databag_name']

View File

@ -22,6 +22,10 @@ include_recipe 'openstack-object-storage::common'
include_recipe 'openstack-object-storage::storage-common'
include_recipe 'openstack-object-storage::disks'
class Chef::Recipe # rubocop:disable Documentation
include ServiceUtils
end
platform_options = node['openstack']['object-storage']['platform']
platform_options['object_packages'].each do |pkg|
@ -31,14 +35,13 @@ platform_options['object_packages'].each do |pkg|
end
end
svc_names = {}
%w{swift-object swift-object-replicator swift-object-auditor swift-object-updater}.each do |svc|
service_name = platform_options['service_prefix'] + svc + platform_options['service_suffix']
svc_names[svc] = svc_name(svc)
end
svc_names.values.each do |svc|
service svc do
service_name service_name
provider platform_options['service_provider']
# the default ubuntu provider uses invoke-rc.d, which apparently is
# status-illy broken in ubuntu
supports status: false, restart: true
action [:enable, :start]
only_if '[ -e /etc/swift/object-server.conf ] && [ -e /etc/swift/object.ring.gz ]'
@ -46,6 +49,18 @@ end
end
memcache_servers = memcached_servers.join ','
template '/etc/swift/object-expirer.conf' do
source 'object-expirer.conf.erb'
owner node['openstack']['object-storage']['user']
group node['openstack']['object-storage']['group']
mode 00600
variables(
'memcache_servers' => memcache_servers
)
end
template '/etc/swift/object-server.conf' do
source 'object-server.conf.erb'
owner node['openstack']['object-storage']['user']
@ -56,10 +71,10 @@ template '/etc/swift/object-server.conf' do
'bind_port' => node['openstack']['object-storage']['network']['object-bind-port']
)
notifies :restart, 'service[swift-object]', :immediately
notifies :restart, 'service[swift-object-replicator]', :immediately
notifies :restart, 'service[swift-object-updater]', :immediately
notifies :restart, 'service[swift-object-auditor]', :immediately
notifies :restart, "service[#{svc_names['swift-object']}]", :immediately
notifies :restart, "service[#{svc_names['swift-object-replicator']}]", :immediately
notifies :restart, "service[#{svc_names['swift-object-updater']}]", :immediately
notifies :restart, "service[#{svc_names['swift-object-auditor']}]", :immediately
end
cron 'swift-recon' do

View File

@ -22,6 +22,7 @@ include_recipe 'openstack-object-storage::memcached'
class Chef::Recipe # rubocop:disable Documentation
include IPUtils
include ServiceUtils
end
if node.run_list.expand(node.chef_environment).recipes.include?('openstack-object-storage::setup')
@ -80,8 +81,8 @@ when 'keystone'
package 'python-keystoneclient' do
action :upgrade
end
identity_endpoint = endpoint 'identity-api'
identity_admin_endpoint = endpoint 'identity-admin'
identity_endpoint = internal_endpoint 'identity-internal'
identity_admin_endpoint = admin_endpoint 'identity-admin'
service_pass = get_password 'service', 'openstack-object-storage'
auth_uri = auth_uri_transform identity_endpoint.to_s, node['openstack']['object-storage']['api']['auth']['version']
@ -98,35 +99,17 @@ directory '/var/cache/swift' do
mode 00700
end
swift_proxy_service = platform_options['service_prefix'] + 'swift-proxy' + platform_options['service_suffix']
service 'swift-proxy' do
service_name swift_proxy_service
provider platform_options['service_provider']
proxy_service_name = svc_name('swift-proxy')
service proxy_service_name do
supports status: true, restart: true
action [:enable, :start]
only_if '[ -e /etc/swift/proxy-server.conf ] && [ -e /etc/swift/object.ring.gz ]'
end
# use localhost when using chef solo otherwise, include all memcache
# servers from all known proxies
if Chef::Config[:solo]
memcache_servers = ['127.0.0.1:11211']
else
memcache_servers = []
proxy_role = node['openstack']['object-storage']['proxy_server_chef_role']
proxy_nodes = search(:node, "chef_environment:#{node.chef_environment} AND roles:#{proxy_role}")
proxy_nodes.each do |proxy|
proxy_ip = locate_ip_in_cidr(node['openstack']['object-storage']['network']['proxy-cidr'], proxy)
next unless proxy_ip # skip nil ips so we dont break the config
server_str = "#{proxy_ip}:11211"
memcache_servers << server_str unless memcache_servers.include?(server_str)
end
end
# determine authkey to use
if node['openstack']['object-storage']['swift_secret_databag_name'].nil?
authkey = node['openstack']['object-storage']['authkey']
authkey = get_secret 'swift_authkey' if authkey.nil?
authkey = get_password 'token', 'swift_authkey' if authkey.nil?
else
# Deprecated, else case to be removed.
swift_secrets = Chef::EncryptedDataBagItem.load 'secrets', node['openstack']['object-storage']['swift_secret_databag_name']
@ -140,6 +123,8 @@ proxy_api_bind_port = proxy_api_bind.port if proxy_api_bind_port.nil?
proxy_api_bind_host = node['openstack']['object-storage']['network']['proxy-bind-ip']
proxy_api_bind_host = proxy_api_bind.host if proxy_api_bind_host.nil?
memcache_servers = memcached_servers.join ','
# create proxy config file
template '/etc/swift/proxy-server.conf' do
source 'proxy-server.conf.erb'
@ -156,5 +141,5 @@ template '/etc/swift/proxy-server.conf' do
'identity_admin_endpoint' => identity_admin_endpoint,
'service_pass' => service_pass
)
notifies :restart, 'service[swift-proxy]', :immediately
notifies :restart, "service[#{proxy_service_name}]", :immediately
end

View File

@ -114,5 +114,45 @@ describe 'openstack-object-storage::container-server' do
end
end
end
# The resource under test manages container-reconciler.conf (see the
# `let(:file)` below); the previous spec text mislabeled it as
# "object-reconciler.conf", which made failure output misleading.
describe '/etc/swift/container-reconciler.conf' do
  let(:file) { chef_run.template('/etc/swift/container-reconciler.conf') }

  it_behaves_like 'custom template banner displayer' do
    let(:file_name) { file.name }
  end

  # Ownership and 0600 mode: the rendered file contains the memcache list.
  it 'creates container-reconciler.conf' do
    expect(chef_run).to create_template(file.name).with(
      user: 'swift',
      group: 'swift',
      mode: 00600
    )
  end

  # host1:111,host2:222 comes from the stubbed memcached_servers helper
  # in the shared swift-stubs context.
  it 'sets the memcache_servers attribute' do
    expect(chef_run).to render_file(file.name).with_content(/^memcache_servers = host1:111,host2:222$/)
  end
end
# Spec coverage for the container-sync-realms configuration template.
describe '/etc/swift/container-sync-realms.conf' do
# Seed the sync-host attribute so the recipe has data to render.
before do
node.set['openstack']['object-storage']['container-server']['allowed_sync_hosts'] = ['host1', 'host2', 'host3']
end
let(:file) { chef_run.template('/etc/swift/container-sync-realms.conf') }
it_behaves_like 'custom template banner displayer' do
let(:file_name) { file.name }
end
# Ownership and 0600 mode: this file may hold cluster-to-cluster sync keys.
it 'creates container-sync-realms.conf' do
expect(chef_run).to create_template(file.name).with(
user: 'swift',
group: 'swift',
mode: 00600
)
end
end
end
end

View File

@ -68,5 +68,25 @@ describe 'openstack-object-storage::object-server' do
end
end
end
# Spec coverage for the object-expirer configuration template.
describe '/etc/swift/object-expirer.conf' do
  let(:file) { chef_run.template('/etc/swift/object-expirer.conf') }

  it_behaves_like 'custom template banner displayer' do
    let(:file_name) { file.name }
  end

  # Example name fixed: was the typo "creates object-expirerr.conf".
  it 'creates object-expirer.conf' do
    expect(chef_run).to create_template(file.name).with(
      user: 'swift',
      group: 'swift',
      mode: 00600
    )
  end

  # host1:111,host2:222 comes from the stubbed memcached_servers helper
  # in the shared swift-stubs context.
  it 'sets the memcache_servers attribute' do
    expect(chef_run).to render_file(file.name).with_content(/^memcache_servers = host1:111,host2:222$/)
  end
end
end
end

View File

@ -306,7 +306,7 @@ describe 'openstack-object-storage::proxy-server' do
end
it 'sets the memcache_servers attribute' do
expect(chef_run).to render_file(file.name).with_content(/^memcache_servers = 127.0.0.1:11211$/)
expect(chef_run).to render_file(file.name).with_content(/^memcache_servers = host1:111,host2:222$/)
end
context 'domain_remap' do

View File

@ -68,30 +68,32 @@ shared_context 'swift-stubs' do
'service_pass' => 'foobar'
}
}
allow_any_instance_of(Chef::Recipe).to receive(:search).with(:node, 'chef_environment:_default AND roles:swift-setup').and_return([n])
allow_any_instance_of(Chef::Recipe).to receive(:search).with(:node, 'chef_environment:_default AND roles:os-object-storage-setup').and_return([n])
allow(Chef::Application).to receive(:fatal!)
allow_any_instance_of(Chef::Recipe).to receive(:get_secret)
.with('openstack_identity_bootstrap_token')
allow_any_instance_of(Chef::Recipe).to receive(:get_password)
.with('token', 'openstack_identity_bootstrap_token')
.and_return('bootstrap-token')
allow_any_instance_of(Chef::Recipe).to receive(:get_password)
.with('service', 'openstack-object-storage')
.and_return('swift-pass')
allow_any_instance_of(Chef::Recipe).to receive(:get_secret)
.with('swift_hash_path_prefix')
allow_any_instance_of(Chef::Recipe).to receive(:get_password)
.with('token', 'swift_hash_path_prefix')
.and_return('swift_hash_path_prefix-secret')
allow_any_instance_of(Chef::Recipe).to receive(:get_secret)
.with('swift_hash_path_suffix')
allow_any_instance_of(Chef::Recipe).to receive(:get_password)
.with('token', 'swift_hash_path_suffix')
.and_return('swift_hash_path_suffix-secret')
allow_any_instance_of(Chef::Recipe).to receive(:get_secret)
.with('swift_authkey')
allow_any_instance_of(Chef::Recipe).to receive(:get_password)
.with('token', 'swift_authkey')
.and_return('swift_authkey-secret')
allow_any_instance_of(Chef::Recipe).to receive(:get_secret)
.with('dispersion_auth_user')
allow_any_instance_of(Chef::Recipe).to receive(:get_password)
.with('token', 'dispersion_auth_user')
.and_return('dispersion_auth_user-secret')
allow_any_instance_of(Chef::Recipe).to receive(:get_secret)
.with('dispersion_auth_key')
allow_any_instance_of(Chef::Recipe).to receive(:get_password)
.with('token', 'dispersion_auth_key')
.and_return('dispersion_auth_key-secret')
allow_any_instance_of(Chef::Recipe).to receive(:memcached_servers)
.and_return(['host1:111', 'host2:222'])
end
end

View File

@ -0,0 +1,54 @@
<%= node['openstack']['object-storage']['custom_template_banner'] %>
[DEFAULT]
# swift_dir = /etc/swift
# user = swift
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
[container-reconciler]
# The reconciler will re-attempt reconciliation if the source object is not
# available up to reclaim_age seconds before it gives up and deletes the entry
# in the queue.
# reclaim_age = 604800
# The cycle time of the daemon
# interval = 30
# Server errors from requests will be retried by default
# request_tries = 3
[pipeline:main]
pipeline = catch_errors proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
# See proxy-server.conf-sample for options
[filter:cache]
use = egg:swift#memcache
memcache_servers = <%= @memcache_servers %>
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options

View File

@ -0,0 +1,49 @@
<%= node['openstack']['object-storage']['custom_template_banner'] %>
# [DEFAULT]
# The number of seconds between checking the modified time of this config file
# for changes and therefore reloading it.
# mtime_check_interval = 300
# [realm1]
# key = realm1key
# key2 = realm1key2
# cluster_name1 = https://host1/v1/
# cluster_name2 = https://host2/v1/
#
# [realm2]
# key = realm2key
# key2 = realm2key2
# cluster_name3 = https://host3/v1/
# cluster_name4 = https://host4/v1/
# Each section name is the name of a sync realm. A sync realm is a set of
# clusters that have agreed to allow container syncing with each other. Realm
# names will be considered case insensitive.
#
# The key is the overall cluster-to-cluster key used in combination with the
# external users' key that they set on their containers' X-Container-Sync-Key
# metadata header values. These keys will be used to sign each request the
# container sync daemon makes and used to validate each incoming container sync
# request.
#
# The key2 is optional and is an additional key incoming requests will be
# checked against. This is so you can rotate keys if you wish; you move the
# existing key to key2 and make a new key value.
#
# Any values in the realm section whose names begin with cluster_ will indicate
# the name and endpoint of a cluster and will be used by external users in
# their containers' X-Container-Sync-To metadata header values with the format
# "realm_name/cluster_name/container_name". Realm and cluster names are
# considered case insensitive.
#
# The endpoint is what the container sync daemon will use when sending out
# requests to that cluster. Keep in mind this endpoint must be reachable by all
# container servers, since that is where the container sync daemon runs. Note
# that the endpoint ends with /v1/ and that the container sync daemon will then
# add the account/container/obj name after that.
#
# Distribute this container-sync-realms.conf file to all your proxy servers
# and container servers.

View File

@ -0,0 +1,111 @@
<%= node['openstack']['object-storage']['custom_template_banner'] %>
[DEFAULT]
# swift_dir = /etc/swift
# user = swift
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
[object-expirer]
# interval = 300
# auto_create_account_prefix = .
# expiring_objects_account_name = expiring_objects
# report_interval = 300
# concurrency is the level of concurrency to use to do the work; this value
# must be set to at least 1
# concurrency = 1
# processes is how many parts to divide the work into, one part per process
# that will be doing the work
# processes set 0 means that a single process will be doing all the work
# processes can also be specified on the command line and will override the
# config value
# processes = 0
# process is which of the parts a particular process will work on
# process can also be specified on the command line and will override the config
# value
# process is "zero based", if you want to use 3 processes, you should run
# processes with process set to 0, 1, and 2
# process = 0
# The expirer will re-attempt expiring if the source object is not available
# up to reclaim_age seconds before it gives up and deletes the entry in the
# queue.
# reclaim_age = 604800
# recon_cache_path = /var/cache/swift
[pipeline:main]
pipeline = catch_errors proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
# See proxy-server.conf-sample for options
[filter:cache]
use = egg:swift#memcache
memcache_servers = <%= @memcache_servers %>
[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options
[filter:proxy-logging]
use = egg:swift#proxy_logging
# If not set, logging directives from [DEFAULT] without "access_" will be used
# access_log_name = swift
# access_log_facility = LOG_LOCAL0
# access_log_level = INFO
# access_log_address = /dev/log
#
# If set, access_log_udp_host will override access_log_address
# access_log_udp_host =
# access_log_udp_port = 514
#
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host = localhost
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1.0
# access_log_statsd_sample_rate_factor = 1.0
# access_log_statsd_metric_prefix =
# access_log_headers = false
#
# If access_log_headers is True and access_log_headers_only is set only
# these headers are logged. Multiple headers can be defined as comma separated
# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
# access_log_headers_only =
#
# By default, the X-Auth-Token is logged. To obscure the value,
# set reveal_sensitive_prefix to the number of characters to log.
# For example, if set to 12, only the first 12 characters of the
# token appear in the log. An unauthorized access of the log file
# won't allow unauthorized usage of the token. However, the first
# 12 or so characters is unique enough that you can trace/debug
# token usage. Set to 0 to suppress the token completely (replaced
# by '...' in the log).
# Note: reveal_sensitive_prefix will not affect the value
# logged with access_log_headers=True.
# reveal_sensitive_prefix = 16
#
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS

View File

@ -79,7 +79,7 @@ use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
memcache_servers = <%= @memcache_servers.join(',') %>
memcache_servers = <%= @memcache_servers %>
[filter:ratelimit]
use = egg:swift#ratelimit

View File

@ -1,78 +0,0 @@
#!/bin/sh
<%= node['openstack']['object-storage']['custom_template_banner'] %>
### BEGIN INIT INFO
# Provides: <%= @exec %>
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Stop: 0 1 6
# Description: <%= @description %>
### END INIT INFO
# chkconfig: - 98 02
# Load the RHEL init-script helper functions.
. /etc/rc.d/init.d/functions

# Fixed ERB close tag: "=%>" is not valid ERB and would fail template
# rendering; the correct delimiter is "%>".
name="<%= @exec %>"

# Source optional per-service overrides, then define the subsys lock path
# used by start()/stop() below.
[ -e "/etc/sysconfig/openstack-swift-$name" ] && . "/etc/sysconfig/openstack-swift-$name"
lockfile="/var/lock/subsys/openstack-swift-$name"
# Start the service via swift-init; on success, create the subsys lock
# so the init system treats it as running.
start() {
swift-init "$name" start
retval=$?
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
# Stop the service via swift-init; on success, remove the subsys lock.
stop() {
swift-init "$name" stop
retval=$?
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
# Full stop/start cycle (no conditional check; see condrestart below).
restart() {
stop
start
}
# Report status via swift-init, propagating its exit code.
rh_status() {
swift-init "$name" status
retval=$?
return $retval
}
# Quiet status check: exit code only, all output suppressed.
rh_status_q() {
rh_status &> /dev/null
}
# Dispatch on the requested action; for start/stop/restart, "$1" doubles
# as the name of the shell function to invoke.
case "$1" in
start)
# Already running? Nothing to do.
rh_status_q && exit 0
$1
;;
stop)
# Not running? Nothing to stop.
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
;;
status)
rh_status
;;
condrestart|try-restart)
# Restart only when currently running.
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart}"
exit 2
esac
# Propagate the dispatched action's exit code.
exit $?

View File

@ -1,13 +0,0 @@
<%= node['openstack']['object-storage']['custom_template_banner'] %>
[Unit]
Description=<%= @description %>
After=syslog.target network.target
[Service]
# Foreground process run as the swift service user; @exec is the full
# command line rendered by the recipe.
Type=simple
User=<%= @user %>
ExecStart=<%= @exec %>
[Install]
WantedBy=multi-user.target