Merge "Remove upgrade tarball related code"

Authored by Jenkins on 2015-11-18 17:29:53 +00:00, committed by Gerrit Code Review
commit cd084cf5c4
38 changed files with 14 additions and 5292 deletions


@@ -74,5 +74,4 @@ include $(SOURCE_DIR)/packages/module.mk
include $(SOURCE_DIR)/docker/module.mk
include $(SOURCE_DIR)/bootstrap/module.mk
include $(SOURCE_DIR)/iso/module.mk
include $(SOURCE_DIR)/upgrade/module.mk
include $(SOURCE_DIR)/virtualbox.mk


@@ -28,8 +28,6 @@ Directory structure:
Scripts that are used for building Fuel RPM and DEB packages.
- ```specs```
RPM spec for fuel and fuel-release packages.
- ```upgrade```
Scripts that are used for building Fuel upgrade tarball. (being deprecated)
- ```utils```
Auxiliary scripts. (being deprecated)
- ```virtualbox```


@@ -41,7 +41,6 @@ DEPS_DIR_CURRENT:=$(abspath $(DEPS_DIR_CURRENT))
# Artifacts names
ISO_NAME?=fuel-$(PRODUCT_VERSION)
UPGRADE_TARBALL_NAME?=fuel-$(PRODUCT_VERSION)-upgrade
OPENSTACK_PATCH_TARBALL_NAME?=fuel-$(PRODUCT_VERSION)-patch
VBOX_SCRIPTS_NAME?=vbox-scripts-$(PRODUCT_VERSION)
BOOTSTRAP_ART_NAME?=bootstrap.tar.gz
@@ -57,7 +56,6 @@ TARGET_CENTOS_IMG_ART_NAME?=centos_target_images.tar
# Where we put artifacts
ISO_PATH:=$(ARTS_DIR)/$(ISO_NAME).iso
UPGRADE_TARBALL_PATH:=$(ARTS_DIR)/$(UPGRADE_TARBALL_NAME).tar
VBOX_SCRIPTS_PATH:=$(ARTS_DIR)/$(VBOX_SCRIPTS_NAME).zip
MASTER_IP?=10.20.0.2
@@ -259,22 +257,6 @@ SANDBOX_COPY_CERTS?=0
# Development option only:
# Please dont change them if you dont know what they do ##
# If not empty, will try save "build/upgrade/deps" pip cache from upgrade module only,
# to file $(ARTS_DIR)/$(SAVE_UPGRADE_PIP_ART)
# Example:
# SAVE_UPGRADE_PIP_ART?=fuel-dev.art_pip_from_upg_module.tar.gz
SAVE_UPGRADE_PIP_ART?=
# If not empty, will try to download this archive and use like pip cache
# for creating upgrade module.
# Example:
# USE_UPGRADE_PIP_ART_HTTP_LINK?=http://127.0.0.1/files/deps.pip.tar.gz
# Content example:
# deps.pip.tar.gz:\
# \argparse-1.2.1.tar.gz
# \docker-py-0.3.2.tar.gz
USE_UPGRADE_PIP_ART_HTTP_LINK?=
# Work-around for: LP1482667
# If not empty, will try to download prepeared upstream puppet modules source,
# which used like requirements for build fuel-library package.


@@ -1,7 +1,7 @@
.PHONY: all iso version-yaml centos-repo ubuntu-repo
.DELETE_ON_ERROR: $(ISO_PATH)
all: iso version-yaml
all: iso version-yaml openstack-yaml
ISOROOT:=$(BUILD_DIR)/iso/isoroot
@@ -128,9 +128,20 @@ $(BUILD_DIR)/iso/isoroot-dotfiles.done: \
$(ISOROOT)/.treeinfo
$(ACTION.TOUCH)
$(ISOROOT)/openstack_version: $(BUILD_DIR)/upgrade/$(OPENSTACK_YAML_ART_NAME)
$(ISOROOT)/openstack_version: $(BUILD_DIR)/iso/$(OPENSTACK_YAML_ART_NAME)
mkdir -p $(@D)
python -c "import yaml; print filter(lambda r: r['fields'].get('name'), yaml.load(open('$(BUILD_DIR)/upgrade/$(OPENSTACK_YAML_ART_NAME)')))[0]['fields']['version']" > $@
python -c "import yaml; print filter(lambda r: r['fields'].get('name'), yaml.load(open('$(BUILD_DIR)/iso/$(OPENSTACK_YAML_ART_NAME)')))[0]['fields']['version']" > $@
openstack-yaml: $(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME)
$(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME): $(BUILD_DIR)/iso/$(OPENSTACK_YAML_ART_NAME)
$(ACTION.COPY)
$(BUILD_DIR)/iso/$(OPENSTACK_YAML_ART_NAME): $(BUILD_DIR)/repos/fuel-nailgun.done
mkdir -p $(@D)
cp $(BUILD_DIR)/repos/fuel-nailgun/nailgun/nailgun/fixtures/openstack.yaml $@
$(BUILD_DIR)/iso/isoroot-files.done: \
$(BUILD_DIR)/iso/isoroot-dotfiles.done \
@@ -259,4 +270,3 @@ $(ISO_PATH): $(BUILD_DIR)/iso/isoroot.done
-isohybrid-gpt-basdat \
-o $@ $(BUILD_DIR)/iso/isoroot-mkisofs
implantisomd5 $@
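The openstack_version rule above reads the release version out of Nailgun's openstack.yaml fixture: it takes the first fixture record whose fields carry a name and prints that record's version field. A rough Ruby equivalent of the python one-liner, assuming the fixture file sits in the current directory:

    require 'yaml'

    records = YAML.load_file('openstack.yaml')          # list of fixture records
    release = records.find { |r| r['fields']['name'] }  # first record that names a release
    puts release['fields']['version']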


@@ -1,26 +0,0 @@
require 'pathname'
require Pathname.new(__FILE__).dirname.dirname.expand_path + 'corosync'
Puppet::Type.type(:cs_commit).provide(:crm, :parent => Puppet::Provider::Corosync) do
commands :crm => 'crm'
commands :crm_attribute => 'crm_attribute'
commands :crm_shadow => 'crm_shadow'
def self.instances
block_until_ready
[]
end
def sync(cib)
self.class.block_until_ready
begin
crm_shadow '--force', '--commit', cib
rescue Puppet::ExecutionFailure => e
#FIXME(aglarendil): reckless retry to commit shadow again
#lp/bug1283062
debug("shadow commit failed. trying one more time")
if e =~ /Application of an update diff failed/
crm_shadow '--force', '--commit', cib
end
end
end
end
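One note on the rescue clause above: `e =~ /Application of an update diff failed/` matches the regex against the exception object itself (Object#=~ always returns nil), so the guarded second commit never actually runs; matching the exception message is presumably what was intended. A minimal sketch of that retry, assuming the provider's crm_shadow command as above:

    def commit_shadow(cib)
      crm_shadow '--force', '--commit', cib
    rescue Puppet::ExecutionFailure => e
      debug('shadow commit failed. trying one more time')
      # retry once, only for the known transient failure (lp/bug1283062)
      crm_shadow '--force', '--commit', cib if e.message =~ /Application of an update diff failed/
    end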


@@ -1,120 +0,0 @@
class heat::engine (
$pacemaker = false,
$ocf_scripts_dir = '/usr/lib/ocf/resource.d',
$ocf_scripts_provider = 'mirantis',
) {
include heat::params
$service_name = $::heat::params::engine_service_name
$package_name = $::heat::params::engine_package_name
$pacemaker_service_name = "p_${service_name}"
package { 'heat-engine' :
ensure => installed,
name => $package_name,
}
if !$pacemaker {
# standard service mode
service { 'heat-engine_service':
ensure => 'running',
name => $service_name,
enable => true,
hasstatus => true,
hasrestart => true,
}
} else {
# pacemaker resource mode
if $::osfamily == 'RedHat' {
$ocf_script_template = 'heat_engine_centos.ocf.erb'
} else {
$ocf_script_template = 'heat_engine_ubuntu.ocf.erb'
}
file { 'heat-engine-ocf' :
ensure => present,
path => "${ocf_scripts_dir}/${ocf_scripts_provider}/${service_name}",
mode => '0755',
owner => 'root',
group => 'root',
content => template("heat/${ocf_script_template}"),
}
service { 'heat-engine_service' :
ensure => 'running',
name => $pacemaker_service_name,
enable => true,
hasstatus => true,
hasrestart => true,
provider => 'pacemaker',
}
service { 'heat-engine_stopped' :
name => $service_name,
ensure => 'stopped',
enable => false,
}
cs_shadow { $pacemaker_service_name :
cib => $pacemaker_service_name,
}
cs_commit { $pacemaker_service_name :
cib => $pacemaker_service_name,
}
cs_resource { $pacemaker_service_name :
ensure => present,
cib => $pacemaker_service_name,
primitive_class => 'ocf',
provided_by => $ocf_scripts_provider,
primitive_type => $service_name,
metadata => { 'resource-stickiness' => '1' },
operations => {
'monitor' => { 'interval' => '20', 'timeout' => '30' },
'start' => { 'timeout' => '60' },
'stop' => { 'timeout' => '60' },
},
}
# remove old service from 5.0 release
$wrong_service_name = $service_name
cs_resource { $wrong_service_name :
ensure => 'absent',
cib => $pacemaker_service_name,
}
Heat_config<||> ->
File['heat-engine-ocf'] ->
Cs_shadow[$pacemaker_service_name] ->
Cs_resource[$service_name] ->
Cs_resource[$pacemaker_service_name] ->
Cs_commit[$pacemaker_service_name] ->
Service['heat-engine_stopped'] ->
Service['heat-engine_service']
}
exec {'heat-encryption-key-replacement':
command => 'sed -i "s/%ENCRYPTION_KEY%/`hexdump -n 16 -v -e \'/1 "%02x"\' /dev/random`/" /etc/heat/heat.conf',
path => [ '/usr/bin', '/bin' ],
onlyif => 'grep -c ENCRYPTION_KEY /etc/heat/heat.conf',
}
Package['heat-common'] -> Package['heat-engine'] -> File['/etc/heat/heat.conf'] -> Heat_config<||> ~> Service['heat-engine_service']
File['/etc/heat/heat.conf'] -> Exec['heat-encryption-key-replacement'] -> Service['heat-engine_service']
File['/etc/heat/heat.conf'] ~> Service['heat-engine_service']
Class['heat::db'] -> Service['heat-engine_service']
Heat_config<||> -> Exec['heat_db_sync'] ~> Service['heat-engine_service']
Package<| title == 'heat-engine'|> ~> Service<| title == 'heat-engine_service'|>
if !defined(Service['heat-engine_service']) {
notify{ "Module ${module_name} cannot notify service heat-engine on package update": }
}
}
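The heat-encryption-key-replacement exec above fills the %ENCRYPTION_KEY% placeholder in heat.conf with 16 random bytes from /dev/random rendered as 32 hex characters. For illustration, a one-line Ruby equivalent of the key generation:

    require 'securerandom'

    SecureRandom.hex(16)   # => a 32-character hex string, same shape as the hexdump output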


@@ -1,61 +0,0 @@
module MongoCommon
def mongo_local(cmd, database = @resource[:admin_database], username = @resource[:admin_username], password = @resource[:admin_password])
mongo_cmd = [
@resource[:mongo_path],
'--quiet',
'--eval',
cmd,
database,
]
output = Puppet::Util::Execution.execute(mongo_cmd, :failonfail => false, :combine => false)
rc = $?.exitstatus
Puppet.debug "Local Mongo: #{cmd} -> #{rc}: #{output}"
[output, rc]
end
def mongo_remote(cmd, database = @resource[:admin_database], username = @resource[:admin_username], password = @resource[:admin_password])
mongo_cmd = [
@resource[:mongo_path],
'--username',
username,
'--password',
password,
'--host',
@resource[:admin_host],
'--port',
@resource[:admin_port],
'--quiet',
'--eval',
cmd,
database,
]
output = Puppet::Util::Execution.execute(mongo_cmd, :failonfail => false, :combine => false)
rc = $?.exitstatus
Puppet.debug "Remote Mongo: #{cmd} -> #{rc}: #{output}"
[output, rc]
end
def mongo(cmd, database = @resource[:admin_database], username = @resource[:admin_username], password = @resource[:admin_password])
output, rc = mongo_remote(cmd, database,username,password)
return output if rc == 0
output, rc = mongo_local(cmd, database,username,password)
return output if rc == 0
raise Puppet::ExecutionFailure, output
end
def block_until_mongodb(tries = 10)
begin
mongo('db.getMongo()')
rescue => e
debug('MongoDB server not ready, retrying')
sleep 2
if (tries -= 1) > 0
retry
else
raise e
end
end
end
end


@@ -1,26 +0,0 @@
Puppet::Type.type(:mongodb_database).provide(:mongodb) do
require File.join(File.dirname(__FILE__), '..', 'common.rb')
desc "Manages MongoDB database."
defaultfor :kernel => 'Linux'
include MongoCommon
def create
Puppet.debug "mongo_database: #{@resource[:name]} create"
mongo('db.dummyData.insert({"created_by_puppet": 1})', @resource[:name])
end
def destroy
Puppet.debug "mongo_database: #{@resource[:name]} destroy"
mongo('db.dropDatabase()', @resource[:name])
end
def exists?
Puppet.debug "mongo_database: '#{@resource[:name]}' exists?"
block_until_mongodb(@resource[:tries])
current_databases = mongo('db.getMongo().getDBNames()').strip.split(',')
exists = current_databases.include?(@resource[:name])
Puppet.debug "mongo_database: '#{@resource[:name]}' all: #{current_databases.inspect} '#{@resource[:name]}' exists? #{exists}"
exists
end
end


@@ -1,49 +0,0 @@
Puppet::Type.type(:mongodb_user).provide(:mongodb) do
require File.join(File.dirname(__FILE__), '..', 'common.rb')
desc "Manage users for a MongoDB database."
defaultfor :kernel => 'Linux'
include MongoCommon
def create
Puppet.debug "mongodb_user: #{@resource[:name]} database '#{@resource[:database]}' create"
mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.insert({user:'#{@resource[:name]}', pwd:'#{@resource[:password_hash]}', roles: #{@resource[:roles].inspect}})")
end
def destroy
Puppet.debug "mongodb_user: #{@resource[:name]} database '#{@resource[:database]}' destroy"
mongo("db.getMongo().getDB('#{@resource[:database]}').removeUser('#{@resource[:name]}')")
end
def exists?
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' exists?"
block_until_mongodb(@resource[:tries])
exists = mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.find({user:'#{@resource[:name]}'}).count()").strip.to_i > 0
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' exists? #{exists}"
exists
end
def password_hash
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' password_hash get"
hash = mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.findOne({user:'#{@resource[:name]}'})['pwd']").strip
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' password_hash: #{hash}"
hash
end
def password_hash=(value)
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' password_hash set #{value.inspect}"
mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.update({user:'#{@resource[:name]}'}, { $set: {pwd:'#{value}'}})")
end
def roles
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' roles get"
roles = mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.findOne({user:'#{@resource[:name]}'})['roles']").strip.split(',').sort
Puppet.debug "mongodb_user: '#{@resource[:name]}' roles: #{roles.inspect}"
roles
end
def roles=(value)
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' roles set #{value.inspect}"
mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.update({user:'#{@resource[:name]}'}, { $set: {roles: #{@resource[:roles].inspect}}})")
end
end


@@ -1,56 +0,0 @@
Puppet::Type.newtype(:mongodb_database) do
@doc = "Manage MongoDB databases."
ensurable
newparam(:name, :namevar=>true) do
desc "The name of the database."
newvalues(/^\w+$/)
end
newparam(:admin_username) do
desc "Administrator user login"
defaultto 'admin'
end
newparam(:admin_password) do
desc "Administrator user password"
end
newparam(:admin_host) do
desc "Connect to this host as an admin user"
defaultto 'localhost'
end
newparam(:admin_port) do
desc "Connect to this port as an admin user"
defaultto '27017'
end
newparam(:admin_database) do
desc "Connect to this database as an admin user"
defaultto 'admin'
end
newparam(:mongo_path) do
desc "Path to mongo binary"
defaultto '/usr/bin/mongo'
end
newparam(:tries) do
desc "The maximum amount of two second tries to wait MongoDB startup."
defaultto 10
newvalues(/^\d+$/)
munge do |value|
Integer(value)
end
end
autorequire(:package) do
'mongodb'
end
autorequire(:service) do
'mongodb'
end
end


@@ -1,92 +0,0 @@
Puppet::Type.newtype(:mongodb_user) do
@doc = 'Manage a MongoDB user. This includes management of users password as well as privileges.'
ensurable
def initialize(*args)
super
# Sort roles array before comparison.
self[:roles] = Array(self[:roles]).sort!
end
newparam(:name, :namevar=>true) do
desc "The name of the user."
end
newparam(:admin_username) do
desc "Administrator user login"
defaultto 'admin'
end
newparam(:admin_password) do
desc "Administrator user password"
end
newparam(:admin_host) do
desc "Connect to this host as an admin user"
defaultto 'localhost'
end
newparam(:admin_port) do
desc "Connect to this port as an admin user"
defaultto '27017'
end
newparam(:mongo_path) do
desc "Path to mongo binary"
defaultto '/usr/bin/mongo'
end
newparam(:admin_database) do
desc "Connect to this database as an admin user"
defaultto 'admin'
end
newparam(:database) do
desc "The user's target database."
defaultto do
fail("Parameter 'database' must be set")
end
newvalues(/^\w+$/)
end
newparam(:tries) do
desc "The maximum amount of two second tries to wait MongoDB startup."
defaultto 10
newvalues(/^\d+$/)
munge do |value|
Integer(value)
end
end
newproperty(:roles, :array_matching => :all) do
desc "The user's roles."
defaultto ['dbAdmin']
newvalue(/^\w+$/)
# Pretty output for arrays.
def should_to_s(value)
value.inspect
end
def is_to_s(value)
value.inspect
end
end
newproperty(:password_hash) do
desc "The password hash of the user. Use mongodb_password() for creating hash."
defaultto do
fail("Property 'password_hash' must be set. Use mongodb_password() for creating hash.")
end
newvalue(/^\w+$/)
end
autorequire(:package) do
'mongodb'
end
autorequire(:service) do
'mongodb'
end
end


@@ -1,55 +0,0 @@
# == Class: mongodb::db
#
# Class for creating mongodb databases and users.
#
# == Parameters
#
# user - Database username.
# password_hash - Hashed password. Hex encoded md5 hash of "$username:mongo:$password".
# password - Plain text user password. This is UNSAFE, use 'password_hash' unstead.
# roles (default: ['dbAdmin']) - array with user roles.
# tries (default: 10) - The maximum amount of two second tries to wait MongoDB startup.
#
define mongodb::db (
$user,
$password_hash = false,
$password = false,
$roles = ['dbAdmin'],
$tries = 10,
$admin_username = undef,
$admin_password = undef,
$admin_host = undef,
$admin_database = undef,
) {
mongodb_database { $name:
ensure => present,
tries => $tries,
admin_username => $admin_username,
admin_password => $admin_password,
admin_host => $admin_host,
admin_database => $admin_database,
require => Class['mongodb::server'],
}
if $password_hash {
$hash = $password_hash
} elsif $password {
$hash = mongodb_password($user, $password)
} else {
fail("Parameter 'password_hash' or 'password' should be provided to mongodb::db.")
}
mongodb_user { $user:
ensure => present,
password_hash => $hash,
database => $name,
roles => $roles,
admin_username => $admin_username,
admin_password => $admin_password,
admin_host => $admin_host,
admin_database => $admin_database,
require => Mongodb_database[$name],
}
}
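The password_hash documented above is the hex-encoded MD5 of "$username:mongo:$password", i.e. the digest MongoDB's legacy MONGODB-CR authentication stores. A minimal Ruby sketch of what the mongodb_password() function is described as producing (the user name and password are illustrative):

    require 'digest/md5'

    def mongodb_password(username, password)
      # hex encoded md5 hash of "$username:mongo:$password", as documented above
      Digest::MD5.hexdigest("#{username}:mongo:#{password}")
    end

    mongodb_password('ceilometer', 's3cr3t')   # => 32-character hex string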


@@ -1,58 +0,0 @@
# == Class: openstack::mongo
class openstack::mongo (
$ceilometer_database = "ceilometer",
$ceilometer_user = "ceilometer",
$ceilometer_metering_secret = undef,
$ceilometer_db_password = "ceilometer",
$ceilometer_metering_secret = "ceilometer",
$mongodb_port = 27017,
$mongodb_bind_address = ['0.0.0.0'],
$verbose = false,
$use_syslog = true,
) {
class {'::mongodb::client':
} ->
class {'::mongodb::server':
port => $mongodb_port,
verbose => $verbose,
use_syslog => $use_syslog,
bind_ip => $mongodb_bind_address,
auth => true,
} ->
mongodb::db { $ceilometer_database:
user => $ceilometer_user,
password => $ceilometer_db_password,
roles => ['readWrite', 'dbAdmin', 'dbOwner'],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
} ->
mongodb::db { 'admin':
user => 'admin',
password => $ceilometer_db_password,
roles => [
'userAdmin',
'readWrite',
'dbAdmin',
'dbAdminAnyDatabase',
'readAnyDatabase',
'readWriteAnyDatabase',
'userAdminAnyDatabase',
'clusterAdmin',
'clusterManager',
'clusterMonitor',
'hostManager',
'root',
'restore',
],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
}
}


@@ -1,82 +0,0 @@
# == Class: openstack::mongo_primary
class openstack::mongo_primary (
$ceilometer_database = "ceilometer",
$ceilometer_user = "ceilometer",
$ceilometer_metering_secret = undef,
$ceilometer_db_password = "ceilometer",
$ceilometer_metering_secret = "ceilometer",
$ceilometer_replset_members = ['mongo2', 'mongo3'],
$mongodb_bind_address = ['0.0.0.0'],
$mongodb_port = 27017,
$use_syslog = true,
$verbose = false,
) {
if size($ceilometer_replset_members) > 0 {
$replset_setup = true
$keyfile = '/etc/mongodb.key'
$replset = 'ceilometer'
} else {
$replset_setup = false
$keyfile = undef
$replset = undef
}
notify {"MongoDB params: $mongodb_bind_address" :} ->
class {'::mongodb::client':
} ->
class {'::mongodb::server':
port => $mongodb_port,
verbose => $verbose,
use_syslog => $use_syslog,
bind_ip => $mongodb_bind_address,
auth => true,
replset => $replset,
keyfile => $keyfile,
} ->
class {'::mongodb::replset':
replset_setup => $replset_setup,
replset_members => $ceilometer_replset_members,
} ->
notify {"mongodb configuring databases" :} ->
mongodb::db { $ceilometer_database:
user => $ceilometer_user,
password => $ceilometer_db_password,
roles => [ 'readWrite', 'dbAdmin', 'dbOwner' ],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
} ->
mongodb::db { 'admin':
user => 'admin',
password => $ceilometer_db_password,
roles => [
'userAdmin',
'readWrite',
'dbAdmin',
'dbAdminAnyDatabase',
'readAnyDatabase',
'readWriteAnyDatabase',
'userAdminAnyDatabase',
'clusterAdmin',
'clusterManager',
'clusterMonitor',
'hostManager',
'root',
'restore',
],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
} ->
notify {"mongodb primary finished": }
}


@@ -1,147 +0,0 @@
module Rpmvercmp
def self.debug
@debug = false unless defined? @debug
@debug
end
def self.debug=(debug)
@debug = debug
end
# check that the element is not fully integer
def self.not_integer?(s)
!Integer(s)
rescue
true
end
# convert label to epoch, version, release
def self.label_to_elements(label)
return [nil, nil, nil] unless label
label = label.split ':'
if label.length > 1
epoch = label.shift
else
epoch = nil
end
label = label.join '-'
label = label.split('-')
if label.length > 1
version = label.shift
release = label.join '-'
else
version = label.first
release = nil
end
[epoch, version, release]
end
def self.simple_checks(var1, var2)
return 0 if var1 == var2
return 1 if var1 and not var2
return -1 if not var1 and var2
0
end
# compare two blocks
# first is larger -> 1
# second is larger -> -1
# equal -> 0
def self.compare_blocks(block1, block2)
block1 = get_string block1
block2 = get_string block2
rc = simple_checks block1, block2
return rc if rc != 0
# ~ sign has the highest sorting priority
if block1.start_with? '~' and !block2.start_with? '~'
return 1
elsif !block1.start_with? '~' and block2.start_with? '~'
return -1
end
if not_integer?(block1) && not_integer?(block2)
# Both not integers:
# compare strings
block1 <=> block2
else
# One of elements is integer:
# convert both to int and compare
block1.to_i <=> block2.to_i
end
end
# compare two elements
# first is larger -> 1
# second is larger -> -1
# equal -> 0
def self.compare_elements(element1, element2)
element1 = get_string element1
element2 = get_string element2
rc = simple_checks element1, element2
return rc if rc != 0
# split both versions to elements
separators = /[\._\-+]/
blocks1 = element1.split separators
blocks2 = element2.split separators
# compare each element from first to same element from second
while blocks1.length > 0 or blocks2.length > 0
b1 = blocks1.shift
b2 = blocks2.shift
rc = compare_blocks b1, b2
puts "Blocks: #{b1} vs #{b2} = #{rc}" if debug
# return result on first non-equal match
return rc if rc != 0
end
# there is nothing left to compare: return equal
0
end
def self.get_string(value)
return '' unless value
value.to_s
end
def self.compare_fuel(label1, label2)
return 0 if label1 == label2
return 0 if !label1.include? 'fuel' and !label2.include? 'fuel'
return 1 if label1.include? 'fuel' and !label2.include? 'fuel'
return -1 if !label1.include? 'fuel' and label2.include? 'fuel'
label1 =~ /fuel([\d\.]*)/
ver1 = $1
label2 =~ /fuel([\d\.]*)/
ver2 = $1
compare_elements ver1, ver2
end
def self.compare_labels(label1, label2)
label1 = get_string label1
label2 = get_string label2
rc = simple_checks label1, label2
return rc if rc != 0
rc = compare_fuel label1, label2
puts "Fuel: #{rc}" if debug
return rc if rc != 0
elements1 = label_to_elements label1
elements2 = label_to_elements label2
while elements1.length > 0 or elements2.length > 0
e1 = elements1.shift
e2 = elements2.shift
rc = compare_elements e1, e2
puts "Elements: #{e1.inspect} vs #{e2.inspect} = #{rc}" if debug
return rc if rc != 0
end
0
end
end
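A few worked comparisons with the module above, to make the return convention concrete (1 when the first label is larger, -1 when the second is, 0 when equal; fuel suffixes are compared before the usual epoch/version/release split):

    Rpmvercmp.compare_labels('1:2.0-1', '1:2.0-2')           # => -1  (release 2 is larger)
    Rpmvercmp.compare_labels('6.1-fuel6.1', '6.1-fuel6.0')   # =>  1  (fuel6.1 beats fuel6.0)
    Rpmvercmp.compare_labels('2.0', '2.0')                   # =>  0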


@@ -1,251 +0,0 @@
require 'puppet/util/package'
require 'yaml'
require File.join(File.dirname(__FILE__), 'rpmvercmp.rb')
Puppet::Type.type(:package).provide :yum, :parent => :rpm, :source => :rpm do
desc "Support via `yum`.
Using this provider's `uninstallable` feature will not remove dependent packages. To
remove dependent packages with this provider use the `purgeable` feature, but note this
feature is destructive and should be used with the utmost care."
has_feature :versionable
commands :yum => "yum", :rpm => "rpm", :python => "python"
self::YUMHELPER = File::join(File::dirname(__FILE__), "yumhelper.py")
attr_accessor :latest_info
if command('rpm')
confine :true => begin
rpm('--version')
rescue Puppet::ExecutionFailure
false
else
true
end
end
defaultfor :operatingsystem => [:fedora, :centos, :redhat]
def self.prefetch(packages)
raise Puppet::Error, "The yum provider can only be used as root" if Process.euid != 0
super
return unless packages.detect { |name, package| package.should(:ensure) == :latest }
# collect our 'latest' info
updates = {}
python(self::YUMHELPER).each_line do |l|
l.chomp!
next if l.empty?
if l[0,4] == "_pkg"
hash = nevra_to_hash(l[5..-1])
[hash[:name], "#{hash[:name]}.#{hash[:arch]}"].each do |n|
updates[n] ||= []
updates[n] << hash
end
end
end
# Add our 'latest' info to the providers.
packages.each do |name, package|
if info = updates[package[:name]]
package.provider.latest_info = info[0]
end
end
end
def pkg_list
raw_pkgs = rpm [ '-q', '-a', '--queryformat', '%{NAME}|%{VERSION}-%{RELEASE}\n' ]
pkgs = {}
raw_pkgs.split("\n").each do |l|
line = l.split '|'
name = line[0]
version = line[1]
next if !name || !version
pkgs.store name, version
end
pkgs
end
# Substract packages in hash b from packages in hash a
# in noval is true only package name matters and version is ignored
# @param a <Hash[String]>
# @param b <Hash[String]>
# @param ignore_versions <TrueClass,FalseClass>
def package_diff(a, b, ignore_versions = false)
result = a.dup
b.each_pair do |k, v|
if a.key? k
if a[k] == v or ignore_versions
result.delete k
end
end
end
result
end
# find package names in both a and b hashes
# values are taken from a
# @param a <Hash[String]>
# @param b <Hash[String]>
def package_updates(a, b)
common_keys = a.keys & b.keys
common_keys = a.keys & b.keys
common_keys.inject({}) { |result, p| result.merge({p => a[p]}) }
end
def install
should = @resource.should(:ensure)
self.debug "Ensuring => #{should}"
wanted = @resource[:name]
operation = :install
yum_options = %w(-d 0 -e 0 -y)
@file_dir = '/var/lib/puppet/rollback'
from = @property_hash[:ensure]
to = should
name = @resource[:name]
Puppet.notice "Installing package #{name} from #{from} to #{to}"
case should
when true, false, Symbol
# pass
should = nil
else
# Add the package version
wanted += "-#{should}"
is = self.query
if is && Rpmvercmp.compare_labels(should, is[:ensure]) < 0
self.debug "Downgrading package #{@resource[:name]} from version #{is[:ensure]} to #{should}"
operation = :downgrade
end
end
rollback_file = File.join @file_dir, "#{name}_#{to}_#{from}.yaml"
diff = read_diff rollback_file
if diff.is_a?(Hash) && diff.key?('installed') && diff.key?('removed')
# rollback
# reverse the update process instead of usuall install
Puppet.debug "Found rollback file at #{rollback_file}"
installed = diff['installed']
removed = diff['removed']
# calculate package sets
to_update = package_updates removed, installed
to_install = package_diff removed, installed
to_remove = package_diff installed, removed, true
Puppet.debug "Install: #{to_install.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}" if to_install.any?
Puppet.debug "Remove: #{to_remove.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}" if to_remove.any?
Puppet.debug "Update: #{to_update.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}" if to_update.any?
to_install = to_install.merge to_update
yum_shell yum_options, operation, to_install, to_remove
elsif from.is_a?(String) && to.is_a?(String)
# update form one version to another
before, after = yum_with_changes yum_options, operation, wanted
diff = make_package_diff before, after
file_path = File.join @file_dir, "#{name}_#{from}_#{to}.yaml"
save_diff file_path, diff
Puppet.debug "Saving diff file to #{file_path}"
else
# just a simple install
output = yum "-d", "0", "-e", "0", "-y", operation, wanted
end
is = check_query
raise Puppet::Error, "Could not find package #{self.name}" unless is
# FIXME: Should we raise an exception even if should == :latest
# and yum updated us to a version other than @param_hash[:ensure] ?
raise Puppet::Error, "Failed to update to version #{should}, got version #{is[:ensure]} instead" if should && should != is[:ensure]
end
# run the yum shell to install and remove packages
# @param options <Array[String]>
# @param operation <String,Symbol>
# @param to_install <Hash>
# @param to_remove <Hash>
def yum_shell(options, operation, to_install, to_remove)
tmp_file = '/tmp/yum.shell'
yum_shell = ''
yum_shell += "#{operation} #{to_install.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}\n" if to_install.any?
yum_shell += "remove #{to_remove.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}\n" if to_remove.any?
yum_shell += "run\n"
File.open(tmp_file, 'w') { |file| file.write yum_shell }
output = yum "--setopt", "obsoletes=0", options, 'shell', tmp_file
File.delete tmp_file
end
# package state query executed after the install to check its success
# separate method is made because it can be stubbed by the spec
# @return Hash
def check_query
self.query
end
# combine before and after lists into a diff
# @param before <Hash[String]>
# @param after <Hash[String]>
def make_package_diff(before, after)
installed = package_diff after, before
removed = package_diff before, after
{ 'installed' => installed, 'removed' => removed }
end
# run yum operation and get package
# lists before and after of it
# @param options <Array[String]>
# @param operation <String,Symbol>
# @param wanted <String>
def yum_with_changes(options, operation, wanted)
before = pkg_list
yum options, operation, wanted
after = pkg_list
[ before, after ]
end
# saves diff hash into a file
# @param file_path <String>
# @param diff <Hash[String]>
def save_diff(file_path, diff)
Dir.mkdir @file_dir unless File.directory? @file_dir
File.open(file_path, 'w') { |file| file.write YAML.dump(diff) + "\n" }
end
# reads diff hash from a file
# @param file_path <String>
# @returns <Hash[String]>
def read_diff(file_path)
return unless File.readable? file_path
diff = YAML.load_file file_path
return unless diff.is_a? Hash
diff
end
# What's the latest package version available?
def latest
upd = latest_info
unless upd.nil?
# FIXME: there could be more than one update for a package
# because of multiarch
return "#{upd[:epoch]}:#{upd[:version]}-#{upd[:release]}"
else
# Yum didn't find updates, pretend the current
# version is the latest
raise Puppet::DevError, "Tried to get latest on a missing package" if properties[:ensure] == :absent
return properties[:ensure]
end
end
def update
# Install in yum can be used for update, too
self.install
end
def purge
yum "-y", :erase, @resource[:name]
end
end
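For reference, the rollback file that install() looks up under /var/lib/puppet/rollback is the YAML dump of the hash make_package_diff builds: package/version pairs present only after the transaction go under 'installed', pairs present only before it under 'removed'. A sketch of that structure (package names and versions are illustrative):

    # shape of the rollback diff written by save_diff and read back by read_diff
    diff = {
      'installed' => { 'openstack-nova-api' => '2014.2-1.el6' },
      'removed'   => { 'openstack-nova-api' => '2014.1-3.el6' },
    }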

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,26 +0,0 @@
require 'pathname'
require Pathname.new(__FILE__).dirname.dirname.expand_path + 'corosync'
Puppet::Type.type(:cs_commit).provide(:crm, :parent => Puppet::Provider::Corosync) do
commands :crm => 'crm'
commands :crm_attribute => 'crm_attribute'
commands :crm_shadow => 'crm_shadow'
def self.instances
block_until_ready
[]
end
def sync(cib)
self.class.block_until_ready
begin
crm_shadow '--force', '--commit', cib
rescue Puppet::ExecutionFailure => e
#FIXME(aglarendil): reckless retry to commit shadow again
#lp/bug1283062
debug("shadow commit failed. trying one more time")
if e =~ /Application of an update diff failed/
crm_shadow '--force', '--commit', cib
end
end
end
end


@@ -1,120 +0,0 @@
class heat::engine (
$pacemaker = false,
$ocf_scripts_dir = '/usr/lib/ocf/resource.d',
$ocf_scripts_provider = 'mirantis',
) {
include heat::params
$service_name = $::heat::params::engine_service_name
$package_name = $::heat::params::engine_package_name
$pacemaker_service_name = "p_${service_name}"
package { 'heat-engine' :
ensure => installed,
name => $package_name,
}
if !$pacemaker {
# standard service mode
service { 'heat-engine_service':
ensure => 'running',
name => $service_name,
enable => true,
hasstatus => true,
hasrestart => true,
}
} else {
# pacemaker resource mode
if $::osfamily == 'RedHat' {
$ocf_script_template = 'heat_engine_centos.ocf.erb'
} else {
$ocf_script_template = 'heat_engine_ubuntu.ocf.erb'
}
file { 'heat-engine-ocf' :
ensure => present,
path => "${ocf_scripts_dir}/${ocf_scripts_provider}/${service_name}",
mode => '0755',
owner => 'root',
group => 'root',
content => template("heat/${ocf_script_template}"),
}
service { 'heat-engine_service' :
ensure => 'running',
name => $pacemaker_service_name,
enable => true,
hasstatus => true,
hasrestart => true,
provider => 'pacemaker',
}
service { 'heat-engine_stopped' :
name => $service_name,
ensure => 'stopped',
enable => false,
}
cs_shadow { $pacemaker_service_name :
cib => $pacemaker_service_name,
}
cs_commit { $pacemaker_service_name :
cib => $pacemaker_service_name,
}
cs_resource { $pacemaker_service_name :
ensure => present,
cib => $pacemaker_service_name,
primitive_class => 'ocf',
provided_by => $ocf_scripts_provider,
primitive_type => $service_name,
metadata => { 'resource-stickiness' => '1' },
operations => {
'monitor' => { 'interval' => '20', 'timeout' => '30' },
'start' => { 'timeout' => '60' },
'stop' => { 'timeout' => '60' },
},
}
# remove old service from 5.0 release
$wrong_service_name = $service_name
cs_resource { $wrong_service_name :
ensure => 'absent',
cib => $pacemaker_service_name,
}
Heat_config<||> ->
File['heat-engine-ocf'] ->
Cs_shadow[$pacemaker_service_name] ->
Cs_resource[$service_name] ->
Cs_resource[$pacemaker_service_name] ->
Cs_commit[$pacemaker_service_name] ->
Service['heat-engine_stopped'] ->
Service['heat-engine_service']
}
exec {'heat-encryption-key-replacement':
command => 'sed -i "s/%ENCRYPTION_KEY%/`hexdump -n 16 -v -e \'/1 "%02x"\' /dev/random`/" /etc/heat/heat.conf',
path => [ '/usr/bin', '/bin' ],
onlyif => 'grep -c ENCRYPTION_KEY /etc/heat/heat.conf',
}
Package['heat-common'] -> Package['heat-engine'] -> File['/etc/heat/heat.conf'] -> Heat_config<||> ~> Service['heat-engine_service']
File['/etc/heat/heat.conf'] -> Exec['heat-encryption-key-replacement'] -> Service['heat-engine_service']
File['/etc/heat/heat.conf'] ~> Service['heat-engine_service']
Class['heat::db'] -> Service['heat-engine_service']
Heat_config<||> -> Exec['heat_db_sync'] ~> Service['heat-engine_service']
Package<| title == 'heat-engine'|> ~> Service<| title == 'heat-engine_service'|>
if !defined(Service['heat-engine_service']) {
notify{ "Module ${module_name} cannot notify service heat-engine on package update": }
}
}


@@ -1,61 +0,0 @@
module MongoCommon
def mongo_local(cmd, database = @resource[:admin_database], username = @resource[:admin_username], password = @resource[:admin_password])
mongo_cmd = [
@resource[:mongo_path],
'--quiet',
'--eval',
cmd,
database,
]
output = Puppet::Util::Execution.execute(mongo_cmd, :failonfail => false, :combine => false)
rc = $?.exitstatus
Puppet.debug "Local Mongo: #{cmd} -> #{rc}: #{output}"
[output, rc]
end
def mongo_remote(cmd, database = @resource[:admin_database], username = @resource[:admin_username], password = @resource[:admin_password])
mongo_cmd = [
@resource[:mongo_path],
'--username',
username,
'--password',
password,
'--host',
@resource[:admin_host],
'--port',
@resource[:admin_port],
'--quiet',
'--eval',
cmd,
database,
]
output = Puppet::Util::Execution.execute(mongo_cmd, :failonfail => false, :combine => false)
rc = $?.exitstatus
Puppet.debug "Remote Mongo: #{cmd} -> #{rc}: #{output}"
[output, rc]
end
def mongo(cmd, database = @resource[:admin_database], username = @resource[:admin_username], password = @resource[:admin_password])
output, rc = mongo_remote(cmd, database,username,password)
return output if rc == 0
output, rc = mongo_local(cmd, database,username,password)
return output if rc == 0
raise Puppet::ExecutionFailure, output
end
def block_until_mongodb(tries = 10)
begin
mongo('db.getMongo()')
rescue => e
debug('MongoDB server not ready, retrying')
sleep 2
if (tries -= 1) > 0
retry
else
raise e
end
end
end
end


@@ -1,26 +0,0 @@
Puppet::Type.type(:mongodb_database).provide(:mongodb) do
require File.join(File.dirname(__FILE__), '..', 'common.rb')
desc "Manages MongoDB database."
defaultfor :kernel => 'Linux'
include MongoCommon
def create
Puppet.debug "mongo_database: #{@resource[:name]} create"
mongo('db.dummyData.insert({"created_by_puppet": 1})', @resource[:name])
end
def destroy
Puppet.debug "mongo_database: #{@resource[:name]} destroy"
mongo('db.dropDatabase()', @resource[:name])
end
def exists?
Puppet.debug "mongo_database: '#{@resource[:name]}' exists?"
block_until_mongodb(@resource[:tries])
current_databases = mongo('db.getMongo().getDBNames()').strip.split(',')
exists = current_databases.include?(@resource[:name])
Puppet.debug "mongo_database: '#{@resource[:name]}' all: #{current_databases.inspect} '#{@resource[:name]}' exists? #{exists}"
exists
end
end


@@ -1,49 +0,0 @@
Puppet::Type.type(:mongodb_user).provide(:mongodb) do
require File.join(File.dirname(__FILE__), '..', 'common.rb')
desc "Manage users for a MongoDB database."
defaultfor :kernel => 'Linux'
include MongoCommon
def create
Puppet.debug "mongodb_user: #{@resource[:name]} database '#{@resource[:database]}' create"
mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.insert({user:'#{@resource[:name]}', pwd:'#{@resource[:password_hash]}', roles: #{@resource[:roles].inspect}})")
end
def destroy
Puppet.debug "mongodb_user: #{@resource[:name]} database '#{@resource[:database]}' destroy"
mongo("db.getMongo().getDB('#{@resource[:database]}').removeUser('#{@resource[:name]}')")
end
def exists?
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' exists?"
block_until_mongodb(@resource[:tries])
exists = mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.find({user:'#{@resource[:name]}'}).count()").strip.to_i > 0
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' exists? #{exists}"
exists
end
def password_hash
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' password_hash get"
hash = mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.findOne({user:'#{@resource[:name]}'})['pwd']").strip
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' password_hash: #{hash}"
hash
end
def password_hash=(value)
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' password_hash set #{value.inspect}"
mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.update({user:'#{@resource[:name]}'}, { $set: {pwd:'#{value}'}})")
end
def roles
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' roles get"
roles = mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.findOne({user:'#{@resource[:name]}'})['roles']").strip.split(',').sort
Puppet.debug "mongodb_user: '#{@resource[:name]}' roles: #{roles.inspect}"
roles
end
def roles=(value)
Puppet.debug "mongodb_user: '#{@resource[:name]}' database '#{@resource[:database]}' roles set #{value.inspect}"
mongo("db.getMongo().getDB('#{@resource[:database]}').system.users.update({user:'#{@resource[:name]}'}, { $set: {roles: #{@resource[:roles].inspect}}})")
end
end


@@ -1,56 +0,0 @@
Puppet::Type.newtype(:mongodb_database) do
@doc = "Manage MongoDB databases."
ensurable
newparam(:name, :namevar=>true) do
desc "The name of the database."
newvalues(/^\w+$/)
end
newparam(:admin_username) do
desc "Administrator user login"
defaultto 'admin'
end
newparam(:admin_password) do
desc "Administrator user password"
end
newparam(:admin_host) do
desc "Connect to this host as an admin user"
defaultto 'localhost'
end
newparam(:admin_port) do
desc "Connect to this port as an admin user"
defaultto '27017'
end
newparam(:admin_database) do
desc "Connect to this database as an admin user"
defaultto 'admin'
end
newparam(:mongo_path) do
desc "Path to mongo binary"
defaultto '/usr/bin/mongo'
end
newparam(:tries) do
desc "The maximum amount of two second tries to wait MongoDB startup."
defaultto 10
newvalues(/^\d+$/)
munge do |value|
Integer(value)
end
end
autorequire(:package) do
'mongodb'
end
autorequire(:service) do
'mongodb'
end
end


@@ -1,92 +0,0 @@
Puppet::Type.newtype(:mongodb_user) do
@doc = 'Manage a MongoDB user. This includes management of users password as well as privileges.'
ensurable
def initialize(*args)
super
# Sort roles array before comparison.
self[:roles] = Array(self[:roles]).sort!
end
newparam(:name, :namevar=>true) do
desc "The name of the user."
end
newparam(:admin_username) do
desc "Administrator user login"
defaultto 'admin'
end
newparam(:admin_password) do
desc "Administrator user password"
end
newparam(:admin_host) do
desc "Connect to this host as an admin user"
defaultto 'localhost'
end
newparam(:admin_port) do
desc "Connect to this port as an admin user"
defaultto '27017'
end
newparam(:mongo_path) do
desc "Path to mongo binary"
defaultto '/usr/bin/mongo'
end
newparam(:admin_database) do
desc "Connect to this database as an admin user"
defaultto 'admin'
end
newparam(:database) do
desc "The user's target database."
defaultto do
fail("Parameter 'database' must be set")
end
newvalues(/^\w+$/)
end
newparam(:tries) do
desc "The maximum amount of two second tries to wait MongoDB startup."
defaultto 10
newvalues(/^\d+$/)
munge do |value|
Integer(value)
end
end
newproperty(:roles, :array_matching => :all) do
desc "The user's roles."
defaultto ['dbAdmin']
newvalue(/^\w+$/)
# Pretty output for arrays.
def should_to_s(value)
value.inspect
end
def is_to_s(value)
value.inspect
end
end
newproperty(:password_hash) do
desc "The password hash of the user. Use mongodb_password() for creating hash."
defaultto do
fail("Property 'password_hash' must be set. Use mongodb_password() for creating hash.")
end
newvalue(/^\w+$/)
end
autorequire(:package) do
'mongodb'
end
autorequire(:service) do
'mongodb'
end
end


@@ -1,55 +0,0 @@
# == Class: mongodb::db
#
# Class for creating mongodb databases and users.
#
# == Parameters
#
# user - Database username.
# password_hash - Hashed password. Hex encoded md5 hash of "$username:mongo:$password".
# password - Plain text user password. This is UNSAFE, use 'password_hash' unstead.
# roles (default: ['dbAdmin']) - array with user roles.
# tries (default: 10) - The maximum amount of two second tries to wait MongoDB startup.
#
define mongodb::db (
$user,
$password_hash = false,
$password = false,
$roles = ['dbAdmin'],
$tries = 10,
$admin_username = undef,
$admin_password = undef,
$admin_host = undef,
$admin_database = undef,
) {
mongodb_database { $name:
ensure => present,
tries => $tries,
admin_username => $admin_username,
admin_password => $admin_password,
admin_host => $admin_host,
admin_database => $admin_database,
require => Class['mongodb::server'],
}
if $password_hash {
$hash = $password_hash
} elsif $password {
$hash = mongodb_password($user, $password)
} else {
fail("Parameter 'password_hash' or 'password' should be provided to mongodb::db.")
}
mongodb_user { $user:
ensure => present,
password_hash => $hash,
database => $name,
roles => $roles,
admin_username => $admin_username,
admin_password => $admin_password,
admin_host => $admin_host,
admin_database => $admin_database,
require => Mongodb_database[$name],
}
}


@@ -1,184 +0,0 @@
#
# == Class: openstack::glance
#
# Installs and configures Glance
# Assumes the following:
# - Keystone for authentication
# - keystone tenant: services
# - keystone username: glance
# - storage backend: file
#
# === Parameters
#
# [db_host] Host where DB resides. Required.
# [glance_user_password] Password for glance auth user. Required.
# [glance_db_password] Password for glance DB. Required.
# [keystone_host] Host whre keystone is running. Optional. Defaults to '127.0.0.1'
# [auth_uri] URI used for auth. Optional. Defaults to "http://${keystone_host}:5000/"
# [db_type] Type of sql databse to use. Optional. Defaults to 'mysql'
# [glance_db_user] Name of glance DB user. Optional. Defaults to 'glance'
# [glance_db_dbname] Name of glance DB. Optional. Defaults to 'glance'
# [verbose] Rather to print more verbose (INFO+) output. If non verbose and non debug, would give
# syslog_log_level (default is WARNING) output. Optional. Defaults to false.
# [debug] Rather to print even more verbose (DEBUG+) output. If true, would ignore verbose option.
# Optional. Defaults to false.
# [enabled] Used to indicate if the service should be active (true) or passive (false).
# Optional. Defaults to true
# [use_syslog] Rather or not service should log to syslog. Optional. Default to false.
# [syslog_log_facility] Facility for syslog, if used. Optional. Note: duplicating conf option
# wouldn't have been used, but more powerfull rsyslog features managed via conf template instead
# [syslog_log_level] logging level for non verbose and non debug mode. Optional.
# [glance_image_cache_max_size] the maximum size of glance image cache. Optional. Default is 10G.
#
# === Example
#
# class { 'openstack::glance':
# glance_user_password => 'changeme',
# db_password => 'changeme',
# db_host => '127.0.0.1',
# }
class openstack::glance (
$db_host = 'localhost',
$glance_user_password = false,
$glance_db_password = false,
$bind_host = '127.0.0.1',
$keystone_host = '127.0.0.1',
$registry_host = '127.0.0.1',
$auth_uri = "http://127.0.0.1:5000/",
$db_type = 'mysql',
$glance_db_user = 'glance',
$glance_db_dbname = 'glance',
$glance_backend = 'file',
$verbose = false,
$debug = false,
$enabled = true,
$use_syslog = false,
# Facility is common for all glance services
$syslog_log_facility = 'LOG_LOCAL2',
$syslog_log_level = 'WARNING',
$glance_image_cache_max_size = '10737418240',
$idle_timeout = '3600',
$max_pool_size = '10',
$max_overflow = '30',
$max_retries = '-1',
$rabbit_password = false,
$rabbit_userid = 'guest',
$rabbit_host = 'localhost',
$rabbit_port = '5672',
$rabbit_hosts = false,
$rabbit_virtual_host = '/',
$rabbit_use_ssl = false,
$rabbit_notification_exchange = 'glance',
$rabbit_notification_topic = 'notifications',
$amqp_durable_queues = false,
) {
validate_string($glance_user_password)
validate_string($glance_db_password)
validate_string($rabbit_password)
# Configure the db string
case $db_type {
'mysql': {
$sql_connection = "mysql://${glance_db_user}:${glance_db_password}@${db_host}/${glance_db_dbname}?read_timeout=60"
}
}
# Install and configure glance-api
class { 'glance::api':
verbose => $verbose,
debug => $debug,
bind_host => $bind_host,
auth_type => 'keystone',
auth_port => '35357',
auth_host => $keystone_host,
keystone_tenant => 'services',
keystone_user => 'glance',
keystone_password => $glance_user_password,
sql_connection => $sql_connection,
enabled => $enabled,
registry_host => $registry_host,
use_syslog => $use_syslog,
syslog_log_facility => $syslog_log_facility,
syslog_log_level => $syslog_log_level,
image_cache_max_size => $glance_image_cache_max_size,
max_retries => $max_retries,
max_pool_size => $max_pool_size,
max_overflow => $max_overflow,
idle_timeout => $idle_timeout,
}
# Install and configure glance-registry
class { 'glance::registry':
verbose => $verbose,
debug => $debug,
bind_host => $bind_host,
auth_host => $keystone_host,
auth_port => '35357',
auth_type => 'keystone',
keystone_tenant => 'services',
keystone_user => 'glance',
keystone_password => $glance_user_password,
sql_connection => $sql_connection,
enabled => $enabled,
use_syslog => $use_syslog,
syslog_log_facility => $syslog_log_facility,
syslog_log_level => $syslog_log_level,
max_retries => $max_retries,
max_pool_size => $max_pool_size,
max_overflow => $max_overflow,
idle_timeout => $idle_timeout,
}
# puppet-glance assumes rabbit_hosts is an array of [node:port, node:port]
# but we pass it as a amqp_hosts string of 'node:port, node:port' in Fuel
if !is_array($rabbit_hosts) {
$rabbit_hosts_real = split($rabbit_hosts, ',')
} else {
$rabbit_hosts_real = $rabbit_hosts
}
# Configure rabbitmq notifications
# TODO(bogdando) sync qpid support from upstream
class { 'glance::notify::rabbitmq':
rabbit_password => $rabbit_password,
rabbit_userid => $rabbit_userid,
rabbit_hosts => $rabbit_hosts_real,
rabbit_host => $rabbit_host,
rabbit_port => $rabbit_port,
rabbit_virtual_host => $rabbit_virtual_host,
rabbit_use_ssl => $rabbit_use_ssl,
rabbit_notification_exchange => $rabbit_notification_exchange,
rabbit_notification_topic => $rabbit_notification_topic,
amqp_durable_queues => $amqp_durable_queues,
}
# Configure file storage backend
if $glance_backend == "swift" {
if !defined(Package['swift']) {
include ::swift::params
package { "swift":
name => $::swift::params::package_name,
ensure =>present
}
}
Package["swift"] ~> Service['glance-api']
Package['swift'] -> Swift::Ringsync <||>
Package<| title == 'swift'|> ~> Service<| title == 'glance-api'|>
if !defined(Service['glance-api']) {
notify{ "Module ${module_name} cannot notify service glance-api on package swift update": }
}
class { "glance::backend::$glance_backend":
swift_store_user => "services:glance",
swift_store_key=> $glance_user_password,
swift_store_create_container_on_put => "True",
swift_store_auth_address => "http://${keystone_host}:5000/v2.0/"
}
} else {
class { "glance::backend::$glance_backend": }
}
}


@@ -1,58 +0,0 @@
# == Class: openstack::mongo
class openstack::mongo (
$ceilometer_database = "ceilometer",
$ceilometer_user = "ceilometer",
$ceilometer_metering_secret = undef,
$ceilometer_db_password = "ceilometer",
$ceilometer_metering_secret = "ceilometer",
$mongodb_port = 27017,
$mongodb_bind_address = ['0.0.0.0'],
$verbose = false,
$use_syslog = true,
) {
class {'::mongodb::client':
} ->
class {'::mongodb::server':
port => $mongodb_port,
verbose => $verbose,
use_syslog => $use_syslog,
bind_ip => $mongodb_bind_address,
auth => true,
} ->
mongodb::db { $ceilometer_database:
user => $ceilometer_user,
password => $ceilometer_db_password,
roles => ['readWrite', 'dbAdmin', 'dbOwner'],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
} ->
mongodb::db { 'admin':
user => 'admin',
password => $ceilometer_db_password,
roles => [
'userAdmin',
'readWrite',
'dbAdmin',
'dbAdminAnyDatabase',
'readAnyDatabase',
'readWriteAnyDatabase',
'userAdminAnyDatabase',
'clusterAdmin',
'clusterManager',
'clusterMonitor',
'hostManager',
'root',
'restore',
],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
}
}


@@ -1,82 +0,0 @@
# == Class: openstack::mongo_primary
class openstack::mongo_primary (
$ceilometer_database = "ceilometer",
$ceilometer_user = "ceilometer",
$ceilometer_metering_secret = undef,
$ceilometer_db_password = "ceilometer",
$ceilometer_metering_secret = "ceilometer",
$ceilometer_replset_members = ['mongo2', 'mongo3'],
$mongodb_bind_address = ['0.0.0.0'],
$mongodb_port = 27017,
$use_syslog = true,
$verbose = false,
) {
if size($ceilometer_replset_members) > 0 {
$replset_setup = true
$keyfile = '/etc/mongodb.key'
$replset = 'ceilometer'
} else {
$replset_setup = false
$keyfile = undef
$replset = undef
}
notify {"MongoDB params: $mongodb_bind_address" :} ->
class {'::mongodb::client':
} ->
class {'::mongodb::server':
port => $mongodb_port,
verbose => $verbose,
use_syslog => $use_syslog,
bind_ip => $mongodb_bind_address,
auth => true,
replset => $replset,
keyfile => $keyfile,
} ->
class {'::mongodb::replset':
replset_setup => $replset_setup,
replset_members => $ceilometer_replset_members,
} ->
notify {"mongodb configuring databases" :} ->
mongodb::db { $ceilometer_database:
user => $ceilometer_user,
password => $ceilometer_db_password,
roles => [ 'readWrite', 'dbAdmin', 'dbOwner' ],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
} ->
mongodb::db { 'admin':
user => 'admin',
password => $ceilometer_db_password,
roles => [
'userAdmin',
'readWrite',
'dbAdmin',
'dbAdminAnyDatabase',
'readAnyDatabase',
'readWriteAnyDatabase',
'userAdminAnyDatabase',
'clusterAdmin',
'clusterManager',
'clusterMonitor',
'hostManager',
'root',
'restore',
],
admin_username => 'admin',
admin_password => $ceilometer_db_password,
admin_database => 'admin',
} ->
notify {"mongodb primary finished": }
}


@@ -1,148 +0,0 @@
#
class openstack::swift::proxy (
$swift_user_password = 'swift_pass',
$swift_hash_suffix = 'swift_secret',
$swift_local_net_ip = $::ipaddress_eth0,
$ring_part_power = 18,
$ring_replicas = 3,
$ring_min_part_hours = 1,
$proxy_pipeline = [
'catch_errors',
'healthcheck',
'cache',
'ratelimit',
'swift3',
's3token',
'authtoken',
'keystone',
'proxy-server'],
$proxy_workers = $::processorcount,
$proxy_port = '8080',
$proxy_allow_account_management = true,
$proxy_account_autocreate = true,
$ratelimit_clock_accuracy = 1000,
$ratelimit_max_sleep_time_seconds = 60,
$ratelimit_log_sleep_time_seconds = 0,
$ratelimit_rate_buffer_seconds = 5,
$ratelimit_account_ratelimit = 0,
$package_ensure = 'present',
$controller_node_address = '10.0.0.1',
$memcached = true,
$swift_proxies = {
'127.0.0.1' => '127.0.0.1'
}
,
$primary_proxy = false,
$swift_devices = undef,
$master_swift_proxy_ip = undef,
$collect_exported = false,
$rings = ['account', 'object', 'container'],
$debug = false,
$verbose = true,
$syslog_log_level = 'WARNING',
) {
if !defined(Class['swift']) {
class { 'swift':
swift_hash_suffix => $swift_hash_suffix,
package_ensure => $package_ensure,
}
}
if $memcached and !defined(Class['memcached']) {
class { 'memcached': }
}
class { '::swift::proxy':
proxy_local_net_ip => $swift_local_net_ip,
pipeline => $proxy_pipeline,
port => $proxy_port,
workers => $proxy_workers,
allow_account_management => $proxy_allow_account_management,
account_autocreate => $proxy_account_autocreate,
package_ensure => $package_ensure,
debug => $debug,
verbose => $verbose,
syslog_log_level => $syslog_log_level,
}
# configure all of the middlewares
class { ['::swift::proxy::catch_errors', '::swift::proxy::healthcheck', '::swift::proxy::swift3',]:
}
$cache_addresses = inline_template("<%= @swift_proxies.keys.uniq.sort.collect {|ip| ip + ':11211' }.join ',' %>")
class { '::swift::proxy::cache': memcache_servers => split($cache_addresses, ',') }
class { '::swift::proxy::ratelimit':
clock_accuracy => $ratelimit_clock_accuracy,
max_sleep_time_seconds => $ratelimit_max_sleep_time_seconds,
log_sleep_time_seconds => $ratelimit_log_sleep_time_seconds,
rate_buffer_seconds => $ratelimit_rate_buffer_seconds,
account_ratelimit => $ratelimit_account_ratelimit,
}
class { '::swift::proxy::s3token':
auth_host => $controller_node_address,
auth_port => '35357',
}
class { '::swift::proxy::keystone':
operator_roles => ['admin', 'SwiftOperator'],
}
class { '::swift::proxy::authtoken':
admin_user => 'swift',
admin_tenant_name => 'services',
admin_password => $swift_user_password,
auth_host => $controller_node_address,
}
if $primary_proxy {
# collect all of the resources that are needed
# to balance the ring
if $collect_exported {
Ring_object_device <<| tag == "${::deployment_id}::${::environment}" |>>
Ring_container_device <<| tag == "${::deployment_id}::${::environment}" |>>
Ring_account_device <<| tag == "${::deployment_id}::${::environment}" |>>
}
# create the ring
class { 'swift::ringbuilder':
# the part power should be determined by assuming 100 partitions per drive
part_power => $ring_part_power,
replicas => $ring_replicas,
min_part_hours => $ring_min_part_hours,
require => Class['swift'],
before => [Class['::swift::proxy']],
}
# sets up an rsync db that can be used to sync the ring DB
class { 'swift::ringserver':
local_net_ip => $swift_local_net_ip,
}
# anchors
Anchor <| title == 'rebalance_end' |> -> Service['swift-proxy']
Anchor <| title == 'rebalance_end' |> -> Swift::Storage::Generic <| |>
} else {
validate_string($master_swift_proxy_ip)
if member($rings, 'account') and ! defined(Swift::Ringsync['account']) {
swift::ringsync { 'account': ring_server => $master_swift_proxy_ip }
}
if member($rings, 'object') and ! defined(Swift::Ringsync['object']) {
swift::ringsync { 'object': ring_server => $master_swift_proxy_ip }
}
if member($rings, 'container') and ! defined(Swift::Ringsync['container']) {
swift::ringsync { 'container': ring_server => $master_swift_proxy_ip }
}
Swift::Ringsync <| |> ~> Service["swift-proxy"]
}
# deploy a script that can be used for testing
file { '/tmp/swift_keystone_test.rb': source => 'puppet:///modules/swift/swift_keystone_test.rb' }
}
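The inline_template above simply turns the keys of the swift_proxies hash into a sorted, comma-separated memcached server list on port 11211. Evaluated by hand for a two-node hash (addresses are illustrative):

    swift_proxies = { '10.0.0.3' => 'node-2', '10.0.0.2' => 'node-1' }
    swift_proxies.keys.uniq.sort.collect { |ip| ip + ':11211' }.join ','
    # => "10.0.0.2:11211,10.0.0.3:11211"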


@@ -1,147 +0,0 @@
module Rpmvercmp
def self.debug
@debug = false unless defined? @debug
@debug
end
def self.debug=(debug)
@debug = debug
end
# check that the element is not fully integer
def self.not_integer?(s)
!Integer(s)
rescue
true
end
# convert label to epoch, version, release
def self.label_to_elements(label)
return [nil, nil, nil] unless label
label = label.split ':'
if label.length > 1
epoch = label.shift
else
epoch = nil
end
label = label.join '-'
label = label.split('-')
if label.length > 1
version = label.shift
release = label.join '-'
else
version = label.first
release = nil
end
[epoch, version, release]
end
def self.simple_checks(var1, var2)
return 0 if var1 == var2
return 1 if var1 and not var2
return -1 if not var1 and var2
0
end
# compare two blocks
# first is larger -> 1
# second is larger -> -1
# equal -> 0
def self.compare_blocks(block1, block2)
block1 = get_string block1
block2 = get_string block2
rc = simple_checks block1, block2
return rc if rc != 0
# ~ sign has the highest sorting priority
if block1.start_with? '~' and !block2.start_with? '~'
return 1
elsif !block1.start_with? '~' and block2.start_with? '~'
return -1
end
if not_integer?(block1) && not_integer?(block2)
# Both not integers:
# compare strings
block1 <=> block2
else
# One of elements is integer:
# convert both to int and compare
block1.to_i <=> block2.to_i
end
end
# compare two elements
# first is larger -> 1
# second is larger -> -1
# equal -> 0
def self.compare_elements(element1, element2)
element1 = get_string element1
element2 = get_string element2
rc = simple_checks element1, element2
return rc if rc != 0
# split both versions to elements
separators = /[\._\-+]/
blocks1 = element1.split separators
blocks2 = element2.split separators
# compare each element from first to same element from second
while blocks1.length > 0 or blocks2.length > 0
b1 = blocks1.shift
b2 = blocks2.shift
rc = compare_blocks b1, b2
puts "Blocks: #{b1} vs #{b2} = #{rc}" if debug
# return result on first non-equal match
return rc if rc != 0
end
# there is nothing left to compare: return equal
0
end
def self.get_string(value)
return '' unless value
value.to_s
end
def self.compare_fuel(label1, label2)
return 0 if label1 == label2
return 0 if !label1.include? 'fuel' and !label2.include? 'fuel'
return 1 if label1.include? 'fuel' and !label2.include? 'fuel'
return -1 if !label1.include? 'fuel' and label2.include? 'fuel'
label1 =~ /fuel([\d\.]*)/
ver1 = $1
label2 =~ /fuel([\d\.]*)/
ver2 = $1
compare_elements ver1, ver2
end
def self.compare_labels(label1, label2)
label1 = get_string label1
label2 = get_string label2
rc = simple_checks label1, label2
return rc if rc != 0
rc = compare_fuel label1, label2
puts "Fuel: #{rc}" if debug
return rc if rc != 0
elements1 = label_to_elements label1
elements2 = label_to_elements label2
while elements1.length > 0 or elements2.length > 0
e1 = elements1.shift
e2 = elements2.shift
rc = compare_elements e1, e2
puts "Elements: #{e1.inspect} vs #{e2.inspect} = #{rc}" if debug
return rc if rc != 0
end
0
end
end
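
A usage sketch of the comparator above; the sample labels are made up, and the module is assumed to be loaded:

# 1 => first label is newer, -1 => second label is newer, 0 => equal
Rpmvercmp.compare_labels '1.0.2-2', '1.0.2-10'      # => -1 (release 10 beats 2 numerically)
Rpmvercmp.compare_labels '2014.2-6.1', '2014.2-6.0' # => 1
Rpmvercmp.compare_labels '1.0', '1.0'               # => 0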

View File

@ -1,251 +0,0 @@
require 'puppet/util/package'
require 'yaml'
require File.join(File.dirname(__FILE__), 'rpmvercmp.rb')
Puppet::Type.type(:package).provide :yum, :parent => :rpm, :source => :rpm do
desc "Support via `yum`.
Using this provider's `uninstallable` feature will not remove dependent packages. To
remove dependent packages with this provider use the `purgeable` feature, but note this
feature is destructive and should be used with the utmost care."
has_feature :versionable
commands :yum => "yum", :rpm => "rpm", :python => "python"
self::YUMHELPER = File::join(File::dirname(__FILE__), "yumhelper.py")
attr_accessor :latest_info
if command('rpm')
confine :true => begin
rpm('--version')
rescue Puppet::ExecutionFailure
false
else
true
end
end
defaultfor :operatingsystem => [:fedora, :centos, :redhat]
def self.prefetch(packages)
raise Puppet::Error, "The yum provider can only be used as root" if Process.euid != 0
super
return unless packages.detect { |name, package| package.should(:ensure) == :latest }
# collect our 'latest' info
updates = {}
python(self::YUMHELPER).each_line do |l|
l.chomp!
next if l.empty?
if l[0,4] == "_pkg"
hash = nevra_to_hash(l[5..-1])
[hash[:name], "#{hash[:name]}.#{hash[:arch]}"].each do |n|
updates[n] ||= []
updates[n] << hash
end
end
end
# Add our 'latest' info to the providers.
packages.each do |name, package|
if info = updates[package[:name]]
package.provider.latest_info = info[0]
end
end
end
def pkg_list
raw_pkgs = rpm [ '-q', '-a', '--queryformat', '%{NAME}|%{VERSION}-%{RELEASE}\n' ]
pkgs = {}
raw_pkgs.split("\n").each do |l|
line = l.split '|'
name = line[0]
version = line[1]
next if !name || !version
pkgs.store name, version
end
pkgs
end
# Subtract packages in hash b from packages in hash a.
# If ignore_versions is true, only the package name matters and the version is ignored.
# @param a <Hash[String]>
# @param b <Hash[String]>
# @param ignore_versions <TrueClass,FalseClass>
def package_diff(a, b, ignore_versions = false)
result = a.dup
b.each_pair do |k, v|
if a.key? k
if a[k] == v or ignore_versions
result.delete k
end
end
end
result
end
# find package names in both a and b hashes
# values are taken from a
# @param a <Hash[String]>
# @param b <Hash[String]>
def package_updates(a, b)
common_keys = a.keys & b.keys
common_keys.inject({}) { |result, p| result.merge({p => a[p]}) }
end
def install
should = @resource.should(:ensure)
self.debug "Ensuring => #{should}"
wanted = @resource[:name]
operation = :install
yum_options = %w(-d 0 -e 0 -y)
@file_dir = '/var/lib/puppet/rollback'
from = @property_hash[:ensure]
to = should
name = @resource[:name]
Puppet.notice "Installing package #{name} from #{from} to #{to}"
case should
when true, false, Symbol
# pass
should = nil
else
# Add the package version
wanted += "-#{should}"
is = self.query
if is && Rpmvercmp.compare_labels(should, is[:ensure]) < 0
self.debug "Downgrading package #{@resource[:name]} from version #{is[:ensure]} to #{should}"
operation = :downgrade
end
end
rollback_file = File.join @file_dir, "#{name}_#{to}_#{from}.yaml"
diff = read_diff rollback_file
if diff.is_a?(Hash) && diff.key?('installed') && diff.key?('removed')
# rollback
# reverse the update process instead of the usual install
Puppet.debug "Found rollback file at #{rollback_file}"
installed = diff['installed']
removed = diff['removed']
# calculate package sets
to_update = package_updates removed, installed
to_install = package_diff removed, installed
to_remove = package_diff installed, removed, true
Puppet.debug "Install: #{to_install.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}" if to_install.any?
Puppet.debug "Remove: #{to_remove.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}" if to_remove.any?
Puppet.debug "Update: #{to_update.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}" if to_update.any?
to_install = to_install.merge to_update
yum_shell yum_options, operation, to_install, to_remove
elsif from.is_a?(String) && to.is_a?(String)
# update from one version to another
before, after = yum_with_changes yum_options, operation, wanted
diff = make_package_diff before, after
file_path = File.join @file_dir, "#{name}_#{from}_#{to}.yaml"
save_diff file_path, diff
Puppet.debug "Saving diff file to #{file_path}"
else
# just a simple install
output = yum "-d", "0", "-e", "0", "-y", operation, wanted
end
is = check_query
raise Puppet::Error, "Could not find package #{self.name}" unless is
# FIXME: Should we raise an exception even if should == :latest
# and yum updated us to a version other than @param_hash[:ensure] ?
raise Puppet::Error, "Failed to update to version #{should}, got version #{is[:ensure]} instead" if should && should != is[:ensure]
end
# run the yum shell to install and remove packages
# @param options <Array[String]>
# @param operation <String,Symbol>
# @param to_install <Hash>
# @param to_remove <Hash>
def yum_shell(options, operation, to_install, to_remove)
tmp_file = '/tmp/yum.shell'
yum_shell = ''
yum_shell += "#{operation} #{to_install.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}\n" if to_install.any?
yum_shell += "remove #{to_remove.map {|p| "#{p[0]}-#{p[1]}" }. join ' '}\n" if to_remove.any?
yum_shell += "run\n"
File.open(tmp_file, 'w') { |file| file.write yum_shell }
output = yum "--setopt", "obsoletes=0", options, 'shell', tmp_file
File.delete tmp_file
end
# package state query executed after the install to check its success
# a separate method is used so that it can be stubbed by the spec
# @return Hash
def check_query
self.query
end
# combine before and after lists into a diff
# @param before <Hash[String]>
# @param after <Hash[String]>
def make_package_diff(before, after)
installed = package_diff after, before
removed = package_diff before, after
{ 'installed' => installed, 'removed' => removed }
end
# run a yum operation and get package
# lists from before and after it
# @param options <Array[String]>
# @param operation <String,Symbol>
# @param wanted <String>
def yum_with_changes(options, operation, wanted)
before = pkg_list
yum options, operation, wanted
after = pkg_list
[ before, after ]
end
# saves diff hash into a file
# @param file_path <String>
# @param diff <Hash[String]>
def save_diff(file_path, diff)
Dir.mkdir @file_dir unless File.directory? @file_dir
File.open(file_path, 'w') { |file| file.write YAML.dump(diff) + "\n" }
end
# reads diff hash from a file
# @param file_path <String>
# @returns <Hash[String]>
def read_diff(file_path)
return unless File.readable? file_path
diff = YAML.load_file file_path
return unless diff.is_a? Hash
diff
end
# What's the latest package version available?
def latest
upd = latest_info
unless upd.nil?
# FIXME: there could be more than one update for a package
# because of multiarch
return "#{upd[:epoch]}:#{upd[:version]}-#{upd[:release]}"
else
# Yum didn't find updates, pretend the current
# version is the latest
raise Puppet::DevError, "Tried to get latest on a missing package" if properties[:ensure] == :absent
return properties[:ensure]
end
end
def update
# Install in yum can be used for update, too
self.install
end
def purge
yum "-y", :erase, @resource[:name]
end
end
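
A standalone sketch of the package_diff / package_updates semantics used by the rollback path above, in plain Ruby with made-up package hashes (not the provider itself):

before = { 'foo' => '1.0-1', 'bar' => '2.0-1', 'baz' => '3.0-1' }
after  = { 'foo' => '1.0-2', 'bar' => '2.0-1', 'quux' => '4.0-1' }
# removed: entries of `before` that are missing or changed in `after`
removed   = before.reject { |name, ver| after[name] == ver }   # => foo 1.0-1, baz 3.0-1
# installed: entries of `after` that are missing or changed in `before`
installed = after.reject { |name, ver| before[name] == ver }   # => foo 1.0-2, quux 4.0-1
# updates: names present in both sets, values taken from the first hash
updates   = removed.select { |name, _| installed.key?(name) }  # => foo 1.0-1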

View File

@ -1,85 +0,0 @@
Puppet::Type.newtype(:ring_devices) do
newparam(:name, :namevar => true) do
end
newparam(:storages) do
desc 'list of all swift storages'
validate do |value|
if value.is_a? Hash
fail(Puppet::Error, "#{value} should be a Hash and include ip address") unless value['storage_address']
else
value.each do |element|
fail(Puppet::Error, "#{element} should be a Hash and include ip address") unless element.is_a?(Hash) && element['storage_address']
end
end
end
munge do |value|
value.is_a?(Hash) ? [value] : value
end
end
autorequire(:ring_account_device) do
autos = []
catalog.resources.find_all { |r| r.is_a?(Puppet::Type.type("ring_account_device".to_sym)) }.each do |r|
autos << r
end
autos
end
autorequire(:ring_object_device) do
autos = []
catalog.resources.find_all { |r| r.is_a?(Puppet::Type.type("ring_object_device".to_sym)) }.each do |r|
autos << r
end
autos
end
autorequire(:ring_container_device) do
autos = []
catalog.resources.find_all { |r| r.is_a?(Puppet::Type.type("ring_container_device".to_sym)) }.each do |r|
autos << r
end
autos
end
# Default resources for swift ring builder
def resources
resources = []
default_storage = {
'swift_zone' => 100,
'object_port'=>6000,
'container_port'=>6001,
'account_port'=>6002,
'mountpoints'=> "1 1\n2 1",
'weight'=> 100,
'types'=>['container', 'object', 'account'],
}
self[:storages].each do |storage|
merged_storage = default_storage.merge(storage)
merged_storage['types'].collect do |type|
port = merged_storage["#{type}_port"]
options = {
:name=>"#{merged_storage['storage_address']}:#{port}",
:mountpoints=>merged_storage['mountpoints'],
:zone => merged_storage['swift_zone']
}
resources += [Puppet::Type.type("ring_#{type}_device".to_sym).new(options)]
end
end
resources
end
def eval_generate
resources
end
end
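
A plain-Ruby sketch of how the resources method above expands one storage entry into per-type device names (the address and zone override are made up):

default_storage = {
  'swift_zone' => 100, 'object_port' => 6000, 'container_port' => 6001,
  'account_port' => 6002, 'types' => ['container', 'object', 'account'],
}
storage = { 'storage_address' => '10.0.0.5', 'swift_zone' => 2 }
merged  = default_storage.merge(storage)
merged['types'].each do |type|
  puts "ring_#{type}_device: #{merged['storage_address']}:#{merged["#{type}_port"]} (zone #{merged['swift_zone']})"
end
# => ring_container_device: 10.0.0.5:6001 (zone 2), and so on for object and account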

View File

@ -1,92 +0,0 @@
# Install and configure base swift components
#
# == Parameters
# [*swift_hash_suffix*] string of text to be used
# as a salt when hashing to determine mappings in the ring.
# This file should be the same on every node in the cluster.
# [*package_ensure*] The ensure state for the swift package.
# Optional. Defaults to present.
#
# == Dependencies
#
# Class['ssh::server::install']
#
# == Authors
#
# Dan Bode dan@puppetlabs.com
#
# == Copyright
#
# Copyright 2011 Puppetlabs Inc, unless otherwise noted.
#
class swift(
$swift_hash_suffix = undef,
$package_ensure = 'present',
) {
include swift::params
if !defined(Class['ssh::server::install']) {
class{ 'ssh::server::install': }
}
Class['ssh::server::install'] -> Class['swift']
class {'rsync::server':}
if !defined(Package['swift']) {
package { 'swift':
ensure => $package_ensure,
name => $::swift::params::package_name,
}
Package['swift'] -> Swift::Ringsync <||>
}
File { owner => 'swift', group => 'swift', require => Package['swift'] }
file { '/tmp/keystone-signing-swift':
ensure => directory,
owner => 'swift',
group => 'swift',
mode => '0700',
}
file { '/var/lib/glance':
ensure => directory,
owner => 'glance',
group => 'glance',
mode => '0775',
}
file { '/etc/swift':
ensure => directory,
mode => '2770',
}
user {'swift': ensure => present}
file { '/var/lib/swift':
ensure => directory,
owner => 'swift',
}
file {'/var/cache/swift':
ensure => directory
}
file { '/etc/swift/backups':
ensure => directory,
owner => 'swift',
group => 'swift'
}
file { '/var/run/swift':
ensure => directory,
}
file { '/etc/swift/swift.conf':
ensure => present,
mode => '0660',
content => template('swift/swift.conf.erb'),
}
}

View File

@ -1,127 +0,0 @@
#
# TODO - assumes that proxy server is always a memcached server
#
# TODO - the full list of all things that can be configured is here
# https://github.com/openstack/swift/tree/master/swift/common/middleware
#
# Installs and configures the swift proxy node.
#
# [*Parameters*]
#
# [*proxy_local_net_ip*] The address that the proxy will bind to.
# Required.
# [*port*] The port to which the proxy server will bind.
# Optional. Defaults to 8080.
# [*pipeline*] The list of elements of the swift proxy pipeline.
# Currently supports healthcheck, cache, proxy-server, and
# one of the following auth_types: tempauth, swauth, keystone.
# Each of the specified elements also needs to be declared externally
# as a puppet class with the exception of proxy-server.
# Optional. Defaults to ['healthcheck', 'cache', 'tempauth', 'proxy-server']
# [*workers*] Number of threads to process requests.
# Optional. Defaults to the number of processors.
# [*allow_account_management*]
# Whether or not requests through this proxy can create and
# delete accounts. Optional. Defaults to true.
# [*account_autocreate*] Whether accounts should automatically be created.
# Has to be set to true for tempauth. Optional. Defaults to true.
# [*package_ensure*] Ensure state of the swift proxy package.
# Optional. Defaults to present.
#
# == Examples
#
# == Authors
#
# Dan Bode dan@puppetlabs.com
#
# == Copyright
#
# Copyright 2011 Puppetlabs Inc, unless otherwise noted.
#
class swift::proxy(
$proxy_local_net_ip,
$port = '8080',
$pipeline = ['healthcheck', 'cache', 'tempauth', 'proxy-server'],
$workers = $::processorcount,
$allow_account_management = true,
$account_autocreate = true,
$package_ensure = 'present',
$debug = false,
$verbose = true,
$syslog_log_level = 'WARNING',
) {
include 'swift::params'
include 'concat::setup'
validate_bool($account_autocreate)
validate_bool($allow_account_management)
validate_array($pipeline)
if(member($pipeline, 'tempauth')) {
$auth_type = 'tempauth'
} elsif(member($pipeline, 'swauth')) {
$auth_type = 'swauth'
} elsif(member($pipeline, 'keystone')) {
$auth_type = 'keystone'
} else {
warning('no auth type provided in the pipeline')
}
if(! member($pipeline, 'proxy-server')) {
warning("swift storage server ${type} must specify ${type}-server")
}
if($auth_type == 'tempauth' and ! $account_autocreate ){
fail("\$account_autocreate must be set to true when auth type is tempauth")
}
if $::osfamily == "Debian"
{
package { 'swift-plugin-s3':
ensure => present,
before=>Package['swift-proxy']
}
}
package { 'swift-proxy':
name => $::swift::params::proxy_package_name,
ensure => $package_ensure,
}
Package['swift-proxy'] -> Swift::Ringsync <||>
concat { '/etc/swift/proxy-server.conf':
owner => 'swift',
group => 'swift',
mode => '0660',
require => Package['swift-proxy'],
}
$required_classes = split(
inline_template(
"<%=
(@pipeline - ['proxy-server']).collect do |x|
'swift::proxy::' + x
end.join(',')
%>"), ',')
# you can now add your custom fragments at the user level
concat::fragment { 'swift_proxy':
target => "/etc/swift/proxy-server.conf",
content => template('swift/proxy-server.conf.erb'),
order => '00',
# require classes for each of the elements of the pipeline
# this is to ensure the user gets reasonable elements if he
# does not specify the backends for every specified element of
# the pipeline
before => Class[$required_classes],
}
service { 'swift-proxy':
name => $::swift::params::proxy_service_name,
ensure => running,
enable => true,
provider => $::swift::params::service_provider,
require => [Concat['/etc/swift/proxy-server.conf']],
subscribe => [Concat['/etc/swift/proxy-server.conf']],
}
}
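
The $required_classes template above is plain Ruby evaluated by ERB; an illustrative standalone equivalent (the pipeline value is an example):

pipeline = ['healthcheck', 'cache', 'keystone', 'proxy-server']
required_classes = (pipeline - ['proxy-server']).collect { |x| 'swift::proxy::' + x }
# => ["swift::proxy::healthcheck", "swift::proxy::cache", "swift::proxy::keystone"]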

View File

@ -1,83 +0,0 @@
# Creates the files, packages and services that are
# needed to deploy each type of storage server.
#
# == Parameters
# [*package_ensure*] The desired ensure state of the swift storage packages.
# Optional. Defaults to present.
# [*service_provider*] The provider to use for the service
#
# == Dependencies
# Requires Class[swift::storage]
# == Examples
#
# == Authors
#
# Dan Bode dan@puppetlabs.com
#
# == Copyright
#
# Copyright 2011 Puppetlabs Inc, unless otherwise noted.
define swift::storage::generic(
$package_ensure = 'present',
$service_provider = $::swift::params::service_provider
) {
include swift::params
Class['swift::storage'] -> Swift::Storage::Generic[$name]
validate_re($name, '^object|container|account$')
package { "swift-${name}":
# this is a way to dynamically build the variables to lookup
# sorry it's so ugly :(
name => inline_template("<%= scope.lookupvar('::swift::params::${name}_package_name') %>"),
ensure => $package_ensure,
} ~>
Service <| title == "swift-${name}" or title == "swift-${name}-replicator" |>
if !defined(Service["swift-${name}"]) {
notify{ "Module ${module_name} cannot notify service swift-${name} on package update": }
}
if !defined(Service["swift-${name}-replicator"]) {
notify{ "Module ${module_name} cannot notify service swift-${name}-replicator on package update": }
}
Package["swift-${name}"] -> Swift::Ringsync <||>
file { "/etc/swift/${name}-server/":
ensure => directory,
owner => 'swift',
group => 'swift',
}
service { "swift-${name}":
name => inline_template("<%= scope.lookupvar('::swift::params::${name}_service_name') %>"),
ensure => running,
enable => true,
hasstatus => true,
provider => $service_provider,
subscribe => Package["swift-${name}"],
}
if $::osfamily == "RedHat" {
service { "swift-${name}-replicator":
start => "/usr/bin/swift-init ${name}-replicator start",
ensure => running,
enable => true,
hasstatus => true,
provider => base,
subscribe => Package["swift-${name}"],
}
}
else
{
service { "swift-${name}-replicator":
name => inline_template("<%= scope.lookupvar('::swift::params::${name}_replicator_service_name') %>"),
ensure => running,
enable => true,
hasstatus => true,
provider => $service_provider,
subscribe => Package["swift-${name}"],
}
}
}

View File

@ -1,139 +0,0 @@
.PHONY: all upgrade-lrzip openstack-yaml
.DELETE_ON_ERROR: $(UPGRADE_TARBALL_PATH).lrz
.DELETE_ON_ERROR: $(BUILD_DIR)/upgrade/common-part.tar
.DELETE_ON_ERROR: $(BUILD_DIR)/upgrade/openstack-part.tar
.DELETE_ON_ERROR: $(BUILD_DIR)/upgrade/$(SAVE_UPGRADE_PIP_ART)
all: upgrade-lrzip openstack-yaml
upgrade-lrzip: UPGRADERS ?= "host-system docker openstack"
upgrade-lrzip: $(UPGRADE_TARBALL_PATH).lrz
PYTHON_VIRTUALENV_PKGS:=python-devel-2.6.6-52.el6.x86_64.rpm python-virtualenv-1.11.6-1.mira1.noarch.rpm
########################
# UPGRADE LRZIP ARTIFACT
########################
$(UPGRADE_TARBALL_PATH).lrz: \
$(BUILD_DIR)/upgrade/openstack-part.done \
$(BUILD_DIR)/upgrade/common-part.tar
mkdir -p $(@D)
rm -f $(BUILD_DIR)/upgrade/upgrade-lrzip.tar
tar Af $(BUILD_DIR)/upgrade/upgrade-lrzip.tar $(BUILD_DIR)/upgrade/openstack-part.tar
tar Af $(BUILD_DIR)/upgrade/upgrade-lrzip.tar $(BUILD_DIR)/upgrade/common-part.tar
lrzip -L2 -U -D -f $(BUILD_DIR)/upgrade/upgrade-lrzip.tar -o $@
########################
# OPENSTACK_YAML ARTIFACT
########################
openstack-yaml: $(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME)
$(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME): $(BUILD_DIR)/upgrade/$(OPENSTACK_YAML_ART_NAME)
$(ACTION.COPY)
$(BUILD_DIR)/upgrade/$(OPENSTACK_YAML_ART_NAME): $(BUILD_DIR)/repos/fuel-nailgun.done
mkdir -p $(@D)
cp $(BUILD_DIR)/repos/fuel-nailgun/nailgun/nailgun/fixtures/openstack.yaml $@
########################
# UPGRADE DEPENDENCIES
########################
$(BUILD_DIR)/upgrade/deps.done: \
$(BUILD_DIR)/repos/fuel-upgrade.done
mkdir -p $(BUILD_DIR)/upgrade/deps
virtualenv $(BUILD_DIR)/upgrade/venv
# Requires virtualenv, pip, python-dev packages
ifeq ($(USE_UPGRADE_PIP_ART_HTTP_LINK),)
echo "Using mirror pip-install approach"
$(BUILD_DIR)/upgrade/venv/bin/pip install -r $(BUILD_DIR)/repos/fuel-upgrade/requirements.txt --download $(BUILD_DIR)/upgrade/deps --no-use-wheel
else
echo "Using artifact from $(USE_UPGRADE_PIP_ART_HTTP_LINK) for pip-install"
wget -v --no-check-certificate $(USE_UPGRADE_PIP_ART_HTTP_LINK) -O $(BUILD_DIR)/upgrade/deps.tar.gz.tmp
mv $(BUILD_DIR)/upgrade/deps.tar.gz.tmp $(BUILD_DIR)/upgrade/deps.tar.gz
mkdir -p $(BUILD_DIR)/upgrade/deps/
tar xvf $(BUILD_DIR)/upgrade/deps.tar.gz --strip-components=1 -C $(BUILD_DIR)/upgrade/deps/
endif
cd $(BUILD_DIR)/repos/fuel-upgrade && $(BUILD_DIR)/upgrade/venv/bin/python setup.py sdist --dist-dir $(BUILD_DIR)/upgrade/deps
$(ACTION.TOUCH)
# FIXME: (skulanov)
# since we don't have python-virtualenv on our release mirror
# and are not going to publish it over the updates channel
# we need to download and install packages manually
$(addprefix $(BUILD_DIR)/upgrade/,$(PYTHON_VIRTUALENV_PKGS)):
@mkdir -p $(@D)
wget -nv -O $@.tmp http://mirror.fuel-infra.org/fwm/6.1/centos/os/x86_64/Packages/$(@F)
mv $@.tmp $@
# Save pip artifact, if needed
$(BUILD_DIR)/upgrade/$(SAVE_UPGRADE_PIP_ART): $(BUILD_DIR)/upgrade/deps.done
mkdir -p $(@D)
rm -f $@
tar czf $@ -C $(BUILD_DIR)/upgrade deps
$(ARTS_DIR)/$(SAVE_UPGRADE_PIP_ART): $(BUILD_DIR)/upgrade/$(SAVE_UPGRADE_PIP_ART)
$(ACTION.COPY)
########################
# COMMON PART
########################
$(BUILD_DIR)/upgrade/common-part.tar: \
$(ARTS_DIR)/$(VERSION_YAML_ART_NAME) \
$(BUILD_DIR)/upgrade/deps.done \
$(addprefix $(BUILD_DIR)/upgrade/,$(PYTHON_VIRTUALENV_PKGS))
mkdir -p $(@D)
rm -f $@
tar rf $@ -C $(BUILD_DIR)/upgrade --xform s:^:upgrade/: $(PYTHON_VIRTUALENV_PKGS)
tar rf $@ -C $(BUILD_DIR)/upgrade --xform s:^:upgrade/: deps
sed 's/{{UPGRADERS}}/${UPGRADERS}/g' $(SOURCE_DIR)/upgrade/upgrade_template.sh > $(BUILD_DIR)/upgrade/upgrade.sh
tar rf $@ --mode=755 -C $(BUILD_DIR)/upgrade upgrade.sh
tar rf $@ --mode=755 -C $(ARTS_DIR) --xform s:^:upgrade/config/: $(VERSION_YAML_ART_NAME)
ifneq ($(SAVE_UPGRADE_PIP_ART),)
$(BUILD_DIR)/upgrade/common-part.tar: $(ARTS_DIR)/$(SAVE_UPGRADE_PIP_ART)
endif
########################
# OPENSTACK PART
########################
$(BUILD_DIR)/upgrade/openstack_version: $(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME)
python -c "import yaml; print filter(lambda r: r['fields'].get('name'), yaml.load(open('$(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME)')))[0]['fields']['version']" > $@
$(BUILD_DIR)/upgrade/openstack-part.done: CENTOS_REPO_ART=$(CENTOS_REPO_ART_NAME)
$(BUILD_DIR)/upgrade/openstack-part.done: CENTOS_REPO_ART_TOPDIR=centos-repo
$(BUILD_DIR)/upgrade/openstack-part.done: UBUNTU_REPO_ART=$(UBUNTU_REPO_ART_NAME)
$(BUILD_DIR)/upgrade/openstack-part.done: UBUNTU_REPO_ART_TOPDIR=ubuntu-repo
$(BUILD_DIR)/upgrade/openstack-part.done: $(ARTS_DIR)/$(CENTOS_REPO_ART_NAME)
$(BUILD_DIR)/upgrade/openstack-part.done: $(ARTS_DIR)/$(UBUNTU_REPO_ART_NAME)
$(BUILD_DIR)/upgrade/openstack-part.done: BASE=$(BUILD_DIR)/upgrade/openstack-part
$(BUILD_DIR)/upgrade/openstack-part.done: OPENSTACK_VERSION=$(shell cat $(BUILD_DIR)/upgrade/openstack_version)
$(BUILD_DIR)/upgrade/openstack-part.done: CENTOS_BASE=$(BASE)/upgrade/repos/$(OPENSTACK_VERSION)/centos/x86_64
$(BUILD_DIR)/upgrade/openstack-part.done: UBUNTU_BASE=$(BASE)/upgrade/repos/$(OPENSTACK_VERSION)/ubuntu/x86_64
$(BUILD_DIR)/upgrade/openstack-part.done: RELEASES_BASE=$(BASE)/upgrade/releases
$(BUILD_DIR)/upgrade/openstack-part.done: RELEASE_VERSIONS_BASE=$(BASE)/upgrade/release_versions
$(BUILD_DIR)/upgrade/openstack-part.done: \
$(BUILD_DIR)/upgrade/openstack_version \
$(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME) \
$(ARTS_DIR)/$(VERSION_YAML_ART_NAME)
rm -f $@
mkdir -p $(@D)
# CENTOS REPO
mkdir -p $(CENTOS_BASE)
tar xf $(ARTS_DIR)/$(CENTOS_REPO_ART) -C $(CENTOS_BASE) --xform s:^$(CENTOS_REPO_ART_TOPDIR)/::
# UBUNTU REPO
mkdir -p $(UBUNTU_BASE)
tar xf $(ARTS_DIR)/$(UBUNTU_REPO_ART) -C $(UBUNTU_BASE) --xform s:^$(UBUNTU_REPO_ART_TOPDIR)/::
# OPENSTACK-YAML
mkdir -p $(RELEASES_BASE)
cp $(ARTS_DIR)/$(OPENSTACK_YAML_ART_NAME) $(RELEASES_BASE)/$(OPENSTACK_VERSION).yaml
# VERSION-YAML
mkdir -p $(RELEASE_VERSIONS_BASE)
cp $(ARTS_DIR)/$(VERSION_YAML_ART_NAME) $(RELEASE_VERSIONS_BASE)/$(OPENSTACK_VERSION).yaml
# This is for backward compatibility with the upgrade script.
# It tries to figure out whether a particular update bundle is differential or not.
echo "diff_releases: {}" > $(RELEASES_BASE)/metadata.yaml
# ARCHIVING
tar rf $(BUILD_DIR)/upgrade/openstack-part.tar -C $(BASE) .
$(ACTION.TOUCH)
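
The openstack_version recipe above extracts the release version from openstack.yaml with a Python one-liner; a rough Ruby equivalent, assuming the same fixture layout (a list of records carrying a 'fields' hash), would be:

require 'yaml'
records = YAML.load_file('openstack.yaml')   # path is illustrative
release = records.find { |r| r['fields'] && r['fields']['name'] }
puts release['fields']['version']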

View File

@ -1,116 +0,0 @@
#!/bin/bash
SCRIPT_PATH=$(dirname $(readlink -e $0))
UPGRADE_PATH=$SCRIPT_PATH/upgrade
VIRTUALENV_PATH=$UPGRADE_PATH/.fuel-upgrade-venv
UPGRADERS=${UPGRADERS:-{{UPGRADERS}}}
LOCK_FILE=/var/lock/fuel_upgrade.lock
function error {
local message="$1"
local code="${2:-1}"
echo "${message}"
exit "${code}"
}
function install_python-virtualenv ()
{
local pkgs="python-devel-2.6.6-52.el6.x86_64.rpm python-virtualenv-1.11.6-1.mira1.noarch.rpm"
for pkg in $pkgs; do
rpm -i "${UPGRADE_PATH}/${pkg}"
done
}
function prepare_virtualenv {
# FIXME: (skulanov)
# since we don't have python-virtualenv on our release mirror
# and are not going to publish it over the updates channel
# we need to download and install packages manually
if ! which virtualenv >/dev/null; then
install_python-virtualenv || error "Failed to install python-virtualenv"
fi
rm -rf $VIRTUALENV_PATH
virtualenv $VIRTUALENV_PATH
$VIRTUALENV_PATH/bin/pip install fuel_upgrade --no-index --find-links file://$UPGRADE_PATH/deps || error "Failed to install fuel_upgrade script"
}
function run_upgrade {
# prepare virtualenv for fuel_upgrade script
prepare_virtualenv
local args=()
local kwargs=("--src=$UPGRADE_PATH")
while [ -n "$1" ]; do
if [ $1 == \-\-password ]; then
kwargs=("${kwargs[@]}" "$1=$2"); shift
elif [[ $1 == \-* ]]; then
kwargs=("${kwargs[@]}" "$1")
else
args=("${args[@]}" "$1")
fi
shift
done
[ -z "${args[0]}" ] && args=("${UPGRADERS[@]}")
# run fuel_upgrade script
$VIRTUALENV_PATH/bin/python "$VIRTUALENV_PATH/bin/fuel-upgrade" "${kwargs[@]}" ${args[@]} || \
error "Upgrade failed" $?
}
function switch_to_version {
version=$1
version_path=/etc/fuel/$version/version.yaml
if [ ! -f $version_path ]; then
error "Version ${version} not found"
fi
# Replace symlink to current version
ln -sf $version_path /etc/fuel/version.yaml
# Replace symlink to supervisor scripts
ln -nsf /etc/supervisord.d/$version /etc/supervisord.d/current
# Stop all supervisor services
supervisorctl stop all &
# And at the same time stop all docker containers
docker stop -t=4 $(docker ps -q)
# Restart supervisor
service supervisord restart
exit
}
function show_version {
cat $UPGRADE_PATH/config/version.yaml
exit
}
function upgrade {
(flock -n 9 || error "Upgrade is already running. Lock file: ${LOCK_FILE}"
run_upgrade "$@"
) 9> $LOCK_FILE
}
case "$1" in
--switch-to-version)
case "$2" in
"") error '--switch-to-version requires parameter' ;;
*) switch_to_version $2 ; exit ;;
esac ;;
--version) show_version ; exit ;;
*) upgrade "$@" ; exit ;;
esac
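
For reference, the argument splitting in run_upgrade above separates option-like tokens from positional upgrader names, keeping the value that follows --password; an illustrative Ruby rendering of the same rule (the sample input and source path are made up):

argv = ['--no-rollback', '--password', 'secret', 'docker', 'openstack']
args, kwargs = [], ['--src=/var/upgrade']
until argv.empty?
  tok = argv.shift
  if tok == '--password'
    kwargs << "#{tok}=#{argv.shift}"
  elsif tok.start_with?('-')
    kwargs << tok
  else
    args << tok
  end
end
# kwargs => ["--src=/var/upgrade", "--no-rollback", "--password=secret"]
# args   => ["docker", "openstack"]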