Merge with develop branch (e26de7111c)

This commit is contained in:
Vladimir Kuklin 2013-07-30 20:33:03 +04:00
commit 6a6c825bf1
47 changed files with 499 additions and 189 deletions

20
.gitignore vendored
View File

@ -1,9 +1,25 @@
# IDEs
.idea
.settings
.project
# ISO
iso/local_mirror
iso/build
# Python
*.pyc
# Doc
_build
metadata.json
.project
# Editors
*.swp
*~
# Vagrant
.vagrant
Vagrantfile
metadata.json
Gemfile.lock

View File

@ -20,7 +20,7 @@ common:
pool_start: 10.49.54.225
pool_end: 10.49.54.239
segment_range: 900:999
tenant_network_type: vlan
tenant_network_type: gre
network_manager: nova.network.manager.FlatDHCPManager
auto_assign_floating_ip: true
quantum_netnode_on_cnt: true

View File

@ -120,6 +120,8 @@ ruby-devel
system-config-firewall-base
wget
yum
yum-plugin-versionlock
yum-utils
# COBBLER EMBEDDED SNIPPET: 'puppet_install_if_enabled'
# LISTS puppet PACKAGE IF puppet_auto_setup VARIABLE IS SET TO 1
@ -136,6 +138,7 @@ $SNIPPET('mcollective_install_if_enabled')
yum versionlock puppet
yum versionlock kernel
yum versionlock iproute2
yum-config-manager --disableplugin=fastestmirror --save &>/dev/null
echo -e "modprobe nf_conntrack_ipv4\nmodprobe nf_conntrack_ipv6" >> /etc/rc.modules
chmod +x /etc/rc.modules

View File

@ -97,6 +97,8 @@ wget
crontabs
cronie
ruby-augeas
yum-plugin-versionlock
yum-utils
# COBBLER EMBEDDED SNIPPET: 'puppet_install_if_enabled'
# LISTS puppet PACKAGE IF puppet_auto_setup VARIABLE IS SET TO 1
$SNIPPET('puppet_install_if_enabled')
@ -112,6 +114,7 @@ $SNIPPET('mcollective_install_if_enabled')
yum versionlock puppet
yum versionlock kernel
yum versionlock iproute2
yum-config-manager --disableplugin=fastestmirror --save &>/dev/null
echo -e "modprobe nf_conntrack_ipv4\nmodprobe nf_conntrack_ipv6" >> /etc/rc.modules
chmod +x /etc/rc.modules

View File

@ -0,0 +1,3 @@
# Raising open file limit for OpenStack services
* soft nofile 102400
* hard nofile 112640

View File

@ -18,9 +18,9 @@ define corosync::cleanup () {
Cs_resource <| name == $name |> ~> Exec["crm resource cleanup $name"]
##FIXME: we need to create a better way to work around the crm commit <-> cleanup race condition than a simple sleep
#Workaround for hostname bugs with FQDN vs short hostname
exec { "crm resource cleanup $name":
command => "bash -c \"(sleep 5 && crm_resource --resource $name --cleanup --node `hostname -s`) || :\"",
command => "bash -c \"(sleep 5 && crm_resource --resource $name --cleanup --node `uname -n`) || :\"",
path => ['/bin', '/usr/bin', '/sbin', '/usr/sbin'],
returns => [0,""],
refreshonly => true,

View File

@ -88,8 +88,18 @@ class corosync (
# this value is provided. This is emulating a required variable as defined in
# parameterized class.
file { 'limitsconf':
ensure => present,
path => '/etc/security/limits.conf',
source => 'puppet:///modules/corosync/limits.conf',
replace => true,
owner => '0',
group => '0',
mode => '0644',
before => Service["corosync"],
}
# Using the Puppet infrastructure's ca as the authkey, this means any node in
# Puppet can join the cluster. Totally not ideal, going to come up with
# something better.

View File

@ -9,7 +9,7 @@ describe Puppet::Type.type(:cs_colocation).provider(:crm) do
it "should create colocation with corresponding members" do
resource[:primitives] = ["p_1", "p_2"]
resource[:score] = "inf"
provider.stubs(:crm)
provider.class.stubs(:exec_withenv).returns(0)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)

View File

@ -8,7 +8,7 @@ describe Puppet::Type.type(:cs_group).provider(:crm) do
describe "#create" do
it "should create group with corresponding members" do
resource[:primitives] = ["p_1", "p_2"]
provider.stubs(:crm)
provider.class.stubs(:exec_withenv).returns(0)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)

View File

@ -6,13 +6,16 @@ describe Puppet::Type.type(:cs_location).provider(:crm) do
let(:provider) { resource.provider }
describe "#create" do
before(:each) do
provider.class.stubs(:exec_withenv).returns(0)
end
it "should create location with corresponding members" do
resource[:primitive] = "p_1"
resource[:rules] = [
{:score=> "inf",:expressions => [{:attribute=>"pingd",:operation=>"defined"}]}
]
provider.stubs(:crm)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
@ -27,7 +30,6 @@ describe Puppet::Type.type(:cs_location).provider(:crm) do
{:score=> "inf",:date_expressions => [{:date_spec=>{:hours=>"10", :weeks=>"5"}, :operation=>"date_spec", :start=>"", :end=>""}]}
]
provider.stubs(:crm)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
@ -42,7 +44,6 @@ describe Puppet::Type.type(:cs_location).provider(:crm) do
{:score=> "inf",:date_expressions => [{:operation=>"lt", :end=>"20131212",:start=>""}]}
]
provider.stubs(:crm)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
@ -57,7 +58,6 @@ describe Puppet::Type.type(:cs_location).provider(:crm) do
{:score=> "inf",:date_expressions => [{:operation=>"gt", :end=>"",:start=>"20121212"}]}
]
provider.stubs(:crm)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)
@ -73,7 +73,6 @@ describe Puppet::Type.type(:cs_location).provider(:crm) do
{:score=> "inf",:date_expressions => [{:operation=>"in_range", :end=>"",:start=>"20121212", :duration=>{:weeks=>"5"}}]}
]
provider.stubs(:crm)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)

View File

@ -10,7 +10,7 @@ describe Puppet::Type.type(:cs_order).provider(:crm) do
resource[:first] = "p_1"
resource[:second] = "p_2"
resource[:score] = "inf"
provider.stubs(:crm)
provider.class.stubs(:exec_withenv).returns(0)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)

View File

@ -6,7 +6,11 @@ describe Puppet::Type.type(:cs_property).provider(:crm) do
let(:provider) { resource.provider }
describe "#create" do
it "should create property with corresponding value" do
before(:each) do
provider.class.stubs(:exec_withenv).returns(0)
end
xit "should create property with corresponding value" do
resource[:value]= "myvalue"
provider.expects(:crm).with('configure', 'property', '$id="cib-bootstrap-options"', "myproperty=myvalue")
provider.create

View File

@ -6,13 +6,16 @@ describe Puppet::Type.type(:cs_resource).provider(:crm) do
let(:provider) { resource.provider }
describe "#create" do
before(:each) do
provider.class.stubs(:exec_withenv).returns(0)
end
it "should create resource with corresponding members" do
provider.class.stubs(:prefetch)
resource[:primitive_type] = "Dummy"
resource[:provided_by] = "pacemaker"
resource[:primitive_class] = "ocf"
resource[:operations] = {"monitor"=>{"interval"=>"20"}}
provider.stubs(:crm)
tmpfile = StringIO.new()
Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile)
tmpfile.stubs(:path)

View File

@ -37,7 +37,7 @@ describe Puppet::Type.type(:cs_colocation) do
end
it "should validate the score values" do
["fadsfasdf",Complex(1,1)].each do |value|
['fadsfasdf', nil].each do |value|
expect { subject.new(
:name => "mock_colocation",
:primitives => ['foo','bar'],

View File

@ -37,7 +37,7 @@ describe Puppet::Type.type(:cs_order) do
end
it "should validate the score values" do
["fadsfasdf",Complex(1,1)].each do |value|
['fadsfasdf', '10a', nil].each do |value|
expect { subject.new(
:name => "mock_colocation",
:primitives => ['foo','bar'],

View File

@ -282,20 +282,24 @@ case "$mode" in
cd $basedir
echo $echo_n "Starting MySQL"
if test -x $bindir/mysqld_safe
then
# Give extra arguments to mysqld with the my.cnf file. This script
# may be overwritten at next upgrade.
$bindir/mysqld_safe --datadir="$datadir" --pid-file="$mysqld_pid_file_path" $other_args >/dev/null 2>&1 &
wait_for_pid created "$!" "$mysqld_pid_file_path"; return_value=$?
if [ -f /tmp/wsrep-init-file ]; then
if test -x $bindir/mysqld_safe
then
# Give extra arguments to mysqld with the my.cnf file. This script
# may be overwritten at next upgrade.
$bindir/mysqld_safe --datadir="$datadir" --pid-file="$mysqld_pid_file_path" $other_args >/dev/null 2>&1 &
wait_for_pid created "$!" "$mysqld_pid_file_path"; return_value=$?
# Make lock for RedHat / SuSE
if test -w "$lockdir"
then
touch "$lock_file_path"
fi
# Make lock for RedHat / SuSE
if test -w "$lockdir"
then
touch "$lock_file_path"
fi
exit $return_value
exit $return_value
else
log_failure_msg "Couldn't find MySQL server ($bindir/mysqld_safe)"
fi
else
if [ "$(crm_attribute -t crm_config --name mysqlprimaryinit --query 2> /dev/null | awk -F\= '{print $4}')" = 'done' ] ; then
$bindir/mysqld_safe --datadir="$datadir" --pid-file="$mysqld_pid_file_path" $other_args >/dev/null 2>&1 &

View File

@ -82,6 +82,10 @@ class galera (
ensure => present,
}
package { 'bc':
ensure => present,
}
package { 'perl':
ensure => present,
before => Package['MySQL-client']

View File

@ -0,0 +1,3 @@
--format doc
--colour
--backtrace

View File

@ -31,12 +31,24 @@ class mysql::params {
$basedir = '/usr'
$datadir = '/var/lib/mysql'
$service_name = 'mysql'
$client_package_name = 'MySQL-client'
$client_version = '5.5.28-6'
$server_package_name = 'MySQL-server'
$server_version = '5.5.28-6'
$shared_package_name = 'MySQL-shared'
$shared_version = '5.5.28_wsrep_23.7'
case $::operatingsystem {
'RedHat': {
$client_package_name = 'mysql'
$client_version = '5.1.69-1'
$server_package_name = 'mysql-server'
$server_version = '5.1.69-1'
$shared_package_name = 'mysql-libs'
$shared_version = '5.1.69-1'
}
default: {
$client_package_name = 'MySQL-client'
$client_version = '5.5.28_wsrep_23.7'
$server_package_name = 'MySQL-server'
$server_version = '5.5.28_wsrep_23.7'
$shared_package_name = 'MySQL-shared'
$shared_version = '5.5.28_wsrep_23.7'
}
}
$socket = '/var/lib/mysql/mysql.sock'
$pidfile = '/var/run/mysqld/mysqld.pid'
$config_file = '/etc/my.cnf'

View File

@ -0,0 +1,24 @@
require File.expand_path('../../spec_helper', __FILE__)
describe command('mysql --user=myuser --password=mypassword --host=localhost -e "show grants"') do
it { should return_exit_status 0 }
it { should return_stdout /GRANT SELECT, RELOAD, LOCK TABLES ON *.* TO 'myuser'@'localhost'/ }
end
describe file('/usr/local/sbin/mysqlbackup.sh') do
it { should be_file }
it { should be_mode 700 }
it { should be_owned_by 'root' }
it { should be_grouped_into 'root' }
end
describe file('/tmp/backups') do
it { should be_directory }
it { should be_mode 700 }
it { should be_owned_by 'root' }
it { should be_grouped_into 'root' }
end
describe cron do
it { should have_entry('5 23 * * * /usr/local/sbin/mysqlbackup.sh').with_user('root') }
end

View File

@ -0,0 +1,11 @@
require File.expand_path('../../spec_helper', __FILE__)
$mysql_client_package = case attr[:osfamily]
when 'Debian' then 'mysql-client'
when 'RedHat' then 'MySQL-client'
else 'mysql-client'
end
describe package($mysql_client_package) do
it { should be_installed }
end

View File

@ -0,0 +1,11 @@
require File.expand_path('../../spec_helper', __FILE__)
$mysql_java_package = case attr[:osfamily]
when 'Debian' then 'libmysql-java'
when 'RedHat' then 'mysql-connector-java'
else 'mysql-connector-java'
end
describe package($mysql_java_package) do
it { should be_installed }
end

View File

@ -0,0 +1,11 @@
require File.expand_path('../../spec_helper', __FILE__)
$mysql_python_package = case attr[:osfamily]
when 'Debian' then 'python-mysqldb'
when 'RedHat' then 'MySQL-python'
else 'mysql-python'
end
describe package($mysql_python_package) do
it { should be_installed }
end

View File

@ -0,0 +1,5 @@
require File.expand_path('../../spec_helper', __FILE__)
describe command('ruby -e "require \'rubygems\'; require \'mysql\';"') do
it { should return_exit_status 0 }
end

View File

@ -0,0 +1,65 @@
require File.expand_path('../../spec_helper', __FILE__)
$mysql_server_package = case attr[:osfamily]
when 'Debian' then 'mysql-server'
when 'RedHat' then 'MySQL-server'
else 'mysql-server'
end
$mysql_server_service = case attr[:osfamily]
when 'Debian' then 'mysql'
when 'RedHat' then 'mysql'
else 'mysql'
end
describe package($mysql_server_package) do
it { should be_installed }
end
describe service($mysql_server_service) do
it { should be_running }
it { should be_enabled }
end
describe file('/root/.my.cnf') do
it { should be_file }
it { should contain '[client]' }
it { should contain 'user=root' }
it { should contain 'host=localhost' }
it { should contain 'password=password' }
end
describe command('mysql -B -e "show create database redmine_db"') do
it { should return_exit_status 0 }
it { should return_stdout /CHARACTER SET utf8/ }
end
describe command('mysql -B -e "show create database other_db"') do
it { should return_exit_status 0 }
it { should return_stdout /CHARACTER SET utf8/ }
end
describe command('mysql -B -e "show create database old_db"') do
it { should return_exit_status 0 }
it { should return_stdout /CHARACTER SET latin1/ }
end
describe command('mysql --user=dan --password=blah --host=localhost -e "show grants"') do
it { should return_exit_status 0 }
it { should return_stdout /GRANT ALL PRIVILEGES ON `other_db`.* TO 'dan'@'localhost' WITH GRANT OPTION/ }
end
describe command('mysql --user=redmine --password=redmine --host=localhost -e "show grants"') do
it { should return_exit_status 0 }
it { should return_stdout /GRANT ALL PRIVILEGES ON `redmine_db`.* TO 'redmine'@'localhost' WITH GRANT OPTION/ }
end
describe command('mysql -B -e "show create database test"') do
it { should_not return_exit_status 0 }
end
describe command('mysql -e "show grants for \'\'@\'localhost\'"') do
it { should_not return_exit_status 0 }
it { should_not return_stdout /GRANT USAGE ON/ }
end

View File

@ -0,0 +1,34 @@
require 'rubygems'
require 'serverspec'
require 'pathname'
require 'facter'
require 'puppet'
include Serverspec::Helper::Exec
include Serverspec::Helper::DetectOS
Puppet.parse_config
if Puppet[:libdir] && !Facter.search_path.include?(Puppet[:libdir])
Facter.search(Puppet[:libdir])
end
facts = {}
Facter.list.each do |fact|
facts[fact] = Facter.value(fact) || ''
end
Facter.list.map { |fact| [fact, Facter.value(fact) || ''].flatten }
RSpec.configure do |c|
if ENV['ASK_SUDO_PASSWORD']
require 'highline/import'
c.sudo_password = ask("Enter sudo password: ") { |q| q.echo = false }
else
c.sudo_password = ENV['SUDO_PASSWORD']
end
attr_set facts
c.before :all do
ENV['LANG'] = 'C'
end
end

View File

@ -1,6 +0,0 @@
--format
s
--colour
--loadby
mtime
--backtrace

View File

@ -0,0 +1,71 @@
require 'find'
TESTS_DIR = File.dirname(File.expand_path(__FILE__))
SPEC_DIR = File.expand_path(TESTS_DIR + '/../spec/integration')
if ENV['puppet_debug']
PUPPET_OPTIONS = "--detailed-exitcodes --verbose --debug --trace --evaltrace"
else
PUPPET_OPTIONS = "--detailed-exitcodes"
end
RSPEC_OPTIONS = "--color -f doc"
def puppet(manifest_file)
fail "No such manifest '#{manifest_file}'!" unless File.exist?(manifest_file)
sh "puppet apply #{PUPPET_OPTIONS} '#{manifest_file}'" do |ok, res|
fail "Apply of manifest '#{manifest_file}' failed with exit code #{res.exitstatus}!" unless [0,2].include?(res.exitstatus)
end
end
def rspec(test_name)
rspec_file = "#{SPEC_DIR}/default/#{test_name}_spec.rb"
rspec_file = "#{SPEC_DIR}/#{test_name}_spec.rb" unless File.exists?(rspec_file)
if File.exists?(rspec_file)
Dir.chdir(SPEC_DIR) || fail("Can't cd to #{SPEC_DIR}!")
sh "rspec #{RSPEC_OPTIONS} '#{rspec_file}'" do |ok, res|
fail("Test #{test_name} failed with exit code #{res.exitstatus}!") unless ok
end
else
puts "Spec file for test '#{test_name}' doesn't exist! Skipping test phase."
end
end
Dir.chdir(TESTS_DIR) || exit(1)
all_tasks = []
Find.find('.') do |path|
next unless File.file?(path)
next unless path.end_with?('.pp')
path.sub!('./','')
test_name = path.chomp('.pp')
namespace test_name do
task :run => [ :apply, :test ] do
puts "#{test_name} run ends"
end
task :apply do
puppet(path)
puts "#{test_name} have been applied!"
end
task :test do
rspec(test_name)
puts "#{test_name} have been tested!"
end
desc "#{test_name} integration test"
end
task test_name do
Rake::Task["#{test_name}:apply"].invoke
Rake::Task["#{test_name}:test"].invoke
end
all_tasks.push(test_name)
end
desc "Run all tests"
task :all do
all_tasks.each do |test_name|
pwd = Dir.pwd
Rake::Task["#{test_name}:apply"].invoke
Rake::Task["#{test_name}:test"].invoke
Dir.chdir(pwd)
end
end
task :default => [:all]

View File

@ -1,12 +0,0 @@
class { 'mysql::server':
config_hash => {'root_password' => 'password'}
}
database{ ['test1', 'test2', 'test3']:
ensure => present,
charset => 'utf8',
require => Class['mysql::server'],
}
database{ 'test4':
ensure => present,
charset => 'latin1',
}

View File

@ -1,3 +0,0 @@
database_grant{'test1@localhost/redmine':
privileges => [update],
}

View File

@ -1,23 +0,0 @@
$mysql_root_pw = 'password'
class { 'mysql::server':
config_hash => {
root_password => 'password',
}
}
database_user{ 'redmine@localhost':
ensure => present,
password_hash => mysql_password('redmine'),
require => Class['mysql::server'],
}
database_user{ 'dan@localhost':
ensure => present,
password_hash => mysql_password('blah')
}
database_user{ 'dan@%':
ensure => present,
password_hash => mysql_password('blah'),
}

View File

@ -1,3 +1,50 @@
class { 'openstack::mirantis_repos': }
class { 'mysql::server':
config_hash => { 'root_password' => 'password', },
config_hash => {
'root_password' => 'password',
}
}
class { 'mysql::server::account_security': }
database{ ['redmine_db', 'other_db']:
ensure => present,
charset => 'utf8',
}
database{ 'old_db':
ensure => present,
charset => 'latin1',
}
database_grant{'redmine@localhost/redmine_db':
privileges => ['all'],
}
database_grant{'dan@localhost/other_db':
privileges => ['all'],
}
database_user{ 'redmine@localhost':
ensure => present,
password_hash => mysql_password('redmine'),
require => Class['mysql::server'],
}
database_user{ 'dan@localhost':
ensure => present,
password_hash => mysql_password('blah'),
require => Class['mysql::server'],
}
database_user{ 'dan@%':
ensure => present,
password_hash => mysql_password('blah'),
require => Class['mysql::server'],
}
Class['openstack::mirantis_repos'] -> Class['mysql::server']
Class['mysql::server'] -> Database <| |>
Class['mysql::server'] -> Database_grant <| |>
Class['mysql::server'] -> Database_user <| |>

View File

@ -1,4 +0,0 @@
class { 'mysql::server':
config_hash => { 'root_password' => 'password', },
}
class { 'mysql::server::account_security': }

View File

@ -210,7 +210,7 @@ $vlan_start = 300
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'vlan'
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address

View File

@ -240,7 +240,7 @@ $vlan_start = 300
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'vlan'
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address

View File

@ -195,7 +195,7 @@ $vlan_start = 300
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'vlan'
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address

View File

@ -164,7 +164,7 @@ $vlan_start = 300
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'vlan'
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address

View File

@ -1001,6 +1001,10 @@ mysql_start() {
mysql_extra_params="--skip-slave-start"
fi
if [ -f /tmp/wsrep-init-file ] ; then
killall mysqld mysqld_safe
sleep 15
killall -s KILL mysqld mysqld_safe
sleep 2
if [ "$(crm_attribute -t crm_config --name mysqlprimaryinit --query 2> /dev/null | awk -F\= '{print $4}')" = 'done' ] ; then
${OCF_RESKEY_binary} \
--pid-file=$OCF_RESKEY_pid \

View File

@ -300,7 +300,7 @@ quantum_dhcp_agent_start() {
sleep 1
done
sleep 7 ; q-agent-cleanup.py --agent=dhcp --reschedule --remove-dead 2>&1 >> /var/log/quantum/rescheduling.log
sleep 13 ; q-agent-cleanup.py --agent=dhcp --reschedule --remove-dead 2>&1 >> /var/log/quantum/rescheduling.log
ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) started"
return $OCF_SUCCESS

View File

@ -44,7 +44,8 @@ OCF_RESKEY_username_default="quantum"
OCF_RESKEY_password_default="quantum_pass"
OCF_RESKEY_tenant_default="services"
OCF_RESKEY_external_bridge_default="br-ex"
OCF_RESKEY_debug_default=false
OCF_RESKEY_debug_syslog=false
: ${OCF_RESKEY_os_auth_url=${OCF_RESKEY_os_auth_url_default}}
: ${OCF_RESKEY_username=${OCF_RESKEY_username_default}}
@ -56,7 +57,8 @@ OCF_RESKEY_external_bridge_default="br-ex"
: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
: ${OCF_RESKEY_external_bridge=${OCF_RESKEY_external_bridge_default}}
: ${OCF_RESKEY_debug=${OCF_RESKEY_debug_default}}
: ${OCF_RESKEY_syslog=${OCF_RESKEY_syslog_default}}
#######################################################################
@ -182,7 +184,21 @@ External bridge for l3-agent
<content type="string" />
</parameter>
<parameter name="debug" unique="0" required="0">
<longdesc lang="en">
Enable debug logging
</longdesc>
<shortdesc lang="en">Enable debug logging</shortdesc>
<content type="boolean" default="false"/>
</parameter>
<parameter name="syslog" unique="0" required="0">
<longdesc lang="en">
Enable logging to syslog
</longdesc>
<shortdesc lang="en">Enable logging to syslog</shortdesc>
<content type="boolean" default="false"/>
</parameter>
<parameter name="additional_parameters" unique="0" required="0">
<longdesc lang="en">
@ -248,13 +264,13 @@ quantum_l3_agent_status() {
if [ $? -eq 0 ]
then
ocf_log warn "OpenStack OVS Server (quantum-l3-agent) was run, but no pid file found."
ocf_log warn "Will use $pid as PID of process (quantum-l3-agent)"
ocf_log warn "Writing $pid into $OCF_RESKEY_pid"
echo $pid > $OCF_RESKEY_pid
else
return $OCF_NOT_RUNNING
fi
ocf_log warn "OpenStack quantum-l3-agent was run, but no pid file found."
ocf_log warn "Will use $pid as PID of process (quantum-l3-agent)"
ocf_log warn "Writing $pid into $OCF_RESKEY_pid"
echo $pid > $OCF_RESKEY_pid
else
return $OCF_NOT_RUNNING
fi
else
pid=`cat $OCF_RESKEY_pid`
fi
@ -264,7 +280,7 @@ quantum_l3_agent_status() {
if [ $rc -eq 0 ]; then
return $OCF_SUCCESS
else
ocf_log info "Old PID file found, but OpenStack OVS Server (quantum-l3-agent) is not running"
ocf_log info "Old PID file found, but OpenStack quantum-l3-agent is not running"
return $OCF_NOT_RUNNING
fi
}
@ -289,16 +305,41 @@ quantum_l3_agent_start() {
quantum_l3_agent_status
rc=$?
if [ $rc -eq $OCF_SUCCESS ]; then
ocf_log info "OpenStack L3 Server (quantum-l3-agent) already running"
ocf_log info "OpenStack quantum-l3-agent already running"
return $OCF_SUCCESS
fi
L3_PID=`pgrep -u ${OCF_RESKEY_user} -f ${OCF_RESKEY_binary}`
if [ "xx$L3_PID" != "xx" ]; then
ocf_log info "OpenStack quantum-l3-agent already running with PID=$L3_PID"
return $OCF_SUCCESS
fi
clean_up
if ocf_is_true ${OCF_RESKEY_syslog} ; then
L3_SYSLOG=" | logger -t quantum-quantum.agent.l3 "
if ocf_is_true ${OCF_RESKEY_debug} ; then
L3_LOG=" | tee -ia /var/log/quantum/l3.log "
else
L3_LOG=" "
fi
else
L3_SYSLOG=""
if ocf_is_true ${OCF_RESKEY_debug} ; then
L3_LOG=" >> /var/log/quantum/l3.log "
else
L3_LOG=" >> /dev/null "
fi
fi
# run the actual quantum-l3-agent daemon. Don't use ocf_run as we're sending the tool's output
# straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
--config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \
/dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid
--config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters "' \
2>&1 '" $L3_LOG $L3_SYSLOG &"
L3_PID=`pgrep -u ${OCF_RESKEY_user} -f ${OCF_RESKEY_binary}`
echo "$L3_PID" > $OCF_RESKEY_pid
# Spin waiting for the server to come up.
# Let the CRM/LRM time us out if required
@ -307,13 +348,13 @@ quantum_l3_agent_start() {
rc=$?
[ $rc -eq $OCF_SUCCESS ] && break
if [ $rc -ne $OCF_NOT_RUNNING ]; then
ocf_log err "OpenStack L3 Server (quantum-l3-agent) start failed"
ocf_log err "OpenStack quantum-l3-agent start failed"
exit $OCF_ERR_GENERIC
fi
sleep 1
done
sleep 7 ; q-agent-cleanup.py --agent=l3 --reschedule --remove-dead 2>&1 >> /var/log/quantum/rescheduling.log
sleep 13 ; q-agent-cleanup.py --agent=l3 --reschedule --remove-dead 2>&1 >> /var/log/quantum/rescheduling.log
ocf_log info "OpenStack L3 Server (quantum-l3-agent) started"
return $OCF_SUCCESS
@ -326,7 +367,7 @@ quantum_l3_agent_stop() {
quantum_l3_agent_status
rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then
clean_up
clean_up
ocf_log info "OpenStack L3 Server (quantum-l3-agent) already stopped"
return $OCF_SUCCESS
fi
@ -366,10 +407,10 @@ quantum_l3_agent_stop() {
ocf_run kill -s KILL $pid
fi
ocf_log info "OpenStack L3 Server (quantum-l3-agent) stopped"
ocf_log info "OpenStack quantum-l3-agent stopped"
rm -f $OCF_RESKEY_pid
clean_up
clean_up
return $OCF_SUCCESS
}

View File

@ -399,16 +399,21 @@ class QuantumCleaner(object):
lambda rou: lucky_ids.add(rou['id']),
self._list_routers_on_l3_agent(agents['alive'][0]['id'])
)
# remove dead agents after rescheduling
for agent in agents['dead']:
self.log.info("remove dead L3 agent: {0}".format(agent['id']))
if not self.options.get('noop'):
self._quantum_API_call(self.client.delete_agent, agent['id'])
# move routers from dead to alive agent
for rou in filter(lambda rr: not(rr[0]['id'] in lucky_ids), dead_routers):
self.log.info("unschedule router {rou} from L3 agent {agent}".format(
rou=rou[0]['id'],
agent=rou[1]
))
if not self.options.get('noop'):
self._remove_router_from_l3_agent(rou[1], rou[0]['id'])
#todo: if error:
#
# self.log.info("unschedule router {rou} from L3 agent {agent}".format(
# rou=rou[0]['id'],
# agent=rou[1]
# ))
# if not self.options.get('noop'):
# self._remove_router_from_l3_agent(rou[1], rou[0]['id'])
# #todo: if error:
# #
self.log.info("schedule router {rou} to L3 agent {agent}".format(
rou=rou[0]['id'],
agent=agents['alive'][0]['id']
@ -416,11 +421,6 @@ class QuantumCleaner(object):
if not self.options.get('noop'):
self._add_router_to_l3_agent(agents['alive'][0]['id'], rou[0]['id'])
#todo: if error:
# remove dead agents after rescheduling
for agent in agents['dead']:
self.log.info("remove dead L3 agent: {0}".format(agent['id']))
if not self.options.get('noop'):
self._quantum_API_call(self.client.delete_agent, agent['id'])
self.log.debug("_reschedule_agent_l3: end.")
def _reschedule_agent(self, agent):
@ -522,7 +522,7 @@ if __name__ == '__main__':
LOG.addHandler(logging.handlers.WatchedFileHandler(args.log))
LOG.setLevel(_log_level)
LOG.debug("Started")
LOG.info("Started: {0}".format(' '.join(sys.argv)))
cleaner = QuantumCleaner(get_authconfig(args.authconf), options=vars(args), log=LOG)
rc = 0
if vars(args).get('test-hostnames'):

View File

@ -0,0 +1 @@
Defaults:root !requiretty

View File

@ -259,6 +259,8 @@ class quantum::agents::l3 (
'tenant' => $auth_tenant,
'username' => $auth_user,
'password' => $auth_password,
'debug' => $debug,
'syslog' => $::use_syslog,
},
operations => {
'monitor' => {

View File

@ -73,6 +73,15 @@ class quantum (
source => "puppet:///modules/quantum/q-agent-cleanup.py",
}
file {'quantum-root':
path => '/etc/sudoers.d/quantum-root',
mode => 600,
owner => root,
group => root,
source => "puppet:///modules/quantum/quantum-root",
before => Package['quantum'],
}
file {'/var/cache/quantum':
ensure => directory,
path => '/var/cache/quantum',
@ -158,12 +167,16 @@ class quantum (
'keystone_authtoken/admin_user': value => $auth_user;
'keystone_authtoken/admin_password': value => $auth_password;
}
# logging for agents grabbing from stderr. It's a workaround for a bug in quantum-logging:
# the server gives these parameters from the command line
quantum_config {
'DEFAULT/log_config': ensure=> absent;
'DEFAULT/log_file': ensure=> absent;
'DEFAULT/log_dir': ensure=> absent;
'DEFAULT/use_syslog': ensure=> absent;
'DEFAULT/use_stderr': value => true;
}
if $use_syslog {
quantum_config {
'DEFAULT/log_config': value => "/etc/quantum/logging.conf";
'DEFAULT/log_file': ensure=> absent;
'DEFAULT/logdir': ensure=> absent;
}
file { "quantum-logging.conf":
content => template('quantum/logging.conf.erb'),
path => "/etc/quantum/logging.conf",
@ -182,17 +195,13 @@ class quantum (
File['quantum-logging.conf'] -> Anchor<| title == 'quantum-dhcp-agent' |>
} else {
quantum_config {
'DEFAULT/log_config': ensure=> absent;
'DEFAULT/log_file': value => $log_file;
file { "quantum-logging.conf":
content => template('quantum/logging.conf-nosyslog.erb'),
path => "/etc/quantum/logging.conf",
owner => "root",
group => "root",
mode => 644,
}
# file { "quantum-logging.conf":
# content => template('quantum/logging.conf-nosyslog.erb'),
# path => "/etc/quantum/logging.conf",
# owner => "root",
# group => "root",
# mode => 644,
# }
}
File <| title=='/etc/quantum' |> -> File <| title=='quantum-logging.conf' |>

View File

@ -1,8 +1,8 @@
[loggers]
keys = root, l3agent, ovsagent, dhcpagent, metadata
keys = root
[handlers]
keys = quantum, l3agent, ovsagent, dhcpagent, metadata
keys = root
[formatters]
keys = default
@ -12,55 +12,13 @@ format=%(asctime)s %(levelname)s %(name)s:%(lineno)d %(message)s
[logger_root]
level = DEBUG
handlers = quantum
qualname = quantum
[handler_quantum]
class = logging.FileHandler
args = ('/var/log/quantum/quantum.log',)
formatter = default
[logger_l3agent]
handlers = l3agent
level=NOTSET
qualname = quantum.agent.l3_agent
[handler_l3agent]
class = logging.FileHandler
args = ('/var/log/quantum/quantum_l3.log',)
formatter = default
handlers = root
propagate = 1
[logger_dhcpagent]
handlers = dhcpagent
[handler_root]
class = StreamHandler
level=NOTSET
qualname = quantum.agent.dhcp_agent
[handler_dhcpagent]
class = logging.FileHandler
args = ('/var/log/quantum/quantum_dhcp.log',)
formatter = default
[logger_ovsagent]
handlers = ovsagent
level=NOTSET
qualname = quantum.plugins.openvswitch.agent.ovs_quantum_agent
[handler_ovsagent]
class = logging.FileHandler
args = ('/var/log/quantum/quantum_ovs.log',)
formatter = default
[logger_metadata]
handlers = metadata
level=NOTSET
qualname = quantum.agent.metadata
[handler_metadata]
class = logging.FileHandler
args = ('/var/log/quantum/quantum_metadata.log',)
formatter = default
args = (sys.stdout,)

View File

@ -14,7 +14,7 @@ class Puppet::Provider::SwiftRingBuilder < Puppet::Provider
if File.exists?(builder_file_path)
if rows = swift_ring_builder(builder_file_path).split("\n")[4..-1]
rows.each do |row|
if row =~ /^\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(\d+)\s+(\S+)\s+(\d+\.\d+)\s+(\d+)\s+(\d?-?\d+\.\d+)\s+(\S*)$/
if row =~ /^\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(\d+)\s+(\S+)\s+(\d+\.\d+)\s+(\d+)?\s+\d?(-?\d+\.\d+)\s+(\S*)$/
object_hash["#{$4}:#{$5}"] = {
:id => $1,
:region => $2,

View File

@ -20,7 +20,7 @@ common:
pool_start: 10.49.54.225
pool_end: 10.49.54.239
segment_range: 900:999
tenant_network_type: vlan
tenant_network_type: gre
network_manager: nova.network.manager.FlatDHCPManager
auto_assign_floating_ip: true
quantum_netnode_on_cnt: true