Agreed on naming in config file

Also add info about where the config options are used

Change-Id: I072ea6d1fd54afa92f5b99fee8a1a53a3b15f6fd
Closes-Bug: #1418575
Vladimir Sharshov (warpc) 2015-02-19 14:40:41 +03:00
parent 423b6258af
commit 93536e8628
35 changed files with 157 additions and 157 deletions
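In short: every configuration option moves from an UPPER_CASE name to a lower_case one, both for the keys in astute.conf and for the Astute.config accessors used throughout the code. A minimal before/after sketch (option names and default values are taken from the diff below; the snippet itself is illustrative):

# Before this change:
Astute.config.PUPPET_TIMEOUT        # => 5400 (90 * 60)
Astute.config[:MAX_NODES_PER_CALL]  # => 50

# After this change:
Astute.config.puppet_timeout        # => 5400
Astute.config[:max_nodes_per_call]  # => 50

Keys in /opt/astute/astute.conf change the same way, e.g. MC_RETRIES: 5 becomes mc_retries: 5.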

View File

@ -79,7 +79,7 @@ Astute.logger.formatter = proc do |severity, datetime, progname, msg|
"#{datetime.strftime("%Y-%m-%dT%H:%M:%S")} #{severity_map[severity]}: [#{Process.pid}] #{msg}\n"
end
Astute.logger.info "Starting..."
Astute.logger.info "Starting with settings\n#{Astute.config.to_yaml}"
Raemon::Master.start(options.workers, Astute::Server::Worker,
:detach => options.daemonize,

View File

@ -1,22 +1,22 @@
# This is an example config file for Astute. Your config file should be placed
# at /opt/astute/astute.conf. You can check the default values in the config.rb file.
---
# MC_RETRIES is used in mclient.rb file.
# MClient tries MC_RETRIES times to call MCagent before failure.
MC_RETRIES: 5
# PUPPET_TIMEOUT is used in puppetd.rb file.
# mc_retries is used in mclient.rb file.
# MClient tries mc_retries times to call MCagent before failure.
mc_retries: 5
# puppet_timeout is used in puppetd.rb file.
# Maximum time (in seconds) Astute waits for the whole deployment.
PUPPET_TIMEOUT: 3600
# PUPPET_DEPLOY_INTERVAL is used in puppetd.rb file.
# Astute sleeps for PUPPET_DEPLOY_INTERVAL seconds, then check Puppet agents
puppet_timeout: 3600
# puppet_deploy_interval is used in puppetd.rb file.
# Astute sleeps for puppet_deploy_interval seconds, then checks Puppet agents
# statuses again.
PUPPET_DEPLOY_INTERVAL: 2
# PUPPET_FADE_TIMEOUT is used in puppetd.rb file.
puppet_deploy_interval: 2
# puppet_fade_timeout is used in puppetd.rb file.
# After the Puppet agent has finished its real work, it spends some time on a graceful exit.
# PUPPET_FADE_TIMEOUT means how long (in seconds) Astute can take for Puppet
# puppet_fade_timeout means how long (in seconds) Astute can take for Puppet
# to exit after real work has finished.
PUPPET_FADE_TIMEOUT: 120
# PUPPET_FADE_INTERVAL is used in puppetd.rb file.
# Retry every PUPPET_FADE_INTERVAL seconds to check puppet state if it was
puppet_fade_timeout: 120
# puppet_fade_interval is used in puppetd.rb file.
# Retry every puppet_fade_interval seconds to check puppet state if it was
# in 'running' state.
PUPPET_FADE_INTERVAL: 10
puppet_fade_interval: 10
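Any other option defined in config.rb can be overridden from this file in the same way. A rough sketch of how such a YAML file maps onto the library defaults (the merge below is only an illustration, not Astute's actual config loader):

require 'yaml'

defaults  = { 'mc_retries' => 10, 'puppet_timeout' => 90 * 60 }  # subset of the config.rb defaults
overrides = YAML.load_file('/opt/astute/astute.conf') || {}      # e.g. the example file above
settings  = defaults.merge(overrides)

settings['mc_retries']     # => 5    (overridden by astute.conf)
settings['puppet_timeout'] # => 3600 (overridden by astute.conf)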

View File

@ -87,7 +87,7 @@ module Astute
begin
Astute.logger.debug("Waiting for reboot to be complete: nodes: #{reboot_events.keys}")
failed_nodes = []
Timeout::timeout(Astute.config.REBOOT_TIMEOUT) do
Timeout::timeout(Astute.config.reboot_timeout) do
while not reboot_events.empty?
reboot_events.each do |node_name, event_id|
event_status = @engine.event_status(event_id)

View File

@ -55,38 +55,38 @@ module Astute
conf = {}
# Library settings
conf[:PUPPET_TIMEOUT] = 90 * 60 # maximum time it waits for the whole deployment
conf[:PUPPET_DEPLOY_INTERVAL] = 2 # sleep for ## sec, then check puppet status again
conf[:PUPPET_FADE_TIMEOUT] = 120 # how long it can take for puppet to exit after dumping to last_run_summary
conf[:MC_RETRIES] = 10 # MClient tries to call mcagent before failure
conf[:MC_RETRY_INTERVAL] = 1 # MClient sleeps for ## sec between retries
conf[:PUPPET_FADE_INTERVAL] = 30 # retry every ## seconds to check puppet state if it was running
conf[:PROVISIONING_TIMEOUT] = 90 * 60 # timeout for booting target OS in provision
conf[:REBOOT_TIMEOUT] = 240 # how long it can take for node to reboot
conf[:DUMP_TIMEOUT] = 3600 # maximum time it waits for the dump (meaningles to be larger
conf[:puppet_timeout] = 90 * 60 # maximum time it waits for single puppet run
conf[:puppet_deploy_interval] = 2 # sleep for ## sec, then check puppet status again
conf[:puppet_fade_timeout] = 120 # how long it can take for puppet to exit after dumping to last_run_summary
conf[:mc_retries] = 10 # MClient tries to call mcagent before failure
conf[:mc_retry_interval] = 1 # MClient sleeps for ## sec between retries
conf[:puppet_fade_interval] = 30 # retry every ## seconds to check puppet state if it was running
conf[:provisioning_timeout] = 90 * 60 # timeout for booting target OS in provision
conf[:reboot_timeout] = 240 # how long it can take for node to reboot
conf[:dump_timeout] = 3600 # maximum time it waits for the dump (meaningless to be larger
# than the timeout specified for the execute_shell_command mcagent)
conf[:PUPPET_SSH_KEYS_DIR] = '/var/lib/astute' # folder where ssh keys will be saved. Warning!
conf[:puppet_ssh_keys_dir] = '/var/lib/astute' # folder where ssh keys will be saved. Warning!
# Do not change this unless you clearly know what you are doing!
conf[:PUPPET_SSH_KEYS] = [
conf[:puppet_ssh_keys] = [
'neutron',
'nova',
'ceph',
'mysql',
] # names of the ssh keys that will be generated and uploaded to all nodes before deploy
conf[:PUPPET_KEYS] = [
conf[:puppet_keys] = [
'mongodb'
] # names of the keys that will be generated and uploaded to all nodes before deploy
conf[:PUPPET_KEYS_DIR] = '/var/lib/astute' # folder where ssh keys will be saved. Warning!
conf[:puppet_keys_dir] = '/var/lib/astute' # folder where ssh keys will be saved. Warning!
# Do not change this unless you clearly know what you are doing!
conf[:MAX_NODES_PER_CALL] = 50 # how many nodes to deploy in one puppet call
conf[:SSH_RETRIES] = 5 # SSH tries to call ssh client before failure
conf[:SSH_RETRY_TIMEOUT] = 30 # SSH sleeps for ## sec between retries
conf[:max_nodes_per_call] = 50 # how many nodes to deploy in one puppet call
conf[:ssh_retries] = 5 # SSH tries to call ssh client before failure
conf[:ssh_retry_timeout] = 30 # SSH sleeps for ## sec between retries
conf[:MAX_NODES_PER_REMOVE_CALL] = 10 # how many nodes to remove in one call
conf[:NODES_REMOVE_INTERVAL] = 10 # sleeps for ## sec between remove calls
conf[:max_nodes_per_remove_call] = 10 # how many nodes to remove in one call
conf[:nodes_remove_interval] = 10 # sleeps for ## sec between remove calls
conf[:DHCP_REPEAT] = 3 # Dhcp discover will be sended 3 times
conf[:dhcp_repeat] = 3 # DHCP discover will be sent 3 times
conf[:iops] = 120 # Default master node IOPS performance
conf[:splay_factor] = 180 # Formula: 20 (number of nodes) / 120 (iops) = 0.1667
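The rest of the code reads these settings both as methods and as hash keys, so the rename has to be applied consistently to both spellings. The specs further down also override single options temporarily; roughly (a sketch based on those spec changes):

Astute.config.max_nodes_per_call    # method-style access, => 50 by default
Astute.config[:max_nodes_per_call]  # hash-style access, same value

old_value = Astute.config.max_nodes_per_call
Astute.config.max_nodes_per_call = 1   # e.g. force one node per puppet call in a spec
# ... run the code under test ...
Astute.config.max_nodes_per_call = old_value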

View File

@ -200,7 +200,7 @@ module Astute
# Prevent high load for tasks
def perform_with_limit(nodes, &block)
nodes.each_slice(Astute.config[:MAX_NODES_PER_CALL]) do |part|
nodes.each_slice(Astute.config[:max_nodes_per_call]) do |part|
block.call(part)
end
end
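perform_with_limit is a plain batching helper: each_slice splits the node list into groups of at most max_nodes_per_call and hands each group to the block. A standalone illustration (the node ids and the limit of 2 are made up for the example):

nodes = %w(n1 n2 n3 n4 n5)
nodes.each_slice(2) do |part|   # as if max_nodes_per_call were 2
  p part                        # ["n1", "n2"], then ["n3", "n4"], then ["n5"]
end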

View File

@ -44,7 +44,7 @@ module Astute
# can perform multiple roles.
group_by_uniq_values(nodes).each do |nodes_group|
# Prevent deploy too many nodes at once
nodes_group.each_slice(Astute.config[:MAX_NODES_PER_CALL]) do |part|
nodes_group.each_slice(Astute.config[:max_nodes_per_call]) do |part|
if !fail_deploy
# Pre deploy hooks

View File

@ -91,7 +91,7 @@ class Astute::DeploymentEngine::GranularDeployment < Astute::DeploymentEngine
while @task_manager.task_in_queue?
nodes_to_report = []
sleep Astute.config.PUPPET_DEPLOY_INTERVAL
sleep Astute.config.puppet_deploy_interval
@task_manager.node_uids.each do |node_id|
if task = @task_manager.current_task(node_id)
case status = check_status(node_id)

View File

@ -51,7 +51,7 @@ class Astute::DeploymentEngine::Tasklib < Astute::DeploymentEngine
@task_manager = TaskManager.new(nodes)
@debug = nodes.first['debug']
Timeout::timeout(Astute.config.PUPPET_TIMEOUT) do
Timeout::timeout(Astute.config.puppet_timeout) do
pre_tasklib_deploy
deploy_nodes
@ -114,7 +114,7 @@ class Astute::DeploymentEngine::Tasklib < Astute::DeploymentEngine
while @task_manager.task_in_queue?
nodes_to_report = []
sleep Astute.config.PUPPET_DEPLOY_INTERVAL
sleep Astute.config.puppet_deploy_interval
@task_manager.node_uids.each do |node_id|
if task = @task_manager.current_task(node_id)
case status = check_status(node_id, task)

View File

@ -17,7 +17,7 @@ module Astute
module Dump
def self.dump_environment(ctx, settings)
lastdump = settings['lastdump']
timeout = Astute.config.DUMP_TIMEOUT
timeout = Astute.config.dump_timeout
shell = MClient.new(ctx, 'execute_shell_command', ['master'], check_result=true, timeout=timeout, retries=1)
upload_file = MClient.new(ctx, 'uploadfile', ['master'])
begin

View File

@ -24,7 +24,7 @@ module Astute
attr_accessor :retries
def initialize(ctx, agent, nodes=nil, check_result=true, timeout=nil, retries=Astute.config.MC_RETRIES)
def initialize(ctx, agent, nodes=nil, check_result=true, timeout=nil, retries=Astute.config.mc_retries)
@task_id = ctx.task_id
@agent = agent
@nodes = nodes.map { |n| n.to_s } if nodes

View File

@ -105,8 +105,8 @@ module Astute
timeout = hook['parameters']['timeout'] || 300
cwd = hook['parameters']['cwd'] || "/"
retries = hook['parameters']['retries'] || Astute.config.MC_RETRIES
interval = hook['parameters']['interval'] || Astute.config.MC_RETRY_INTERVAL
retries = hook['parameters']['retries'] || Astute.config.mc_retries
interval = hook['parameters']['interval'] || Astute.config.mc_retry_interval
shell_command = "cd #{cwd} && #{hook['parameters']['cmd']}"
is_success = false
@ -288,7 +288,7 @@ module Astute
end
def perform_with_limit(nodes, &block)
nodes.each_slice(Astute.config[:MAX_NODES_PER_CALL]) do |part|
nodes.each_slice(Astute.config[:max_nodes_per_call]) do |part|
block.call(part)
end
end

View File

@ -58,7 +58,7 @@ module Astute
nodes.each do |node|
data_to_send[node['uid'].to_s] = make_interfaces_to_send(node['networks'], joined=false).to_json
end
repeat = Astute.config.DHCP_REPEAT
repeat = Astute.config.dhcp_repeat
result = net_probe.dhcp_discover(:interfaces => data_to_send,
:timeout => 10, :repeat => repeat).map do |response|
format_dhcp_response(response)

View File

@ -28,10 +28,10 @@ module Astute
# 3. If exception is raised here, we should not fully fall into error, but only failed node
erased_nodes, error_nodes, inaccessible_nodes = remove_nodes(@nodes)
retry_remove_nodes(error_nodes, erased_nodes,
Astute.config[:MC_RETRIES], Astute.config[:MC_RETRY_INTERVAL])
Astute.config[:mc_retries], Astute.config[:mc_retry_interval])
retry_remove_nodes(inaccessible_nodes, erased_nodes,
Astute.config[:MC_RETRIES], Astute.config[:MC_RETRY_INTERVAL])
Astute.config[:mc_retries], Astute.config[:mc_retry_interval])
answer = {'nodes' => serialize_nodes(erased_nodes)}
@ -122,8 +122,8 @@ module Astute
Astute.logger.info "#{@ctx.task_id}: Starting removing of nodes: #{nodes.uids.inspect}"
results = []
nodes.uids.sort.each_slice(Astute.config[:MAX_NODES_PER_REMOVE_CALL]).with_index do |part, i|
sleep Astute.config[:NODES_REMOVE_INTERVAL] if i != 0
nodes.uids.sort.each_slice(Astute.config[:max_nodes_per_remove_call]).with_index do |part, i|
sleep Astute.config[:nodes_remove_interval] if i != 0
results += mclient_remove_piece_nodes(part)
end
results

View File

@ -91,7 +91,7 @@ module Astute
# process and then reboot nodes
# TODO(kozhukalov): do not forget about execute_shell_command timeout which is 3600
# watch_provision_progress has PROVISIONING_TIMEOUT + 3600 is much longer than PROVISIONING_TIMEOUT
# watch_provision_progress has provisioning_timeout + 3600, which is much longer than provisioning_timeout
if provision_method == 'image'
# disabling pxe boot
cobbler.netboot_nodes(nodes, false)
@ -171,7 +171,7 @@ module Astute
nodes_not_booted = nodes.map{ |n| n['uid'] }
result_msg = {'nodes' => []}
begin
Timeout.timeout(Astute.config.PROVISIONING_TIMEOUT) do # Timeout for booting target OS
Timeout.timeout(Astute.config.provisioning_timeout) do # Timeout for booting target OS
catch :done do
loop do
sleep_not_greater_than(20) do
@ -375,8 +375,8 @@ module Astute
mco_result = {}
nodes_uids = nodes.map{ |n| n['uid'] }
Astute.config.MC_RETRIES.times do |i|
sleep Astute.config.NODES_REMOVE_INTERVAL
Astute.config.mc_retries.times do |i|
sleep Astute.config.nodes_remove_interval
Astute.logger.debug "Trying to connect to nodes #{nodes_uids} using mcollective"
nodes_types = node_type(ctx.reporter, ctx.task_id, nodes_uids, 2)

View File

@ -24,8 +24,8 @@ module Astute
deployment_id = deployment_info.first['deployment_id']
raise "Deployment_id is missing" unless deployment_id
Astute.config.PUPPET_KEYS.each do |key_name|
dir_path = File.join(Astute.config.PUPPET_KEYS_DIR, deployment_id.to_s, key_name)
Astute.config.puppet_keys.each do |key_name|
dir_path = File.join(Astute.config.puppet_keys_dir, deployment_id.to_s, key_name)
key_path = File.join(dir_path, key_name + '.key')
FileUtils.mkdir_p dir_path

View File

@ -24,8 +24,8 @@ module Astute
deployment_id = deployment_info.first['deployment_id']
raise "Deployment_id is missing" unless deployment_id
Astute.config.PUPPET_SSH_KEYS.each do |key_name|
dir_path = File.join(Astute.config.PUPPET_SSH_KEYS_DIR, deployment_id.to_s, key_name)
Astute.config.puppet_ssh_keys.each do |key_name|
dir_path = File.join(Astute.config.puppet_ssh_keys_dir, deployment_id.to_s, key_name)
key_path = File.join(dir_path, key_name)
FileUtils.mkdir_p dir_path

View File

@ -21,10 +21,10 @@ module Astute
cmd = "ntpdate -u $(egrep '^server' /etc/ntp.conf | sed '/^#/d' | awk '{print $2}')"
succeeded = false
Astute.config.MC_RETRIES.times.each do
Astute.config.mc_retries.times.each do
succeeded = run_shell_command_remotely(context, nodes_uids, cmd)
return if succeeded
sleep Astute.config.MC_RETRY_INTERVAL
sleep Astute.config.mc_retry_interval
end
if !succeeded

View File

@ -72,10 +72,10 @@ module Astute
succeeded = false
nodes_uids = deployment_info.map{ |n| n['uid'] }.uniq
Astute.config.MC_RETRIES.times.each do
Astute.config.mc_retries.times.each do
succeeded = run_shell_command_remotely(context, nodes_uids, cmd)
return if succeeded
sleep Astute.config.MC_RETRY_INTERVAL
sleep Astute.config.mc_retry_interval
end
if !succeeded

View File

@ -27,17 +27,17 @@ module Astute
private
def upload_keys(context, node_uids, deployment_id)
Astute.config.PUPPET_KEYS.each do |key_name|
Astute.config.puppet_keys.each do |key_name|
upload_mclient = MClient.new(context, "uploadfile", node_uids)
key = key_name + '.key'
source_path = File.join(
Astute.config.PUPPET_KEYS_DIR,
Astute.config.puppet_keys_dir,
deployment_id,
key_name,
key
)
destination_path = File.join(
Astute.config.PUPPET_KEYS_DIR,
Astute.config.puppet_keys_dir,
key_name,
key
)

View File

@ -27,16 +27,16 @@ module Astute
private
def upload_keys(context, node_uids, deployment_id)
Astute.config.PUPPET_SSH_KEYS.each do |key_name|
Astute.config.puppet_ssh_keys.each do |key_name|
upload_mclient = MClient.new(context, "uploadfile", node_uids)
[key_name, key_name + ".pub"].each do |ssh_key|
source_path = File.join(
Astute.config.PUPPET_SSH_KEYS_DIR,
Astute.config.puppet_ssh_keys_dir,
deployment_id,
key_name,
ssh_key)
destination_path = File.join(
Astute.config.PUPPET_SSH_KEYS_DIR,
Astute.config.puppet_ssh_keys_dir,
key_name,
ssh_key)
content = File.read(source_path)

View File

@ -25,7 +25,7 @@ module Astute
@puppet_manifest = puppet_manifest || '/etc/puppet/manifests/site.pp'
@puppet_modules = puppet_modules || '/etc/puppet/modules'
@cwd = cwd || '/'
@time_observer = TimeObserver.new(timeout || Astute.config.PUPPET_TIMEOUT)
@time_observer = TimeObserver.new(timeout || Astute.config.puppet_timeout)
@prev_summary = nil
@is_hung = false
end
@ -38,7 +38,7 @@ module Astute
puppetd_runonce
end
# expect to run this method with respect of Astute.config.PUPPET_FADE_INTERVAL
# expect to run this method with respect of Astute.config.puppet_fade_interval
def status
raise Timeout::Error unless @time_observer.enough_time?
@ -122,7 +122,7 @@ module Astute
# Returns list of nodes uids which appear to be with hung puppet.
def puppetd_runonce
started = Time.now.to_i
while Time.now.to_i - started < Astute.config.PUPPET_FADE_TIMEOUT
while Time.now.to_i - started < Astute.config.puppet_fade_timeout
status = puppet_status
is_stopped = stopped?(status)
@ -133,7 +133,7 @@ module Astute
puppet_run if is_stopped || is_idling
break if !is_running && !is_idling
sleep Astute.config.PUPPET_FADE_INTERVAL
sleep Astute.config.puppet_fade_interval
end
if is_running || is_idling

View File

@ -28,7 +28,7 @@ module Astute
@cwd = cwd || '/'
Astute.logger.debug "Waiting for puppet to finish deployment on all
nodes (timeout = #{Astute.config.PUPPET_TIMEOUT} sec)..."
nodes (timeout = #{Astute.config.puppet_timeout} sec)..."
time_before = Time.now
deploy_nodes
@ -45,7 +45,7 @@ module Astute
puppet_tasks.each(&:run)
while puppet_tasks.any? { |t| t.status == 'deploying' }
sleep Astute.config.PUPPET_DEPLOY_INTERVAL
sleep Astute.config.puppet_deploy_interval
end
end

View File

@ -16,7 +16,7 @@ module Astute
class Rsyslogd
def self.send_sighup(ctx, master_ip)
timeout = Astute.config.SSH_RETRY_TIMEOUT
timeout = Astute.config.ssh_retry_timeout
shell = MClient.new(ctx, 'execute_shell_command', ['master'],
check_result=true, timeout=timeout, retries=1)
cmd = "ssh root@#{master_ip} 'pkill -HUP rsyslogd'"

View File

@ -18,7 +18,7 @@ require 'timeout'
module Astute
class Ssh
def self.execute(ctx, nodes, cmd, timeout=60, retries=Astute.config.SSH_RETRIES)
def self.execute(ctx, nodes, cmd, timeout=60, retries=Astute.config.ssh_retries)
nodes_to_process = nodes.map { |n| n['admin_ip'] }
Astute.logger.debug "Run shell command '#{cmd}' using ssh"
@ -40,7 +40,7 @@ module Astute
break if nodes_to_process.empty?
sleep Astute.config.SSH_RETRY_TIMEOUT
sleep Astute.config.ssh_retry_timeout
end
inaccessible_nodes = nodes_to_process
@ -79,7 +79,7 @@ module Astute
servers = []
channel = nil
Net::SSH::Multi.start(:concurrent_connections => Astute.config.MAX_NODES_PER_CALL,
Net::SSH::Multi.start(:concurrent_connections => Astute.config.max_nodes_per_call,
:on_error => :warn) do |session|
nodes.each do |name|
session.use name,

View File

@ -31,14 +31,14 @@ Dir[File.join(File.dirname(__FILE__), 'unit/fixtures/*.rb')].each { |file| requi
# NOTE(mihgen): I hate to wait for unit tests to complete,
# resetting time to sleep significantly increases tests speed
Astute.config.PUPPET_DEPLOY_INTERVAL = 0
Astute.config.PUPPET_FADE_INTERVAL = 0
Astute.config.PUPPET_FADE_TIMEOUT = 1
Astute.config.MC_RETRY_INTERVAL = 0
Astute.config.PROVISIONING_TIMEOUT = 0
Astute.config.REBOOT_TIMEOUT = 0
Astute.config.SSH_RETRY_TIMEOUT = 0
Astute.config.NODES_REMOVE_INTERVAL = 0
Astute.config.puppet_deploy_interval = 0
Astute.config.puppet_fade_interval = 0
Astute.config.puppet_fade_timeout = 1
Astute.config.mc_retry_interval = 0
Astute.config.provisioning_timeout = 0
Astute.config.reboot_timeout = 0
Astute.config.ssh_retry_timeout = 0
Astute.config.nodes_remove_interval = 0
Astute.logger = Logger.new(STDERR)
RSpec.configure do |c|

View File

@ -278,13 +278,13 @@ describe Astute::DeploymentEngine do
context 'limits' do
around(:each) do |example|
old_value = Astute.config.MAX_NODES_PER_CALL
old_value = Astute.config.max_nodes_per_call
example.run
Astute.config.MAX_NODES_PER_CALL = old_value
Astute.config.max_nodes_per_call = old_value
end
it 'should affect nodes with same priorities in next deployment group' do
Astute.config.MAX_NODES_PER_CALL = 1
Astute.config.max_nodes_per_call = 1
nodes = [
{'uid' => '2', 'priority' => 10, 'role' => 'primary-controller', 'fail_if_error' => true},
@ -304,13 +304,13 @@ describe Astute::DeploymentEngine do
context 'limits' do
around(:each) do |example|
old_value = Astute.config.MAX_NODES_PER_CALL
old_value = Astute.config.max_nodes_per_call
example.run
Astute.config.MAX_NODES_PER_CALL = old_value
Astute.config.max_nodes_per_call = old_value
end
it 'number of nodes running in parallel should be limited' do
Astute.config.MAX_NODES_PER_CALL = 1
Astute.config.max_nodes_per_call = 1
nodes = [
{'uid' => 1, 'priority' => 10, 'role' => 'compute'},

View File

@ -23,9 +23,9 @@ describe Astute::NailgunHooks do
let(:ctx) { mock_ctx }
around(:each) do |example|
old_value = Astute.config.MAX_NODES_PER_CALL
old_value = Astute.config.max_nodes_per_call
example.run
Astute.config.MAX_NODES_PER_CALL = old_value
Astute.config.max_nodes_per_call = old_value
end
let(:upload_file_hook) do
@ -297,7 +297,7 @@ describe Astute::NailgunHooks do
end
it 'should limit nodes processing in parallel' do
Astute.config.MAX_NODES_PER_CALL = 2
Astute.config.max_nodes_per_call = 2
hooks = Astute::NailgunHooks.new([shell_hook], ctx)
hooks.expects(:run_shell_command).once.with(
@ -336,14 +336,14 @@ describe Astute::NailgunHooks do
it 'if exit code not eql 0 -> raise error' do
hooks = Astute::NailgunHooks.new([shell_hook], ctx)
hooks.expects(:run_shell_command).returns({:data => {:exit_code => 1}}).times(Astute.config.MC_RETRIES)
hooks.expects(:run_shell_command).returns({:data => {:exit_code => 1}}).times(Astute.config.mc_retries)
expect {hooks.process}.to raise_error(Astute::DeploymentEngineError, /Failed to deploy plugin/)
end
it 'if exit code not presence -> raise error' do
hooks = Astute::NailgunHooks.new([shell_hook], ctx)
hooks.expects(:run_shell_command).returns({:data => {}}).times(Astute.config.MC_RETRIES)
hooks.expects(:run_shell_command).returns({:data => {}}).times(Astute.config.mc_retries)
expect {hooks.process}.to raise_error(Astute::DeploymentEngineError, /Failed to deploy plugin/)
end
@ -389,7 +389,7 @@ describe Astute::NailgunHooks do
end
it 'should limit nodes processing in parallel' do
Astute.config.MAX_NODES_PER_CALL = 1
Astute.config.max_nodes_per_call = 1
hooks = Astute::NailgunHooks.new([upload_file_hook], ctx)
hooks.expects(:upload_file).once.with(
@ -487,7 +487,7 @@ describe Astute::NailgunHooks do
end
it 'should limit nodes processing in parallel' do
Astute.config.MAX_NODES_PER_CALL = 1
Astute.config.max_nodes_per_call = 1
hooks = Astute::NailgunHooks.new([sync_hook], ctx)
hooks.expects(:run_shell_command).once.with(
@ -618,7 +618,7 @@ describe Astute::NailgunHooks do
end
it 'should limit nodes processing in parallel' do
Astute.config.MAX_NODES_PER_CALL = 1
Astute.config.max_nodes_per_call = 1
hooks = Astute::NailgunHooks.new([puppet_hook], ctx)
hooks.expects(:run_puppet).once.with(
@ -796,7 +796,7 @@ describe Astute::NailgunHooks do
end
it 'should limit nodes processing in parallel' do
Astute.config.MAX_NODES_PER_CALL = 1
Astute.config.max_nodes_per_call = 1
hooks = Astute::NailgunHooks.new([reboot_hook], ctx)

View File

@ -113,8 +113,8 @@ describe Astute::NodesRemover do
)
end
it 'should try maximum MC_RETRIES + 1 times to erase node if node get error' do
retries = Astute.config[:MC_RETRIES]
it 'should try maximum mc_retries + 1 times to erase node if node get error' do
retries = Astute.config[:mc_retries]
expect(retries).to eq(10)
remover = Astute::NodesRemover.new(ctx, nodes)
@ -122,8 +122,8 @@ describe Astute::NodesRemover do
remover.remove
end
it 'should try maximum MC_RETRIES + 1 times to erase node if node is inaccessible' do
retries = Astute.config[:MC_RETRIES]
it 'should try maximum mc_retries + 1 times to erase node if node is inaccessible' do
retries = Astute.config[:mc_retries]
expect(retries).to eq(10)
remover = Astute::NodesRemover.new(ctx, nodes)
@ -182,9 +182,9 @@ describe Astute::NodesRemover do
context 'nodes limits' do
around(:each) do |example|
old_value = Astute.config.MAX_NODES_PER_REMOVE_CALL
old_value = Astute.config.max_nodes_per_remove_call
example.run
Astute.config.MAX_NODES_PER_REMOVE_CALL = old_value
Astute.config.max_nodes_per_remove_call = old_value
end
let(:mcollective_answer1) do
@ -196,7 +196,7 @@ describe Astute::NodesRemover do
end
before(:each) do
Astute.config.MAX_NODES_PER_REMOVE_CALL = 1
Astute.config.max_nodes_per_remove_call = 1
Astute::NodesRemover.any_instance.expects(:mclient_remove_piece_nodes).twice
.returns(mcollective_answer1)
@ -208,7 +208,7 @@ describe Astute::NodesRemover do
end
it 'should sleep between group of nodes' do
Astute::NodesRemover.any_instance.expects(:sleep).with(Astute.config.NODES_REMOVE_INTERVAL)
Astute::NodesRemover.any_instance.expects(:sleep).with(Astute.config.nodes_remove_interval)
Astute::NodesRemover.new(ctx, nodes).remove
end

View File

@ -530,19 +530,19 @@ describe Astute::Orchestrator do
describe '#stop_provision' do
around(:each) do |example|
old_ssh_retries = Astute.config.SSH_RETRIES
old_mc_retries = Astute.config.MC_RETRIES
old_nodes_rm_interal = Astute.config.NODES_REMOVE_INTERVAL
old_ssh_retries = Astute.config.ssh_retries
old_mc_retries = Astute.config.mc_retries
old_nodes_rm_interal = Astute.config.nodes_remove_interval
example.run
Astute.config.SSH_RETRIES = old_ssh_retries
Astute.config.MC_RETRIES = old_mc_retries
Astute.config.NODES_REMOVE_INTERVAL = old_nodes_rm_interal
Astute.config.ssh_retries = old_ssh_retries
Astute.config.mc_retries = old_mc_retries
Astute.config.nodes_remove_interval = old_nodes_rm_interal
end
before(:each) do
Astute.config.SSH_RETRIES = 1
Astute.config.MC_RETRIES = 1
Astute.config.NODES_REMOVE_INTERVAL = 0
Astute.config.ssh_retries = 1
Astute.config.mc_retries = 1
Astute.config.nodes_remove_interval = 0
end
it 'erase nodes using ssh' do
@ -666,7 +666,7 @@ describe Astute::Orchestrator do
Astute::NodesRemover.any_instance.stubs(:remove)
.once.returns({"nodes"=>[{"uid"=>"1", }]})
@orchestrator.expects(:sleep).with(Astute.config.NODES_REMOVE_INTERVAL)
@orchestrator.expects(:sleep).with(Astute.config.nodes_remove_interval)
@orchestrator.stop_provision(@reporter,
data['task_uuid'],
@ -675,8 +675,8 @@ describe Astute::Orchestrator do
end
it 'perform several attempts to find and erase nodes using mcollective' do
Astute.config.MC_RETRIES = 2
Astute.config.NODES_REMOVE_INTERVAL = 0
Astute.config.mc_retries = 2
Astute.config.nodes_remove_interval = 0
@orchestrator.stubs(:stop_provision_via_ssh)
.returns({'nodes' => [{'uid' => "1"}],

View File

@ -18,7 +18,7 @@ describe Astute::GenerateKeys do
include SpecHelpers
before(:each) do
Astute.config.PUPPET_KEYS = ['mongodb']
Astute.config.puppet_keys = ['mongodb']
end
let(:ctx) do
@ -31,11 +31,11 @@ describe Astute::GenerateKeys do
let(:generate_keys) { Astute::GenerateKeys.new }
around(:each) do |example|
old_keys_dir = Astute.config.PUPPET_KEYS_DIR
old_puppet_keys = Astute.config.PUPPET_KEYS
old_keys_dir = Astute.config.puppet_keys_dir
old_puppet_keys = Astute.config.puppet_keys
example.run
Astute.config.PUPPET_KEYS_DIR = old_keys_dir
Astute.config.PUPPET_KEYS = old_puppet_keys
Astute.config.puppet_keys_dir = old_keys_dir
Astute.config.puppet_keys = old_puppet_keys
end
it 'should raise error if deployment_id is not set' do
@ -48,7 +48,7 @@ describe Astute::GenerateKeys do
generate_keys.stubs(:run_system_command).returns([0, "", ""])
Dir.mktmpdir do |temp_dir|
Astute.config.PUPPET_KEYS_DIR = temp_dir
Astute.config.puppet_keys_dir = temp_dir
generate_keys.process(deploy_data, ctx)
expect { File.directory? File.join(temp_dir, 'mongodb.key') }.to be_true
@ -86,7 +86,7 @@ describe Astute::GenerateKeys do
File.stubs(:directory?).returns(true)
key_path = File.join(
Astute.config.PUPPET_KEYS_DIR,
Astute.config.puppet_keys_dir,
deploy_data.first['deployment_id'].to_s,
'mongodb',
'mongodb.key'
@ -99,7 +99,7 @@ describe Astute::GenerateKeys do
it 'should not overwrite files' do
Dir.mktmpdir do |temp_dir|
Astute.config.PUPPET_KEYS_DIR = temp_dir
Astute.config.puppet_keys_dir = temp_dir
key_path = File.join(temp_dir,'mongodb', 'mongodb.key')
FileUtils.mkdir_p File.join(temp_dir, 'mongodb')
File.open(key_path, 'w') { |file| file.write("say no overwrite") }
@ -111,15 +111,15 @@ describe Astute::GenerateKeys do
end
it 'should check next key if find existing' do
Astute.config.PUPPET_KEYS = ['mongodb', 'test']
Astute.config.puppet_keys = ['mongodb', 'test']
mongodb_key_path = File.join(
Astute.config.PUPPET_KEYS_DIR,
Astute.config.puppet_keys_dir,
deploy_data.first['deployment_id'].to_s,
'mongodb',
'mongodb.key'
)
test_key_path = File.join(
Astute.config.PUPPET_KEYS_DIR,
Astute.config.puppet_keys_dir,
deploy_data.first['deployment_id'].to_s,
'test',
'test.key'

View File

@ -18,15 +18,15 @@ describe Astute::GenerateSshKeys do
include SpecHelpers
around(:each) do |example|
old_puppet_ssh_keys = Astute.config.PUPPET_KEYS
old_ssh_keys_dir = Astute.config.PUPPET_SSH_KEYS_DIR
old_puppet_ssh_keys = Astute.config.puppet_keys
old_ssh_keys_dir = Astute.config.puppet_ssh_keys_dir
example.run
Astute.config.PUPPET_SSH_KEYS_DIR = old_ssh_keys_dir
Astute.config.PUPPET_KEYS = old_puppet_ssh_keys
Astute.config.puppet_ssh_keys_dir = old_ssh_keys_dir
Astute.config.puppet_keys = old_puppet_ssh_keys
end
before(:each) do
Astute.config.PUPPET_SSH_KEYS = ['nova']
Astute.config.puppet_ssh_keys = ['nova']
end
let(:ctx) do
@ -48,7 +48,7 @@ describe Astute::GenerateSshKeys do
generate_ssh_keys.stubs(:run_system_command).returns([0, "", ""])
Dir.mktmpdir do |temp_dir|
Astute.config.PUPPET_SSH_KEYS_DIR = temp_dir
Astute.config.puppet_ssh_keys_dir = temp_dir
generate_ssh_keys.process(deploy_data, ctx)
expect { File.directory? File.join(temp_dir, 'nova') }.to be_true
@ -85,7 +85,7 @@ describe Astute::GenerateSshKeys do
FileUtils.stubs(:mkdir_p).returns(true)
File.stubs(:directory?).returns(true)
key_path = File.join(Astute.config.PUPPET_SSH_KEYS_DIR, deploy_data.first['deployment_id'].to_s, 'nova', 'nova')
key_path = File.join(Astute.config.puppet_ssh_keys_dir, deploy_data.first['deployment_id'].to_s, 'nova', 'nova')
cmd = "ssh-keygen -b 2048 -t rsa -N '' -f #{key_path} 2>&1"
generate_ssh_keys.expects(:run_system_command).with(cmd).returns([0, "", ""])
@ -94,7 +94,7 @@ describe Astute::GenerateSshKeys do
it 'should not overwrite files' do
Dir.mktmpdir do |temp_dir|
Astute.config.PUPPET_SSH_KEYS_DIR = temp_dir
Astute.config.puppet_ssh_keys_dir = temp_dir
key_path = File.join(temp_dir,'nova', 'nova')
FileUtils.mkdir_p File.join(temp_dir,'nova')
File.open(key_path, 'w') { |file| file.write("say no overwrite") }
@ -106,9 +106,9 @@ describe Astute::GenerateSshKeys do
end
it 'should check next key if find existing' do
Astute.config.PUPPET_SSH_KEYS = ['nova', 'test']
nova_key_path = File.join(Astute.config.PUPPET_SSH_KEYS_DIR, deploy_data.first['deployment_id'].to_s, 'nova', 'nova')
test_key_path = File.join(Astute.config.PUPPET_SSH_KEYS_DIR, deploy_data.first['deployment_id'].to_s, 'test', 'test')
Astute.config.puppet_ssh_keys = ['nova', 'test']
nova_key_path = File.join(Astute.config.puppet_ssh_keys_dir, deploy_data.first['deployment_id'].to_s, 'nova', 'nova')
test_key_path = File.join(Astute.config.puppet_ssh_keys_dir, deploy_data.first['deployment_id'].to_s, 'test', 'test')
FileUtils.stubs(:mkdir_p).returns(true).twice
File.stubs(:directory?).returns(true).twice

View File

@ -18,7 +18,7 @@ describe Astute::UpdateRepoSources do
include SpecHelpers
before(:each) do
Astute.config.PUPPET_SSH_KEYS = ['nova']
Astute.config.puppet_ssh_keys = ['nova']
end
let(:ctx) do
@ -52,9 +52,9 @@ describe Astute::UpdateRepoSources do
let(:update_repo_sources) { Astute::UpdateRepoSources.new }
around(:each) do |example|
old_ssh_keys_dir = Astute.config.PUPPET_SSH_KEYS_DIR
old_ssh_keys_dir = Astute.config.puppet_ssh_keys_dir
example.run
Astute.config.PUPPET_SSH_KEYS_DIR = old_ssh_keys_dir
Astute.config.puppet_ssh_keys_dir = old_ssh_keys_dir
end
context 'source configuration generation' do
@ -152,7 +152,7 @@ describe Astute::UpdateRepoSources do
it 'should raise error if metadata not updated' do
nodes.first['cobbler']['profile'] = 'ubuntu_1204_x86_64'
mclient.expects(:execute).with(:cmd => 'apt-get clean; apt-get update').returns(fail_return).times(Astute.config[:MC_RETRIES])
mclient.expects(:execute).with(:cmd => 'apt-get clean; apt-get update').returns(fail_return).times(Astute.config[:mc_retries])
expect { update_repo_sources.process(nodes, ctx) }.to raise_error(Astute::DeploymentEngineError,
/Run command:/)
end

View File

@ -18,13 +18,13 @@ describe Astute::UploadKeys do
include SpecHelpers
around(:each) do |example|
old_puppet_keys = Astute.config.PUPPET_KEYS
old_puppet_keys = Astute.config.puppet_keys
example.run
Astute.config.PUPPET_KEYS = old_puppet_keys
Astute.config.puppet_keys = old_puppet_keys
end
before(:each) do
Astute.config.PUPPET_KEYS = ['mongodb']
Astute.config.puppet_keys = ['mongodb']
end
let(:ctx) do
@ -45,7 +45,7 @@ describe Astute::UploadKeys do
File.stubs(:read).returns("private key").once
mclient.expects(:upload).with(
:path => File.join(
Astute.config.PUPPET_KEYS_DIR,
Astute.config.puppet_keys_dir,
'mongodb',
'mongodb.key'
),

View File

@ -18,13 +18,13 @@ describe Astute::UploadSshKeys do
include SpecHelpers
around(:each) do |example|
old_puppet_ssh_keys = Astute.config.PUPPET_SSH_KEYS
old_puppet_ssh_keys = Astute.config.puppet_ssh_keys
example.run
Astute.config.PUPPET_SSH_KEYS = old_puppet_ssh_keys
Astute.config.puppet_ssh_keys = old_puppet_ssh_keys
end
before(:each) do
Astute.config.PUPPET_SSH_KEYS = ['nova']
Astute.config.puppet_ssh_keys = ['nova']
end
let(:ctx) do
@ -43,7 +43,7 @@ describe Astute::UploadSshKeys do
Astute::MClient.any_instance.stubs(:check_results_with_retries).returns(mclient)
File.stubs(:read).returns("private key").then.returns("public key")
mclient.expects(:upload).with(:path => File.join(Astute.config.PUPPET_SSH_KEYS_DIR, 'nova', 'nova'),
mclient.expects(:upload).with(:path => File.join(Astute.config.puppet_ssh_keys_dir, 'nova', 'nova'),
:content => "private key",
:user_owner => 'root',
:group_owner => 'root',
@ -52,7 +52,7 @@ describe Astute::UploadSshKeys do
:overwrite => true,
:parents => true
)
mclient.expects(:upload).with(:path => File.join(Astute.config.PUPPET_SSH_KEYS_DIR, 'nova', 'nova.pub'),
mclient.expects(:upload).with(:path => File.join(Astute.config.puppet_ssh_keys_dir, 'nova', 'nova.pub'),
:content => "public key",
:user_owner => 'root',
:group_owner => 'root',

View File

@ -82,7 +82,7 @@ describe "PuppetdDeployer" do
.then.returns('ready')
.then.returns('ready')
PuppetdDeployer.expects(:sleep).with(Astute.config.PUPPET_DEPLOY_INTERVAL)
PuppetdDeployer.expects(:sleep).with(Astute.config.puppet_deploy_interval)
PuppetdDeployer.deploy(ctx, nodes)
end
end