Add retries for upload tasks: upload_file and upload_files

Some mcollective clients can, for some reason, ignore a task
from Astute. For such cases Astute should retry its request.

Also:
- refactored tasks to support the class hook post_initialize
  instead of overriding initialize and calling super
- changed @task and @ctx references to the equivalent instance methods
- removed old fixtures

Change-Id: I96613f53303fd71acc437d2f8f47b599bcf3b5d9
This commit is contained in:
Vladimir Sharshov (warpc) 2017-02-16 18:00:21 +03:00
parent 003a0a0efd
commit c489e972ff
26 changed files with 154 additions and 539 deletions

View File

@ -61,6 +61,7 @@ module Astute
conf[:puppet_start_timeout] = 10 # how long it can take for puppet to start
conf[:puppet_start_interval] = 2 # interval between attemps to start puppet
conf[:puppet_retries] = 2 # how many times astute will try to run puppet
conf[:upload_retries] = 3 # how many times astute will try to run upload task
conf[:puppet_succeed_retries] = 0 # use this to rerun a puppet task again if it was successful (idempotency)
conf[:puppet_undefined_retries] = 3 # how many times astute will try to get actual status of node before fail
conf[:puppet_module_path] = '/etc/puppet/modules' # where we should find basic modules for puppet

View File

@ -22,13 +22,34 @@ module Astute
# Run upload without check using mcollective agent
# @param [Hash] mco_params Upload file options
# @param [Integer] timeout Timeout for upload command
# @return [true, false] upload result
def upload_without_check(mco_params)
upload_mclient = upload_mclient(
:check_result => false,
:timeout => mco_params['timeout']
)
upload(mco_params, upload_mclient)
end
# Run upload with check using mcollective agent
# @param [Hash] mco_params Upload file options
# @return [true, false] upload result
def upload_with_check(mco_params)
upload_mclient = upload_mclient(
:check_result => true,
:timeout => mco_params['timeout'],
:retries => mco_params['retries']
)
upload(mco_params, upload_mclient)
end
private
def upload(mco_params, magent)
mco_params = setup_default(mco_params)
results = upload_file(_check_result=false, mco_params['timeout'])
.upload(
results = magent.upload(
:path => mco_params['path'],
:content => mco_params['content'],
:overwrite => mco_params['overwrite'],
@ -55,17 +76,16 @@ module Astute
false
end
private
# Create configured shell mcollective agent
# @return [Astute::MClient]
def upload_file(check_result=false, timeout=2)
def upload_mclient(args={})
MClient.new(
@ctx,
"uploadfile",
[@node_id],
check_result,
timeout
args.fetch(:check_result, false),
args.fetch(:timeout, 2),
args.fetch(:retries, Astute.config.upload_retries)
)
end
@ -73,6 +93,7 @@ module Astute
# @param [Hash] mco_params Upload file options
# @return [Hash] mco_params
def setup_default(mco_params)
mco_params['retries'] ||= Astute.config.upload_retries
mco_params['timeout'] ||= Astute.config.upload_timeout
mco_params['overwrite'] = true if mco_params['overwrite'].nil?
mco_params['parents'] = true if mco_params['parents'].nil?

View File

@ -16,7 +16,7 @@ module Astute
class Task
ALLOWED_STATUSES = [:successful, :failed, :running, :pending, :skipped]
attr_reader :task
attr_reader :task, :ctx
def initialize(task, context)
# WARNING: this code expect that only one node will be send
# on one hook.
@ -24,6 +24,7 @@ module Astute
@status = :pending
@ctx = context
@time_start = Time.now.to_i
post_initialize(task, context)
end
# Run current task on node, specified in task
@ -98,6 +99,10 @@ module Astute
@status == :failed
end
def post_initialize(task, context)
nil
end
private
# Run current task on node, specified in task
@ -149,6 +154,13 @@ module Astute
UploadFileMClient.new(@ctx, node_uid).upload_without_check(mco_params)
end
# Create file with content on selected node
# should use only for small file
# Synchronous (blocking) call
def upload_file_with_check(node_uid, mco_params)
UploadFileMClient.new(@ctx, node_uid).upload_with_check(mco_params)
end
def failed!
self.status = :failed
time_summary

View File

@ -15,8 +15,7 @@
module Astute
class CobblerSync < Task
def initialize(task, context)
super
def post_initialize(task, context)
@work_thread = nil
end
@ -24,8 +23,8 @@ module Astute
def process
cobbler = CobblerManager.new(
@task['parameters']['provisioning_info']['engine'],
@ctx.reporter
task['parameters']['provisioning_info']['engine'],
ctx.reporter
)
@work_thread = Thread.new { cobbler.sync }
end
@ -35,7 +34,7 @@ module Astute
end
def validation
validate_presence(@task['parameters'], 'provisioning_info')
validate_presence(task['parameters'], 'provisioning_info')
end
end

View File

@ -15,10 +15,9 @@
module Astute
class CopyFiles < Task
def initialize(task, context)
super
def post_initialize(task, context)
@work_thread = nil
@files_status = @task['parameters']['files'].inject({}) do |f_s, n|
@files_status = task['parameters']['files'].inject({}) do |f_s, n|
f_s.merge({ n['src']+n['dst'] => :pending })
end
end
@ -26,16 +25,16 @@ module Astute
private
def process
@task['parameters']['files'].each do |file|
task['parameters']['files'].each do |file|
if File.file?(file['src']) && File.readable?(file['src'])
parameters = {
'content' => File.binread(file['src']),
'path' => file['dst'],
'permissions' => file['permissions'] || @task['parameters']['permissions'],
'dir_permissions' => file['dir_permissions'] || @task['parameters']['dir_permissions'],
'permissions' => file['permissions'] || task['parameters']['permissions'],
'dir_permissions' => file['dir_permissions'] || task['parameters']['dir_permissions'],
}
@files_status[file['src']+file['dst']] =
upload_file(@task['node_id'], parameters)
upload_file(task['node_id'], parameters)
else
@files_status[file['src']+file['dst']] = false
end
@ -51,8 +50,8 @@ module Astute
end
def validation
validate_presence(@task, 'node_id')
validate_presence(@task['parameters'], 'files')
validate_presence(task, 'node_id')
validate_presence(task['parameters'], 'files')
end
end

View File

@ -16,7 +16,7 @@ module Astute
class EraseNode < Task
def summary
{'task_summary' => "Node #{@task['node_id']} was erased without reboot"\
{'task_summary' => "Node #{task['node_id']} was erased without reboot"\
" with result #{@status}"}
end
@ -31,20 +31,20 @@ module Astute
end
def validation
validate_presence(@task, 'node_id')
validate_presence(task, 'node_id')
end
def erase_node
remover = MClient.new(
@ctx,
ctx,
"erase_node",
Array(@task['node_id']),
Array(task['node_id']),
_check_result=false)
response = remover.erase_node(:reboot => false)
Astute.logger.debug "#{@ctx.task_id}: Data received from node "\
"#{@task['node_id']} :\n#{response.pretty_inspect}"
Astute.logger.debug "#{ctx.task_id}: Data received from node "\
"#{task['node_id']} :\n#{response.pretty_inspect}"
rescue Astute::MClientTimeout, Astute::MClientError => e
Astute.logger.error("#{@ctx.task_id}: #{task_name} mcollective " \
Astute.logger.error("#{ctx.task_id}: #{task_name} mcollective " \
"erase node command failed with error #{e.message}")
failed!
end

View File

@ -18,8 +18,7 @@ module Astute
# Accept to run shell tasks using existing shell asynchronous
# mechanism. It will run task on master node.
def initialize(task, context)
super
def post_initialize(task, context)
@shell_task = nil
end
@ -34,7 +33,7 @@ module Astute
def process
@shell_task = Shell.new(
generate_master_shell,
@ctx
ctx
)
@shell_task.run
end
@ -44,18 +43,18 @@ module Astute
end
def validation
validate_presence(@task['parameters'], 'cmd')
validate_presence(task['parameters'], 'cmd')
end
def setup_default
@task['parameters']['timeout'] ||= Astute.config.shell_timeout
@task['parameters']['cwd'] ||= Astute.config.shell_cwd
@task['parameters']['retries'] ||= Astute.config.shell_retries
@task['parameters']['interval'] ||= Astute.config.shell_interval
task['parameters']['timeout'] ||= Astute.config.shell_timeout
task['parameters']['cwd'] ||= Astute.config.shell_cwd
task['parameters']['retries'] ||= Astute.config.shell_retries
task['parameters']['interval'] ||= Astute.config.shell_interval
end
def generate_master_shell
@task.merge('node_id' => 'master')
task.merge('node_id' => 'master')
end
end
end

View File

@ -15,13 +15,12 @@
module Astute
class MoveToBootstrap < Task
def initialize(task, context)
super
def post_initialize(task, context)
@work_thread = nil
end
def summary
{'task_summary' => "Node #{@task['node_id']} was move to bootstrap with"\
{'task_summary' => "Node #{task['node_id']} was move to bootstrap with"\
" result #{@status}"}
end
@ -29,39 +28,39 @@ module Astute
def process
cobbler = CobblerManager.new(
@task['parameters']['provisioning_info']['engine'],
@ctx.reporter
task['parameters']['provisioning_info']['engine'],
ctx.reporter
)
@work_thread = Thread.new do
is_exist = cobbler.existent_node?(@task['parameters']['provisioning_info']['slave_name'])
is_exist = cobbler.existent_node?(task['parameters']['provisioning_info']['slave_name'])
# Change node type to prevent wrong node detection as provisioned
# Also this type if node will not rebooted, Astute will be allowed
# to try to reboot such nodes again
change_nodes_type('reprovisioned') if is_exist
bootstrap_profile = @task['parameters']['provisioning_info']['profile'] ||
bootstrap_profile = task['parameters']['provisioning_info']['profile'] ||
Astute.config.bootstrap_profile
cobbler.edit_node(@task['parameters']['provisioning_info']['slave_name'],
cobbler.edit_node(task['parameters']['provisioning_info']['slave_name'],
{'profile' => bootstrap_profile})
cobbler.netboot_node(@task['parameters']['provisioning_info']['slave_name'],
cobbler.netboot_node(task['parameters']['provisioning_info']['slave_name'],
true)
Reboot.new({'node_id' => @task['node_id']}, @ctx).sync_run if is_exist
Reboot.new({'node_id' => task['node_id']}, ctx).sync_run if is_exist
Rsyslogd.send_sighup(
@ctx,
@task['parameters']['provisioning_info']['engine']['master_ip']
ctx,
task['parameters']['provisioning_info']['engine']['master_ip']
)
cobbler.remove_node(@task['parameters']['provisioning_info']['slave_name'])
cobbler.remove_node(task['parameters']['provisioning_info']['slave_name'])
# NOTE(kozhukalov): We try to find out if there are systems
# in the Cobbler with the same MAC addresses. If so, Cobbler is going
# to throw MAC address duplication error. We need to remove these
# nodes.
mac_duplicate_names = cobbler.node_mac_duplicate_names(@task['parameters']['provisioning_info'])
mac_duplicate_names = cobbler.node_mac_duplicate_names(task['parameters']['provisioning_info'])
cobbler.remove_nodes(mac_duplicate_names.map {|n| {'slave_name' => n}})
cobbler.add_node(@task['parameters']['provisioning_info'])
cobbler.add_node(task['parameters']['provisioning_info'])
end
end
@ -70,18 +69,18 @@ module Astute
end
def validation
validate_presence(@task['parameters'], 'provisioning_info')
validate_presence(@task, 'node_id')
validate_presence(task['parameters'], 'provisioning_info')
validate_presence(task, 'node_id')
end
def change_nodes_type(type="image")
run_shell_without_check(
@task['node_id'],
task['node_id'],
"echo '#{type}' > /etc/nailgun_systemtype",
_timeout=5
)[:stdout]
rescue Astute::MClientTimeout, Astute::MClientError => e
Astute.logger.debug("#{@ctx.task_id}: #{task_name} mcollective " \
Astute.logger.debug("#{ctx.task_id}: #{task_name} mcollective " \
"change type command failed with error #{e.message}")
nil
end

View File

@ -17,7 +17,7 @@ module Astute
class NoopEraseNode < Noop
def summary
{'task_summary' => "Node #{@task['node_id']} was erased without reboot (noop mode)"}
{'task_summary' => "Node #{task['node_id']} was erased without reboot (noop mode)"}
end
end

View File

@ -22,7 +22,7 @@ module Astute
def process
@shell_task = NoopShell.new(
generate_master_shell,
@ctx
ctx
)
@shell_task.run
end

View File

@ -17,7 +17,7 @@ module Astute
class NoopMoveToBootstrap < Noop
def summary
{'task_summary' => "Node #{@task['node_id']} was move to bootstrap (noop mode)"}
{'task_summary' => "Node #{task['node_id']} was move to bootstrap (noop mode)"}
end
end

View File

@ -21,8 +21,8 @@ module Astute
def setup_default
super
@task['parameters']['puppet_noop_run'] = true
@task['parameters']['raw_report'] = true
task['parameters']['puppet_noop_run'] = true
task['parameters']['raw_report'] = true
end
end

View File

@ -17,7 +17,7 @@ module Astute
class NoopReboot < Noop
def summary
{'task_summary' => "Node #{@task['node_id']} was rebooted (noop mode)"}
{'task_summary' => "Node #{task['node_id']} was rebooted (noop mode)"}
end
end

View File

@ -20,14 +20,14 @@ module Astute
def process
run_shell_without_check(
@task['node_id'],
task['node_id'],
"mkdir -p #{SHELL_MANIFEST_DIR}",
timeout=2
)
upload_shell_manifest
@puppet_task = NoopPuppet.new(
generate_puppet_hook,
@ctx
ctx
)
@puppet_task.run
end

View File

@ -62,7 +62,7 @@ module Astute
@puppet_task ||= PuppetJob.new(
task_name,
PuppetMClient.new(
@ctx,
ctx,
task['node_id'],
task['parameters'],
),

View File

@ -15,15 +15,14 @@
module Astute
class Reboot < Task
def initialize(task, context)
super
def post_initialize(task, context)
@control_time = nil
@time_start = nil
@already_rebooted = false
end
def summary
{'task_summary' => "Node #{@task['node_id']} was rebooted with "\
{'task_summary' => "Node #{task['node_id']} was rebooted with "\
"result: #{@status}"}
end
@ -34,7 +33,7 @@ module Astute
@time_start = Time.now.to_i
if @control_time == 0
failed!
Astute.logger.warn("#{@ctx.task_id}: #{task_name} failed because" \
Astute.logger.warn("#{ctx.task_id}: #{task_name} failed because" \
"task could not get valid info about boot time")
return
end
@ -42,10 +41,10 @@ module Astute
end
def calculate_status
if Time.now.to_i - @time_start > @task['parameters']['timeout']
if Time.now.to_i - @time_start > task['parameters']['timeout']
failed!
Astute.logger.warn("#{@ctx.task_id}: #{task_name} failed because" \
"reboot timeout #{@task['parameters']['timeout']} expired")
Astute.logger.warn("#{ctx.task_id}: #{task_name} failed because" \
"reboot timeout #{task['parameters']['timeout']} expired")
return
end
@ -57,40 +56,40 @@ module Astute
end
def validation
validate_presence(@task, 'node_id')
validate_presence(task, 'node_id')
end
def setup_default
@task.fetch('parameters', {})['timeout'] ||= Astute.config.reboot_timeout
task.fetch('parameters', {})['timeout'] ||= Astute.config.reboot_timeout
end
def reboot
run_shell_without_check(
@task['node_id'],
task['node_id'],
RebootCommand::CMD,
_timeout=2
)
rescue Astute::MClientTimeout, Astute::MClientError => e
Astute.logger.error("#{@ctx.task_id}: #{task_name} mcollective " \
Astute.logger.error("#{ctx.task_id}: #{task_name} mcollective " \
"reboot command failed with error #{e.message}")
failed!
end
def boot_time
run_shell_without_check(
@task['node_id'],
task['node_id'],
"stat --printf='%Y' /proc/1",
_timeout=2
)[:stdout].to_i
rescue Astute::MClientTimeout, Astute::MClientError => e
Astute.logger.debug("#{@ctx.task_id}: #{task_name} mcollective " \
Astute.logger.debug("#{ctx.task_id}: #{task_name} mcollective " \
"boot time command failed with error #{e.message}")
0
end
def update_online_node_status
run_shell_without_check(
@task['node_id'],
task['node_id'],
"flock -w 0 -o /var/lock/nailgun-agent.lock -c '/usr/bin/nailgun-agent"\
" 2>&1 | tee -a /var/log/nailgun-agent.log | "\
"/usr/bin/logger -t nailgun-agent'",

View File

@ -20,8 +20,7 @@ module Astute
# mechanism. It create and upload 2 files: shell script and
# puppet manifest. Then run puppet manifest
def initialize(task, context)
super
def post_initialize(task, context)
@puppet_task = nil
end
@ -32,7 +31,7 @@ module Astute
end
def node_id
@task['node_id']
task['node_id']
end
private
@ -41,14 +40,14 @@ module Astute
def process
run_shell_without_check(
@task['node_id'],
task['node_id'],
"mkdir -p #{SHELL_MANIFEST_DIR}",
_timeout=2
)
upload_shell_manifest
@puppet_task = Puppet.new(
generate_puppet_hook,
@ctx
ctx
)
@puppet_task.run
end
@ -58,15 +57,15 @@ module Astute
end
def validation
validate_presence(@task, 'node_id')
validate_presence(@task['parameters'], 'cmd')
validate_presence(task, 'node_id')
validate_presence(task['parameters'], 'cmd')
end
def setup_default
@task['parameters']['timeout'] ||= Astute.config.shell_timeout
@task['parameters']['cwd'] ||= Astute.config.shell_cwd
@task['parameters']['retries'] ||= Astute.config.shell_retries
@task['parameters']['interval'] ||= Astute.config.shell_interval
task['parameters']['timeout'] ||= Astute.config.shell_timeout
task['parameters']['cwd'] ||= Astute.config.shell_cwd
task['parameters']['retries'] ||= Astute.config.shell_retries
task['parameters']['interval'] ||= Astute.config.shell_interval
end
def puppet_exec_template
@ -85,8 +84,8 @@ module Astute
end
def shell_exec_template
command = "cd #{@task['parameters']['cwd']} &&" \
" #{@task['parameters']['cmd']}"
command = "cd #{task['parameters']['cwd']} &&" \
" #{task['parameters']['cmd']}"
template = <<-eos
#!/bin/bash
# Puppet shell wrapper for task: <%= task_name %>
@ -106,7 +105,7 @@ module Astute
end
def upload_puppet_manifest
upload_file(@task['node_id'], {
upload_file(task['node_id'], {
'path' => puppet_exec_file_path,
'content' => puppet_exec_template,
'permissions' => '0755'
@ -114,7 +113,7 @@ module Astute
end
def upload_shell_file
upload_file(@task['node_id'], {
upload_file(task['node_id'], {
'path' => shell_exec_file_path,
'content' => shell_exec_template,
'permissions' => '0755'
@ -127,7 +126,7 @@ module Astute
end
def timeout
@task['parameters']['timeout']
task['parameters']['timeout']
end
def manifest_name
@ -136,13 +135,13 @@ module Astute
def generate_puppet_hook
{
'node_id' => @task['node_id'],
'id' => @task['id'],
'node_id' => task['node_id'],
'id' => task['id'],
'parameters' => {
"puppet_manifest" => manifest_name,
"cwd" => SHELL_MANIFEST_DIR,
"timeout" => @task['parameters']['timeout'],
"retries" => @task['parameters']['retries']
"timeout" => task['parameters']['timeout'],
"retries" => task['parameters']['retries']
}
}
end

View File

@ -20,7 +20,7 @@ module Astute
def process
@shell_task = Shell.new(
generate_shell_hook,
@ctx
ctx
)
@shell_task.run
end
@ -30,28 +30,28 @@ module Astute
end
def validation
validate_presence(@task, 'node_id')
validate_presence(@task['parameters'], 'dst')
validate_presence(@task['parameters'], 'src')
validate_presence(task, 'node_id')
validate_presence(task['parameters'], 'dst')
validate_presence(task['parameters'], 'src')
end
def setup_default
@task['parameters']['timeout'] ||= 300
@task['parameters']['retries'] ||= 10
task['parameters']['timeout'] ||= 300
task['parameters']['retries'] ||= 10
end
def generate_shell_hook
path = @task['parameters']['dst']
path = task['parameters']['dst']
rsync_cmd = "mkdir -p #{path} && rsync #{Astute.config.rsync_options}" \
" #{@task['parameters']['src']} #{path}"
" #{task['parameters']['src']} #{path}"
{
"node_id" => @task['node_id'],
"id" => @task['id'],
"node_id" => task['node_id'],
"id" => task['id'],
"parameters" => {
"cmd" => rsync_cmd,
"cwd" => "/",
"timeout" => @task['parameters']['timeout'],
"retries" => @task['parameters']['retries']
"timeout" => task['parameters']['timeout'],
"retries" => task['parameters']['retries']
}
}
end

View File

@ -15,15 +15,14 @@
module Astute
class UploadFile < Task
def initialize(task, context)
super
def post_initialize(task, context)
@upload_status = :pending
end
private
def process
@upload_status = upload_file(@task['node_id'], @task['parameters'])
@upload_status = upload_file_with_check(task['node_id'], task['parameters'])
end
def calculate_status
@ -34,14 +33,15 @@ module Astute
end
def validation
validate_presence(@task, 'node_id')
validate_presence(@task['parameters'], 'path')
validate_presence(@task['parameters'], 'data')
validate_presence(task, 'node_id')
validate_presence(task['parameters'], 'path')
validate_presence(task['parameters'], 'data')
end
def setup_default
@task['parameters']['content'] = @task['parameters']['data']
@task['parameters']['timeout'] ||= Astute.config.upload_timeout
task['parameters']['content'] = task['parameters']['data']
task['parameters']['timeout'] ||= Astute.config.upload_timeout
task['parameters']['retries'] ||= Astute.config.upload_retries
end
end

View File

@ -15,9 +15,8 @@
module Astute
class UploadFiles < Task
def initialize(task, context)
super
@nodes_status = @task['parameters']['nodes'].inject({}) do |n_s, n|
def post_initialize(task, context)
@nodes_status = task['parameters']['nodes'].inject({}) do |n_s, n|
n_s.merge({ n['uid'] => :pending })
end
end
@ -25,7 +24,7 @@ module Astute
private
def process
hook['parameters']['nodes'].each do |node|
task['parameters']['nodes'].each do |node|
node['files'].each do |file|
parameters = {
'content' => file['data'],
@ -34,7 +33,7 @@ module Astute
'dir_permissions' => file['dir_permissions'] || '0755',
}
if @nodes_status[node['uid']]
@nodes_status[node['uid']] = upload_file(node['uid'], parameters)
@nodes_status[node['uid']] = upload_file_with_check(node['uid'], parameters)
end
end
end
@ -49,7 +48,7 @@ module Astute
end
def validation
validate_presence(@task['parameters'], 'nodes')
validate_presence(task['parameters'], 'nodes')
end
end

View File

@ -33,8 +33,6 @@ require 'fuel_deployment'
Deployment::Log.logger.level = Logger::DEBUG
Dir[File.join(File.dirname(__FILE__), 'unit/fixtures/*.rb')].each { |file| require file }
# NOTE(mihgen): I hate to wait for unit tests to complete,
# resetting time to sleep significantly increases tests speed
Astute.config.puppet_deploy_interval = 0
@ -93,9 +91,4 @@ module SpecHelpers
ctx
end
# Transform fixtures from nailgun format node['roles'] array to node['role'] string
def nodes_with_role(data, role)
data.select { |n| n['role'] == role }
end
end

View File

@ -1,59 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
def self.common_attrs(deployment_mode, nodes)
nodes.each do |node|
node.merge!(
"deployment_id" => 1,
"puppet_debug" => true,
"storage_network_range" => "172.16.0.0/24",
"auto_assign_floating_ip" => false,
"mysql" => {
"root_password" => "Z2EqsZo5"
},
"keystone" => {
"admin_token" => "5qKy0i63",
"db_password" => "HHQ86Rym",
"admin_tenant" => "admin"
},
"nova" => {
"user_password" => "h8RY8SE7",
"db_password" => "Xl9I51Cb"
},
"glance" => {
"user_password" => "nDlUxuJq",
"db_password" => "V050pQAn"
},
"rabbit" => {
"user" => "nova",
"password" => "FLF3txKC"
},
"management_network_range" => "192.168.0.0/24",
"public_network_range" => "240.0.1.0/24",
"fixed_network_range" => "10.0.0.0/24",
"floating_network_range" => "240.0.0.0/24",
"task_uuid" => "19d99029-350a-4c9c-819c-1f294cf9e741",
"deployment_mode" => deployment_mode,
"controller_nodes" => controller_nodes(nodes)
)
end
end
def self.controller_nodes(nodes)
controller_nodes = nodes.select{ |n| n['role'] == 'controller' }.map { |e| deep_copy e }
end
end

View File

@ -1,179 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
def self.common_nodes
[
{
"mac" => "52:54:00:0E:B8:F5",
"status" => "provisioning",
"uid" => "1",
"error_type" => nil,
"fqdn" => "controller-1.mirantis.com",
"role" => "controller",
"priority" => 10,
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.2/24"
}, {
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.2/24"
}, {
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
}, {
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
}, {
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.2/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 1,
"ip" => "10.20.0.200",
'meta' => meta
}, {
"mac" => "52:54:00:50:91:DD",
"status" => "provisioning",
"uid" => 2,
"error_type" => nil,
"fqdn" => "compute-2.mirantis.com",
"role" => "compute",
"priority" => 100,
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.3/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.3/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.3/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 2,
"ip" => "10.20.0.221",
'meta' => meta
}, {
"mac" => "52:54:00:C3:2C:28",
"status" => "provisioning",
"uid" => 3,
"error_type" => nil,
"fqdn" => "compute-3.mirantis.com",
"role" => "compute",
"priority" => 100,
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.4/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.4/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.4/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 3,
"ip" => "10.20.0.68",
'meta' => meta
}
]
end
def self.meta
{
'interfaces' => [
{
'name' => 'eth1',
}, {
'name' => 'eth0',
}
]
}
end
end

View File

@ -1,24 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
def self.ha_deploy
deploy_info = Fixtures.common_attrs('ha', Fixtures.ha_nodes)
deploy_info.each do |node|
node.merge(
'management_vip' => "192.168.0.111"
)
end
end
end

View File

@ -1,123 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
def self.ha_nodes
common_nodes + [
{
"mac" => "52:54:00:0E:88:88",
"status" => "provisioned",
"uid" => "4",
"error_type" => nil,
"fqdn" => "controller-4.mirantis.com",
"role" => "primary-controller",
"priority" => 0,
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.5/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.5/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.5/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 4,
"ip" => "10.20.0.205",
'meta' => meta
},
{
"mac" => "52:54:00:0E:99:99",
"status" => "provisioned",
"uid" => "5",
"error_type" => nil,
"fqdn" => "controller-5.mirantis.com",
"role" => "controller",
"priority" => 50,
"network_data" => [
{
"gateway" => "192.168.0.1",
"name" => "management",
"dev" => "eth0",
"brd" => "192.168.0.255",
"netmask" => "255.255.255.0",
"vlan" => 102,
"ip" => "192.168.0.6/24"
},
{
"gateway" => "240.0.1.1",
"name" => "public",
"dev" => "eth0",
"brd" => "240.0.1.255",
"netmask" => "255.255.255.0",
"vlan" => 101,
"ip" => "240.0.1.6/24"
},
{
"name" => "floating",
"dev" => "eth0",
"vlan" => 120
},
{
"name" => "fixed",
"dev" => "eth0",
"vlan" => 103
},
{
"name" => "storage",
"dev" => "eth0",
"vlan" => 104,
"ip" => "172.16.1.6/24",
"netmask" => "255.255.255.0",
"brd" => "172.16.1.255"
}
],
"id" => 5,
"ip" => "10.20.0.206",
'meta' => meta
}
]
end
end

View File

@ -1,20 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Fixtures
def self.multi_deploy
Fixtures.common_attrs('multinode', Fixtures.common_nodes)
end
end