Merge pull request #61 from warpc/deduplication_yaml

Deduplication YAML and validation
Vladimir Sharshov 2013-09-09 03:02:04 -07:00
commit 0e9015a5d7
28 changed files with 1345 additions and 748 deletions

View File

@ -13,6 +13,8 @@ Gem::Specification.new do |s|
s.add_dependency 'activesupport', '3.0.10'
s.add_dependency 'mcollective-client', '2.3.1'
s.add_dependency 'symboltable', '1.0.2'
s.add_dependency 'rest-client', '~> 1.6.7'
s.add_dependency 'kwalify', '~> 0.7.2'
s.add_development_dependency 'rake', '10.0.4'
s.add_development_dependency 'rspec', '2.13.0'
@ -20,7 +22,7 @@ Gem::Specification.new do |s|
s.add_development_dependency 'simplecov', '~> 0.7.1'
s.add_development_dependency 'simplecov-rcov', '~> 0.2.3'
s.files = Dir.glob("{bin,lib,spec}/**/*")
s.files = Dir.glob("{bin,lib,spec,examples}/**/*")
s.executables = ['astute']
s.require_path = 'lib'
end

View File

@ -23,6 +23,7 @@ require 'optparse'
require 'yaml'
require 'astute'
require 'astute/version'
require 'astute/cli/enviroment'
class ConsoleReporter
def report(msg)
@ -30,6 +31,19 @@ class ConsoleReporter
end
end
def report_and_exit(exception, verbose)
$stderr.puts "Error: #{exception.inspect}"
unless verbose
puts "Hint: use astute with --verbose or check log (#{Astute::LOG_PATH}) for more details"
end
Astute.logger.error exception.format_backtrace
exit Astute::FAIL
end
def analize_deploy(status)
status.has_value?('error') ? Astute::FAIL : Astute::SUCCESS
end
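# For example (a hypothetical status hash, keyed by uid as accumulated in
# Astute::Context#report_and_update_status):
#   analize_deploy('node-1' => 'ready', 'node-2' => 'error') # => Astute::FAIL
#   analize_deploy('node-1' => 'ready', 'node-2' => 'ready') # => Astute::SUCCESS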
opts = {}
optparse = OptionParser.new do |o|
o.banner = "Usage: bin/astute -c COMMAND -f FILENAME "
@ -63,7 +77,11 @@ end
reporter = ConsoleReporter.new
Astute.logger = Logger.new(STDOUT) if opts[:verbose]
environment = YAML.load_file(opts[:filename])
begin
environment = Astute::Cli::Enviroment.new(opts[:filename], opts[:command])
rescue Errno::ENOENT, Psych::SyntaxError, Astute::Cli::Enviroment::ValidationError => e
report_and_exit(e, opts[:verbose])
end
deploy_engine = nil
@ -71,13 +89,11 @@ if environment['attributes'] && environment['attributes']['deployment_engine']
case environment['attributes']['deployment_engine']
when 'nailyfact'
deploy_engine = Astute::DeploymentEngine::NailyFact
when 'simplepuppet'
deploy_engine = Astute::DeploymentEngine::SimplePuppet # It just calls puppet and doesn't do any magic
end
end
if [:deploy, :provision, :provision_and_deploy].include? opts[:command]
orchestrator = Astute::Orchestrator.new(deploy_engine, log_parsing=false)
orchestrator = Astute::Orchestrator.new(deploy_engine, log_parsing=true)
end
def console_provision(orchestrator, reporter, environment)
@ -85,6 +101,7 @@ def console_provision(orchestrator, reporter, environment)
if res == Astute::SUCCESS
puts "restarting nodes..."
sleep 5
puts "start watching progress"
res = orchestrator.provision(reporter, environment['task_uuid'], environment['nodes'])
end
res
@ -95,20 +112,17 @@ result = Astute::SUCCESS
begin
result = case opts[:command]
when :deploy
orchestrator.deploy(reporter, environment['task_uuid'], environment['nodes'], environment['attributes'])
analize_deploy orchestrator.deploy(reporter, environment['task_uuid'], environment['nodes'], environment['attributes'])
when :provision
console_provision(orchestrator, reporter, environment)
when :provision_and_deploy
res = console_provision(orchestrator, reporter, environment)
if res == Astute::SUCCESS
res = orchestrator.deploy(reporter, environment['task_uuid'], environment['nodes'], environment['attributes'])
res = analize_deploy orchestrator.deploy(reporter, environment['task_uuid'], environment['nodes'], environment['attributes'])
end
res
end
rescue => e
result = Astute::FAIL
puts "Error: #{e.inspect}"
puts "Hint: use astute with --verbose or check log (#{Astute::LOG_PATH}) for more details" unless opts[:verbose]
Astute.logger.error e.backtrace.join("\n")
report_and_exit(e, opts[:verbose])
end
exit result
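Taken together, a full console run looks roughly like this (a sketch: the YAML file name is hypothetical, the flags follow the option banner above):

bin/astute -c provision_and_deploy -f environment.yaml --verbose

On success the process exits with Astute::SUCCESS; any caught exception is logged and converted to an Astute::FAIL exit code by report_and_exit.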

examples/convert.rb Normal file → Executable file
View File

@ -87,7 +87,7 @@ nodes.each do |node,macaddr|
result['id'] = id
result['uid'] = uid
result['name_servers'] = master_ip
result['role'] = role
result['roles'] = [role]
result['fqdn'] = cobbler_dnsname
system_disk=json_node['meta']['disks'].select {|disk| disk['name'] == 'vda'}.first
cinder_disk=json_node['meta']['disks'].select {|disk| disk['name'] == 'vdb'}.first

View File

@ -0,0 +1,100 @@
# Nodes
nodes:
- name: controller-8
roles:
- controller
public_br: br-ex
internal_br: br-mgmt
interfaces:
- name: eth2
static: 0
mac_address: 08:00:27:D9:E9:FE
onboot: 'no'
peerdns: 'no'
network_name:
- fixed
- name: eth1
static: 0
ip_address: 10.20.1.114
netmask: 255.255.255.0
mac_address: 08:00:27:00:D1:2E
onboot: 'no'
peerdns: 'no'
network_name:
- management
- storage
- name: eth0
dns_name: controller-8.domain.tld # fqdn
static: 0
mac_address: 08:00:27:1D:28:71 # mac
onboot: 'yes'
peerdns: 'no'
use_for_provision: true
network_name:
- public
default_gateway: 10.20.0.1
attributes:
master_ip: 10.20.0.2
use_cow_images: true
libvirt_type: kvm
dns_nameservers:
- 10.20.0.2
verbose: true
debug: true
auto_assign_floating_ip: true
start_guests_on_host_boot: true
create_networks: true
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
quantum: true
management_vip: 10.20.1.200
public_vip: 10.20.0.200
quantum_parameters:
tenant_network_type: gre
segment_range: 300:500
metadata_proxy_shared_secret: quantum
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
floating_network_range: 10.20.0.150/28
fixed_network_range: 10.20.2.0/24
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
use_unicast_corosync: false
horizon_use_ssl: 'false'
cinder_nodes:
- controller
ntp_servers:
- pool.ntp.org
deployment_mode: multinode
deployment_source: cli
deployment_id: 1

View File

@ -0,0 +1,117 @@
---
# Base config
task_uuid: deployment_task
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler
# These parameters can be overridden in the specification of a particular node
common_node_settings:
name_servers: "10.20.0.2"
profile: centos-x86_64
# These parameters can be overridden in the specification of a particular node
common_power_info:
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
netboot_enabled: 1
# These parameters can be overridden in the specification of a particular node
common_ks_meta:
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuel.domain.tld
mco_auto_setup: 1
auth_key: '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
# Nodes
nodes:
- name: controller-8
hostname: controller-8.domain.tld
# Data for provision
ks_meta:
# ks_spaces: '"[{"type": "disk", "id": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
# "volumes": [{"type": "boot", "size": 300}, {"mount": "/boot", "type": "raid",
# "size": 200}, {"type": "lvm_meta", "name": "os", "size": 64}, {"size": 11264,
# "type": "pv", "vg": "os"}, {"type": "lvm_meta", "name": "image", "size": 64},
# {"size": 4492, "type": "pv", "vg": "image"}], "size": 16384}]"'
ks_disks:
# All sizes should be set in megabytes
- type: "disk"
id: "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0"
size: 16384
volumes:
- type: "boot"
size: 300
- type: "raid"
mount: "/boot"
size: 200
- type: "lvm_meta"
name: "os"
size: 64
- type: "pv"
size: 11264
vg: os
- type: "pv"
vg: "image"
size: 4492
- type: "vg"
id: "os"
min_size: 11264
label: "Base System"
volumes:
- type: "lv"
mount: "/"
name: root
size: 10048
- type: "lv"
mount: "/swap"
name: swap
size: 1024
- type: "vg"
id: "image"
min_size: 4492
label: "Image Storage"
volumes:
- type: "lv"
mount: "/var/lib/glance"
name: glance
size: 4200
interfaces:
- name: eth2
ip_address: 10.20.0.187
netmask: 255.255.255.0
static: 0
mac_address: '08:00:27:31:09:34'
onboot: 'no'
peerdns: 'no'
- name: eth1
ip_address: 10.20.0.186
netmask: 255.255.255.0
static: 0
mac_address: 08:00:27:93:54:B0
onboot: 'no'
peerdns: 'no'
- name: eth0
#ip_address: 10.20.0.49 # ip, power_address
#netmask: 255.255.255.0
dns_name: controller-8.domain.tld # fqdn
static: 1
mac_address: 08:00:27:1D:28:71 # mac
onboot: 'yes'
peerdns: 'no'
use_for_provision: true
#End data for provision

View File

@ -1,73 +0,0 @@
# This is example environment configuration file for Astute.
---
# `task_uuid' is used for logging purposes only. You can use it to tag log
# messages related to different Astute runs.
task_uuid: deployment_task
# `attributes' section describe attributes of OpenStack installation.
attributes:
# `deployment_mode' shows what type of installation you choose.
# Can be:
# `singlenode' - means only one node will be deployed. It will
# contain Controller and Compute components of OpenStack.
# `multinode' - means one Controller node and some Compute nodes
# will be deployed.
# `ha' - means at least three Controller nodes and some Compute
# nodes will be deployed in High Availability mode of Controller
# components.
# In the last two cases Astute first deploys Controller components,
# then deploys Compute components, and finally deploys other
# components if they exist.
deployment_mode: multinode
# `deployment_engine' - shows how you will handle attributes for Puppet
# manifests.
# `simplepuppet' - means you should manually set up all necessary attributes
# in your site.pp file. This engine simply runs the Puppet agent on the
# client's side. In this case the Puppet agent uses attributes from site.pp.
# `nailyfact' - means it automatically calculates some necessary attributes,
# e.g. the `controller_node_public' address. Also it stores necessary
# attributes on the node's side. In this case the Puppet agent uses these
# attributes via `facts'.
deployment_engine: simplepuppet
# All other attributes are optional and make sense for `nailyfact' deployment
# engine only.
glance:
db_password: glance
user_password: glance
mysql:
root_password: nova
keystone:
db_password: admin
admin_tenant: admin
admin_token: admin
rabbit:
password: nova
user: nova
nova:
db_password: nova
user_password: nova
auto_assign_floating_ip: false
fixed_network_range: 10.0.0.0/24
storage_network_range: 172.16.0.0/24
floating_network_range: 240.0.0.0/24
management_network_range: 192.168.0.0/24
public_network_range: 240.0.1.0/24
# `nodes' section describes your nodes.
nodes:
# `role' attribute can be:
# `controller' - node should include Controller component of OpenStack.
# `compute' - node should include Compute component of OpenStack.
# other values - any other components.
# Nodes with different roles should be deployed in a specific order.
# See `deployment_mode' for explanations.
- role: controller
# `uid' is a unique identifier of the node. Can be any string. It's used to call
# a particular node via MCollective.
uid: devnailgun1.mirantis.com
# All other node attributes are optional and make sense for `nailyfact'
# deployment engine only.
- role: compute
uid: devnailgun2.mirantis.com

View File

@ -1,56 +0,0 @@
node_01:
mac: 64:43:7B:CA:56:DD
name: controller-01
ip: 10.20.0.94
profile: centos-x86_64
fqdn: controller-01.domain.tld
power_type: ssh
power_user: root
power_pass: /root/.ssh/bootstrap.rsa
power_address: 10.20.0.94
netboot_enabled: '1'
name_servers: ! '"10.20.0.2"'
#Write size in megabytes
ks_meta:
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuelweb.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler

View File

@ -1,241 +0,0 @@
---
# Base config
task_uuid: deployment_task
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler
power_info: &power_info
power_type: ssh
power_user: root
name_servers: ! '"10.20.0.2"'
power_pass: /root/.ssh/bootstrap.rsa
netboot_enabled: '1'
ks_meta: &ks_meta
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuelweb.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
# Nodes
node_22: &node_22
id: 22
uid: 22
mac: 08:00:27:C2:06:DE
ip: &ip 10.20.0.95
fqdn: &fqdn controller-22.domain.tld
# Data for provision
profile: centos-x86_64
ks_meta:
<<: *ks_meta
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 209715200},
{\"type\": \"mbr\"}, {\"size\": 16959668224, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 17179869184}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0\",
\"volumes\": [{\"size\": 536860426240, \"type\": \"pv\", \"vg\": \"os\"}], \"size\":
536870912000}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0\",
\"volumes\": [{\"size\": 2411714314240, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 2411724800000}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 2963243016192}, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2090065920}]}]"'
<<: *power_info
name: controller-22
hostname: *fqdn
power_address: *ip
interfaces:
eth2:
ip_address: 10.20.0.187
netmask: 255.255.255.0
static: '0'
mac_address: '08:00:27:31:09:34'
eth1:
ip_address: 10.20.0.186
netmask: 255.255.255.0
static: '0'
mac_address: 08:00:27:93:54:B0
eth0:
ip_address: 10.20.0.188
netmask: 255.255.255.0
dns_name: *fqdn
static: '0'
mac_address: 08:00:27:C2:06:DE
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
#End data for provision
meta:
memory:
total: 778694656
interfaces:
- mac: '08:00:27:31:09:34'
max_speed: 100
name: eth2
current_speed: 100
- mac: 08:00:27:93:54:B0
max_speed: 100
name: eth1
current_speed: 100
- name: eth0
ip: 10.20.0.95
netmask: 255.255.255.0
mac: 08:00:27:C2:06:DE
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
network_data:
- name: management
ip: 192.168.6.2/24
vlan: 125
dev: eth0
netmask: 255.255.255.0
brd: 192.168.6.255
gateway: 192.168.6.1
- name: public
ip: 240.0.13.2/24
vlan: 124
dev: eth0
netmask: 255.255.255.0
brd: 240.0.13.255
gateway: 240.0.13.1
- name: storage
ip: 172.16.6.2/24
vlan: 126
dev: eth0
netmask: 255.255.255.0
brd: 172.16.6.255
gateway: 172.16.6.1
- name: management
ip: 192.168.6.5/24
vlan: 125
dev: eth0
netmask: 255.255.255.0
brd: 192.168.6.255
gateway: 192.168.6.1
- name: public
ip: 240.0.13.5/24
vlan: 124
dev: eth0
netmask: 255.255.255.0
brd: 240.0.13.255
gateway: 240.0.13.1
- name: storage
ip: 172.16.6.5/24
vlan: 126
dev: eth0
netmask: 255.255.255.0
brd: 172.16.6.255
gateway: 172.16.6.1
- vlan: 124
name: floating
dev: eth0
- vlan: 127
name: fixed
dev: eth0
- name: admin
dev: eth0
role: controller
online: true
progress: 0
nodes:
- <<: *node_22
attributes:
#deployment_engine: simplepuppet
use_cow_images: true
network_manager: FlatDHCPManager
libvirt_type: qemu
controller_nodes:
- <<: *node_22
mysql:
root_password: X1HWFL2i
glance:
db_password: hW3VFgdb
user_password: UMDMMYfp
network_size: 256
swift:
user_password: ODwuK9ij
fixed_network_range: 10.0.6.0/24
nova:
db_password: vlY5FhkA
user_password: UeVjkUxq
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: XjwwZsBU
admin_token: giVDBp05
auto_assign_floating_ip: false
start_guests_on_host_boot: true
rabbit:
password: 3ix8DkDi
management_network_range: 192.168.6.0/24
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
storage_network_range: 172.16.6.0/24
floating_network_range:
- 240.0.12.10
- 240.0.12.11
auth_key: ''
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
compute_scheduler_driver: nova.scheduler.filter_scheduler.FilterScheduler
deployment_mode: multinode
cinder:
db_password: rveahKih
user_password: ENwyu6oa
deployment_id: 8

View File

@ -1,190 +0,0 @@
---
# Base config
task_uuid: deployment_task
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler
power_info: &power_info
power_type: ssh
power_user: root
name_servers: ! '"10.20.0.2"'
power_pass: /root/.ssh/bootstrap.rsa
netboot_enabled: '1'
ks_meta: &ks_meta
mco_enable: 1
mco_vhost: mcollective
mco_pskey: unset
mco_user: mcollective
puppet_enable: 0
install_log_2_syslog: 1
mco_password: marionette
puppet_auto_setup: 1
puppet_master: fuelweb.domain.tld
mco_auto_setup: 1
auth_key: ! '""'
puppet_version: 2.7.19
mco_connector: rabbitmq
mco_host: 10.20.0.2
# Nodes
node_22: &node_22
id: 22
uid: 22
mac: 08:00:27:C2:06:DE
ip: &ip 10.20.0.95
fqdn: &fqdn controller-22.domain.tld
# Data for provision
profile: centos-x86_64
ks_meta:
<<: *ks_meta
ks_spaces: ! '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 209715200},
{\"type\": \"mbr\"}, {\"size\": 16959668224, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 17179869184}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0\",
\"volumes\": [{\"size\": 536860426240, \"type\": \"pv\", \"vg\": \"os\"}], \"size\":
536870912000}, {\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0\",
\"volumes\": [{\"size\": 2411714314240, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 2411724800000}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 2963243016192}, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2090065920}]}]"'
<<: *power_info
name: controller-22
hostname: *fqdn
power_address: *ip
interfaces:
eth2:
ip_address: 10.20.0.187
netmask: 255.255.255.0
static: '0'
mac_address: '08:00:27:31:09:34'
eth1:
ip_address: 10.20.0.186
netmask: 255.255.255.0
static: '0'
mac_address: 08:00:27:93:54:B0
eth0:
ip_address: 10.20.0.188
netmask: 255.255.255.0
dns_name: *fqdn
static: '0'
mac_address: 08:00:27:C2:06:DE
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
#End data for provision
meta:
memory:
total: 778694656
interfaces:
- mac: '08:00:27:31:09:34'
max_speed: 100
name: eth2
current_speed: 100
- mac: 08:00:27:93:54:B0
max_speed: 100
name: eth1
current_speed: 100
- name: eth0
ip: 10.20.0.95
netmask: 255.255.255.0
mac: 08:00:27:C2:06:DE
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
network_data:
- name: management
ip: 192.168.6.2/24
vlan: 125
dev: eth0
netmask: 255.255.255.0
brd: 192.168.6.255
gateway: 192.168.6.1
- name: public
ip: 240.0.13.2/24
vlan: 124
dev: eth0
netmask: 255.255.255.0
brd: 240.0.13.255
gateway: 240.0.13.1
- name: storage
ip: 172.16.6.2/24
vlan: 126
dev: eth0
netmask: 255.255.255.0
brd: 172.16.6.255
gateway: 172.16.6.1
- name: management
ip: 192.168.6.5/24
vlan: 125
dev: eth0
netmask: 255.255.255.0
brd: 192.168.6.255
gateway: 192.168.6.1
- name: public
ip: 240.0.13.5/24
vlan: 124
dev: eth0
netmask: 255.255.255.0
brd: 240.0.13.255
gateway: 240.0.13.1
- name: storage
ip: 172.16.6.5/24
vlan: 126
dev: eth0
netmask: 255.255.255.0
brd: 172.16.6.255
gateway: 172.16.6.1
- vlan: 124
name: floating
dev: eth0
- vlan: 127
name: fixed
dev: eth0
- name: admin
dev: eth0
role: controller
online: true
progress: 0
nodes:
- <<: *node_22

View File

@ -1,13 +0,0 @@
# This example environment config is used in unittest simplepuppet_deploy_spec.
---
nodes:
- status: provisioned
role: controller
uid: devnailgun.mirantis.com
- status: provisioned
role: compute
uid: devnailgun.mirantis.com
attributes:
deployment_mode: multinode
deployment_engine: simplepuppet
task_uuid: deployment_task

View File

@ -17,6 +17,7 @@ require 'astute/ruby_removed_functions'
require 'json'
require 'logger'
require 'shellwords'
require 'active_support/all'
require 'astute/ext/exception'
require 'astute/ext/deep_copy'
@ -28,7 +29,6 @@ require 'astute/deployment_engine'
require 'astute/network'
require 'astute/puppetd'
require 'astute/rpuppet'
require 'astute/deployment_engine/simple_puppet'
require 'astute/deployment_engine/nailyfact'
require 'astute/cobbler'

View File

@ -0,0 +1,361 @@
type: map
mapping:
"task_uuid":
type: text
"nodes":
type: seq
required: true
desc: Array of nodes
name: Nodes
sequence:
- type: map
mapping:
"id":
type: int
unique: yes
"uid":
type: int
unique: yes
"fqdn":
type: text
desc: Fully-qualified domain name of the node
"default_gateway":
type: text
desc: Default gateway for network_data
"roles":
type: seq
required: true
desc: Array of roles
sequence:
- type: text
required: true
enum: ["primary-controller", "controller", "storage", "swift-proxy", "primary-swift-proxy", "compute", "quantum"]
"status":
type: text
enum: ["ready", "provisioned", "provisioning", "discover"]
# Quantum true
"public_br":
type: text
desc: Name of the public bridge for Quantum-enabled configuration
# Quantum true
"internal_br":
type: text
desc: Name of the internal bridge for Quantum-enabled configuration
"interfaces":
type: seq
required: true
sequence:
- type: map
mapping:
"name":
type: text
required: true
unique: yes
"ip_address":
type: text
unique: yes
"netmask":
type: text
"dns_name":
type: text
unique: yes
"static":
type: int
range: { min: 0, max: 1 }
"mac_address":
type: text
required: true
unique: yes
"onboot":
type: text
required: true
enum: ['yes', 'no']
"peerdns":
type: text
required: true
enum: ['yes', 'no']
"use_for_provision":
type: bool
default: false
name: use_for_provision
"network_name":
type: seq
desc: Array of OpenStack network names
sequence:
- type: text
enum: ["public", "management", "storage", "fixed"]
# Full config block
"network_data":
type: seq
desc: Array of network interface hashes
sequence:
- type: map
mapping:
"name":
type: any
#unique: true
#enum: ['management', 'public', 'storage', 'fixed']
desc: Network type
"dev":
type: text
"ip":
type: text
"netmask":
type: text
"gateway":
type: text
"attributes":
type: map
required: true
name: Attributes
desc: General parameters for deployment
mapping:
"deployment_id":
type: int
desc: ID of the deployment, used to differentiate environments
"deployment_source":
type: text
enum: ['cli', 'web']
required: true
"management_vip":
type: text
required: true
desc: "Virtual IP address for internal services (MySQL, AMQP, internal OpenStack endpoints)"
"public_vip":
type: text
required: true
desc: "Virtual IP address for public services: Horizon, public OpenStack endpoints"
"master_ip":
type: text
required: true
desc: IP of puppet master
"deployment_mode":
type: text
enum: ['ha', 'ha_full', 'multinode']
desc:
required: true
"access":
type: map
required: true
mapping:
"password":
type: text
required: true
"user":
type: text
required: true
"tenant":
type: text
required: true
"email":
type: text
required: true
"use_cow_images":
type: bool
required: true
desc: Whether to use copy-on-write (COW) images
"auto_assign_floating_ip":
type: bool
required: true
desc: Whether to assign floating IPs automatically
"libvirt_type":
type: text
enum: [qemu, kvm]
required: true
desc: "Nova libvirt hypervisor type. Values: qemu|kvm"
"start_guests_on_host_boot":
type: bool
required: true
"create_networks":
type: bool
required: true
desc: Whether to create fixed or floating networks
"quantum":
type: bool
required: true
# Quantum true
"quantum_parameters":
type: map
mapping:
"tenant_network_type":
type: text
enum: ['gre', 'vlan']
required: true
desc: "Which type of network segmentation to use. Values: gre|vlan"
"segment_range":
type: text
required: true
desc: "Range of IDs for network segmentation. Consult Quantum documentation."
"metadata_proxy_shared_secret":
type: text
required: true
desc: Shared secret for metadata proxy services
"mysql":
type: map
required: true
desc: Credentials for MySQL
mapping:
"root_password":
type: text
required: true
"swift":
type: map
required: true
desc: Credentials for Swift
mapping:
"user_password":
type: text
required: true
"glance":
type: map
required: true
desc: Credentials for Glance
mapping:
"user_password":
type: text
required: true
"db_password":
type: text
required: true
"nova":
type: map
required: true
desc: Credentials for Nova
mapping:
"user_password":
type: text
required: true
"db_password":
type: text
required: true
"keystone":
type: map
required: true
desc: Credentials for Keystone
mapping:
"db_password":
type: text
required: true
"admin_token":
type: text
required: true
# Quantum true
"quantum_access":
type: map
desc: Credentials for Quantum Access
mapping:
"user_password":
type: text
required: true
"db_password":
type: text
required: true
"rabbit":
type: map
required: true
desc: Credentials for RabbitMQ
mapping:
"user":
type: text
required: true
"password":
type: text
required: true
"cinder":
type: map
required: true
desc: Credentials for Cinder
mapping:
"user":
type: text
required: true
"password":
type: text
required: true
# CIDR (for quantum == true) or array of IPs (for quantum == false)
"floating_network_range":
type: any
required: true
desc: |
Used for creation of floating networks/IPs during deployment.
CIDR (for quantum == true) or array of IPs (for quantum == false)
"fixed_network_range":
type: text
required: true
desc: CIDR for fixed network created during deployment
"ntp_servers":
type: seq
required: true
desc: Array of NTP servers
sequence:
- type: "text"
required: true
"dns_nameservers":
type: seq
required: true
desc: Array of DNS servers configured during deployment phase
sequence:
- type: "text"
required: true
"cinder_nodes":
type: seq
desc: |
Array of nodes to use as cinder-volume backends. Values: 'all'|<hostname>|
<internal IP address of node>|'controller'|<node_role>
sequence:
- type: "text"
required: true
"base_syslog":
type: map
required: true
desc: Main syslog server configuration
mapping:
"syslog_server":
type: text
required: true
"syslog_port":
type: text
required: true
"syslog":
type: map
required: true
desc: Additional syslog servers configuration
mapping:
"syslog_port":
type: text
"syslog_transport":
type: text
enum: ['tcp', 'udp']
"syslog_server":
type: text
"horizon_use_ssl":
type: text
enum: ['false', 'default', 'exist', 'custom']
desc: Use HTTP or HTTPS for OpenStack dashboard (Horizon)
"compute_scheduler_driver":
type: text
enum: ['nova.scheduler.multi.MultiScheduler', 'nova.scheduler.filter_scheduler.FilterScheduler']
desc: Nova scheduler driver class
"use_unicast_corosync":
type: bool
default: false
desc: |
Fuel uses Corosync and Pacemaker cluster engines for HA scenarios, thus
requiring consistent multicast networking. Sometimes it is not possible to configure
multicast in your network. In this case, you can tweak Corosync to use unicast addressing
by setting the use_unicast_corosync variable to true.
"auth_key":
type: text
"network_manager":
type: text
"verbose":
type: bool
desc: How much information OpenStack provides when performing configuration (verbose mode)
"debug":
type: bool
desc: How much information OpenStack provides when performing configuration (debug mode)
"deployment_id":
type: int
required: true
desc: Deployment ID (for CI, just set 1)
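Since this is a plain Kwalify schema, it can also be exercised outside Astute. A minimal sketch, assuming hypothetical local file names:

require 'kwalify'
require 'yaml'

schema = YAML.load_file('deploy_schema.yaml') # the schema above
config = YAML.load_file('environment.yaml')   # hypothetical environment to check
validator = Kwalify::Validator.new(schema)
validator.validate(config).each do |e|
  puts "[#{e.path}] #{e.message}"             # same fields Astute logs below
end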

View File

@ -0,0 +1,374 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'yaml'
require 'rest-client'
require 'astute/ext/hash'
require 'astute/cli/enviroment'
require 'astute/cli/yaml_validator'
module Astute
module Cli
class Enviroment
POWER_INFO_KEYS = ['power_type', 'power_user', 'power_pass', 'netboot_enabled']
ID_KEYS = ['id', 'uid']
COMMON_NODE_KEYS = ['name_servers', 'profile']
KS_META_KEYS = ['mco_enable', 'mco_vhost', 'mco_pskey', 'mco_user', 'puppet_enable',
'install_log_2_syslog', 'mco_password', 'puppet_auto_setup', 'puppet_master',
'mco_auto_setup', 'auth_key', 'puppet_version', 'mco_connector', 'mco_host']
NETWORK_KEYS = ['ip', 'mac', 'fqdn']
PROVISIONING_NET_KEYS = ['power_address']
PROVISION_OPERATIONS = [:provision, :provision_and_deploy]
DEPLOY_OPERATIONS = [:deploy, :provision_and_deploy]
CIDR_REGEXP = '^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(\d|[1-2]\d|3[0-2]))$'
def initialize(file, operation)
@config = YAML.load_file(file)
validate_enviroment(operation)
to_full_config(operation)
end
def [](key)
@config[key]
end
private
def to_full_config(operation)
@config['nodes'].each do |node|
# Common section
node['meta'] ||= {}
define_network_ids(node)
define_id_and_uid(node)
# Provision section
if PROVISION_OPERATIONS.include? operation
define_power_address(node)
define_interfaces_and_interfaces_extra(node)
define_ks_spaces(node)
define_power_info(node)
define_ks_meta(node)
define_node_settings(node)
define_disks_section(node)
end
# Deploy section
if DEPLOY_OPERATIONS.include? operation
define_meta_interfaces(node)
define_fqdn(node)
define_network_data(node)
end
end
end
def validate_enviroment(operation)
validator = YamlValidator.new(operation)
errors = validator.validate(@config)
errors.each do |e|
if e.message.include?("is undefined")
Astute.logger.warn "[#{e.path}] #{e.message}"
else
Astute.logger.error "[#{e.path}] #{e.message}"
$stderr.puts "[#{e.path}] #{e.message}"
end
end
if errors.select {|e| !e.message.include?("is undefined") }.size > 0
raise Enviroment::ValidationError, "Environment validation failed"
end
if DEPLOY_OPERATIONS.include?(operation)
if @config['attributes']['quantum']
@config['nodes'].each do |node|
['public_br', 'internal_br'].each do |br|
if node[br].nil? || node[br].empty?
raise Enviroment::ValidationError, "Node #{node['name'] || node['hostname']}
requires 'public_br' and 'internal_br' when quantum is 'true'"
end
end
end
errors = []
['quantum_parameters', 'quantum_access'].each do |param|
errors << param unless @config['attributes'][param].present?
end
errors.each do |field|
msg = "#{field} is required when quantum is true"
raise Enviroment::ValidationError, msg
end
if !is_cidr_notation?(@config['attributes']['floating_network_range'])
msg = "'floating_network_range' must be in CIDR notation when quantum is 'true'"
raise Enviroment::ValidationError, msg
end
else
unless @config['attributes']['floating_network_range'].is_a?(Array)
msg = "'floating_network_range' must be an array of IPs when quantum is 'false'"
raise Enviroment::ValidationError, msg
end
end
if !is_cidr_notation?(@config['attributes']['fixed_network_range'])
msg = "'fixed_network_range' must be in CIDR notation"
raise Enviroment::ValidationError, msg
end
end
end
# Get data about discovered nodes using FuelWeb API
def find_node_api_data(node)
@api_data ||= begin
response = RestClient.get 'http://localhost:8000/api/nodes'
@api_data = JSON.parse(response).freeze
end
if node['mac']
api_node = @api_data.find{ |n| n['mac'].upcase == node['mac'].upcase }
return api_node if api_node
end
raise Enviroment::ValidationError, "Node #{node['name']} with mac address #{node['mac']}
not found among discovered nodes"
end
# Set unique id and uid for the node from Nailgun using the FuelWeb API
def define_id_and_uid(node)
id = find_node_api_data(node)['id']
# These params are set for the node by Nailgun and should not be edited by the user
node.merge!(
'id' => id,
'uid' => id
)
end
# Set meta/disks section for the node. This data is used during provisioning to calculate
# the percentage of completion of the installation process.
# Example result for node['meta']
# "disks": [
# {
# "model": "VBOX HARDDISK",
# "disk": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
# "name": "sda",
# "size": 17179869184
# }...
# ]
def define_disks_section(node)
node['meta']['disks'] = find_node_api_data(node)['meta']['disks']
end
def define_parameters(node, config_group_name, keys, position=nil)
position ||= node
if @config[config_group_name]
config_group = @config[config_group_name]
keys.each do |key|
position.reverse_merge!(key => config_group[key])
end
end
absent_keys = position.absent_keys(keys)
if !absent_keys.empty?
raise Enviroment::ValidationError, "Please set the #{config_group_name} block or
set params for #{node['name']} manually: #{absent_keys.join(', ')}"
end
@config.delete(config_group_name)
end
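# Note: reverse_merge! comes from ActiveSupport (required via 'active_support/all'
# in astute.rb): keys already present on the node win, missing ones are filled in
# from the common block. A sketch:
#   {'profile' => 'ubuntu_1204_x86_64'}.reverse_merge!('profile' => 'centos-x86_64', 'name_servers' => '10.20.0.2')
#   # => {'profile' => 'ubuntu_1204_x86_64', 'name_servers' => '10.20.0.2'}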
# Add common params from common_node_settings to every node. Parameters already set will not be changed.
def define_node_settings(node)
define_parameters(node, 'common_node_settings', COMMON_NODE_KEYS)
end
# Add common params from common_power_info to every node. Parameters already set will not be changed.
def define_power_info(node)
define_parameters(node, 'common_power_info', POWER_INFO_KEYS)
end
# Add common params from common_ks_meta to every node. Parameters already set will not be changed.
def define_ks_meta(node)
define_parameters(node, 'common_ks_meta', KS_META_KEYS, node['ks_meta'])
end
# Add duplicate network params to the node: ip, mac, fqdn
def define_network_ids(node)
network_eth = node['interfaces'].find {|eth| eth['use_for_provision'] } rescue nil
if network_eth
if network_eth['ip_address'].blank?
node['mac'] = network_eth['mac_address']
api_node = find_node_api_data(node)
api_provision_eth = api_node['meta']['interfaces'].find { |n| n['mac'].to_s.upcase == network_eth['mac_address'].to_s.upcase }
network_eth['ip_address'] = api_provision_eth['ip']
network_eth['netmask'] = api_provision_eth['netmask']
end
node.reverse_merge!(
'ip' => network_eth['ip_address'],
'mac' => network_eth['mac_address'],
'fqdn' => network_eth['dns_name']
)
network_eth.delete('use_for_provision')
end
absent_keys = node.absent_keys(NETWORK_KEYS)
if !absent_keys.empty?
raise Enviroment::ValidationError, "Please set 'use_for_provision' parameter
for #{node['name']} or set manually: #{absent_keys.join(', ')}"
end
end
# Add duplicate network param to the node: power_address
def define_power_address(node)
node['power_address'] = node['ip'] or raise Enviroment::ValidationError, "Please
set 'power_address' parameter for #{node['name']}"
end
# Extend blocks interfaces and interfaces_extra to old formats:
# interfaces:
# eth0:
# ip_address: 10.20.0.188
# netmask: 255.255.255.0
# dns_name: controller-22.domain.tld
# static: '0'
# mac_address: 08:00:27:C2:06:DE
# interfaces_extra:
# eth0:
# onboot: 'yes'
# peerdns: 'no'
def define_interfaces_and_interfaces_extra(node)
return if [node['interfaces'], node['extra_interfaces']].all? {|i| i.is_a?(Hash)}
formated_interfaces = {}
interfaces_extra_interfaces = {}
node['interfaces'].each do |eth|
formated_interfaces[eth['name']] = eth
formated_interfaces[eth['name']].delete('name')
interfaces_extra_interfaces[eth['name']] = {
'onboot' => eth['onboot'],
'peerdns' => eth['peerdns']
}
end
node['interfaces'] = formated_interfaces
node['extra_interfaces'] = interfaces_extra_interfaces
end
# Add duplicate param 'fqdn' to node if it is not specified
def define_fqdn(node)
node['fqdn'] ||= find_node_api_data(node)['meta']['system']['fqdn']
end
# Add meta/interfaces section for node:
# meta:
# interfaces:
# - name: eth0
# ip: 10.20.0.95
# netmask: 255.255.255.0
# mac: 08:00:27:C2:06:DE
# max_speed: 100
# current_speed: 100
def define_meta_interfaces(node)
node['meta']['interfaces'] = find_node_api_data(node)['meta']['interfaces']
end
# Add network_data section for node:
# network_data:
# - dev: eth1
# ip: 10.108.1.8
# name: public
# netmask: 255.255.255.0
# - dev: eth0
# ip: 10.108.0.8
# name:
# - management
# - storage
def define_network_data(node)
return if node['network_data'].is_a?(Array) && !node['network_data'].empty?
node['network_data'] = []
# If define_interfaces_and_interfaces_extra was called or the config format is already full
if node['interfaces'].is_a?(Hash)
node['interfaces'].each do |key, value|
node['network_data'] << {
'dev' => key,
'ip' => value['ip_address'],
'name' => value['network_name'],
'netmask' => value['netmask']
}
end
else
node['interfaces'].each do |eth|
node['network_data'] << {
'dev' => eth['name'],
'ip' => eth['ip_address'],
'name' => eth['network_name'],
'netmask' => eth['netmask']
}
end
end
end
# Generate 'ks_spaces' param from 'ks_disks' param in section 'ks_meta'
# Example input for 'ks_disks' param:
# [{
# "type"=>"disk",
# "id"=>"disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
# "size"=>16384,
# "volumes"=>[
# {
# "type"=>"boot",
# "size"=>300
# },
# {
# "type"=>"pv",
# "size"=>16174,
# "vg"=>"os"
# }
# ]
# }]
# Example result for 'ks_spaces' param: "[{"type": "disk", "id": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0", "volumes": [{"type": "boot", "size": 300}, {"mount": "/boot", "type": "raid", "size": 200}, {"type": "lvm_meta", "name": "os", "size": 64}, {"size": 11264, "type": "pv", "vg": "os"}, {"type": "lvm_meta", "name": "image", "size": 64}, {"size": 4492, "type": "pv", "vg": "image"}], "size": 16384}]"
def define_ks_spaces(node)
if node['ks_meta']['ks_spaces'].present?
node['ks_meta'].delete('ks_disks')
return
end
if node['ks_meta']['ks_disks'].blank?
raise Enviroment::ValidationError, "Please set 'ks_disks' or 'ks_spaces' parameter
in section ks_meta for #{node['name']}"
end
node['ks_meta']['ks_spaces'] = '"' + node['ks_meta']['ks_disks'].to_json.gsub("\"", "\\\"") + '"'
node['ks_meta'].delete('ks_disks')
end
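# The wrapping and gsub above produce the escaped-JSON string format shown in
# the ks_spaces examples earlier. A sketch with a hypothetical minimal volume:
#   [{'type' => 'boot', 'size' => 300}].to_json
#   # => '[{"type":"boot","size":300}]'
#   # after wrapping/escaping: '"[{\"type\":\"boot\",\"size\":300}]"'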
def is_cidr_notation?(value)
cidr = Regexp.new(CIDR_REGEXP)
!cidr.match(value).nil?
end
end # class end
class Enviroment::ValidationError < StandardError; end
end # module Cli
end
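The CIDR check anchors the regexp over the entire value, so the prefix length is mandatory. A sketch:

cidr = Regexp.new(Astute::Cli::Enviroment::CIDR_REGEXP)
cidr.match('10.20.2.0/24') # => MatchData (accepted)
cidr.match('10.20.2.0')    # => nil (no prefix length, rejected)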

View File

@ -0,0 +1,263 @@
type: map
mapping:
"task_uuid":
type: text
"engine":
type: map
required: true
desc: Cobbler engine credentials
mapping:
"url":
type: text
required: true
"username":
type: text
required: true
"password":
type: text
required: true
"common_power_info":
type: map
mapping:
"power_type":
type: text
required: true
desc: Cobbler power-type. Consult cobbler documentation for available options.
"power_user":
type: text
required: true
desc: Username for cobbler to manage power of this machine
"power_pass":
type: text
required: true
desc: Password/credentials for cobbler to manage power of this machine
"netboot_enabled":
type: int
required: true
desc: Disable/enable netboot for this node.
range: { min: 0, max: 1 }
"common_node_settings":
type: map
mapping:
"name_servers":
type: text
required: true
"profile":
type: text
enum: ["centos-x86_64", "ubuntu_1204_x86_64", 'rhel-x86_64']
desc: Cobbler profile for the node.
"common_ks_meta":
type: map
mapping:
"mco_enable":
type: int
range: { min: 0, max: 1 }
required: true
"mco_vhost":
type: text
required: true
"mco_pskey":
type: text
required: true
"mco_user":
type: text
required: true
"mco_password":
type: text
required: true
"puppet_enable":
type: int
range: { min: 0, max: 1 }
required: true
"puppet_auto_setup":
type: int
range: { min: 0, max: 1 }
required: true
"puppet_master":
type: text
required: true
"mco_auto_setup":
type: int
range: { min: 0, max: 1 }
required: true
"auth_key":
type: text
required: true
"puppet_version":
type: text
"install_log_2_syslog":
type: int
range: { min: 0, max: 1 }
required: true
"mco_connector":
type: text
required: true
"mco_host":
type: text
required: true
"nodes":
type: seq
required: true
desc: Array of nodes
sequence:
- type: map
mapping:
"id":
type: int
unique: yes
desc: MCollective node id in mcollective server.cfg
"uid":
type: int
unique: yes
desc: UID of the node for deployment engine. Should be equal to `id`
"name":
type: text
required: true
unique: yes
desc: Name of the system in cobbler
"hostname":
type: text
required: true
"fqdn":
type: text
desc: Fully-qualified domain name of the node
"profile":
type: text
enum: ["centos-x86_64", "ubuntu_1204_x86_64", 'rhel-x86_64']
desc: Cobbler profile for the node.
"ip":
type: text
"mac":
type: text
"power_address":
type: text
desc: IP address of the device managing the node power state
"power_type":
type: text
desc: Cobbler power-type. Consult cobbler documentation for available options.
"power_user":
type: text
desc: Username for cobbler to manage power of this machine
"name_servers":
type: text
"power_pass":
type: text
desc: Password/credentials for cobbler to manage power of this machine
"netboot_enabled":
type: int
range: { min: 0, max: 1 }
desc: Disable/enable netboot for this node.
"ks_meta":
type: map
required: true
desc: Kickstart metadata used during provisioning
mapping:
"mco_enable":
type: int
range: { min: 0, max: 1 }
"mco_vhost":
type: text
"mco_pskey":
type: text
"mco_user":
type: text
"mco_password":
type: text
"puppet_enable":
type: int
range: { min: 0, max: 1 }
"puppet_auto_setup":
type: int
range: { min: 0, max: 1 }
"puppet_master":
type: text
"mco_auto_setup":
type: int
range: { min: 0, max: 1 }
"auth_key":
type: text
"puppet_version":
type: text
"install_log_2_syslog":
type: int
range: { min: 0, max: 1 }
"mco_connector":
type: text
"mco_host":
type: text
"ks_spaces":
type: text
"ks_disks":
type: seq
sequence:
- type: map
required: true
mapping:
"type":
type: str
required: true
enum: [disk, vg]
"id":
type: text
required: true
unique: yes
"size":
type: int
"min_size":
type: int
"label":
type: text
"volumes":
type: seq
sequence:
- type: map
mapping:
"type":
type: text
required: true
enum: [lv, pv, partition, mbr, raid, lvm_meta, boot]
"mount":
type: text
"size":
type: int
"vg":
type: text
"name":
type: text
"interfaces":
type: seq
required: true
sequence:
- type: map
mapping:
"name":
type: text
required: true
unique: yes
"ip_address":
type: text
unique: yes
"netmask":
type: text
"dns_name":
type: text
unique: yes
"static":
type: int
range: { min: 0, max: 1 }
"mac_address":
type: text
required: true
unique: yes
"onboot":
type: text
required: true
enum: ['yes', 'no']
"peerdns":
type: text
required: true
enum: ['yes', 'no']
"use_for_provision":
type: bool
default: false
name: use_for_provision

View File

@ -0,0 +1,46 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'kwalify'
module Astute
module Cli
class YamlValidator < Kwalify::Validator
def initialize(operation)
schemas = if [:deploy, :provision].include? operation
[operation]
elsif operation == :provision_and_deploy
[:provision, :deploy]
else
raise "Incorrect scheme for validation"
end
schema_hashes = []
schema_dir_path = File.expand_path(File.dirname(__FILE__))
schemas.each do |schema_name|
schema_path = File.join(schema_dir_path, "#{schema_name}_schema.yaml")
schema_hashes << YAML.load_file(schema_path)
end
#p schema_hashes[0].recursive_merge!(schema_hashes[1])
#FIXME: key 'hostname:' is undefined for provision_and_deploy. Why?
@schema = schema_hashes.size == 1 ? schema_hashes.first : schema_hashes[0].deep_merge(schema_hashes[1])
super(@schema)
end
end # YamlValidator
end # Cli
end # Astute
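In practice the validator is driven from Enviroment#validate_enviroment; standalone usage would look roughly like this (a sketch, the environment file name is hypothetical):

require 'astute'
validator = Astute::Cli::YamlValidator.new(:provision_and_deploy)
errors = validator.validate(YAML.load_file('environment.yaml'))
errors.each { |e| puts "[#{e.path}] #{e.message}" }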

View File

@ -15,12 +15,24 @@
module Astute
class Context
attr_accessor :task_id, :reporter, :deploy_log_parser
attr_accessor :reporter, :deploy_log_parser
attr_reader :task_id, :status
def initialize(task_id, reporter, deploy_log_parser=nil)
@task_id = task_id
@reporter = reporter
@status = {}
@deploy_log_parser = deploy_log_parser
end
def report_and_update_status(data)
if data['nodes']
data['nodes'].each do |node|
status.merge! node['uid'] => node['status'] if node['uid'] && node['status']
end
end
reporter.report(data)
end
end
end
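A sketch of the new status cache in action (the uid and reporter are hypothetical; any object responding to #report will do):

reporter = Class.new { def report(data) puts data.inspect end }.new
ctx = Astute::Context.new('task-42', reporter)
ctx.report_and_update_status('nodes' => [{'uid' => '1', 'status' => 'ready'}])
ctx.status # => {'1' => 'ready'}, later returned by Orchestrator#deploy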

View File

@ -31,7 +31,7 @@ module Astute
attrs['use_cinder'] ||= nodes.any?{|n| n['role'] == 'cinder'}
@ctx.deploy_log_parser.deploy_type = attrs['deployment_mode']
Astute.logger.info "Deployment mode #{attrs['deployment_mode']}"
result = self.send("deploy_#{attrs['deployment_mode']}", nodes, attrs)
self.send("deploy_#{attrs['deployment_mode']}", nodes, attrs)
end
def method_missing(method, *args)

View File

@ -16,6 +16,8 @@
class Astute::DeploymentEngine::NailyFact < Astute::DeploymentEngine
def deploy(nodes, attrs)
attrs.reverse_merge!('verbose' => true, 'debug' => false)
# Convert multi-role nodes into separate single-role nodes
fuel_nodes = []
nodes = nodes.each do |node|
@ -35,7 +37,7 @@ class Astute::DeploymentEngine::NailyFact < Astute::DeploymentEngine
# calculate_networks method is common and you can find it in superclass
# if node['network_data'] is undefined, we use an empty list because we later try to iterate over it
# otherwise we will get KeyError
node_network_data = node['network_data'].nil? ? [] : node['network_data']
node_network_data = node['network_data'] || []
interfaces = node['meta']['interfaces']
network_data_puppet = calculate_networks(node_network_data, interfaces)
attrs_to_puppet = {

View File

@ -1,26 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Astute::DeploymentEngine::SimplePuppet < Astute::DeploymentEngine
# It is a trivial puppet run. It's assumed that the user has prepared site.pp
# with all required parameters for modules
def deploy_piece(nodes, attrs, retries=2, change_node_status=true)
return false unless validate_nodes(nodes)
@ctx.reporter.report nodes_status(nodes, 'deploying', {'progress' => 0})
Astute::PuppetdDeployer.deploy(@ctx, nodes, retries, change_node_status)
nodes_roles = nodes.map { |n| { n['uid'] => n['role'] } }
Astute.logger.info "#{@ctx.task_id}: Finished deployment of nodes => roles: #{nodes_roles.inspect}"
end
end

lib/astute/ext/hash.rb Normal file
View File

@ -0,0 +1,22 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Hash
def absent_keys(array)
array.select { |key| self[key].blank? }
end
end
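Hash#absent_keys relies on ActiveSupport's blank?, so both nil and empty values count as missing. A quick sketch:

require 'active_support/all'
require 'astute/ext/hash'
{'ip' => '10.20.0.95', 'mac' => nil, 'fqdn' => ''}.absent_keys(['ip', 'mac', 'fqdn'])
# => ['mac', 'fqdn']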

View File

@ -46,7 +46,7 @@ module Astute
{'pattern' => 'wait while node rebooting', 'supposed_time' => 20},
].reverse,
'filename' => 'install/anaconda.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['ip'] %>/<%= @pattern_spec['filename'] %>",
},
'anaconda-log-supposed-time-kvm' => # key for default kvm provision pattern
@ -70,7 +70,7 @@ module Astute
{'pattern' => 'wait while node rebooting', 'supposed_time' => 20},
].reverse,
'filename' => 'install/anaconda.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['ip'] %>/<%= @pattern_spec['filename'] %>",
},
'puppet-log-components-list-ha-controller' => # key for default HA deploy pattern
@ -78,7 +78,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['ip'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Galera', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Galera/File[/etc/mysql]/ensure) created', 'progress' => 0.1},
@ -243,7 +243,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['ip'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 1},
@ -296,7 +296,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['ip'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Glance', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Glance/Package[glance]/ensure) created', 'progress' => 0.1},
@ -383,7 +383,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['ip'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Glance', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Glance/Package[glance]/ensure) created', 'progress' => 0.1},
@ -470,7 +470,7 @@ module Astute
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'chunk_size' => 40000,
'filename' => 'puppet-agent.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['ip'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 1},

View File

@ -42,7 +42,7 @@ module Astute
deploy_engine_instance = @deploy_engine.new(context)
Astute.logger.info "Using #{deploy_engine_instance.class} for deployment."
deploy_engine_instance.deploy(nodes, attrs)
return SUCCESS
context.status
end
def fast_provision(reporter, engine_attrs, nodes)

View File

@ -100,7 +100,7 @@ module Astute
Timeout::timeout(Astute.config.PUPPET_TIMEOUT) do
puppetd = MClient.new(ctx, "puppetd", uids)
puppetd.on_respond_timeout do |uids|
ctx.reporter.report('nodes' => uids.map{|uid| {'uid' => uid, 'status' => 'error', 'error_type' => 'deploy'}})
ctx.report_and_update_status('nodes' => uids.map{|uid| {'uid' => uid, 'status' => 'error', 'error_type' => 'deploy'}})
end if change_node_status
prev_summary = puppetd.last_run_summary
puppetd_runonce(puppetd, uids)
@ -151,7 +151,8 @@ module Astute
"trace: #{e.format_backtrace}"
end
end
ctx.reporter.report('nodes' => nodes_to_report) if nodes_to_report.any?
ctx.report_and_update_status('nodes' => nodes_to_report) if nodes_to_report.any?
# we will iterate only over running nodes and those that we restart deployment for
nodes_to_check = calc_nodes['running'] + nodes_to_retry

View File

@ -14,5 +14,5 @@
module Astute
VERSION = '0.0.1'
VERSION = '0.0.2'
end

View File

@ -123,7 +123,7 @@ describe LogParser do
Dir.mktmpdir do |dir|
# Create temp log files and structures.
pattern_spec['path_prefix'] = "#{dir}/"
path = "#{pattern_spec['path_prefix']}#{node['fqdn']}/#{pattern_spec['filename']}"
path = "#{pattern_spec['path_prefix']}#{node['ip']}/#{pattern_spec['filename']}"
Dir.mkdir(File.dirname(File.dirname(path)))
Dir.mkdir(File.dirname(path))
node['file'] = File.open(path, 'w')
@ -192,7 +192,7 @@ describe LogParser do
# Create temp log files and structures.
pattern_spec['path_prefix'] = "#{dir}/"
nodes.each do |node|
path = "#{pattern_spec['path_prefix']}#{node['fqdn']}/#{pattern_spec['filename']}"
path = "#{pattern_spec['path_prefix']}#{node['ip']}/#{pattern_spec['filename']}"
Dir.mkdir(File.dirname(path))
node['file'] = File.open(path, 'w')
src_filename = File.join(File.dirname(__FILE__), "..", "example-logs", node['src_filename'])

View File

@ -22,11 +22,8 @@ describe "Puppetd" do
context "PuppetdDeployer" do
before :each do
@ctx = mock
@ctx.stubs(:task_id)
@reporter = mock('reporter')
@ctx.stubs(:reporter).returns(ProxyReporter::DeploymentProxyReporter.new(@reporter))
@ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
@ctx = Context.new("task id", ProxyReporter::DeploymentProxyReporter.new(@reporter), Astute::LogParser::NoParsing.new)
end
it "reports ready status for node if puppet deploy finished successfully" do

View File

@ -1,115 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require File.join(File.dirname(__FILE__), '../spec_helper')
describe "SimplePuppet DeploymentEngine" do
context "When deploy is called, " do
before(:each) do
@ctx = mock
@ctx.stubs(:task_id)
@ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
@reporter = mock('reporter')
@reporter.stub_everything
@ctx.stubs(:reporter).returns(Astute::ProxyReporter::DeploymentProxyReporter.new(@reporter))
@deploy_engine = Astute::DeploymentEngine::SimplePuppet.new(@ctx)
@env = YAML.load_file(File.join(File.dirname(__FILE__), "..", "..", "examples", "no_attrs.yaml"))
end
it "it should call valid method depends on attrs" do
nodes = [{'uid' => 1}]
attrs = {'deployment_mode' => 'ha'}
@deploy_engine.expects(:attrs_ha).never # It is not supported in SimplePuppet
@deploy_engine.expects(:deploy_ha).with(nodes, attrs)
# All implementations of deploy_piece go to subclasses
@deploy_engine.respond_to?(:deploy_piece).should be_true
@deploy_engine.deploy(nodes, attrs)
end
it "it should raise an exception if deployment mode is unsupported" do
nodes = [{'uid' => 1}]
attrs = {'deployment_mode' => 'unknown'}
expect {@deploy_engine.deploy(nodes, attrs)}.to raise_exception(
/Method deploy_unknown is not implemented/)
end
it "multinode deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "multinode"
Astute::Metadata.expects(:publish_facts).never # It is not supported in SimplePuppet
# we got two calls, one for controller, and another for all computes
Astute::PuppetdDeployer.expects(:deploy).twice
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
it "ha deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "ha"
Astute::Metadata.expects(:publish_facts).never
Astute::PuppetdDeployer.expects(:deploy).times(2)
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
it "ha_compact deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "ha_compact"
@env['nodes'].concat([{'uid'=>'c1', 'role'=>'controller'},
{'uid'=>'c2', 'role'=>'controller'},
{'uid'=>'o1', 'role'=>'other'}])
controller_nodes = @env['nodes'].select{|n| n['role'] == 'controller'}
compute_nodes = @env['nodes'].select{|n| n['role'] == 'compute'}
other_nodes = @env['nodes'] - controller_nodes
primary_ctrl_nodes = [controller_nodes.shift]
Astute::Metadata.expects(:publish_facts).never
controller_nodes.each do |n|
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, [n], 2, true).once
end
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_ctrl_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, other_nodes, instance_of(Fixnum), true).once
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
it "ha_full deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "ha_full"
@env['nodes'].concat([{'uid'=>'c1', 'role'=>'controller'}, {'uid'=>'c2', 'role'=>'controller'},
{'uid'=>'st1', 'role'=>'storage'}, {'uid'=>'st2', 'role'=>'storage'},
{'uid'=>'sw1', 'role'=>'primary-swift-proxy'}, {'uid'=>'sw2', 'role'=>'swift-proxy'},
{'uid'=>'o1', 'role'=>'other'}])
controller_nodes = @env['nodes'].select{|n| n['role'] == 'controller'}
primary_ctrl_nodes = [controller_nodes.shift]
compute_nodes = @env['nodes'].select{|n| n['role'] == 'compute'}
storage_nodes = @env['nodes'].select {|n| n['role'] == 'storage'}
proxy_nodes = @env['nodes'].select {|n| n['role'] == 'swift-proxy'}
primary_proxy_nodes = @env['nodes'].select {|n| n['role'] == 'primary-swift-proxy'}
other_nodes = @env['nodes'] - controller_nodes - primary_proxy_nodes - \
primary_ctrl_nodes - proxy_nodes - storage_nodes
Astute::Metadata.expects(:publish_facts).never
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_proxy_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, proxy_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, storage_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_ctrl_nodes, 2, true).once
controller_nodes.each do |n|
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, [n], 2, true).once
end
# Astute::PuppetdDeployer.expects(:deploy).with(@ctx, primary_ctrl_nodes, 0, false).once
# Astute::PuppetdDeployer.expects(:deploy).with(@ctx, quantum_nodes, 2, true).once
Astute::PuppetdDeployer.expects(:deploy).with(@ctx, other_nodes, 2, true).once
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
end
end