Add the first group of separate role tasks

* globals - many global variables kept from
  our previous deployment for compatibility.
  This dependency should be removed as soon as possible.
* hiera - configure Hiera to use astute.yaml as its data source
* netconfig - set up a Neutron- or Nova-based network
* hosts - update /etc/hosts on the node
* firewall - set up firewall rules
* tools - install several tools needed for
  manual debugging and development

* amqp_hosts - a function that generates the RabbitMQ host list (see the usage sketch below)
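
For reference, a minimal usage sketch in Puppet (the node list, port and
preferred host below are hypothetical; the function name and argument
order come from this change):

$amqp_nodes = ['192.168.0.1', '192.168.0.2', '192.168.0.3']
# yields e.g. '192.168.0.1:5673, 192.168.0.3:5673, 192.168.0.2:5673';
# the preferred node is put first and the rest are rotated per host
# (seeded with fqdn_rand) to spread AMQP connections across the cluster
$amqp_hosts = amqp_hosts($amqp_nodes, '5673', '192.168.0.1')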

Change-Id: I70883a675c96169b130946bb5651600056ed5bad
Related-Blueprint: fuel-library-modularization
Fuel-CI: disable
Dmitry Ilyin 2014-12-17 21:42:39 +03:00 committed by Bogdan Dobrelya
parent 6f76515306
commit 45713255f2
10 changed files with 492 additions and 1 deletion

lib/puppet/parser/functions/amqp_hosts.rb

@@ -0,0 +1,38 @@
module Puppet::Parser::Functions
newfunction(:amqp_hosts, :type => :rvalue,
:doc => <<-EOS
Returns a comma-separated list of AMQP host:port pairs.
Arguments: an array (or comma-separated string) of hosts, an optional
port (default 5673) and an optional preferred host to put first.
EOS
) do |arguments|
raise(Puppet::ParseError, 'No nodes data provided!') if arguments.size < 1
amqp_nodes = arguments[0]
amqp_port = arguments[1] || '5673'
prefer_node = arguments[2]
# split the nodes by comma if they are provided as a string
if amqp_nodes.is_a? String
amqp_nodes = amqp_nodes.split(",").map { |n| n.strip }
end
amqp_nodes = Array(amqp_nodes)
# rotate the nodes array a pseudo-random number of times (the node's FQDN is the seed)
if amqp_nodes.length > 1
shake_times = function_fqdn_rand([amqp_nodes.length]).to_i
shake_times.times do
amqp_nodes.push amqp_nodes.shift
end
end
# move the preferred node to the first position if it's present
if prefer_node and amqp_nodes.include? prefer_node
amqp_nodes.delete prefer_node
amqp_nodes.unshift prefer_node
end
amqp_nodes.map { |n| "#{n}:#{amqp_port}" }.join ', '
end
end
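# Usage sketch (output shown for a case where it is deterministic):
#   amqp_hosts(['192.168.0.1', '192.168.0.2'], '5673', '192.168.0.1')
#   # => '192.168.0.1:5673, 192.168.0.2:5673'
# The preferred node always comes first; with only two nodes the rotation
# cannot change the order of the remaining one, so the result does not
# depend on the node's FQDN.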
# vim: set ts=2 sw=2 et :

osnailyfacter/modular/firewall.pp

@@ -0,0 +1,22 @@
import 'globals.pp'
# Workaround for a Fuel bug with the firewall
firewall {'003 remote rabbitmq':
sport => [ 4369, 5672, 15672, 41055, 55672, 61613 ],
source => $master_ip,
proto => 'tcp',
action => 'accept',
require => Class['openstack::firewall'],
}
firewall {'004 remote puppet':
sport => [ 8140 ],
source => $master_ip,
proto => 'tcp',
action => 'accept',
require => Class['openstack::firewall'],
}
class { 'openstack::firewall' :
nova_vnc_ip_range => $management_network_range,
}

osnailyfacter/modular/globals.pp

@@ -0,0 +1,190 @@
notice('Import Globals start')
$fuel_settings = parseyaml($astute_settings_yaml)
$nodes_hash = hiera('nodes', {})
$storage_hash = hiera('storage', {})
$syslog_hash = hiera('syslog', {})
$base_syslog_hash = hiera('base_syslog', {})
$sahara_hash = hiera('sahara', {})
$murano_hash = hiera('murano', {})
$heat_hash = hiera('heat', {})
$vcenter_hash = hiera('vcenter', {})
$nova_hash = hiera('nova', {})
$mysql_hash = hiera('mysql', {})
$rabbit_hash = hiera('rabbit', {})
$glance_hash = hiera('glance', {})
$keystone_hash = hiera('keystone', {})
$swift_hash = hiera('swift', {})
$cinder_hash = hiera('cinder', {})
$access_hash = hiera('access', {})
$role = hiera('role')
$cinder_nodes_array = hiera('cinder_nodes', [])
$dns_nameservers = hiera('dns_nameservers', [])
$use_neutron = hiera('quantum')
$network_scheme = hiera('network_scheme')
$disable_offload = hiera('disable_offload')
$verbose = true
$debug = hiera('debug', false)
$use_monit = false
$master_ip = hiera('master_ip')
$management_network_range = hiera('management_network_range')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_glance = 'LOG_LOCAL2'
$syslog_log_facility_cinder = 'LOG_LOCAL3'
$syslog_log_facility_neutron = 'LOG_LOCAL4'
$syslog_log_facility_nova = 'LOG_LOCAL6'
$syslog_log_facility_keystone = 'LOG_LOCAL7'
$syslog_log_facility_murano = 'LOG_LOCAL0'
$syslog_log_facility_heat = 'LOG_LOCAL0'
$syslog_log_facility_sahara = 'LOG_LOCAL0'
$syslog_log_facility_ceilometer = 'LOG_LOCAL0'
$syslog_log_facility_ceph = 'LOG_LOCAL0'
$nova_report_interval = '60'
$nova_service_down_time = '180'
$openstack_version = {
'keystone' => 'installed',
'glance' => 'installed',
'horizon' => 'installed',
'nova' => 'installed',
'novncproxy' => 'installed',
'cinder' => 'installed',
}
$nova_rate_limits = {
'POST' => 100000,
'POST_SERVERS' => 100000,
'PUT' => 1000,
'GET' => 100000,
'DELETE' => 100000
}
$cinder_rate_limits = {
'POST' => 100000,
'POST_SERVERS' => 100000,
'PUT' => 100000,
'GET' => 100000,
'DELETE' => 100000
}
$default_ceilometer_hash = {
'enabled' => false,
'db_password' => 'ceilometer',
'user_password' => 'ceilometer',
'metering_secret' => 'ceilometer',
}
$ceilometer_hash = hiera('ceilometer', $default_ceilometer_hash)
$node = filter_nodes($nodes_hash, 'name', $::hostname)
if empty($node) {
fail("Node hostname is not defined in the hash structure")
}
$default_gateway = $node[0]['default_gateway']
prepare_network_config($network_scheme)
if $use_neutron {
$internal_int = get_network_role_property('management', 'interface')
$internal_address = get_network_role_property('management', 'ipaddr')
$internal_netmask = get_network_role_property('management', 'netmask')
$public_int = get_network_role_property('ex', 'interface')
$public_address = get_network_role_property('ex', 'ipaddr')
$public_netmask = get_network_role_property('ex', 'netmask')
$storage_address = get_network_role_property('storage', 'ipaddr')
$storage_netmask = get_network_role_property('storage', 'netmask')
#
$novanetwork_params = {}
$neutron_config = hiera('quantum_settings')
$network_provider = 'neutron'
$neutron_db_password = $neutron_config['database']['passwd']
$neutron_user_password = $neutron_config['keystone']['admin_password']
$neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret']
$base_mac = $neutron_config['L2']['base_mac']
$nsx_config = hiera('nsx_plugin')
if $nsx_config['metadata']['enabled'] {
$use_vmware_nsx = true
$neutron_nsx_config = $nsx_config
}
} else {
$internal_address = $node[0]['internal_address']
$internal_netmask = $node[0]['internal_netmask']
$public_address = $node[0]['public_address']
$public_netmask = $node[0]['public_netmask']
$storage_address = $node[0]['storage_address']
$storage_netmask = $node[0]['storage_netmask']
$public_br = $node[0]['public_br']
$internal_br = $node[0]['internal_br']
$public_int = hiera('public_interface')
$internal_int = hiera('management_interface')
#
$neutron_config = {}
$novanetwork_params = hiera('novanetwork_parameters')
$network_size = $novanetwork_params['network_size']
$num_networks = $novanetwork_params['num_networks']
$vlan_start = $novanetwork_params['vlan_start']
$network_provider = 'nova'
$network_config = {
'vlan_start' => $vlan_start,
}
$network_manager = "nova.network.manager.${novanetwork_params['network_manager']}"
}
$queue_provider = 'rabbitmq'
$custom_mysql_setup_class = 'galera'
$controller = filter_nodes($nodes_hash, 'role', 'controller')
$controller_node_address = $controller[0]['internal_address']
$controller_node_public = $controller[0]['public_address']
$roles = node_roles($nodes_hash, hiera('uid'))
# AMQP client configuration
$amqp_port = '5672'
$amqp_hosts = "${controller_node_address}:${amqp_port}"
$rabbit_ha_queues = false
# RabbitMQ server configuration
$rabbitmq_bind_ip_address = 'UNSET' # bind RabbitMQ to 0.0.0.0
$rabbitmq_bind_port = $amqp_port
$rabbitmq_cluster_nodes = [$controller[0]['name']] # has to be hostnames
# SQLAlchemy backend configuration
$max_pool_size = min($::processorcount * 5 + 0, 30 + 0)
$max_overflow = min($::processorcount * 5 + 0, 60 + 0)
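# NOTE: the '+ 0' presumably forces numeric context on the string values so
# that min() compares integers; e.g. with 8 CPUs:
# max_pool_size = min(8 * 5, 30) = 30, max_overflow = min(8 * 5, 60) = 40.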
$max_retries = '-1'
$idle_timeout = '3600'
$nova_db_password = $nova_hash['db_password']
$cinder_iscsi_bind_addr = $storage_address
$sql_connection = "mysql://nova:${nova_db_password}@${controller_node_address}/nova?read_timeout=60"
$mirror_type = 'external'
$multi_host = true
# Determine who should get the volume service
if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) {
$manage_volumes = 'iscsi'
} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) {
$manage_volumes = 'vmdk'
} elsif ($storage_hash['volumes_ceph']) {
$manage_volumes = 'ceph'
} else {
$manage_volumes = false
}
#Determine who should be the default backend
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
$glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
$glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
} else {
$glance_backend = 'file'
$glance_known_stores = false
}
notice('Import Globals end')

osnailyfacter/modular/hiera.pp

@@ -0,0 +1,47 @@
$data_dir = '/etc/hiera'
$data_name = 'astute'
$astute_data_file = '/etc/astute.yaml'
$hiera_main_config = '/etc/hiera.yaml'
$hiera_puppet_config = '/etc/puppet/hiera.yaml'
$hiera_data_file = "${data_dir}/${data_name}.yaml"
File {
owner => 'root',
group => 'root',
mode => '0755',
}
$hiera_config_content = inline_template('
---
:backends:
- yaml
:hierarchy:
- <%= @data_name %>
:yaml:
:datadir: <%= @data_dir %>
')
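# With the values above the rendered /etc/hiera.yaml is essentially:
#   ---
#   :backends:
#     - yaml
#   :hierarchy:
#     - astute
#   :yaml:
#     :datadir: /etc/hiera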
file { 'hiera_data_dir' :
ensure => 'directory',
path => $data_dir,
}
file { 'hiera_config' :
ensure => 'present',
path => $hiera_main_config,
content => $hiera_config_content,
}
file { 'hiera_data_astute' :
ensure => 'symlink',
path => $hiera_data_file,
target => $astute_data_file,
}
file { 'hiera_puppet_config' :
ensure => 'symlink',
path => $hiera_puppet_config,
target => $hiera_main_config,
}

osnailyfacter/modular/hosts.pp

@@ -0,0 +1,5 @@
import 'globals.pp'
class { "l23network::hosts_file":
nodes => $nodes_hash,
}

osnailyfacter/modular/netconfig.pp

@@ -0,0 +1,45 @@
import 'globals.pp'
if $disable_offload {
L23network::L3::Ifconfig<||> {
ethtool => {
'K' => ['gso off', 'gro off'],
}
}
}
class { 'l23network' :
use_ovs => $use_neutron,
}
class advanced_node_netconfig {
$sdn = generate_network_config()
notify {"SDN: ${sdn}": }
}
if $use_neutron {
class {'advanced_node_netconfig': }
} else {
class { 'osnailyfacter::network_setup':
interfaces => keys(hiera('network_data')),
network_settings => hiera('network_data'),
}
}
# setting kernel reserved ports
# defaults are 49000,35357,41055,58882
class { 'openstack::reserved_ports': }
### TCP connection keepalive and failover related parameters ###
# Configure TCP keepalive for the host OS:
# once a connection has been idle for 30 seconds, send up to 8 probes
# at 3-second intervals and consider the peer dead if none are answered,
# i.e. after 30 + 8 * 3 = 54 seconds overall.
# (Note: the overall check time frame should be lower than
# nova_report_interval.)
class { 'openstack::keepalive' :
tcpka_time => '30',
tcpka_probes => '8',
tcpka_intvl => '3',
tcp_retries2 => '5',
}
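# Assuming openstack::keepalive maps these parameters to the usual Linux
# sysctls (the class itself is not part of this commit), the resulting
# kernel settings would be:
#   net.ipv4.tcp_keepalive_time = 30
#   net.ipv4.tcp_keepalive_probes = 8
#   net.ipv4.tcp_keepalive_intvl = 3
#   net.ipv4.tcp_retries2 = 5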

osnailyfacter/modular/tasks.yaml

@@ -0,0 +1,48 @@
- id: netconfig
type: puppet
groups: [primary-controller, controller, cinder, compute, ceph-osd, zabbix-server, primary-mongo, mongo]
required_for: [deploy]
requires: [hiera]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/netconfig.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
- id: tools
type: puppet
groups: [primary-controller, controller, cinder, compute, ceph-osd, zabbix-server, primary-mongo, mongo]
required_for: [deploy]
requires: [hiera]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/tools.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
- id: hosts
type: puppet
groups: [primary-controller, controller, cinder, compute, ceph-osd, zabbix-server, primary-mongo, mongo]
required_for: [deploy]
requires: [netconfig]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hosts.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
- id: firewall
type: puppet
groups: [primary-controller, controller, cinder, compute, ceph-osd, zabbix-server, primary-mongo, mongo]
required_for: [deploy]
requires: [netconfig]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/firewall.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
- id: hiera
type: puppet
groups: [primary-controller, controller, cinder, compute, ceph-osd, zabbix-server, primary-mongo, mongo]
required_for: [deploy]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hiera.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
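# Resulting order: hiera runs first; netconfig and tools require hiera,
# and hosts and firewall in turn require netconfig.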

osnailyfacter/modular/tools.pp

@@ -0,0 +1,24 @@
class { 'osnailyfacter::atop': }
$tools = [
'screen',
'tmux',
'man',
'htop',
'tcpdump',
'strace',
]
package { $tools :
ensure => 'present',
}
class { 'puppet::pull' :
modules_source => hiera('puppet_modules_source'),
manifests_source => hiera('puppet_manifests_source'),
}
$deployment_mode = hiera('deployment_mode')
if ($deployment_mode == 'ha') or ($deployment_mode == 'ha_compact') {
include haproxy::status
}

spec/functions/amqp_hosts_spec.rb

@@ -0,0 +1,73 @@
require 'spec_helper'
describe 'the amqp_hosts function' do
let(:scope) { PuppetlabsSpec::PuppetInternals.scope }
it 'should exist' do
expect(
Puppet::Parser::Functions.function('amqp_hosts')
).to eq('function_amqp_hosts')
end
it 'should raise an error if there are no arguments' do
expect {
scope.function_amqp_hosts([])
}.to raise_error(Puppet::ParseError)
end
it 'should convert the array of nodes to host:port pairs' do
scope.expects(:lookupvar).with('::fqdn', {}).returns('127.0.0.1')
expect(
scope.function_amqp_hosts([%w(192.168.0.1 192.168.0.2 192.168.0.3), '5673'])
).to eq '192.168.0.3:5673, 192.168.0.1:5673, 192.168.0.2:5673'
end
it 'should use port 5673 by default if no port was provided' do
scope.expects(:lookupvar).with('::fqdn', {}).returns('127.0.0.1')
expect(
scope.function_amqp_hosts([%w(192.168.0.1 192.168.0.2 192.168.0.3)])
).to eq '192.168.0.3:5673, 192.168.0.1:5673, 192.168.0.2:5673'
end
it 'should use a different order for different FQDNs' do
scope.expects(:lookupvar).with('::fqdn', {}).returns('192.168.0.1')
expect(
scope.function_amqp_hosts([%w(192.168.0.1 192.168.0.2 192.168.0.3), '5673'])
).to eq '192.168.0.1:5673, 192.168.0.2:5673, 192.168.0.3:5673'
end
it 'should be able to use another port value' do
scope.expects(:lookupvar).with('::fqdn', {}).returns('127.0.0.1')
expect(
scope.function_amqp_hosts([%w(192.168.0.1 192.168.0.2 192.168.0.3), '123'])
).to eq '192.168.0.3:123, 192.168.0.1:123, 192.168.0.2:123'
end
it 'should move the preferred host to the first position if it was found in the list' do
scope.expects(:lookupvar).with('::fqdn', {}).returns('127.0.0.1')
expect(
scope.function_amqp_hosts([%w(192.168.0.1 192.168.0.2 192.168.0.3), '5673', '192.168.0.1'])
).to eq '192.168.0.1:5673, 192.168.0.3:5673, 192.168.0.2:5673'
end
it 'should ignore the preferred host if it is not in the list' do
scope.expects(:lookupvar).with('::fqdn', {}).returns('127.0.0.1')
expect(
scope.function_amqp_hosts([%w(192.168.0.1 192.168.0.2 192.168.0.3), '5673', '172.16.0.1'])
).to eq '192.168.0.3:5673, 192.168.0.1:5673, 192.168.0.2:5673'
end
it 'should be able to work with a comma-separated host list' do
scope.expects(:lookupvar).with('::fqdn', {}).returns('127.0.0.1')
expect(
scope.function_amqp_hosts(['192.168.0.1, 192.168.0.2,192.168.0.3', '5673'])
).to eq '192.168.0.3:5673, 192.168.0.1:5673, 192.168.0.2:5673'
end
it 'should be able to work with a single host' do
expect(
scope.function_amqp_hosts(['192.168.0.1', '5673'])
).to eq '192.168.0.1:5673'
end
end


@@ -7,4 +7,3 @@ main_manifest="/etc/puppet/manifests/site.pp"
rsync -rvc --delete "${remote_modules}/" "${local_modules}/"
rsync -rvc --delete "${remote_manifests}/" "${local_manifests}/"
puppet apply --verbose --debug --trace "${main_manifest}"