Refactor osnailyfacter/modular/ceph

Refactor osnailyfacter/modular/ceph to be compatible with Puppet Master

Blueprint: fuel-refactor-osnailyfacter-for-puppet-master-compatibility

Change-Id: I0ef8238d27cd8ea5cbe0061940ef1ba9b31d056e
This commit is contained in:
Alexander Noskov 2016-03-18 12:50:15 +03:00
parent bf060907fd
commit a8111ff8b4
14 changed files with 562 additions and 535 deletions

View File

@@ -0,0 +1,107 @@
class osnailyfacter::ceph::ceph_compute {
# == Class: osnailyfacter::ceph::ceph_compute
#
# Configures a compute node as a Ceph/RBD client: deploys ceph.conf via the
# ::ceph class, creates the nova 'compute' pool plus its cephx keyring, and
# restarts nova-compute so the new settings take effect. Apart from the
# non-ephemeral fallback at the bottom, it is a no-op when no storage
# backend uses Ceph.
notice('MODULAR: ceph/ceph_compute.pp')
# Monitor node name -> IP address map on the 'ceph/public' network role.
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$storage_hash = hiera_hash('storage', {})
$use_neutron = hiera('use_neutron')
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera_hash('keystone', {})
# Cinder settings
$cinder_pool = 'volumes'
# Glance settings
$glance_pool = 'images'
#Nova Compute settings
$compute_user = 'compute'
$compute_pool = 'compute'
# Select the glance backend label matching the configured image storage.
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
} else {
$glance_backend = 'swift'
}
# Ceph is in use when any backend (volumes/images/objects/ephemeral) is ceph.
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
# The primary-monitor hash carries a single entry; extract its node name.
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme', {}))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
# Client-side ceph.conf; the RADOS gateway is never deployed on computes
# (use_rgw is forced off here).
class { '::ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => false,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => $syslog_log_facility_ceph,
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph']
}
# Declared without attributes so the chains below can notify (restart) it.
service { $::ceph::params::service_nova_compute :}
# Nova pool + keyring; the ACL also grants rwx on the cinder pool and read
# on the glance pool so instances can attach volumes and boot from
# RBD-backed images. Falls back to 1024 PGs if no per-pool value is set.
ceph::pool {$compute_pool:
user => $compute_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'",
keyring_owner => 'nova',
pg_num => pick($per_pool_pg_nums[$compute_pool], '1024'),
pgp_num => pick($per_pool_pg_nums[$compute_pool], '1024'),
}
include ::ceph::nova_compute
if ($storage_hash['ephemeral_ceph']) {
include ::ceph::ephemeral
# ceph.conf first, then the libvirt RBD config, then restart nova-compute.
Class['::ceph::conf'] -> Class['::ceph::ephemeral'] ~>
Service[$::ceph::params::service_nova_compute]
}
# ceph.conf -> pool/keyring -> nova ceph client config, then restart.
Class['::ceph::conf'] ->
Ceph::Pool[$compute_pool] ->
Class['::ceph::nova_compute'] ~>
Service[$::ceph::params::service_nova_compute]
# Resource defaults for every Exec declared in this (dynamic) scope;
# declaration order relative to the resources does not matter.
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
}
# With ephemeral_ceph disabled, pin libvirt back to local image storage.
# NOTE(review): this is declared whenever ephemeral_ceph is falsy, even when
# $use_ceph is false -- confirm that is intended on ceph-less nodes.
if !($storage_hash['ephemeral_ceph']) {
class { '::ceph::ephemeral':
libvirt_images_type => 'default',
}
}
}

View File

@@ -0,0 +1,87 @@
class osnailyfacter::ceph::ceph_osd {
# == Class: osnailyfacter::ceph::ceph_osd
#
# Configures a Ceph OSD node: renders ceph.conf and brings up the OSDs via
# the ::ceph class, then applies optional low-level performance tuning
# through ceph_conf resources.
notice('MODULAR: ceph/ceph_osd.pp')
# Pulling hiera
$storage_hash = hiera_hash('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$use_neutron = hiera('use_neutron', false)
$mp_hash = hiera('mp')
$verbose = pick($storage_hash['verbose'], true)
$debug = pick($storage_hash['debug'], hiera('debug', true))
$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false)
$keystone_hash = hiera_hash('keystone', {})
$access_hash = hiera_hash('access', {})
$network_scheme = hiera_hash('network_scheme', {})
$neutron_mellanox = hiera('neutron_mellanox', false)
$syslog_hash = hiera_hash('syslog', {})
$use_syslog = hiera('use_syslog', true)
# NOTE(review): several values above ($verbose, $mp_hash, $access_hash,
# $auto_assign_floating_ip, $neutron_mellanox, $syslog_hash) are not
# referenced below -- presumably carried over from the pre-refactor task;
# confirm before pruning.
# Monitor node name -> IP address map on the 'ceph/public' network role.
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
# The primary-monitor hash carries a single entry; extract its node name.
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config($network_scheme)
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
$ceph_tuning_settings_hash = hiera_hash('ceph_tuning_settings', {})
# Keystone admin endpoint, honouring TLS settings from the use_ssl hash.
$ssl_hash = hiera_hash('use_ssl', {})
$admin_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http')
$admin_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip])
$admin_identity_url = "${admin_auth_protocol}://${admin_auth_address}:35357"
# Deploys ceph.conf and activates the OSD devices on this node. use_rgw
# follows objects_ceph so rgw-related settings land in ceph.conf if needed.
# NOTE(review): $glance_backend is never assigned in this class, so undef is
# passed below -- identical to the pre-refactor ceph-osd.pp; confirm intended.
class { '::ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => $storage_hash['objects_ceph'],
rgw_keystone_url => $admin_identity_url,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => hiera('syslog_log_facility_ceph','LOG_LOCAL0'),
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph'],
}
# Optional low-level tuning taken from the 'ceph_tuning_settings' hiera hash;
# skipped entirely when the hash is empty.
if $ceph_tuning_settings_hash != {} {
ceph_conf {
'global/debug_default' : value => $debug;
'global/max_open_files' : value => $ceph_tuning_settings_hash['max_open_files'];
'osd/osd_mkfs_type' : value => $ceph_tuning_settings_hash['osd_mkfs_type'];
'osd/osd_mount_options_xfs' : value => $ceph_tuning_settings_hash['osd_mount_options_xfs'];
'osd/osd_op_threads' : value => $ceph_tuning_settings_hash['osd_op_threads'];
'osd/filestore_queue_max_ops' : value => $ceph_tuning_settings_hash['filestore_queue_max_ops'];
'osd/filestore_queue_committing_max_ops' : value => $ceph_tuning_settings_hash['filestore_queue_committing_max_ops'];
'osd/journal_max_write_entries' : value => $ceph_tuning_settings_hash['journal_max_write_entries'];
'osd/journal_queue_max_ops' : value => $ceph_tuning_settings_hash['journal_queue_max_ops'];
'osd/objecter_inflight_ops' : value => $ceph_tuning_settings_hash['objecter_inflight_ops'];
'osd/filestore_queue_max_bytes' : value => $ceph_tuning_settings_hash['filestore_queue_max_bytes'];
'osd/filestore_queue_committing_max_bytes': value => $ceph_tuning_settings_hash['filestore_queue_committing_max_bytes'];
# NOTE(review): journal_max_write_bytes is fed from the
# 'journal_queue_max_bytes' key (same source as the next line) -- looks
# like a copy/paste slip carried over from the pre-refactor manifest;
# confirm against the hiera schema before changing.
'osd/journal_max_write_bytes' : value => $ceph_tuning_settings_hash['journal_queue_max_bytes'];
'osd/journal_queue_max_bytes' : value => $ceph_tuning_settings_hash['journal_queue_max_bytes'];
'osd/ms_dispatch_throttle_bytes' : value => $ceph_tuning_settings_hash['ms_dispatch_throttle_bytes'];
# ('infilght' spelling matches the hiera key as delivered -- sic)
'osd/objecter_infilght_op_bytes' : value => $ceph_tuning_settings_hash['objecter_infilght_op_bytes'];
'osd/filestore_max_sync_interval' : value => $ceph_tuning_settings_hash['filestore_max_sync_interval'];
}
# File /root/ceph.conf is symlink which is created after /etc/ceph/ceph.conf in ceph::conf class
File<| title == '/root/ceph.conf' |> -> Ceph_conf <||>
}
# TODO(bogdando) add monit ceph-osd services monitoring, if required
#################################################################
# vim: set ts=2 sw=2 et :
}

View File

@@ -0,0 +1,84 @@
class osnailyfacter::ceph::ceph_pools {
# == Class: osnailyfacter::ceph::ceph_pools
#
# Creates the RBD pools (images, volumes, backups) together with their
# cephx users and keyrings, then restarts the consuming OpenStack services
# so they pick up the freshly installed keyrings.
notice('MODULAR: ceph/ceph_pools.pp')
$storage_hash = hiera_hash('storage', {})
$osd_pool_default_pg_num = $storage_hash['pg_num']
$osd_pool_default_pgp_num = $storage_hash['pg_num']
# NOTE(review): the two defaults above are not referenced below -- per-pool
# sizes come from per_pool_pg_nums with hard-coded fallbacks; confirm.
# Cinder settings
$cinder_user = 'volumes'
$cinder_pool = 'volumes'
# Cinder Backup settings
$cinder_backup_user = 'backups'
$cinder_backup_pool = 'backups'
# Glance settings
$glance_user = 'images'
$glance_pool = 'images'
# Resource defaults for every Exec declared in this (dynamic) scope.
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
# Glance pool: access limited to its own pool; falls back to 256 PGs.
ceph::pool {$glance_pool:
user => $glance_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'",
keyring_owner => 'glance',
pg_num => pick($per_pool_pg_nums[$glance_pool], '256'),
pgp_num => pick($per_pool_pg_nums[$glance_pool], '256'),
}
# Cinder pool: rwx on volumes plus read-only on images (enables
# copy-on-write volume-from-image clones); falls back to 2048 PGs.
ceph::pool {$cinder_pool:
user => $cinder_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'",
keyring_owner => 'cinder',
pg_num => pick($per_pool_pg_nums[$cinder_pool], '2048'),
pgp_num => pick($per_pool_pg_nums[$cinder_pool], '2048'),
}
# Backup pool: rwx on backups and volumes; keyring owned by 'cinder'
# because cinder-backup runs under the cinder user; falls back to 512 PGs.
ceph::pool {$cinder_backup_pool:
user => $cinder_backup_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rwx pool=${cinder_pool}'",
keyring_owner => 'cinder',
pg_num => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
pgp_num => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
}
# Create the pools serially, in dependency order.
Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool]
# Restart the cinder services once their pools/keyrings exist.
if ($storage_hash['volumes_ceph']) {
include ::cinder::params
service { 'cinder-volume':
ensure => 'running',
name => $::cinder::params::volume_service,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$cinder_pool] ~> Service['cinder-volume']
service { 'cinder-backup':
ensure => 'running',
name => $::cinder::params::backup_service,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$cinder_backup_pool] ~> Service['cinder-backup']
}
# Restart glance-api once the images pool/keyring exists.
if ($storage_hash['images_ceph']) {
include ::glance::params
service { 'glance-api':
ensure => 'running',
name => $::glance::params::api_service_name,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$glance_pool] ~> Service['glance-api']
}
}

View File

@@ -0,0 +1,42 @@
class osnailyfacter::ceph::enable_rados {
  # == Class: osnailyfacter::ceph::enable_rados
  #
  # Makes sure the RADOS gateway is running. On Ubuntu the sysvinit job is
  # disabled in favour of the 'radosgw-all' upstart job (with an override
  # file pinning its runlevels); elsewhere the platform's own radosgw
  # service is simply started and enabled.

  notice('MODULAR: ceph/enable_rados.pp')

  include ::ceph::params

  $radosgw_service       = $::ceph::params::service_radosgw
  $radosgw_override_file = '/etc/init/radosgw-all.override'

  case $::operatingsystem {
    'Ubuntu': {
      # Upstart override: start the gateway on multi-user runlevels only.
      file { $radosgw_override_file:
        ensure  => present,
        mode    => '0644',
        owner   => 'root',
        group   => 'root',
        content => "start on runlevel [2345]\nstop on starting rc RUNLEVEL=[016]\n",
      }

      # Keep the legacy sysvinit job from racing the upstart one.
      service { 'radosgw':
        enable   => false,
        provider => 'debian',
      }

      service { 'radosgw-all':
        ensure   => running,
        enable   => true,
        provider => 'upstart',
      }

      # Disable sysvinit first, install the override, then (re)start upstart.
      Service['radosgw'] -> File[$radosgw_override_file] ~> Service['radosgw-all']
    }
    default: {
      service { $radosgw_service:
        ensure => running,
        enable => true,
      }
    }
  }
}

View File

@@ -0,0 +1,99 @@
class osnailyfacter::ceph::mon {
# == Class: osnailyfacter::ceph::mon
#
# Deploys a Ceph monitor on this node via the ::ceph class, then restarts
# the ceph-backed cinder/glance services so they reconnect to the cluster.
# No-op when no storage backend uses Ceph.
notice('MODULAR: ceph/mon.pp')
$storage_hash = hiera_hash('storage', {})
$use_neutron = hiera('use_neutron')
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera_hash('keystone', {})
# Monitor node name -> IP address map on the 'ceph/public' network role.
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
# Select the glance backend label matching the configured image storage.
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
} else {
$glance_backend = 'swift'
}
# Ceph is in use when any backend (volumes/images/objects/ephemeral) is ceph.
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
# The primary-monitor hash carries a single entry; extract its node name.
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme', {}))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
# This node's own monitor address on the ceph/public network.
$mon_addr = get_network_role_property('ceph/public', 'ipaddr')
# Renders ceph.conf and sets up the monitor daemon; the RADOS gateway is
# handled by the separate radosgw task (use_rgw forced off here).
class { '::ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
mon_addr => $mon_addr,
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => false,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => $syslog_log_facility_ceph,
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph']
}
# Restart ceph-backed cinder services after the ::ceph class converges.
if ($storage_hash['volumes_ceph']) {
include ::cinder::params
service { 'cinder-volume':
ensure => 'running',
name => $::cinder::params::volume_service,
hasstatus => true,
hasrestart => true,
}
service { 'cinder-backup':
ensure => 'running',
name => $::cinder::params::backup_service,
hasstatus => true,
hasrestart => true,
}
Class['::ceph'] ~> Service['cinder-volume']
Class['::ceph'] ~> Service['cinder-backup']
}
# Restart glance-api after the ::ceph class converges.
if ($storage_hash['images_ceph']) {
include ::glance::params
service { 'glance-api':
ensure => 'running',
name => $::glance::params::api_service_name,
hasstatus => true,
hasrestart => true,
}
Class['::ceph'] ~> Service['glance-api']
}
}
}

View File

@@ -0,0 +1,111 @@
class osnailyfacter::ceph::radosgw {
# == Class: osnailyfacter::ceph::radosgw
#
# Deploys the RADOS Gateway (Swift-compatible object API) behind an
# apache + fastcgi front end with keystone authentication. Only active
# when the objects_ceph storage backend is enabled.
notice('MODULAR: ceph/radosgw.pp')
$storage_hash = hiera_hash('storage', {})
$use_neutron = hiera('use_neutron')
$public_vip = hiera('public_vip')
$keystone_hash = hiera_hash('keystone', {})
$management_vip = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$public_ssl_hash = hiera_hash('public_ssl')
# Pool for rgw control/metadata objects.
$radosgw_large_pool_name = '.rgw'
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$external_lb = hiera('external_lb', false)
# NOTE(review): $mon_address_map, $external_lb and $use_neutron are not
# referenced below -- presumably leftovers from the pre-refactor task.
# Keystone admin endpoint, honouring TLS settings from the use_ssl hash.
$ssl_hash = hiera_hash('use_ssl', {})
$admin_identity_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http')
$admin_identity_address = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip])
$admin_identity_url = "${admin_identity_protocol}://${admin_identity_address}:35357"
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
# $use_ceph is true whenever objects_ceph is, so the gate reduces to
# objects_ceph -- kept as-is for parity with the pre-refactor manifest.
if $use_ceph and $storage_hash['objects_ceph'] {
# The primary-monitor hash carries a single entry; extract its node name.
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme', {}))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
# Address the gateway vhost binds to, on the 'ceph/radosgw' network role.
$rgw_ip_address = get_network_role_property('ceph/radosgw', 'ipaddr')
# Listen directives with host required for ip_based vhosts
class { '::osnailyfacter::apache':
listen_ports => hiera_array('apache_ports', ['0.0.0.0:80', '0.0.0.0:8888']),
}
# Apache modules needed for the fastcgi handoff on Debian-family nodes.
if ($::osfamily == 'Debian'){
apache::mod {'rewrite': }
apache::mod {'proxy': }
apache::mod {'proxy_fcgi': }
}
include ::tweaks::apache_wrappers
include ::ceph::params
# NOTE(review): $haproxy_stats_url is not referenced below -- confirm
# before removal.
$haproxy_stats_url = "http://${service_endpoint}:10000/;csv"
# Internal keystone endpoint, honouring TLS settings from the use_ssl hash.
$internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http')
$internal_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint, $management_vip])
$internal_auth_url = "${internal_auth_protocol}://${internal_auth_address}:5000"
# Wait for keystone to answer before creating rgw keystone integration.
class { '::osnailyfacter::wait_for_keystone_backends': }
Class['::osnailyfacter::wait_for_keystone_backends'] -> Class['::ceph::keystone']
class { '::ceph::radosgw':
# SSL
# use_ssl is pinned off for the gateway itself; public TLS follows the
# public_ssl hash -- presumably terminated upstream; confirm.
use_ssl => false,
public_ssl => $public_ssl_hash['services'],
# Ceph
primary_mon => $primary_mon,
pub_ip => $public_vip,
adm_ip => $management_vip,
int_ip => $management_vip,
# RadosGW settings
rgw_host => $::hostname,
rgw_ip => $rgw_ip_address,
rgw_port => '6780',
swift_endpoint_port => '8080',
rgw_keyring_path => '/etc/ceph/keyring.radosgw.gateway',
rgw_socket_path => '/tmp/radosgw.sock',
rgw_frontends => 'fastcgi socket_port=9000 socket_host=127.0.0.1',
rgw_log_file => '/var/log/ceph/radosgw.log',
rgw_data => '/var/lib/ceph/radosgw',
rgw_dns_name => "*.${::domain}",
rgw_print_continue => true,
#rgw Keystone settings
rgw_use_pki => false,
rgw_use_keystone => true,
rgw_keystone_url => $admin_identity_url,
rgw_keystone_admin_token => $keystone_hash['admin_token'],
rgw_keystone_token_cache_size => '10',
rgw_keystone_accepted_roles => '_member_, Member, admin, swiftoperator',
rgw_keystone_revocation_interval => '1000000',
rgw_s3_auth_use_keystone => false,
rgw_nss_db_path => '/etc/ceph/nss',
rgw_large_pool_name => $radosgw_large_pool_name,
rgw_large_pool_pg_nums => pick($storage_hash['per_pool_pg_nums'][$radosgw_large_pool_name], '512'),
#rgw Log settings
use_syslog => hiera('use_syslog', true),
syslog_facility => hiera('syslog_log_facility_ceph', 'LOG_LOCAL0'),
syslog_level => hiera('syslog_log_level_ceph', 'info'),
}
# Resource defaults for every Exec declared in this (dynamic) scope;
# declaration order relative to the resources does not matter.
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
}
}

View File

@@ -0,0 +1,25 @@
class osnailyfacter::ceph::updatedb {
  # == Class: osnailyfacter::ceph::updatedb
  #
  # On nodes where any storage backend is ceph-backed, keeps updatedb from
  # indexing the (potentially huge) Ceph data tree by adding /var/lib/ceph
  # to PRUNEPATHS in /etc/updatedb.conf.

  notice('MODULAR: ceph/updatedb.pp')

  $storage_hash = hiera_hash('storage', {})

  # Ceph counts as "in use" when any of the three backends is ceph-backed.
  if $storage_hash['volumes_ceph'] or $storage_hash['images_ceph'] or $storage_hash['objects_ceph'] {
    $use_ceph = true
  } else {
    $use_ceph = false
  }

  if $use_ceph {
    # Append /var/lib/ceph to the PRUNEPATHS list in place; the 'unless'
    # guard keeps the edit idempotent and tolerates a missing config file.
    exec { 'Ensure /var/lib/ceph in the updatedb PRUNEPATH':
      path    => [ '/usr/bin', '/bin' ],
      command => "sed -i -Ee 's|(PRUNEPATHS *= *\"[^\"]*)|\\1 /var/lib/ceph|' /etc/updatedb.conf",
      unless  => "test ! -f /etc/updatedb.conf || grep 'PRUNEPATHS *= *.*/var/lib/ceph.*' /etc/updatedb.conf",
    }
  }
}

View File

@@ -1,83 +1 @@
notice('MODULAR: ceph-osd.pp')
# Pulling hiera
$storage_hash = hiera_hash('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$use_neutron = hiera('use_neutron', false)
$mp_hash = hiera('mp')
$verbose = pick($storage_hash['verbose'], true)
$debug = pick($storage_hash['debug'], hiera('debug', true))
$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false)
$keystone_hash = hiera_hash('keystone', {})
$access_hash = hiera_hash('access', {})
$network_scheme = hiera_hash('network_scheme', {})
$neutron_mellanox = hiera('neutron_mellanox', false)
$syslog_hash = hiera_hash('syslog', {})
$use_syslog = hiera('use_syslog', true)
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config($network_scheme)
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
$ceph_tuning_settings_hash = hiera_hash('ceph_tuning_settings', {})
$ssl_hash = hiera_hash('use_ssl', {})
$admin_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http')
$admin_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip])
$admin_identity_url = "${admin_auth_protocol}://${admin_auth_address}:35357"
class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => $storage_hash['objects_ceph'],
rgw_keystone_url => $admin_identity_url,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => hiera('syslog_log_facility_ceph','LOG_LOCAL0'),
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph'],
}
if $ceph_tuning_settings_hash != {} {
ceph_conf {
'global/debug_default' : value => $debug;
'global/max_open_files' : value => $ceph_tuning_settings_hash['max_open_files'];
'osd/osd_mkfs_type' : value => $ceph_tuning_settings_hash['osd_mkfs_type'];
'osd/osd_mount_options_xfs' : value => $ceph_tuning_settings_hash['osd_mount_options_xfs'];
'osd/osd_op_threads' : value => $ceph_tuning_settings_hash['osd_op_threads'];
'osd/filestore_queue_max_ops' : value => $ceph_tuning_settings_hash['filestore_queue_max_ops'];
'osd/filestore_queue_committing_max_ops' : value => $ceph_tuning_settings_hash['filestore_queue_committing_max_ops'];
'osd/journal_max_write_entries' : value => $ceph_tuning_settings_hash['journal_max_write_entries'];
'osd/journal_queue_max_ops' : value => $ceph_tuning_settings_hash['journal_queue_max_ops'];
'osd/objecter_inflight_ops' : value => $ceph_tuning_settings_hash['objecter_inflight_ops'];
'osd/filestore_queue_max_bytes' : value => $ceph_tuning_settings_hash['filestore_queue_max_bytes'];
'osd/filestore_queue_committing_max_bytes': value => $ceph_tuning_settings_hash['filestore_queue_committing_max_bytes'];
'osd/journal_max_write_bytes' : value => $ceph_tuning_settings_hash['journal_queue_max_bytes'];
'osd/journal_queue_max_bytes' : value => $ceph_tuning_settings_hash['journal_queue_max_bytes'];
'osd/ms_dispatch_throttle_bytes' : value => $ceph_tuning_settings_hash['ms_dispatch_throttle_bytes'];
'osd/objecter_infilght_op_bytes' : value => $ceph_tuning_settings_hash['objecter_infilght_op_bytes'];
'osd/filestore_max_sync_interval' : value => $ceph_tuning_settings_hash['filestore_max_sync_interval'];
}
# File /root/ceph.conf is symlink which is created after /etc/ceph/ceph.conf in ceph::conf class
File<| title == '/root/ceph.conf' |> -> Ceph_conf <||>
}
# TODO(bogdando) add monit ceph-osd services monitoring, if required
#################################################################
# vim: set ts=2 sw=2 et :
include ::osnailyfacter::ceph::ceph_osd

View File

@@ -1,106 +1 @@
notice('MODULAR: ceph/ceph_compute.pp')
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$storage_hash = hiera_hash('storage', {})
$use_neutron = hiera('use_neutron')
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera_hash('keystone', {})
# Cinder settings
$cinder_pool = 'volumes'
# Glance settings
$glance_pool = 'images'
#Nova Compute settings
$compute_user = 'compute'
$compute_pool = 'compute'
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
} else {
$glance_backend = 'swift'
}
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme', {}))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => false,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => $syslog_log_facility_ceph,
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph']
}
service { $::ceph::params::service_nova_compute :}
ceph::pool {$compute_pool:
user => $compute_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'",
keyring_owner => 'nova',
pg_num => pick($per_pool_pg_nums[$compute_pool], '1024'),
pgp_num => pick($per_pool_pg_nums[$compute_pool], '1024'),
}
include ceph::nova_compute
if ($storage_hash['ephemeral_ceph']) {
include ceph::ephemeral
Class['ceph::conf'] -> Class['ceph::ephemeral'] ~>
Service[$::ceph::params::service_nova_compute]
}
Class['ceph::conf'] ->
Ceph::Pool[$compute_pool] ->
Class['ceph::nova_compute'] ~>
Service[$::ceph::params::service_nova_compute]
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
}
if !($storage_hash['ephemeral_ceph']) {
class { 'ceph::ephemeral':
libvirt_images_type => 'default',
}
}
include ::osnailyfacter::ceph::ceph_compute

View File

@@ -1,82 +1 @@
notice('MODULAR: ceph/ceph_pools')
$storage_hash = hiera_hash('storage', {})
$osd_pool_default_pg_num = $storage_hash['pg_num']
$osd_pool_default_pgp_num = $storage_hash['pg_num']
# Cinder settings
$cinder_user = 'volumes'
$cinder_pool = 'volumes'
# Cinder Backup settings
$cinder_backup_user = 'backups'
$cinder_backup_pool = 'backups'
# Glance settings
$glance_user = 'images'
$glance_pool = 'images'
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
ceph::pool {$glance_pool:
user => $glance_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'",
keyring_owner => 'glance',
pg_num => pick($per_pool_pg_nums[$glance_pool], '256'),
pgp_num => pick($per_pool_pg_nums[$glance_pool], '256'),
}
ceph::pool {$cinder_pool:
user => $cinder_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'",
keyring_owner => 'cinder',
pg_num => pick($per_pool_pg_nums[$cinder_pool], '2048'),
pgp_num => pick($per_pool_pg_nums[$cinder_pool], '2048'),
}
ceph::pool {$cinder_backup_pool:
user => $cinder_backup_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rwx pool=${cinder_pool}'",
keyring_owner => 'cinder',
pg_num => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
pgp_num => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
}
Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool]
if ($storage_hash['volumes_ceph']) {
include ::cinder::params
service { 'cinder-volume':
ensure => 'running',
name => $::cinder::params::volume_service,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$cinder_pool] ~> Service['cinder-volume']
service { 'cinder-backup':
ensure => 'running',
name => $::cinder::params::backup_service,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$cinder_backup_pool] ~> Service['cinder-backup']
}
if ($storage_hash['images_ceph']) {
include ::glance::params
service { 'glance-api':
ensure => 'running',
name => $::glance::params::api_service_name,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$glance_pool] ~> Service['glance-api']
}
include ::osnailyfacter::ceph::ceph_pools

View File

@@ -1,40 +1 @@
notice('MODULAR: ceph/enable_rados.pp')
include ::ceph::params
$radosgw_service = $::ceph::params::service_radosgw
$radosgw_override_file = '/etc/init/radosgw-all.override'
if ($::operatingsystem == 'Ubuntu') {
# ensure the service is stopped and will not start on boot
service { 'radosgw':
enable => false,
provider => 'debian',
}
service { 'radosgw-all':
ensure => running,
enable => true,
provider => 'upstart',
}
file {$radosgw_override_file:
ensure => present,
mode => '0644',
owner => 'root',
group => 'root',
content => "start on runlevel [2345]\nstop on starting rc RUNLEVEL=[016]\n",
}
Service['radosgw'] ->
File[$radosgw_override_file] ~>
Service['radosgw-all']
}
else {
service { $radosgw_service:
ensure => running,
enable => true,
}
}
include ::osnailyfacter::ceph::enable_rados

View File

@@ -1,95 +1 @@
notice('MODULAR: ceph/mon.pp')

# Deployment-wide settings pulled from hiera.
$storage_hash             = hiera_hash('storage', {})
$use_neutron              = hiera('use_neutron')
$public_vip               = hiera('public_vip')
$management_vip           = hiera('management_vip')
$use_syslog               = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph', 'LOG_LOCAL0')
$keystone_hash            = hiera_hash('keystone', {})
# Monitor node name -> address map on the ceph/public network role.
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')

# Pick the Glance backend: Ceph wins over vCenter, Swift is the fallback.
if ($storage_hash['images_ceph']) {
  $glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
  $glance_backend = 'vmware'
} else {
  $glance_backend = 'swift'
}

# Ceph is required when any storage role (volumes, images, objects,
# ephemeral) is backed by it.
if ($storage_hash['volumes_ceph'] or
  $storage_hash['images_ceph'] or
  $storage_hash['objects_ceph'] or
  $storage_hash['ephemeral_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

if $use_ceph {
  # The primary monitor bootstraps the cluster; the remaining monitors join it.
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons              = keys($ceph_primary_monitor_node)
  $primary_mon               = $ceph_primary_monitor_node[$primary_mons[0]]['name']

  prepare_network_config(hiera_hash('network_scheme', {}))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network  = get_network_role_property('ceph/public', 'network')
  $mon_addr             = get_network_role_property('ceph/public', 'ipaddr')

  # Top-scope class name ('::ceph') for consistency with the other
  # osnailyfacter::ceph manifests.
  class { '::ceph':
    primary_mon              => $primary_mon,
    mon_hosts                => keys($mon_address_map),
    mon_ip_addresses         => values($mon_address_map),
    mon_addr                 => $mon_addr,
    cluster_node_address     => $public_vip,
    osd_pool_default_size    => $storage_hash['osd_pool_size'],
    osd_pool_default_pg_num  => $storage_hash['pg_num'],
    osd_pool_default_pgp_num => $storage_hash['pg_num'],
    use_rgw                  => false,
    glance_backend           => $glance_backend,
    rgw_pub_ip               => $public_vip,
    rgw_adm_ip               => $management_vip,
    rgw_int_ip               => $management_vip,
    cluster_network          => $ceph_cluster_network,
    public_network           => $ceph_public_network,
    use_syslog               => $use_syslog,
    syslog_log_level         => hiera('syslog_log_level_ceph', 'info'),
    syslog_log_facility      => $syslog_log_facility_ceph,
    rgw_keystone_admin_token => $keystone_hash['admin_token'],
    ephemeral_ceph           => $storage_hash['ephemeral_ceph'],
  }

  # Restart Ceph-backed OpenStack services whenever the Ceph configuration
  # changes (~> notifies a restart).
  if ($storage_hash['volumes_ceph']) {
    include ::cinder::params
    service { 'cinder-volume':
      ensure     => 'running',
      name       => $::cinder::params::volume_service,
      hasstatus  => true,
      hasrestart => true,
    }
    service { 'cinder-backup':
      ensure     => 'running',
      name       => $::cinder::params::backup_service,
      hasstatus  => true,
      hasrestart => true,
    }
    Class['::ceph'] ~> Service['cinder-volume']
    Class['::ceph'] ~> Service['cinder-backup']
  }

  if ($storage_hash['images_ceph']) {
    include ::glance::params
    service { 'glance-api':
      ensure     => 'running',
      name       => $::glance::params::api_service_name,
      hasstatus  => true,
      hasrestart => true,
    }
    Class['::ceph'] ~> Service['glance-api']
  }
}

include ::osnailyfacter::ceph::mon

# View File

# @@ -1,108 +1 @@  (radosgw.pp: 108 modular lines replaced by a single include)
notice('MODULAR: ceph/radosgw.pp')

# Deployment-wide settings pulled from hiera.
$storage_hash     = hiera_hash('storage', {})
$use_neutron      = hiera('use_neutron')
$public_vip       = hiera('public_vip')
$keystone_hash    = hiera_hash('keystone', {})
$management_vip   = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$public_ssl_hash  = hiera_hash('public_ssl')
# Single-quoted: no interpolation in the pool name (puppet-lint).
$radosgw_large_pool_name = '.rgw'
# Monitor node name -> address map on the ceph/public network role.
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$external_lb     = hiera('external_lb', false)

# Keystone admin endpoint, honoring per-service SSL overrides.
$ssl_hash                = hiera_hash('use_ssl', {})
$admin_identity_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http')
$admin_identity_address  = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip])
$admin_identity_url      = "${admin_identity_protocol}://${admin_identity_address}:35357"

# Ceph is required when any storage role is backed by it.
if ($storage_hash['volumes_ceph'] or
  $storage_hash['images_ceph'] or
  $storage_hash['objects_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

# RADOS Gateway is only deployed when object storage is Ceph-backed.
if $use_ceph and $storage_hash['objects_ceph'] {
  # The primary monitor bootstraps the cluster; the remaining monitors join it.
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons              = keys($ceph_primary_monitor_node)
  $primary_mon               = $ceph_primary_monitor_node[$primary_mons[0]]['name']

  prepare_network_config(hiera_hash('network_scheme', {}))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network  = get_network_role_property('ceph/public', 'network')
  $rgw_ip_address       = get_network_role_property('ceph/radosgw', 'ipaddr')

  # Listen directives with host required for ip_based vhosts
  class { '::osnailyfacter::apache':
    listen_ports => hiera_array('apache_ports', ['0.0.0.0:80', '0.0.0.0:8888']),
  }

  # Apache modules needed to proxy FastCGI requests to radosgw on Debian.
  if ($::osfamily == 'Debian') {
    apache::mod { 'rewrite': }
    apache::mod { 'proxy': }
    apache::mod { 'proxy_fcgi': }
  }

  include ::tweaks::apache_wrappers
  # Top-scope include for consistency with the other manifests in this module.
  include ::ceph::params

  $haproxy_stats_url = "http://${service_endpoint}:10000/;csv"

  # Keystone internal endpoint, honoring per-service SSL overrides.
  $internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http')
  $internal_auth_address  = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint, $management_vip])
  $internal_auth_url      = "${internal_auth_protocol}://${internal_auth_address}:5000"

  # Keystone must be reachable before ceph::keystone registers the endpoint.
  class { '::osnailyfacter::wait_for_keystone_backends': }
  Class['::osnailyfacter::wait_for_keystone_backends'] -> Class['::ceph::keystone']

  class { '::ceph::radosgw':
    # SSL
    use_ssl                          => false,
    public_ssl                       => $public_ssl_hash['services'],

    # Ceph
    primary_mon                      => $primary_mon,
    pub_ip                           => $public_vip,
    adm_ip                           => $management_vip,
    int_ip                           => $management_vip,

    # RadosGW settings
    rgw_host                         => $::hostname,
    rgw_ip                           => $rgw_ip_address,
    rgw_port                         => '6780',
    swift_endpoint_port              => '8080',
    rgw_keyring_path                 => '/etc/ceph/keyring.radosgw.gateway',
    rgw_socket_path                  => '/tmp/radosgw.sock',
    rgw_frontends                    => 'fastcgi socket_port=9000 socket_host=127.0.0.1',
    rgw_log_file                     => '/var/log/ceph/radosgw.log',
    rgw_data                         => '/var/lib/ceph/radosgw',
    rgw_dns_name                     => "*.${::domain}",
    rgw_print_continue               => true,

    # RadosGW Keystone settings
    rgw_use_pki                      => false,
    rgw_use_keystone                 => true,
    rgw_keystone_url                 => $admin_identity_url,
    rgw_keystone_admin_token         => $keystone_hash['admin_token'],
    rgw_keystone_token_cache_size    => '10',
    rgw_keystone_accepted_roles      => '_member_, Member, admin, swiftoperator',
    rgw_keystone_revocation_interval => '1000000',
    rgw_s3_auth_use_keystone         => false,
    rgw_nss_db_path                  => '/etc/ceph/nss',
    rgw_large_pool_name              => $radosgw_large_pool_name,
    rgw_large_pool_pg_nums           => pick($storage_hash['per_pool_pg_nums'][$radosgw_large_pool_name], '512'),

    # RadosGW log settings
    use_syslog                       => hiera('use_syslog', true),
    syslog_facility                  => hiera('syslog_log_facility_ceph', 'LOG_LOCAL0'),
    syslog_level                     => hiera('syslog_log_level_ceph', 'info'),
  }

  # Resource defaults for any Exec declared in this scope.
  Exec {
    path => [ '/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/' ],
    cwd  => '/root',
  }
}

include ::osnailyfacter::ceph::radosgw

# View File

# @@ -1,21 +1 @@  (updatedb.pp: 21 modular lines replaced by a single include)
notice('MODULAR: ceph/updatedb.pp')

$storage_hash = hiera_hash('storage', {})

# Ceph is in use when any storage role (volumes, images, objects) is
# backed by it.
if ($storage_hash['volumes_ceph'] or
  $storage_hash['images_ceph'] or
  $storage_hash['objects_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

if $use_ceph {
  # Keep mlocate's updatedb from indexing Ceph's (very large) data tree.
  # The sed appends /var/lib/ceph inside the PRUNEPATHS quoted list; the
  # 'unless' guard makes the exec idempotent and a no-op when
  # /etc/updatedb.conf does not exist.
  exec { 'Ensure /var/lib/ceph in the updatedb PRUNEPATH':
    path    => [ '/usr/bin', '/bin' ],
    command => "sed -i -Ee 's|(PRUNEPATHS *= *\"[^\"]*)|\\1 /var/lib/ceph|' /etc/updatedb.conf",
    unless  => "test ! -f /etc/updatedb.conf || grep 'PRUNEPATHS *= *.*/var/lib/ceph.*' /etc/updatedb.conf",
  }
}

include ::osnailyfacter::ceph::updatedb