Fix Ceph YAQL expressions

* Update Ceph YAQL expressions
* Remove unneeded predeployment ceph-keys sharing
* Remove non-idempotent osd-activate service
* Remove unneeded quorum check and ceph reload
* Small code style changes
* Add cross-dependencies for the osd and mon tasks

Related-Blueprint: fuel-upstream-ceph
Change-Id: I044000f231c4d8a7534ac806069e51b4e6995406
Oleksiy Molchanov 2016-04-26 12:46:52 +03:00
parent 7caacaecf5
commit 8f607eef91
9 changed files with 184 additions and 268 deletions
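
The cross-dependency bullet refers to Fuel's task-graph cross-depends stanza: a task may reference a regular expression instead of a single task id, so one declaration covers both the primary and non-primary variants of a dependency. A minimal sketch of the pattern as this change applies it to the osd task (names taken from the diff below):

- id: top-role-ceph-osd
  type: puppet
  version: 2.1.0
  cross-depends:
    - name: /(primary-)?ceph-mon/   # matches primary-ceph-mon as well as ceph-mon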


@@ -1,6 +1,5 @@
class osnailyfacter::ceph::ceph_osd {
# TODO(bogdando) add monit ceph-osd services monitoring, if required
notice('MODULAR: ceph-osd.pp')
$storage_hash = hiera('storage', {})
@@ -87,11 +86,6 @@ class osnailyfacter::ceph::ceph_osd {
class { '::ceph::osds':
args => $osd_devices_hash,
} ->
service {'ceph-osd-all-starter':
ensure => running,
provider => upstart,
}
if $ceph_tuning_settings != {} {


@@ -12,93 +12,99 @@ class osnailyfacter::ceph::ceph_pools {
$glance_user = 'images'
$glance_pool = 'images'
$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']

if ($storage_hash['volumes_ceph'] or
  $storage_hash['images_ceph'] or
  $storage_hash['objects_ceph'] or
  $storage_hash['ephemeral_ceph']
) {
  if empty($mon_key) {
    fail('Please provide mon_key')
  }

  if empty($fsid) {
    fail('Please provide fsid')
  }

  class {'ceph':
    fsid => $fsid
  }

  # DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
  ceph::pool { $glance_pool:
    pg_num => pick($per_pool_pg_nums[$glance_pool], '256'),
    pgp_num => pick($per_pool_pg_nums[$glance_pool], '256'),
  }

  ceph::key { "client.${glance_user}":
    secret => $mon_key,
    user => 'glance',
    group => 'glance',
    cap_mon => 'allow r',
    cap_osd => "allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}",
    inject => true,
  }

  ceph::pool { $cinder_pool:
    pg_num => pick($per_pool_pg_nums[$cinder_pool], '256'),
    pgp_num => pick($per_pool_pg_nums[$cinder_pool], '256'),
  }

  ceph::key { "client.${cinder_user}":
    secret => $mon_key,
    user => 'cinder',
    group => 'cinder',
    cap_mon => 'allow r',
    cap_osd => "allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}",
    inject => true,
  }

  ceph::pool { $cinder_backup_pool:
    pg_num => pick($per_pool_pg_nums[$cinder_backup_pool], '256'),
    pgp_num => pick($per_pool_pg_nums[$cinder_backup_pool], '256'),
  }

  ceph::key { "client.${cinder_backup_user}":
    secret => $mon_key,
    user => 'cinder',
    group => 'cinder',
    cap_mon => 'allow r',
    cap_osd => "allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rwx pool=${cinder_pool}",
    inject => true,
  }

  if ($storage_hash['volumes_ceph']) {
    include ::cinder::params

    service { 'cinder-volume':
      ensure => 'running',
      name => $::cinder::params::volume_service,
      hasstatus => true,
      hasrestart => true,
    }

    Ceph::Pool[$cinder_pool] ~> Service['cinder-volume']

    service { 'cinder-backup':
      ensure => 'running',
      name => $::cinder::params::backup_service,
      hasstatus => true,
      hasrestart => true,
    }

    Ceph::Pool[$cinder_backup_pool] ~> Service['cinder-backup']
  }

  if ($storage_hash['images_ceph']) {
    include ::glance::params

    service { 'glance-api':
      ensure => 'running',
      name => $::glance::params::api_service_name,
      hasstatus => true,
      hasrestart => true,
    }

    Ceph::Pool[$glance_pool] ~> Service['glance-api']
  }
}
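
The pick() calls above choose a per-deployment placement-group count with a fallback: when the per_pool_pg_nums hash carries no entry for a pool, the hash lookup yields undef and pick() returns the '256' default. A small standalone illustration with hypothetical values, assuming stdlib's pick():

$per_pool_pg_nums = { 'images' => '128' }
notice(pick($per_pool_pg_nums['images'], '256'))  # => '128'
notice(pick($per_pool_pg_nums['volumes'], '256')) # => '256' (no such key, so the default wins)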


@@ -42,18 +42,11 @@ class osnailyfacter::ceph::mon {
$mon_initial_members = $mon_hosts
$mon_host = $mon_ips
}
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
if empty($admin_key) {
fail('Please provide admin_key')
@@ -93,7 +86,7 @@ class osnailyfacter::ceph::mon {
}
Ceph::Key {
inject => true,
inject_as_id => 'mon.',
inject_keyring => "/var/lib/ceph/mon/ceph-${::hostname}/keyring",
}
@@ -116,7 +109,7 @@ class osnailyfacter::ceph::mon {
if ($storage_hash['volumes_ceph']) {
include ::cinder::params
service { 'cinder-volume':
ensure => 'running',
name => $::cinder::params::volume_service,
hasstatus => true,
@@ -135,7 +128,7 @@ class osnailyfacter::ceph::mon {
}
if ($storage_hash['images_ceph']) {
include ::glance::params
service { 'glance-api':
ensure => 'running',
name => $::glance::params::api_service_name,
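
Here and in ceph_pools.pp, lines such as Ceph::Pool[$cinder_pool] ~> Service['cinder-volume'] use Puppet's notify arrow: it orders the two resources and also refreshes (restarts) the right-hand service whenever the left-hand resource changes. A minimal self-contained sketch of the mechanism, using a placeholder file resource rather than the real ceph::pool define:

file { '/tmp/ceph-demo.conf':
  ensure  => file,
  content => "demo\n",
}

service { 'cinder-volume':
  ensure => 'running',
}

# Order the file before the service and restart the service on any file change.
File['/tmp/ceph-demo.conf'] ~> Service['cinder-volume']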


@@ -9,36 +9,13 @@ class osnailyfacter::ceph::primary_mon_update {
$storage_hash = hiera('storage', {})
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
exec {'Wait for Ceph quorum':
path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
command => "ceph mon stat | grep -q 'quorum.*${node_hostname}'",
tries => 12, # This is necessary to prevent a race: mon must establish
# a quorum before it can generate keys; this was observed to take up to 15 seconds.
# Keys must exist prior to other commands running
try_sleep => 5,
refreshonly => true,
}
ceph_config {
'global/mon_host': value => $mon_ips;
'global/mon_initial_members': value => $mon_hosts;
}
exec {'reload Ceph for HA':
path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
command => 'service ceph reload',
}
Exec['Wait for Ceph quorum'] -> Ceph_config<||> ~> Exec['reload Ceph for HA']
}
}


@@ -54,16 +54,16 @@ class osnailyfacter::ceph::radosgw {
include ::tweaks::apache_wrappers
include ::ceph::params
#######################################
# TODO (omolchanov): Remove template once we switch to systemd
#######################################
file { '/etc/init/radosgw.conf':
ensure => present,
content => template('osnailyfacter/radosgw-init.erb'),
before => Ceph::Rgw[$gateway_name],
}
#######################################
ceph::rgw { $gateway_name:
frontend_type => 'apache-proxy-fcgi',


@@ -2,26 +2,26 @@ class osnailyfacter::ceph::radosgw_keystone {
$storage_hash = hiera_hash('storage', {})
if $storage_hash['objects_ceph'] {
  $public_vip = hiera('public_vip')
  $region = hiera('region', 'RegionOne')
  $management_vip = hiera('management_vip')
  $public_ssl_hash = hiera_hash('public_ssl')
  $ssl_hash = hiera_hash('use_ssl', {})

  $public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'radosgw', 'public', 'protocol', 'http')
  $public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'radosgw', 'public', 'hostname', [$public_vip])
  $internal_protocol = get_ssl_property($ssl_hash, {}, 'radosgw', 'internal', 'protocol', 'http')
  $internal_address = get_ssl_property($ssl_hash, {}, 'radosgw', 'internal', 'hostname', [$management_vip])
  $admin_protocol = get_ssl_property($ssl_hash, {}, 'radosgw', 'admin', 'protocol', 'http')
  $admin_address = get_ssl_property($ssl_hash, {}, 'radosgw', 'admin', 'hostname', [$management_vip])

  $public_url = "${public_protocol}://${public_address}:8080/swift/v1"
  $internal_url = "${internal_protocol}://${internal_address}:8080/swift/v1"
  $admin_url = "${admin_protocol}://${admin_address}:8080/swift/v1"
class {'::osnailyfacter::wait_for_keystone_backends': }
keystone::resource::service_identity { 'radosgw':


@@ -1,39 +1,3 @@
# PRE_DEPLOYMENT Tasks
#
- id: copy_keys_ceph
type: copy_files
version: 2.0.0
role: ['/.*/']
required_for: [pre_deployment_end]
requires: [generate_keys_ceph]
cross-depends:
- name: generate_keys_ceph
role: master
parameters:
files:
- src: /var/lib/fuel/keys/{CLUSTER_ID}/ceph/ceph.pub
dst: /var/lib/astute/ceph/ceph.pub
- src: /var/lib/fuel/keys/{CLUSTER_ID}/ceph/ceph
dst: /var/lib/astute/ceph/ceph
permissions: '0600'
dir_permissions: '0700'
- id: generate_keys_ceph
type: shell
version: 2.0.0
role: master
requires: [pre_deployment_start]
required_for: [copy_keys_ceph]
parameters:
cmd: sh /etc/puppet/modules/osnailyfacter/modular/astute/generate_keys.sh -i {CLUSTER_ID} -s 'ceph' -p /var/lib/fuel/keys/
timeout: 180
#
# DEPLOYMENT Tasks
#
- id: top-role-ceph-osd
type: puppet
version: 2.1.0
@@ -41,63 +5,20 @@
required_for: [deploy_end]
requires: [hosts, firewall]
condition:
  yaql_exp:
    changedAny($.storage, $.ceph_tuning_settings, $.get('use_ssl'),
    $.get('use_syslog'), $.network_scheme,
    (len($.network_metadata.nodes.values().where(
    ('controller' in $.node_roles) or
    ('primary-controller' in $.node_roles)))),
    ('primary-controller' in $.roles))
cross-depends:
  - name: /(primary-)?ceph-mon/
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/ceph-osd.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
- id: ceph-radosgw
version: 2.1.0
type: puppet
role: [primary-controller, controller]
required_for: [upload_cirros, post_deployment_end]
requires: [post_deployment_start]
condition:
yaql_exp: &ceph_radosgw >
changedAny($.storage, $.keystone, $.network_metadata.vips,
$.get('external_lb'),
(len($.network_metadata.nodes.values().where(
('controller' in $.node_roles) or
('primary-controller' in $.node_roles)))),
$.get('use_ssl'), ('primary-controller' in $.roles), $.network_scheme,
$.get('apache_ports'), $.get('use_syslog'),
$.get('syslog_log_facility_ceph'), $.get('syslog_log_level_ceph'))
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
test_pre:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_pre.rb
test_post:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_post.rb
- id: radosgw-keystone
type: puppet
version: 2.1.0
role: [primary-controller]
required_for: [ceph-radosgw]
requires: [post_deployment_start]
condition:
yaql_exp: >
changedAny($.storage, $.network_metadata.vips,
$.get('region', 'RegionOne'), $.public_ssl, $.get('use_ssl'))
cross-depends:
- name: /(primary-)?keystone/
role: self
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_keystone.pp
puppet_modules: /etc/puppet/modules
timeout: 1800
- id: primary-ceph-mon
type: puppet
version: 2.1.0
@@ -106,12 +27,11 @@
requires: [primary-openstack-controller, openstack-controller, openstack-cinder]
condition:
  yaql_exp: &ceph_mon >
    ($.storage.objects_ceph or $.storage.images_ceph or
    $.storage.volumes_ceph or $.storage.ephemeral_ceph) and
    changedAny($.storage, $.get('use_syslog'), $.network_scheme,
    (len($.network_metadata.nodes.values().where(
    $.node_roles.any($.matches('(primary-)?controller'))))))
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/mon.pp
puppet_modules: /etc/puppet/modules
@@ -135,30 +55,67 @@
- id: primary-mon-update
type: puppet
version: 2.1.0
groups: [primary-controller]
required_for: [deploy_end, controller_remaining_tasks]
requires: [primary-ceph-mon]
condition:
yaql_exp: *ceph_mon
cross-depends:
- name: /(primary-)?ceph-mon/
cross-depended-by:
- name: ceph_create_pools
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/primary_mon_update.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
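
The yaql_exp: *ceph_mon line above reuses the condition defined on primary-ceph-mon through an ordinary YAML anchor/alias pair: &ceph_mon names the expression where it first appears and *ceph_mon repeats it here. A minimal illustration of the mechanism, with hypothetical task ids:

- id: first-task
  condition:
    yaql_exp: &shared_condition >
      changedAny($.storage)
- id: second-task
  condition:
    yaql_exp: *shared_condition   # evaluates the same expression as first-task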
- id: ceph-radosgw
version: 2.1.0
type: puppet
groups: [primary-controller, controller]
required_for: [deploy_end]
requires: [radosgw-keystone, apache, keystone, ceph-mon]
condition:
yaql_exp:
$.storage.objects_ceph and
changedAny($.storage, $.get('use_syslog'), $.keystone, $.network_metadata.vips, $.get('use_ssl'),
(len($.network_metadata.nodes.values().where(
('controller' in $.node_roles) or
('primary-controller' in $.node_roles)))),
('primary-controller' in $.roles), $.network_scheme, $.get('apache_ports'))
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw.pp
puppet_modules: /etc/puppet/modules
timeout: 3600
test_pre:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_pre.rb
test_post:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_post.rb
- id: radosgw-keystone
type: puppet
version: 2.1.0
groups: [primary-controller]
required_for: [ceph-radosgw]
requires: [keystone, primary-mon-update]
condition:
yaql_exp:
$.storage.objects_ceph and
changedAny($.storage, $.network_metadata.vips,
$.get('region', 'RegionOne'), $.public_ssl, $.get('use_ssl'))
cross-depends:
- name: keystone
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_keystone.pp
puppet_modules: /etc/puppet/modules
timeout: 1800
- id: ceph-compute
type: puppet
version: 2.1.0
groups: [compute]
cross-depends:
  - name: /(primary-)?ceph-mon/
requires: [top-role-compute]
required_for: [deploy_end]
condition:
yaql_exp: &storage_changed >
($.storage.objects_ceph or $.storage.images_ceph or
@@ -177,6 +134,10 @@
test_post:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/ceph_compute_post.rb
#
# POST_DEPLOYMENT Tasks
#
- id: ceph_create_pools
type: puppet
version: 2.1.0
@@ -203,7 +164,7 @@
condition:
yaql_exp: *ceph_changed
requires: [post_deployment_start]
required_for: [upload_cirros]
parameters:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/ceph_ready_check.rb
timeout: 1800
@@ -213,7 +174,7 @@
version: 2.1.0
role: [primary-controller, controller, ceph-osd]
condition:
yaql_exp: *ceph_changed
requires: [post_deployment_start]
required_for: [post_deployment_end]
parameters:
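
All yaql_exp conditions in this file follow the same scheme: the expression is evaluated against the node's deployment data (the $ context), and changedAny(...) returns true when any of its arguments differs from the value recorded at the previous successful run, so a task executes only when the settings it depends on have changed. A hypothetical minimal task showing the shape, not part of this commit:

- id: example-ceph-task
  type: puppet
  version: 2.1.0
  condition:
    yaql_exp: >
      $.storage.objects_ceph and changedAny($.storage, $.get('use_ssl'))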


@@ -133,13 +133,6 @@ describe manifest do
)
end
it 'should start osd daemons' do
should contain_service('ceph-osd-all-starter').with(
'ensure' => 'running',
'provider' => 'upstart',
).that_requires('Class[ceph::osds]')
end
if ceph_tuning_settings != {}
it 'should set Ceph tuning settings' do
should contain_ceph_config('global/debug_default').with(:value => debug)


@@ -18,18 +18,10 @@ describe manifest do
storage_hash['ephemeral_ceph']
)
it 'should wait for ceph to be ready' do
should contain_exec('Wait for Ceph quorum')
end
it 'should add parameters to ceph.conf' do
should contain_ceph_config('global/mon_host').with(:value => mon_ips)
should contain_ceph_config('global/mon_initial_members').with(:value => mon_hosts)
end
it 'should reload Ceph' do
should contain_exec('reload Ceph for HA')
end
end
end
test_ubuntu_and_centos manifest