Ceph: fix PG count

Fix overestimation of placement group (PG) counts: pools are now sized individually from storage['per_pool_pg_nums'] (with per-pool defaults) instead of all sharing the cluster-wide pg_num, and the radosgw '.rgw' pool is created with a configurable PG count.

Change-Id: I8c401e9abe9798ded87f542c0d707198148d07d1
Closes-Bug: #1464656
koder aka kdanilov 2015-07-23 05:28:55 +03:00 committed by Kyrylo Galanov
parent 12646476cb
commit ab3679782c
30 changed files with 175 additions and 40 deletions
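For context on the sizing fix: the usual Ceph guideline is to target roughly 100 PGs per OSD across the whole cluster, divide that budget by the replication factor, weight it by each pool's expected share of the data, and round up to a power of two. Applying one cluster-wide pg_num to every pool multiplies the budget by the number of pools, which is the overestimation this patch removes. A minimal Python sketch of that guideline (the helper name and the 100-PGs-per-OSD target are illustrative assumptions, not the exact calculation Fuel performs):

# Sketch of the conventional PG sizing rule of thumb; illustrative only.
def pg_num_for_pool(osd_count, replica_size, data_share, pgs_per_osd=100):
    target = osd_count * pgs_per_osd / float(replica_size) * data_share
    pg_num = 1
    while pg_num < max(target, 64):  # round up to a power of two, floor of 64
        pg_num *= 2
    return pg_num

# e.g. 20 OSDs, 3 replicas, a volumes pool holding ~60% of the data:
pg_num_for_pool(20, 3, 0.6)  # => 512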

View File

@@ -59,6 +59,9 @@ class ceph (
$rgw_int_ip = $cluster_node_address,
$rgw_s3_auth_use_keystone = true,
+$rgw_large_pool_name = '.rgw',
+$rgw_large_pool_pg_nums = '512',
# Cinder settings
$volume_driver = 'cinder.volume.drivers.rbd.RBDDriver',
$glance_api_version = '2',

View File

@@ -29,6 +29,8 @@ class ceph::radosgw (
$rgw_keystone_accepted_roles = $::ceph::rgw_keystone_accepted_roles,
$rgw_keystone_revocation_interval = $::ceph::rgw_keystone_revocation_interval,
$rgw_nss_db_path = $::ceph::rgw_nss_db_path,
+$rgw_large_pool_name = $::ceph::rgw_large_pool_name,
+$rgw_large_pool_pg_nums = $::ceph::rgw_large_pool_pg_nums,
$pub_ip = $::ceph::rgw_pub_ip,
$adm_ip = $::ceph::rgw_adm_ip,
$int_ip = $::ceph::rgw_int_ip,
@@ -214,6 +216,11 @@ class ceph::radosgw (
creates => $keyring_path
}
+exec { "Create ${rgw_large_pool_name} pool":
+  command => "ceph -n ${radosgw_auth_key} osd pool create ${rgw_large_pool_name} ${rgw_large_pool_pg_nums} ${rgw_large_pool_pg_nums}",
+  unless  => "rados lspools | grep '^${rgw_large_pool_name}$'",
+}
file { $keyring_path: mode => '0640', }
Ceph_conf <||> ->
@@ -233,6 +240,7 @@ class ceph::radosgw (
Exec["ceph create ${radosgw_auth_key}"] ->
Exec["Populate ${radosgw_auth_key} keyring"] ->
File[$keyring_path] ->
+Exec["Create ${rgw_large_pool_name} pool"] ->
Firewall['012 RadosGW allow'] ~>
Service <| title == 'httpd' |>
}
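The exec above follows the usual idempotent pattern: create the pool only when rados lspools does not already list it, so repeated Puppet runs are no-ops. A rough Python equivalent of the same guard (a sketch assuming Python 3.7+ and a reachable cluster; ensure_pool is an invented name):

import subprocess

def ensure_pool(pool, pg_num, auth_name='client.radosgw.gateway'):
    # Same check as the exec's 'unless': skip if the pool already exists.
    pools = subprocess.run(['rados', 'lspools'], capture_output=True,
                           text=True, check=True).stdout.splitlines()
    if pool in pools:
        return
    # The count is passed twice: once for pg_num, once for pgp_num.
    subprocess.run(['ceph', '-n', auth_name, 'osd', 'pool', 'create',
                    pool, str(pg_num), str(pg_num)], check=True)

ensure_pool('.rgw', 512)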

View File

@@ -44,6 +44,8 @@ if $use_ceph {
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
+$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
@@ -73,8 +75,8 @@
user => $compute_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'",
keyring_owner => 'nova',
-pg_num => $storage_hash['pg_num'],
-pgp_num => $storage_hash['pg_num'],
+pg_num => pick($per_pool_pg_nums[$compute_pool], '1024'),
+pgp_num => pick($per_pool_pg_nums[$compute_pool], '1024'),
}
include ceph::nova_compute
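pick() is the puppetlabs-stdlib function that returns its first defined, non-empty argument, so each pool takes its PG count from the per_pool_pg_nums map in hiera and falls back to the hard-coded default only when the key is absent. In Python terms the lookup is roughly (illustrative values):

# Rough analogue of pick($per_pool_pg_nums[$compute_pool], '1024')
per_pool_pg_nums = {'compute': 1024, 'volumes': 2048, 'images': 256}  # from hiera
pg_num = per_pool_pg_nums.get('compute', 1024)  # default used only if the key is missing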

View File

@@ -18,29 +18,31 @@ Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
+$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
ceph::pool {$glance_pool:
user => $glance_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'",
keyring_owner => 'glance',
-pg_num => $osd_pool_default_pg_num,
-pgp_num => $osd_pool_default_pg_num,
+pg_num => pick($per_pool_pg_nums[$glance_pool], '256'),
+pgp_num => pick($per_pool_pg_nums[$glance_pool], '256'),
}
ceph::pool {$cinder_pool:
user => $cinder_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'",
keyring_owner => 'cinder',
-pg_num => $osd_pool_default_pg_num,
-pgp_num => $osd_pool_default_pg_num,
+pg_num => pick($per_pool_pg_nums[$cinder_pool], '2048'),
+pgp_num => pick($per_pool_pg_nums[$cinder_pool], '2048'),
}
ceph::pool {$cinder_backup_pool:
user => $cinder_backup_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rx pool=${cinder_pool}'",
keyring_owner => 'cinder',
-pg_num => $osd_pool_default_pg_num,
-pgp_num => $osd_pool_default_pg_num,
+pg_num => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
+pgp_num => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
}
Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool]

View File

@@ -7,6 +7,7 @@ $keystone_hash = hiera('keystone', {})
$management_vip = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$public_ssl_hash = hiera('public_ssl')
+$radosgw_large_pool_name = ".rgw"
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
if ($storage_hash['volumes_ceph'] or
@@ -94,6 +95,8 @@ if $use_ceph and $storage_hash['objects_ceph'] {
rgw_keystone_revocation_interval => '1000000',
rgw_nss_db_path => '/etc/ceph/nss',
rgw_s3_auth_use_keystone => hiera('rgw_s3_auth_use_keystone', true),
+rgw_large_pool_name => $radosgw_large_pool_name,
+rgw_large_pool_pg_nums => pick($storage_hash['per_pool_pg_nums'][$radosgw_large_pool_name], '512'),
#rgw Log settings
use_syslog => hiera('use_syslog', true),

View File

@@ -695,6 +695,12 @@ storage:
objects_ceph: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: false
volumes_lvm: true
storage_network_range: 192.168.1.0/24

View File

@@ -816,7 +816,6 @@ storage:
pg_num: 256
per_pool_pg_nums:
  default_pg_num: 256
-  cinder_volume: 2048
  compute: 1024
  backups: 512
  ".rgw": 512

View File

@@ -836,7 +836,7 @@ storage:
pg_num: 256
per_pool_pg_nums:
  default_pg_num: 256
-  cinder_volume: 2048
+  volumes: 2048
  compute: 1024
  backups: 512
  ".rgw": 512

View File

@@ -670,6 +670,12 @@ storage:
objects_ceph: true
osd_pool_size: '2'
pg_num: 256
+per_pool_pg_nums:
+  default_pg_num: 256
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: true
volumes_lvm: false
storage_network_range: 192.168.1.0/24

View File

@@ -671,6 +671,12 @@ storage:
objects_ceph: true
osd_pool_size: '2'
pg_num: 256
+per_pool_pg_nums:
+  default_pg_num: 256
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: true
volumes_lvm: false
storage_network_range: 192.168.1.0/24

View File

@@ -744,6 +744,13 @@ storage:
objects_ceph: true
osd_pool_size: '2'
pg_num: 256
+per_pool_pg_nums:
+  default_pg_num: 256
+  volumes: 64
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: true
volumes_lvm: false
storage_network_range: 192.168.1.0/24

View File

@@ -744,6 +744,13 @@ storage:
objects_ceph: true
osd_pool_size: '2'
pg_num: 256
+per_pool_pg_nums:
+  default_pg_num: 256
+  volumes: 64
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: true
volumes_lvm: false
storage_network_range: 192.168.1.0/24

View File

@@ -671,6 +671,12 @@ storage:
objects_ceph: true
osd_pool_size: '2'
pg_num: 256
+per_pool_pg_nums:
+  default_pg_num: 256
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: true
volumes_lvm: false
storage_network_range: 192.168.1.0/24

View File

@@ -660,6 +660,12 @@ storage:
objects_ceph: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: false
volumes_lvm: true
storage_network_range: 192.168.1.0/24

View File

@@ -682,6 +682,12 @@ storage:
objects_ceph: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: false
volumes_lvm: true
storage_network_range: 192.168.1.0/24

View File

@@ -681,6 +681,12 @@ storage:
objects_ceph: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: false
volumes_lvm: true
storage_network_range: 192.168.1.0/24

View File

@@ -835,7 +835,6 @@ storage:
pg_num: 128
per_pool_pg_nums:
  default_pg_num: 128
-  cinder_volume: 2048
  compute: 1024
  backups: 512
  ".rgw": 512

View File

@@ -689,6 +689,12 @@ storage:
objects_ceph: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: false
volumes_lvm: true
storage_network_range: 192.168.1.0/24

View File

@@ -553,6 +553,12 @@ storage:
objects_ceph: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: false
volumes_lvm: true
storage_network_range: 192.168.1.0/24

View File

@@ -552,6 +552,12 @@ storage:
objects_ceph: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
volumes_ceph: false
volumes_lvm: true
storage_network_range: 192.168.1.0/24

View File

@@ -318,6 +318,12 @@ storage:
images_vcenter: true
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
images_ceph: false
metadata:
weight: 60

View File

@@ -317,6 +317,12 @@ storage:
images_vcenter: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
images_ceph: false
metadata:
weight: 60

View File

@@ -317,6 +317,12 @@ storage:
images_vcenter: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
images_ceph: false
metadata:
weight: 60

View File

@@ -317,6 +317,12 @@ storage:
images_vcenter: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
images_ceph: false
metadata:
weight: 60

View File

@@ -317,6 +317,12 @@ storage:
images_vcenter: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
images_ceph: false
metadata:
weight: 60

View File

@@ -317,6 +317,12 @@ storage:
images_vcenter: false
osd_pool_size: '2'
pg_num: 128
+per_pool_pg_nums:
+  default_pg_num: 128
+  compute: 1024
+  backups: 512
+  ".rgw": 512
+  images: 256
images_ceph: false
metadata:
weight: 60

View File

@@ -13,7 +13,7 @@ describe manifest do
end
ceph_tuning_settings = Noop.hiera 'ceph_tuning_settings'
-if (storage_hash['images_ceph'] or storage_hash['objects_ceph'] or storage_hash['objects_ceph'])
+if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
it { should contain_class('ceph').with(
'mon_hosts' => ceph_monitor_nodes.keys,
'osd_pool_default_size' => storage_hash['osd_pool_size'],

View File

@@ -19,8 +19,8 @@ describe manifest do
it { should contain_class('ceph::conf') }
it { should contain_ceph__pool('compute').with(
-'pg_num' => storage_hash['pg_num'],
-'pgp_num' => storage_hash['pg_num'],)
+'pg_num' => storage_hash['per_pool_pg_nums']['compute'],
+'pgp_num' => storage_hash['per_pool_pg_nums']['compute'],)
}
it { should contain_ceph__pool('compute').that_requires('Class[ceph::conf]') }

View File

@@ -6,29 +6,31 @@ describe manifest do
shared_examples 'catalog' do
storage_hash = Noop.hiera 'storage'
-it { should contain_ceph__pool('images').with(
-  'pg_num' => storage_hash['pg_num'],
-  'pgp_num' => storage_hash['pg_num'],)
-}
-it { should contain_ceph__pool('volumes').with(
-  'pg_num' => storage_hash['pg_num'],
-  'pgp_num' => storage_hash['pg_num'],)
-}
-it { should contain_ceph__pool('backups').with(
-  'pg_num' => storage_hash['pg_num'],
-  'pgp_num' => storage_hash['pg_num'],)
-}
+if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
+  it { should contain_ceph__pool('images').with(
+    'pg_num' => storage_hash['per_pool_pg_nums']['images'],
+    'pgp_num' => storage_hash['per_pool_pg_nums']['images'],)
+  }
+  it { should contain_ceph__pool('volumes').with(
+    'pg_num' => storage_hash['per_pool_pg_nums']['volumes'],
+    'pgp_num' => storage_hash['per_pool_pg_nums']['volumes'],)
+  }
+  it { should contain_ceph__pool('backups').with(
+    'pg_num' => storage_hash['per_pool_pg_nums']['backups'],
+    'pgp_num' => storage_hash['per_pool_pg_nums']['backups'],)
+  }
-if storage_hash['volumes_ceph']
-it { should contain_ceph__pool('volumes').that_notifies('Service[cinder-volume]') }
-it { should contain_ceph__pool('backups').that_notifies('Service[cinder-backup]') }
-it { should contain_service('cinder-volume') }
-it { should contain_service('cinder-backup') }
-end
+  if storage_hash['volumes_ceph']
+    it { should contain_ceph__pool('volumes').that_notifies('Service[cinder-volume]') }
+    it { should contain_ceph__pool('backups').that_notifies('Service[cinder-backup]') }
+    it { should contain_service('cinder-volume') }
+    it { should contain_service('cinder-backup') }
+  end
-if storage_hash['images_ceph']
-it { should contain_ceph__pool('images').that_notifies('Service[glance-api]') }
-it { should contain_service('glance-api') }
+  if storage_hash['images_ceph']
+    it { should contain_ceph__pool('images').that_notifies('Service[glance-api]') }
+    it { should contain_service('glance-api') }
+  end
end
end

View File

@@ -7,8 +7,11 @@ describe manifest do
storage_hash = Noop.hiera 'storage'
ceph_monitor_nodes = Noop.hiera 'ceph_monitor_nodes'
-if (storage_hash['images_ceph'] or storage_hash['objects_ceph'] or storage_hash['objects_ceph'])
+if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
+rgw_large_pool_name = '.rgw'
+rgw_large_pool_pg_nums = storage_hash['per_pool_pg_nums'][rgw_large_pool_name]
rgw_id = 'radosgw.gateway'
radosgw_auth_key = "client.#{rgw_id}"
rgw_s3_auth_use_keystone = Noop.hiera 'rgw_s3_auth_use_keystone', true
it { should contain_class('ceph::radosgw').with(
@@ -28,12 +31,16 @@
it { should contain_haproxy_backend_status('keystone-public').that_comes_before('Class[ceph::keystone]') }
it { should contain_haproxy_backend_status('keystone-admin').that_comes_before('Class[ceph::keystone]') }
-it {
-  should contain_service('httpd').with(
-    'hasrestart' => true,
-    'restart' => 'sleep 30 && apachectl graceful || apachectl restart',
+it { should contain_service('httpd').with(
+  :hasrestart => true,
+  :restart => 'sleep 30 && apachectl graceful || apachectl restart',
)
}
+it { should contain_exec("Create #{rgw_large_pool_name} pool").with(
+  :command => "ceph -n #{radosgw_auth_key} osd pool create #{rgw_large_pool_name} #{rgw_large_pool_pg_nums} #{rgw_large_pool_pg_nums}",
+  :unless => "rados lspools | grep '^#{rgw_large_pool_name}$'"
+)
+}
end
end