Adds support for configurable ceph pool replication count

Fixes: bug 1251560
Author: Edward Hope-Morley
Date:   2013-11-15 11:33:41 +00:00
parent 1ccad73db7
commit 79cbb29951
4 changed files with 48 additions and 13 deletions


@@ -64,3 +64,13 @@ options:
      The name that will be used to create the Ceph RBD image with. If the
      image name exists in Ceph, it will be re-used and the data will be
      overwritten.
  ceph-osd-replication-count:
    default: 2
    type: int
    description: |
      This value dictates the number of replicas Ceph must make of any
      object it stores within the rabbitmq RBD pool. This only applies
      when Ceph is used as a backend store. Note that once the rabbitmq
      RBD pool has been created, changing this value will have no
      effect (although it can still be changed by manually configuring
      your Ceph cluster).
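
Since the charm only applies this option when it first creates the pool, a later change has to be made against the cluster directly. A minimal sketch of that manual step, assuming an admin keyring and a pool named 'rabbitmq' (both illustrative, not taken from the charm):

    import subprocess

    # Hedged sketch: resize an existing pool's replica count directly in Ceph.
    # The 'rabbitmq' pool name and 'admin' keyring are assumptions for
    # illustration only.
    pool = 'rabbitmq'
    replicas = 3
    subprocess.check_call(['ceph', '--id', 'admin',
                           'osd', 'pool', 'set', pool, 'size', str(replicas)])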


@@ -9,6 +9,7 @@
#
import commands
import json
import subprocess
import os
import shutil
@@ -66,16 +67,37 @@ def pool_exists(service, name):
    return name in out


def create_pool(service, name):
    cmd = [
        'rados',
        '--id',
        service,
        'mkpool',
        name
    ]
    execute(cmd)


def get_osds(service):
    '''
    Return a list of all Ceph Object Storage Daemons
    currently in the cluster
    '''
    return json.loads(subprocess.check_output(['ceph', '--id', service,
                                               'osd', 'ls', '--format=json']))


def create_pool(service, name, replicas=2):
    ''' Create a new RADOS pool '''
    if pool_exists(service, name):
        utils.juju_log('WARNING',
                       "Ceph pool {} already exists, "
                       "skipping creation".format(name))
        return

    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    pgnum = (len(get_osds(service)) * 100 / replicas)
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'create',
        name, str(pgnum)
    ]
    subprocess.check_call(cmd)

    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'set', name,
        'size', str(replicas)
    ]
    subprocess.check_call(cmd)


def keyfile_path(service):
    return KEYFILE % service
@@ -220,7 +242,8 @@ def copy_files(src, dst, symlinks=False, ignore=None):
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[]):
                        blk_device, fstype, system_services=[],
                        rbd_pool_replicas=2):
    """
    To be called from the current cluster leader.
    Ensures given pool and RBD image exists, is mapped to a block device,
@@ -235,7 +258,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool)
        create_pool(service, pool, replicas=rbd_pool_replicas)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
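
For context on the placement-group sizing in the new create_pool above, a small worked example, assuming a hypothetical six-OSD cluster and the default of two replicas:

    # Worked example of pgnum = (number of OSDs * 100) / replicas as used above.
    # The six OSD ids are illustrative; get_osds() would normally return the
    # real ids via `ceph osd ls --format=json`.
    osd_ids = [0, 1, 2, 3, 4, 5]
    replicas = 2
    pgnum = len(osd_ids) * 100 // replicas
    print(pgnum)  # 300 placement groups requested for the new pool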


@@ -215,11 +215,13 @@ def ceph_changed():
        rbd_size = utils.config_get('rbd-size')
        sizemb = int(rbd_size.split('G')[0]) * 1024
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=RABBIT_DIR,
                                 blk_device=blk_device,
                                 system_services=['rabbitmq-server'])
                                 system_services=['rabbitmq-server'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')


@@ -1 +1 @@
97
98