Add support for use of directories instead of devices for OSDs

James Page 2013-08-27 11:44:50 +01:00
parent c00dec2a4f
commit 14bef22e44
5 changed files with 67 additions and 15 deletions


@@ -27,15 +27,20 @@ These two pieces of configuration must NOT be changed post bootstrap; attempting
to do this will cause a reconfiguration error and new service units will not join
the existing ceph cluster.
The charm also supports the specification of storage devices to be used in the
ceph cluster.

  osd-devices:
    A list of devices that the charm will attempt to detect, initialise and
    activate as ceph storage.

    This can be a superset of the actual storage devices presented to each
    service unit and can be changed post ceph bootstrap using `juju set`.

    The full path of each device must be provided, e.g. /dev/vdb.

    For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of
    directories instead of devices is also supported.
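
For example, a hypothetical osd-devices value mixing one block device and one
directory (anything not starting with /dev is treated as a directory):

    osd-devices: /dev/vdb /srv/ceph-osd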

At a minimum you must provide a juju config file during initial deployment
with the fsid and monitor-secret options (contents of ceph.yaml below):
@@ -66,10 +71,6 @@ Location: http://jujucharms.com/charms/ceph
Technical Bootnotes
===================
This charm is currently deliberately inflexible and potentially destructive.
This charm uses the new-style Ceph deployment as reverse-engineered from the
Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected
a different strategy to form the monitor cluster. Since we don't know the


@@ -39,6 +39,9 @@ options:
      .
      These devices are the range of devices that will be checked for and
      used across all service units.
      .
      For ceph >= 0.56.6 these can also be directories instead of devices;
      the charm treats anything not starting with /dev as a directory.
  osd-journal:
    type: string
    description: |


@@ -15,14 +15,18 @@ import apt_pkg as apt
from charmhelpers.core.host import (
    mkdir,
    service_restart,
)
from charmhelpers.core.hookenv import (
    log,
    ERROR,
    config,
)
from charmhelpers.contrib.storage.linux.utils import (
    zap_disk,
    is_block_device,
)
from utils import (
    get_unit_hostname,
)

LEADER = 'leader'
@@ -119,6 +123,16 @@ def is_osd_disk(dev):
    return False


def start_osds(devices):
    if get_ceph_version() < "0.56.6":
        # Only supports block devices - force a rescan
        rescan_osd_devices()
    else:
        # Use ceph-disk-activate for later ceph versions
        for dev_or_path in devices:
            subprocess.check_call(['ceph-disk-activate', dev_or_path])
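

# Hedged sketch, not the charm's code: the guard above compares version
# strings lexicographically, which orders the ceph releases involved here
# correctly but is not a general version comparison. A more robust check
# could use apt_pkg (imported in this module as apt); the helper name is
# hypothetical.
def version_at_least(current, wanted):
    apt.init()  # apt_pkg must be initialised before version_compare
    return apt.version_compare(current, wanted) >= 0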


def rescan_osd_devices():
    cmd = [
        'udevadm', 'trigger',
@@ -291,6 +305,13 @@ def update_monfs():
def osdize(dev, osd_format, osd_journal, reformat_osd=False):
    if dev.startswith('/dev'):
        osdize_dev(dev, osd_format, osd_journal, reformat_osd)
    else:
        osdize_dir(dev)


def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False):
    if not os.path.exists(dev):
        log('Path {} does not exist - bailing'.format(dev))
        return
@@ -327,6 +348,25 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False):
    subprocess.check_call(cmd)


def osdize_dir(path):
    if os.path.exists(os.path.join(path, 'upstart')):
        log('Path {} is already configured as an OSD - bailing'.format(path))
        return

    if get_ceph_version() < "0.56.6":
        log('Unable to use directories for OSDs with ceph < 0.56.6',
            level=ERROR)
        raise RuntimeError('directories as OSDs require ceph >= 0.56.6')

    mkdir(path)
    cmd = [
        'ceph-disk-prepare',
        '--data-dir',
        path
    ]
    subprocess.check_call(cmd)
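

# Hedged usage sketch (hypothetical path): on ceph >= 0.56.6, osdize() routes
# a non-/dev path to osdize_dir(), which prepares the directory with
# ceph-disk-prepare; start_osds() later activates it via ceph-disk-activate.
#
#   osdize('/srv/ceph-osd', osd_format=None, osd_journal=None)
#   start_osds(['/srv/ceph-osd'])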


def device_mounted(dev):
    # mounted if the device's first partition (e.g. /dev/vdb1) appears
    # in /proc/mounts
    return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0


@@ -102,7 +102,7 @@ def config_changed():
        with open(JOURNAL_ZAPPED, 'w') as zapped:
            zapped.write('DONE')

    for dev in get_devices():
        ceph.osdize(dev, config('osd-format'), config('osd-journal'),
                    reformat_osd())
@@ -112,7 +112,7 @@ def config_changed():
        ceph.wait_for_bootstrap()

    if ceph.is_bootstrapped():
        ceph.start_osds(get_devices())

    log('End config-changed hook.')
@@ -139,6 +139,13 @@ def reformat_osd():
        return False


def get_devices():
    if config('osd-devices'):
        return config('osd-devices').split(' ')
    else:
        return []
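

# Hedged sketch, not the charm's code: split(' ') yields empty strings if
# osd-devices ever contains consecutive spaces; a whitespace-tolerant
# variant (hypothetical helper name) would use split() with no argument.
def get_devices_tolerant():
    return config('osd-devices').split() if config('osd-devices') else []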


@hooks.hook('mon-relation-departed',
            'mon-relation-joined')
def mon_relation():
@@ -149,7 +156,7 @@ def mon_relation():
    if len(get_mon_hosts()) >= moncount:
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()
        ceph.start_osds(get_devices())
        notify_osds()
        notify_radosgws()
        notify_client()
@@ -258,7 +265,8 @@ def start():
    # In case we're being redeployed to the same machines, try
    # to make sure everything is running as soon as possible.
    service_restart('ceph-mon-all')

    if ceph.is_bootstrapped():
        ceph.start_osds(get_devices())
if __name__ == '__main__':


@@ -1 +1 @@
96