Add osd-devices and osd-journal block storage

Add the "osd-devices" block-type storage, with
minimum of 0 and no maximum. Volumes assigned to
a unit will be added as OSDs.

Also, add the "osd-journal" block-type storage,
with a minimum of 0 and a maximum of 1. If an
osd-journal storage volume is attached, it will be
used in preference to the osd-journal configuration
option.

For now, handling of osd-journal is static, just
as the configuration-based method was before.
Removing or adding the journal after deployment is
not currently supported. This is possible with
further changes, but requires stopping Ceph and
migrating the existing journal, and so is out of
scope here.
Andrew Wilkins 2015-11-23 17:13:18 +08:00
parent b6b03952b8
commit 669fd96b11
5 changed files with 50 additions and 10 deletions

View File

@@ -40,7 +40,8 @@ options:
       The devices to format and set up as osd volumes.
       .
       These devices are the range of devices that will be checked for and
-      used across all service units.
+      used across all service units, in addition to any volumes attached
+      via the --storage flag during deployment.
       .
       For ceph >= 0.56.6 these can also be directories instead of devices - the
       charm assumes anything not starting with /dev is a directory instead.

View File

@@ -29,6 +29,8 @@ from charmhelpers.core.hookenv import (
     relations_of_type,
     status_set,
     local_unit,
+    storage_get,
+    storage_list
 )
 from charmhelpers.core.host import (
     service_restart,
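
For reference, the two helpers imported here are thin wrappers around Juju's storage-list and storage-get hook tools. A rough, simplified sketch of what they do (the real implementations live in charmhelpers.core.hookenv):

# Simplified sketch of the storage helpers used below; not the actual
# charmhelpers code. Both shell out to Juju hook tools, so they only
# work inside a hook context.
import json
import subprocess


def storage_list(storage_name=None):
    # Return the storage instance IDs attached to this unit, optionally
    # filtered by store name, e.g. ['osd-devices/0', 'osd-devices/1'].
    cmd = ['storage-list', '--format=json']
    if storage_name:
        cmd.append(storage_name)
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))


def storage_get(attribute=None, storage_id=None):
    # Return an attribute of a storage instance; 'location' is the
    # block device path for block-type storage.
    cmd = ['storage-get', '--format=json']
    if storage_id:
        cmd.extend(['-s', storage_id])
    if attribute:
        cmd.append(attribute)
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))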
@@ -145,7 +147,7 @@ def config_changed():
     if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
         umount(e_mountpoint)
-    osd_journal = config('osd-journal')
+    osd_journal = get_osd_journal()
     if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and
             os.path.exists(osd_journal)):
         ceph.zap_disk(osd_journal)
@@ -158,16 +160,36 @@ def config_changed():
         ceph.bootstrap_monitor_cluster(config('monitor-secret'))
         ceph.wait_for_bootstrap()
-    if ceph.is_bootstrapped():
-        for dev in get_devices():
-            ceph.osdize(dev, config('osd-format'), config('osd-journal'),
-                        reformat_osd(), config('ignore-device-errors'))
-        ceph.start_osds(get_devices())
+    storage_changed()
     if relations_of_type('nrpe-external-master'):
         update_nrpe_config()
+
+
+@hooks.hook('osd-devices-storage-attached', 'osd-devices-storage-detaching')
+def storage_changed():
+    if ceph.is_bootstrapped():
+        for dev in get_devices():
+            ceph.osdize(dev, config('osd-format'), get_osd_journal(),
+                        reformat_osd(), config('ignore-device-errors'))
+        ceph.start_osds(get_devices())
+
+
+def get_osd_journal():
+    '''
+    Returns the block device path to use for the OSD journal, if any.
+
+    If there is an osd-journal storage instance attached, it will be
+    used as the journal. Otherwise, the osd-journal configuration will
+    be returned.
+    '''
+    storage_ids = storage_list('osd-journal')
+    if storage_ids:
+        # There can be at most one osd-journal storage instance.
+        return storage_get('location', storage_ids[0])
+    return config('osd-journal')
+
+
 def get_mon_hosts():
     hosts = []
     addr = get_public_addr()
@@ -207,9 +229,15 @@ def reformat_osd():
 def get_devices():
     if config('osd-devices'):
-        return config('osd-devices').split(' ')
+        devices = config('osd-devices').split(' ')
     else:
-        return []
+        devices = []
+
+    # List storage instances for the 'osd-devices'
+    # store declared for this charm too, and add
+    # their block device paths to the list.
+    storage_ids = storage_list('osd-devices')
+    devices.extend((storage_get('location', s) for s in storage_ids))
+
+    return devices
+
+
 @hooks.hook('mon-relation-joined')
@@ -231,7 +259,7 @@ def mon_relation():
         ceph.bootstrap_monitor_cluster(config('monitor-secret'))
         ceph.wait_for_bootstrap()
         for dev in get_devices():
-            ceph.osdize(dev, config('osd-format'), config('osd-journal'),
+            ceph.osdize(dev, config('osd-format'), get_osd_journal(),
                         reformat_osd(), config('ignore-device-errors'))
         ceph.start_osds(get_devices())
         notify_osds()
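
To make the combined behaviour concrete, here is a toy, self-contained illustration with stubbed config and storage data (device paths and instance IDs are invented); it mirrors the logic of get_devices() and get_osd_journal() added above:

# Toy illustration only: all values below are invented.
FAKE_CONFIG = {
    'osd-devices': '/dev/sdb /dev/sdc',  # charm config option
    'osd-journal': '',                   # no journal configured
}
FAKE_STORAGE = {
    'osd-devices/0': '/dev/xvdf',        # volume attached via --storage
    'osd-journal/1': '/dev/xvdg',        # journal volume, at most one
}


def config(key):
    return FAKE_CONFIG.get(key)


def storage_list(name):
    return [s for s in FAKE_STORAGE if s.startswith(name + '/')]


def storage_get(attribute, storage_id):
    assert attribute == 'location'
    return FAKE_STORAGE[storage_id]


def get_devices():
    devices = config('osd-devices').split(' ') if config('osd-devices') else []
    devices.extend(storage_get('location', s)
                   for s in storage_list('osd-devices'))
    return devices


def get_osd_journal():
    storage_ids = storage_list('osd-journal')
    if storage_ids:
        return storage_get('location', storage_ids[0])
    return config('osd-journal')


print(get_devices())      # ['/dev/sdb', '/dev/sdc', '/dev/xvdf']
print(get_osd_journal())  # '/dev/xvdg': attached storage wins over config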

View File

@@ -0,0 +1 @@
+ceph_hooks.py

View File

@@ -0,0 +1 @@
+ceph_hooks.py
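
The two new one-line files point back at ceph_hooks.py; their names are not visible here, but given the decorator added above they are presumably the osd-devices-storage-attached and osd-devices-storage-detaching hook entry points. charmhelpers dispatches on the name the hook file is invoked as; a simplified, self-contained sketch of that pattern (not the real charmhelpers Hooks class):

# Simplified sketch of hook dispatch by file name; assumes the real
# charm uses charmhelpers.core.hookenv.Hooks and hooks.execute(sys.argv).
import os
import sys


class Hooks(object):
    def __init__(self):
        self._registry = {}

    def hook(self, *hook_names):
        # Register one handler for several hook names.
        def wrapper(fn):
            for name in hook_names:
                self._registry[name] = fn
            return fn
        return wrapper

    def execute(self, args):
        # Juju runs the hook via its file name, so a symlink named
        # osd-devices-storage-attached ends up calling storage_changed().
        handler = self._registry.get(os.path.basename(args[0]))
        if handler:
            handler()


hooks = Hooks()


@hooks.hook('osd-devices-storage-attached', 'osd-devices-storage-detaching')
def storage_changed():
    print('would (re)initialise OSDs here')


if __name__ == '__main__':
    hooks.execute(sys.argv)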

View File

@@ -25,3 +25,12 @@ provides:
   nrpe-external-master:
     interface: nrpe-external-master
     scope: container
+storage:
+  osd-devices:
+    type: block
+    multiple:
+      range: 0-
+  osd-journal:
+    type: block
+    multiple:
+      range: 0-1
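
These declarations line up with the commit message: any number of osd-devices instances may be attached (range 0-), while at most one osd-journal instance is allowed (range 0-1), which is what lets get_osd_journal() take the first and only instance. A tiny illustrative check of that invariant (instance IDs invented):

# Illustrative only: instance IDs are invented. With "range: 0-1" Juju
# never attaches more than one osd-journal instance to a unit, so taking
# the first (and only) ID, as get_osd_journal() does, is safe.
attached = {
    'osd-devices': ['osd-devices/0', 'osd-devices/1'],  # range: 0-  -> any number
    'osd-journal': ['osd-journal/2'],                    # range: 0-1 -> at most one
}

assert len(attached['osd-journal']) <= 1
journal_ids = attached['osd-journal']
print(journal_ids[0] if journal_ids else None)  # -> osd-journal/2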