library support

This commit is contained in:
Chris MacNaughton 2016-05-12 08:47:37 -04:00
parent f961745319
commit 3e633ce608
3 changed files with 230 additions and 98 deletions

View File

@ -17,6 +17,7 @@ import subprocess
import time
import os
import re
import socket
import sys
import shutil
from charmhelpers.cli.host import mounts
@ -27,16 +28,24 @@ from charmhelpers.core.host import (
lsb_release,
service_stop,
service_restart)
from charms.reactive import is_state
from charmhelpers.core.hookenv import (
log,
ERROR,
WARNING,
DEBUG,
cached,
config,
unit_get,
status_set,
relation_ids,
related_units,
relation_get,
)
from charmhelpers.fetch import (
apt_cache
apt_cache,
# apt_install,
# filter_installed_packages
)
from charmhelpers.contrib.storage.linux.utils import (
zap_disk,
@ -47,27 +56,10 @@ from charmhelpers.contrib.storage.linux.utils import (
# get_unit_hostname,
# )
# Imports from utils.py
import socket
import re
from charmhelpers.core.hookenv import (
unit_get,
cached,
config,
status_set,
)
from charmhelpers.fetch import (
apt_install,
filter_installed_packages
)
from charmhelpers.core.host import (
lsb_release
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
get_ipv6_addr
get_ipv6_addr,
format_ipv6_addr
)
# try:
@ -77,7 +69,24 @@ from charmhelpers.contrib.network.ip import (
# fatal=True)
# import dns.resolver
import dns.resolver
### This is migrated from hooks/utils.py
def get_mon_hosts():
    """Return a sorted list of 'addr:6789' monitor endpoints.

    Includes this unit's own public address when a local monitor is
    installed, plus every peer that has presented 'ceph-public-address'
    on the 'mon' relation.
    """
    def endpoint(addr):
        # IPv6 addresses must be bracketed before the port suffix.
        return '{}:6789'.format(format_ipv6_addr(addr) or addr)

    hosts = []
    if is_state('ceph_mon.installed'):
        hosts.append(endpoint(get_public_addr()))
    for relation_id in relation_ids('mon'):
        for peer in related_units(relation_id):
            peer_addr = relation_get('ceph-public-address', peer,
                                     relation_id)
            if peer_addr is not None:
                hosts.append(endpoint(peer_addr))
    return sorted(hosts)
def enable_pocket(pocket):
apt_sources = "/etc/apt/sources.list"
@ -164,8 +173,6 @@ def assert_charm_supports_ipv6():
"versions less than Trusty 14.04")
### This is migrated from hooks/ceph.py
LEADER = 'leader'
PEON = 'peon'
QUORUM = [LEADER, PEON]
@ -609,7 +616,7 @@ def generate_monitor_secret():
'--name=mon.',
'--gen-key'
]
res = subprocess.check_output(cmd)
res = subprocess.getoutput(cmd)
return "{}==".format(res.split('=')[1].strip())
@ -684,6 +691,7 @@ _upgrade_caps = {
'mon': ['allow rwx']
}
def get_radosgw_key():
    """Return the 'radosgw.gateway' cephx key with the radosgw caps."""
    key_name = 'radosgw.gateway'
    return get_named_key(key_name, _radosgw_caps)
@ -706,6 +714,8 @@ osd_upgrade_caps = {
'allow command "config-key exists"',
]
}
def get_upgrade_key():
    """Return the 'upgrade-osd' cephx key with the OSD upgrade caps."""
    key_name = 'upgrade-osd'
    return get_named_key(key_name, _upgrade_caps)
@ -936,63 +946,3 @@ def get_running_osds():
return result.split()
except subprocess.CalledProcessError:
return []
def emit_cephconf(mon_hosts):
    """Render ceph.conf from charm config and install it via alternatives.

    :param mon_hosts: list of 'addr:port' monitor endpoints; joined with
        spaces into the template's 'mon host' setting.
    """
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)
    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)
    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(mon_hosts),
        'fsid': leader_get('fsid'),
        # ceph older than 0.51 only understands the single
        # 'auth supported' option (see templates/ceph.conf).
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
    }
    if config('prefer-ipv6'):
        # No explicit networks configured: pin the daemon addrs to the
        # unit's first IPv6 address instead.
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    if az_info():
        cephcontext['crush_location'] = "root=default rack={} host={}" \
            .format(az_info(), socket.gethostname())

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    # NOTE(review): 'ceph' is not imported in the visible part of this
    # file -- confirm ceph.ceph_user() resolves at runtime.
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
def get_fsid():
    """Return the cluster fsid published on the 'mon' relation, or None."""
    return get_conf('fsid')
def get_auth():
    """Return the auth scheme published on the 'mon' relation, or None."""
    return get_conf('auth')
def get_conf(name):
    """Look up *name* on units of the 'mon' relation.

    Returns the first truthy value presented by any monitor unit, or
    None when no monitor has published it yet.
    """
    for relation_id in relation_ids('mon'):
        for monitor in related_units(relation_id):
            value = relation_get(name, monitor, relation_id)
            if value:
                return value
    return None

View File

@ -1,25 +1,47 @@
from charms.reactive import when, when_not, set_state
from charms import reactive
from charms.reactive import when, when_not, set_state, is_state
import charms.apt
from charms.ceph_base import (
get_networks,
get_public_addr,
# get_networks,
# get_public_addr,
get_mon_hosts,
is_bootstrapped,
is_quorum,
get_running_osds,
assert_charm_supports_ipv6
)
from charmhelpers.contrib.hardening.harden import harden
# from charmhelpers.core.host import (
# umount,
# )
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import (
# log,
config,
relation_ids,
related_units,
relation_get,
status_set,
local_unit
)
from charmhelpers.core.sysctl import create as create_sysctl
# from charmhelpers.contrib.hardening.harden import harden
@when_not('ceph.installed')
# @harden()
def install_ceph_base():
    """Install the base ceph packages and mark installation complete.

    Adds the configured apt source (with its signing key), queues the
    package set declared in charms.ceph_base.PACKAGES, installs the
    queued packages, then sets the 'ceph.installed' state so this
    handler does not run again.
    """
    charms.apt.add_source(config('source'), key=config('key'))
    charms.apt.queue_install(charms.ceph_base.PACKAGES)
    charms.apt.install_queued()
    set_state('ceph.installed')
@when('config.changed')
@harden()
@when('config.changed', 'ceph.installed')
# @harden()
def config_changed():
# # Check if an upgrade was requested
# check_for_upgrade()
@ -30,9 +52,127 @@ def config_changed():
sysctl_dict = config('sysctl')
if sysctl_dict:
create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf')
create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
# if relations_of_type('nrpe-external-master'):
# update_nrpe_config()
e_mountpoint = config('ephemeral-unmount')
if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
umount(e_mountpoint)
prepare_disks_and_activate()
# sysctl_dict = config('sysctl')
# if sysctl_dict:
# create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf')
# e_mountpoint = config('ephemeral-unmount')
# if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
# umount(e_mountpoint)
# prepare_disks_and_activate()
def assess_status():
    """Assess status of current unit and report it via status_set.

    Aggregates the (status, message) results of the monitor and OSD
    checkers; 'blocked' wins over 'waiting', which wins over 'active'.

    Fix: the original unconditionally unpacked the checker result, but
    log_osds() can report status itself and return None, which raised
    TypeError here.  A None result now short-circuits: the checker has
    already called status_set, so we must not overwrite it.
    """
    statuses = set()
    messages = set()
    for installed_state, checker in (('ceph_mon.installed', log_monitor),
                                     ('ceph_osd.installed', log_osds)):
        if is_state(installed_state):
            result = checker()
            if result is None:
                # Checker already reported via status_set; don't clobber.
                return
            status, message = result
            statuses.add(status)
            messages.add(message)
    if 'blocked' in statuses:
        status = 'blocked'
    elif 'waiting' in statuses:
        status = 'waiting'
    else:
        status = 'active'
    message = '; '.join(messages)
    status_set(status, message)
def get_conf(name):
    """Return the first truthy *name* value published on the 'mon'
    relation, or None if no monitor unit has presented it."""
    candidates = (
        relation_get(name, unit, relid)
        for relid in relation_ids('mon')
        for unit in related_units(relid)
    )
    return next((value for value in candidates if value), None)
def log_monitor():
    """Return a (status, message) tuple describing monitor cluster state."""
    required = int(config('monitor-count'))
    peers = get_peer_units()
    # Not enough peer units yet to reach the configured cluster size.
    if len(peers) < required:
        return ('blocked', 'Insufficient peer units to bootstrap'
                           ' cluster (require {})'.format(required))
    # Peers exist, but some have not presented their address yet.
    presented = sum(1 for has_addr in peers.values() if has_addr)
    if presented < required:
        return ('waiting', 'Peer units detected, waiting for addresses')
    if is_bootstrapped() and is_quorum():
        return ('active', 'Unit is ready and clustered')
    # Unit should be running and clustered, but no quorum
    # TODO: should this be blocked or waiting?
    return ('blocked', 'Unit not clustered (no quorum)')
# If there's a pending lock for this unit,
# can i get the lock?
# reboot the ceph-mon process
def get_peer_units():
    """Map mon-peer unit names to whether each has presented its address.

    The local unit is always included and counted as ready.
    """
    units = {local_unit(): True}
    for relation_id in relation_ids('mon'):
        for peer in related_units(relation_id):
            presented = relation_get('ceph-public-address', peer,
                                     relation_id)
            units[peer] = presented is not None
    return units
def log_osds():
    """Return a (status, message) tuple describing OSD readiness.

    Fix: the original called status_set directly and returned None on
    the first two failure paths while returning tuples elsewhere; the
    caller (assess_status) unpacks the result, so the None paths raised
    TypeError.  Every path now returns a tuple, consistent with
    log_monitor().
    """
    if not is_state('ceph_mon.installed'):
        # No co-located monitor: the 'mon' relation must supply keys.
        if len(relation_ids('mon')) < 1:
            return ('blocked', 'Missing relation: monitor')
        # Monitors must have presented addresses and the bootstrap key.
        monitors = get_mon_hosts()
        if len(monitors) < 1 or not get_conf('osd_bootstrap_key'):
            return ('waiting', 'Incomplete relation: monitor')
    # Check for OSD device creation parity i.e. at least some devices
    # must have been presented and used for this charm to be operational
    running_osds = get_running_osds()
    if not running_osds:
        return ('blocked',
                'No block devices detected using current configuration')
    return ('active',
            'Unit is ready ({} OSD)'.format(len(running_osds)))
# Per https://github.com/juju-solutions/charms.reactive/issues/33,
# this module may be imported multiple times so ensure the
# initialization hook is only registered once. I have to piggy back
# onto the namespace of a module imported before reactive discovery
# to do this.
if not hasattr(reactive, '_ceph_log_registered'):
    # We need to register this to run every hook, not just during install
    # and config-changed, to protect against race conditions. If we don't
    # do this, then the config in the hook environment may show updates
    # to running hooks well before the config-changed hook has been invoked
    # and the initialization provided an opportunity to be run.
    hookenv.atexit(assess_status)
    reactive._ceph_log_registered = True

42
templates/ceph.conf Normal file
View File

@ -0,0 +1,42 @@
{#- ceph.conf template rendered by the charm (context built in
    emit_cephconf). Jinja comments are stripped at render time, so the
    generated file is unchanged. 'old_auth' selects the pre-0.51 single
    'auth supported' form; public/cluster network and addr settings are
    emitted only when present in the context. -#}
[global]
{% if old_auth %}
auth supported = {{ auth_supported }}
{% else %}
auth cluster required = {{ auth_supported }}
auth service required = {{ auth_supported }}
auth client required = {{ auth_supported }}
{% endif %}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
fsid = {{ fsid }}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}
mon cluster log to syslog = {{ use_syslog }}
{%- if ceph_public_network is string %}
public network = {{ ceph_public_network }}
{%- endif %}
{%- if ceph_cluster_network is string %}
cluster network = {{ ceph_cluster_network }}
{%- endif %}
{% if public_addr %}
public addr = {{ public_addr }}
{% endif %}
{% if cluster_addr %}
cluster addr = {{ cluster_addr }}
{%- endif %}
[mon]
keyring = /var/lib/ceph/mon/$cluster-$id/keyring
[mds]
keyring = /var/lib/ceph/mds/$cluster-$id/keyring
[osd]
keyring = /var/lib/ceph/osd/$cluster-$id/keyring
osd journal size = {{ osd_journal_size }}
filestore xattr use omap = true