From 2d0b3fe9b80dcb20febf3b347991532a3dce6532 Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:10:07 +0100 Subject: [PATCH 01/20] Initial move to charm-helpers --- .pydevproject | 4 +- charm-helpers-sync.yaml | 4 + hooks/ceph.py | 4 +- hooks/charmhelpers/__init__.py | 0 hooks/charmhelpers/core/__init__.py | 0 hooks/charmhelpers/core/hookenv.py | 320 ++++++++++++++++++++++++++++ hooks/charmhelpers/core/host.py | 261 +++++++++++++++++++++++ hooks/hooks.py | 247 +++++++++++---------- 8 files changed, 720 insertions(+), 120 deletions(-) create mode 100644 charm-helpers-sync.yaml create mode 100644 hooks/charmhelpers/__init__.py create mode 100644 hooks/charmhelpers/core/__init__.py create mode 100644 hooks/charmhelpers/core/hookenv.py create mode 100644 hooks/charmhelpers/core/host.py diff --git a/.pydevproject b/.pydevproject index a61f5fb..998e0aa 100644 --- a/.pydevproject +++ b/.pydevproject @@ -1,7 +1,5 @@ - - - + python 2.7 Default diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml new file mode 100644 index 0000000..7ee93b7 --- /dev/null +++ b/charm-helpers-sync.yaml @@ -0,0 +1,4 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core diff --git a/hooks/ceph.py b/hooks/ceph.py index 6502b18..5b107b9 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -10,7 +10,7 @@ import json import subprocess import time -import utils +#import utils import os import apt_pkg as apt @@ -18,6 +18,8 @@ LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + def is_quorum(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 0000000..e28bfd9 --- /dev/null +++ b/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,320 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. 
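A standalone sketch of that behaviour (editor's illustration, not part of the patch): the result is keyed on str((func, args, kwargs)), so a repeat call with identical arguments is answered from the module-level cache dict without re-running the body. The unit_get below is a local stand-in, not the real hook tool.

    # Editor's sketch of the memoization pattern used by @cached above
    # (the real helper lives in charmhelpers.core.hookenv).
    cache = {}

    def cached(func):
        def wrapper(*args, **kwargs):
            key = str((func, args, kwargs))
            try:
                return cache[key]
            except KeyError:
                res = func(*args, **kwargs)
                cache[key] = res
                return res
        return wrapper

    calls = []

    @cached
    def unit_get(attribute):
        calls.append(attribute)            # stands in for shelling out to 'unit-get'
        return 'value-for-' + attribute

    unit_get('private-address')
    unit_get('private-address')
    assert calls == ['private-address']    # second call served from the cache

Calling flush('unit_get') (or any substring of the cached key) would evict those entries again, as the flush() helper above shows.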
+ ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['unit'] = local_unit() + context['rels'] = relations() + context['rel'] = relation_get() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return Serializable(json.loads( + subprocess.check_output(config_cmd_line) + )) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return Serializable(json.loads(subprocess.check_output(_args))) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in relation_settings.items(): + relation_cmd_line.append('{}={}'.format(k, v)) + for k, v in kwargs.items(): + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + 
reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return Serializable(relation) + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + return decorated + return wrapper diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py new file mode 
100644 index 0000000..cee4ee0 --- /dev/null +++ b/hooks/charmhelpers/core/host.py @@ -0,0 +1,261 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from hookenv import log, execution_environment + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + context = execution_environment() + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path.format(**context)) + cmd.append(to_path.format(**context)) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + context = execution_environment() + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source.format(**context), + destination.format(**context) + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + context = execution_environment() + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): + """Create or overwrite a file with the contents of a string""" + context = execution_environment() + context.update(kwargs) + log("Writing file {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + with open(path.format(**context), 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(fmtstr.format(**context)) + + +def render_template_file(source, destination, **kwargs): + """Create or overwrite a file using a 
template""" + log("Rendering template {} for {}".format(source, + destination)) + context = execution_environment() + with open(source.format(**context), 'r') as template: + write_file(destination.format(**context), template.read(), + **kwargs) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. 
+ ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(set(restarts)): + service('restart', service_name) + return wrapped_f + return wrap diff --git a/hooks/hooks.py b/hooks/hooks.py index b74eefb..d702962 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -15,7 +15,35 @@ import shutil import sys import ceph -import utils +#import utils +from charmhelpers.core.hookenv import ( + log, + ERROR, + config, + relation_ids, + related_units, + relation_get, + relation_set, + remote_unit, + Hooks, + UnregisteredHookError + ) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages, + mkdir + ) + +from utils import ( + render_template, + configure_source, + get_host_ip, + get_unit_hostname + ) + + +hooks = Hooks() def install_upstart_scripts(): @@ -25,54 +53,55 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') +@hooks.hook('install') def install(): - utils.juju_log('INFO', 'Begin install hook.') - utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs') + log('Begin install hook.') + configure_source() + apt_update(fatal=True) + apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() - utils.juju_log('INFO', 'End install hook.') + log('End install hook.') def emit_cephconf(): cephcontext = { - 'auth_supported': utils.config_get('auth-supported'), + 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), - 'fsid': utils.config_get('fsid'), + 'fsid': config('fsid'), 'version': ceph.get_ceph_version() } with open('/etc/ceph/ceph.conf', 'w') as cephconf: - cephconf.write(utils.render_template('ceph.conf', cephcontext)) + cephconf.write(render_template('ceph.conf', cephcontext)) JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' +@hooks.hook('config-changed') def config_changed(): - utils.juju_log('INFO', 'Begin config-changed hook.') + log('Begin config-changed hook.') - utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) + log('Monitor hosts are ' + repr(get_mon_hosts())) # Pre-flight checks - if not utils.config_get('fsid'): - utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') + if not config('fsid'): + log('No fsid supplied, cannot proceed.', level=ERROR) sys.exit(1) - if not utils.config_get('monitor-secret'): - utils.juju_log('CRITICAL', - 'No monitor-secret supplied, cannot proceed.') + if not config('monitor-secret'): + log('No monitor-secret supplied, cannot proceed.', level=ERROR) sys.exit(1) - if utils.config_get('osd-format') not in ceph.DISK_FORMATS: - utils.juju_log('CRITICAL', - 'Invalid OSD disk format configuration specified') + if config('osd-format') not in ceph.DISK_FORMATS: + log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) emit_cephconf() - e_mountpoint = utils.config_get('ephemeral-unmount') + e_mountpoint = config('ephemeral-unmount') if (e_mountpoint and filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) - osd_journal = utils.config_get('osd-journal') + osd_journal = config('osd-journal') if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and os.path.exists(osd_journal)): @@ -80,31 +109,31 @@ def config_changed(): with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') - for dev in 
utils.config_get('osd-devices').split(' '): + for dev in config('osd-devices').split(' '): osdize(dev) # Support use of single node ceph if (not ceph.is_bootstrapped() and - int(utils.config_get('monitor-count')) == 1): + int(config('monitor-count')) == 1): bootstrap_monitor_cluster() ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): ceph.rescan_osd_devices() - utils.juju_log('INFO', 'End config-changed hook.') + log('End config-changed hook.') def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(utils.get_host_ip())) + hosts.append('{}:6789'.format(get_host_ip())) - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): + for relid in relation_ids('mon'): + for unit in related_units(relid): hosts.append( - '{}:6789'.format(utils.get_host_ip( - utils.relation_get('private-address', - unit, relid))) + '{}:6789'.format(get_host_ip( + relation_get('private-address', + unit, relid))) ) hosts.sort() @@ -112,7 +141,7 @@ def get_mon_hosts(): def update_monfs(): - hostname = utils.get_unit_hostname() + hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) upstart = '{}/upstart'.format(monfs) if (os.path.exists(monfs) and @@ -124,20 +153,19 @@ def update_monfs(): def bootstrap_monitor_cluster(): - hostname = utils.get_unit_hostname() + hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) upstart = '{}/upstart'.format(path) - secret = utils.config_get('monitor-secret') + secret = config('monitor-secret') keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): - utils.juju_log('INFO', - 'bootstrap_monitor_cluster: mon already initialized.') + log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - os.makedirs('/var/run/ceph', mode=0755) - os.makedirs(path) + mkdir('/var/run/ceph', perms=0755) + mkdir(path) # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, @@ -162,7 +190,7 @@ def bootstrap_monitor_cluster(): def reformat_osd(): - if utils.config_get('osd-reformat'): + if config('osd-reformat'): return True else: return False @@ -170,31 +198,28 @@ def reformat_osd(): def osdize(dev): if not os.path.exists(dev): - utils.juju_log('INFO', - 'Path {} does not exist - bailing'.format(dev)) + log('Path {} does not exist - bailing'.format(dev)) return if (ceph.is_osd_disk(dev) and not reformat_osd()): - utils.juju_log('INFO', - 'Looks like {} is already an OSD, skipping.' + log('Looks like {} is already an OSD, skipping.' 
.format(dev)) return if device_mounted(dev): - utils.juju_log('INFO', - 'Looks like {} is in use, skipping.'.format(dev)) + log('Looks like {} is in use, skipping.'.format(dev)) return cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options if ceph.get_ceph_version() >= "0.48.3": - osd_format = utils.config_get('osd-format') + osd_format = config('osd-format') if osd_format: cmd.append('--fs-type') cmd.append(osd_format) cmd.append(dev) - osd_journal = utils.config_get('osd-journal') + osd_journal = config('osd-journal') if (osd_journal and os.path.exists(osd_journal)): cmd.append(osd_journal) @@ -213,11 +238,13 @@ def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 +@hooks.hook('mon-relation-departed') +@hooks.hook('mon-relation-joined') def mon_relation(): - utils.juju_log('INFO', 'Begin mon-relation hook.') + log('Begin mon-relation hook.') emit_cephconf() - moncount = int(utils.config_get('monitor-count')) + moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: bootstrap_monitor_cluster() ceph.wait_for_bootstrap() @@ -226,127 +253,115 @@ def mon_relation(): notify_radosgws() notify_client() else: - utils.juju_log('INFO', - 'Not enough mons ({}), punting.'.format( + log('Not enough mons ({}), punting.'.format( len(get_mon_hosts()))) - utils.juju_log('INFO', 'End mon-relation hook.') + log('End mon-relation hook.') def notify_osds(): - utils.juju_log('INFO', 'Begin notify_osds.') + log('Begin notify_osds.') - for relid in utils.relation_ids('osd'): - utils.relation_set(fsid=utils.config_get('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=utils.config_get('auth-supported'), - rid=relid) + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + fsid=config('fsid'), + osd_bootstrap_key=ceph.get_osd_bootstrap_key(), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_osds.') + log('End notify_osds.') def notify_radosgws(): - utils.juju_log('INFO', 'Begin notify_radosgws.') + log('Begin notify_radosgws.') - for relid in utils.relation_ids('radosgw'): - utils.relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=utils.config_get('auth-supported'), - rid=relid) + for relid in relation_ids('radosgw'): + relation_set(relation_id=relid, + radosgw_key=ceph.get_radosgw_key(), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_radosgws.') + log('End notify_radosgws.') def notify_client(): - utils.juju_log('INFO', 'Begin notify_client.') + log('Begin notify_client.') - for relid in utils.relation_ids('client'): - units = utils.relation_list(relid) + for relid in relation_ids('client'): + units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported'), - rid=relid) + relation_set(relation_id=relid, + key=ceph.get_named_key(service_name), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_client.') + log('End notify_client.') +@hooks.hook('osd-relation-joined') def osd_relation(): - utils.juju_log('INFO', 'Begin osd-relation hook.') + log('Begin osd-relation hook.') if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - providing fsid & keys') - utils.relation_set(fsid=utils.config_get('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing fsid & keys') + relation_set(fsid=config('fsid'), + 
osd_bootstrap_key=ceph.get_osd_bootstrap_key(), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring fsid provision') + log('mon cluster not in quorum - deferring fsid provision') - utils.juju_log('INFO', 'End osd-relation hook.') + log('End osd-relation hook.') +@hooks.hook('radosgw-relation-joined') def radosgw_relation(): - utils.juju_log('INFO', 'Begin radosgw-relation hook.') + log('Begin radosgw-relation hook.') - utils.install('radosgw') # Install radosgw for admin tools + # Install radosgw for admin tools + apt_install(packages=filter_installed_packages(['radosgw'])) if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing radosgw with keys') - utils.relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing radosgw with keys') + relation_set(radosgw_key=ceph.get_radosgw_key(), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring key provision') + log('mon cluster not in quorum - deferring key provision') - utils.juju_log('INFO', 'End radosgw-relation hook.') + log('End radosgw-relation hook.') +@hooks.hook('client-relation-joined') def client_relation(): - utils.juju_log('INFO', 'Begin client-relation hook.') + log('Begin client-relation hook.') if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing client with keys') - service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing client with keys') + service_name = remote_unit().split('/')[0] + relation_set(key=ceph.get_named_key(service_name), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring key provision') + log('mon cluster not in quorum - deferring key provision') - utils.juju_log('INFO', 'End client-relation hook.') + log('End client-relation hook.') +@hooks.hook('upgrade-charm') def upgrade_charm(): - utils.juju_log('INFO', 'Begin upgrade-charm hook.') + log('Begin upgrade-charm hook.') emit_cephconf() - utils.install('xfsprogs') + apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() update_monfs() - utils.juju_log('INFO', 'End upgrade-charm hook.') + log('End upgrade-charm hook.') +@hooks.hook('start') def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
- subprocess.call(['start', 'ceph-mon-all-starter']) + subprocess.call(['start', 'ceph-mon-all']) ceph.rescan_osd_devices() -utils.do_hooks({ - 'config-changed': config_changed, - 'install': install, - 'mon-relation-departed': mon_relation, - 'mon-relation-joined': mon_relation, - 'osd-relation-joined': osd_relation, - 'radosgw-relation-joined': radosgw_relation, - 'client-relation-joined': client_relation, - 'start': start, - 'upgrade-charm': upgrade_charm, - }) - -sys.exit(0) +try: + hooks.execute(sys.argv) +except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) From 0f3f4343ff00f51974ca749409b5419d7850becd Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:24:03 +0100 Subject: [PATCH 02/20] Pickup fixes for string handling --- charm-helpers-sync.yaml | 2 +- hooks/ceph.py | 11 +++++++---- hooks/charmhelpers/core/hookenv.py | 31 ++++++++++++++++++++++-------- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 7ee93b7..e55f90c 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: ../..//charm-helpers/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/hooks/ceph.py b/hooks/ceph.py index 5b107b9..ed58b6e 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -13,6 +13,9 @@ import time #import utils import os import apt_pkg as apt +from utils import ( + get_unit_hostname + ) LEADER = 'leader' PEON = 'peon' @@ -22,7 +25,7 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -46,7 +49,7 @@ def is_quorum(): def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -75,7 +78,7 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -198,7 +201,7 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() + get_unit_hostname() ), 'auth', 'get-or-create', 'client.{}'.format(name), ] diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index e28bfd9..b28240c 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -87,6 +87,14 @@ class Serializable(UserDict.IterableUserDict): except KeyError: raise AttributeError(attr) + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + def json(self): "Serialize the object to json" return json.dumps(self.data) @@ -142,11 +150,13 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - return Serializable(json.loads( - subprocess.check_output(config_cmd_line) - )) + value = json.loads(subprocess.check_output(config_cmd_line)) except ValueError: return None + if isinstance(value, dict): + return Serializable(value) + else: + return value @cached @@ -159,19 +169,24 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return Serializable(json.loads(subprocess.check_output(_args))) + value = json.loads(subprocess.check_output(_args)) except ValueError: return None + if isinstance(value, dict): + return Serializable(value) + else: + return value def relation_set(relation_id=None, relation_settings={}, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in relation_settings.items(): - relation_cmd_line.append('{}={}'.format(k, v)) - for k, v in kwargs.items(): - relation_cmd_line.append('{}={}'.format(k, v)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) subprocess.check_call(relation_cmd_line) # Flush cache of any relation-gets for local unit flush(local_unit()) From c4554864f6650acea86342b11ef7a41895b54839 Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:38:19 +0100 Subject: [PATCH 03/20] Switch back to main charmhelpers branch --- charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index e55f90c..7ee93b7 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: ../..//charm-helpers/charm-helpers +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 54b7af2130b095c91d8f849bc9ddc66f3a52467c Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:44:05 +0100 Subject: [PATCH 04/20] Minor tidy --- hooks/hooks.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hooks/hooks.py b/hooks/hooks.py index d702962..d0f2927 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -42,7 +42,6 @@ from utils import ( get_unit_hostname ) - hooks = Hooks() @@ -238,8 +237,8 @@ def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 -@hooks.hook('mon-relation-departed') -@hooks.hook('mon-relation-joined') +@hooks.hook('mon-relation-departed', + 'mon-relation-joined') def mon_relation(): log('Begin mon-relation hook.') emit_cephconf() @@ -361,7 +360,8 @@ def start(): ceph.rescan_osd_devices() -try: - hooks.execute(sys.argv) -except UnregisteredHookError as e: - log('Unknown hook {} - skipping.'.format(e)) +if __name__ == '__main__': + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) From c2d08e0a79ca0e28e811a0bd16e71ef486601768 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 09:43:24 +0100 Subject: [PATCH 05/20] Trim down utils.py to remaining calls --- hooks/charmhelpers/core/host.py | 4 +- hooks/hooks.py | 2 +- hooks/utils.py | 155 +++++--------------------------- 3 files changed, 28 insertions(+), 133 deletions(-) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 
cee4ee0..19951ed 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import grp import subprocess import hashlib +from collections import OrderedDict + from hookenv import log, execution_environment @@ -255,7 +257,7 @@ def restart_on_change(restart_map): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(set(restarts)): + for service_name in list(OrderedDict.fromkeys(restarts)): service('restart', service_name) return wrapped_f return wrap diff --git a/hooks/hooks.py b/hooks/hooks.py index d0f2927..819219d 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -55,7 +55,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') - configure_source() + configure_source(config('source')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() diff --git a/hooks/utils.py b/hooks/utils.py index f5b1721..48c1871 100644 --- a/hooks/utils.py +++ b/hooks/utils.py @@ -7,47 +7,34 @@ # Paul Collins # -import os import subprocess import socket -import sys import re - - -def do_hooks(hooks): - hook = os.path.basename(sys.argv[0]) - - try: - hook_func = hooks[hook] - except KeyError: - juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) - else: - hook_func() - - -def install(*pkgs): - cmd = [ - 'apt-get', - '-y', - 'install' - ] - for pkg in pkgs: - cmd.append(pkg) - subprocess.check_call(cmd) +from charmhelpers.core.hookenv import ( + config, + unit_get, + cached + ) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages + ) TEMPLATES_DIR = 'templates' try: import jinja2 except ImportError: - install('python-jinja2') + apt_install(filter_installed_packages(['python-jinja2']), + fatal=True) import jinja2 try: import dns.resolver except ImportError: - install('python-dnspython') + apt_install(filter_installed_packages(['python-dnspython']), + fatal=True) import dns.resolver @@ -65,8 +52,7 @@ deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ -def configure_source(): - source = str(config_get('source')) +def configure_source(source=None): if not source: return if source.startswith('ppa:'): @@ -76,14 +62,15 @@ def configure_source(): ] subprocess.check_call(cmd) if source.startswith('cloud:'): - install('ubuntu-cloud-keyring') - pocket = source.split(':')[1] + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: apt.write("deb " + source + "\n") - key = config_get('key') + key = config('key') if key: cmd = [ 'apt-key', @@ -91,11 +78,7 @@ def configure_source(): '--recv-keys', key ] subprocess.check_call(cmd) - cmd = [ - 'apt-get', - 'update' - ] - subprocess.check_call(cmd) + apt_update(fatal=True) def enable_pocket(pocket): @@ -109,105 +92,15 @@ def enable_pocket(pocket): else: sources.write(line) -# Protocols -TCP = 'TCP' -UDP = 'UDP' - - -def expose(port, protocol='TCP'): - cmd = [ - 'open-port', - '{}/{}'.format(port, protocol) - ] - subprocess.check_call(cmd) - - -def juju_log(severity, message): - cmd = [ - 'juju-log', - '--log-level', severity, - message - ] - subprocess.check_call(cmd) - - -def relation_ids(relation): - cmd = [ - 'relation-ids', - relation - ] - return 
subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_list(rid): - cmd = [ - 'relation-list', - '-r', rid, - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_get(attribute, unit=None, rid=None): - cmd = [ - 'relation-get', - ] - if rid: - cmd.append('-r') - cmd.append(rid) - cmd.append(attribute) - if unit: - cmd.append(unit) - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def relation_set(**kwargs): - cmd = [ - 'relation-set' - ] - args = [] - for k, v in kwargs.items(): - if k == 'rid': - cmd.append('-r') - cmd.append(v) - else: - args.append('{}={}'.format(k, v)) - cmd += args - subprocess.check_call(cmd) - - -def unit_get(attribute): - cmd = [ - 'unit-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def config_get(attribute): - cmd = [ - 'config-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - +@cached def get_unit_hostname(): return socket.gethostname() -def get_host_ip(hostname=unit_get('private-address')): +@cached +def get_host_ip(hostname=None): + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) From bc84ae25594805f187d585ad21462b058d18c3d9 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 09:44:02 +0100 Subject: [PATCH 06/20] Remove commented out import --- hooks/ceph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hooks/ceph.py b/hooks/ceph.py index ed58b6e..d9f1f4d 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -10,7 +10,6 @@ import json import subprocess import time -#import utils import os import apt_pkg as apt from utils import ( From 068b8b05a48f7a45c154bf10642e8be4e3955dbf Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:13:52 +0100 Subject: [PATCH 07/20] Add flake8 and charm proof target for Make --- Makefile | 5 ++ hooks/ceph.py | 127 ++++++++++++++++++++++++++++++------ hooks/hooks.py | 172 +++++++++++-------------------------------------- hooks/utils.py | 14 ++-- metadata.yaml | 3 +- 5 files changed, 158 insertions(+), 163 deletions(-) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..1e9ee8d --- /dev/null +++ b/Makefile @@ -0,0 +1,5 @@ +#!/usr/bin/make + +lint: + flake8 --exclude hooks/charmhelpers hooks + charm proof diff --git a/hooks/ceph.py b/hooks/ceph.py index d9f1f4d..ec6fb01 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -12,9 +12,14 @@ import subprocess import time import os import apt_pkg as apt +from charmhelpers.core.host import ( + mkdir, + service_start, + log +) from utils import ( - get_unit_hostname - ) + get_unit_hostname +) LEADER = 'leader' PEON = 'peon' @@ -30,7 +35,7 @@ def is_quorum(): "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -54,7 +59,7 @@ def is_leader(): "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -84,7 +89,7 @@ def add_bootstrap_hint(peer): asok, "add_bootstrap_peer_hint", peer - ] + ] if os.path.exists(asok): # Ignore any errors for this call subprocess.call(cmd) @@ -93,7 +98,7 @@ DISK_FORMATS = [ 'xfs', 'ext4', 'btrfs' - ] +] def is_osd_disk(dev): @@ -103,7 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 
'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): + ): return True except subprocess.CalledProcessError: pass @@ -114,7 +119,7 @@ def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', '--subsystem-match=block', '--action=add' - ] + ] subprocess.call(cmd) @@ -144,7 +149,7 @@ def import_osd_bootstrap_key(key): '--create-keyring', '--name=client.bootstrap-osd', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys @@ -152,10 +157,10 @@ _osd_bootstrap_caps = { 'mon': [ 'allow command osd create ...', 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', + r'allow command auth add * osd allow\ * mon allow\ rwx', 'allow command mon getmap' - ] - } + ] +} def get_osd_bootstrap_key(): @@ -173,14 +178,14 @@ def import_radosgw_key(key): '--create-keyring', '--name=client.radosgw.gateway', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys _radosgw_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_radosgw_key(): @@ -190,7 +195,7 @@ def get_radosgw_key(): _default_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_named_key(name, caps=None): @@ -200,16 +205,16 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), + get_unit_hostname() + ), 'auth', 'get-or-create', 'client.{}'.format(name), - ] + ] # Add capabilities for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), - ]) + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether its 'get' or 'create' @@ -225,6 +230,42 @@ def get_named_key(name, caps=None): return key +def bootstrap_monitor_cluster(secret): + hostname = get_unit_hostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + upstart = '{}/upstart'.format(path) + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', perms=0755) + mkdir(path) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + + with open(done, 'w'): + pass + with open(upstart, 'w'): + pass + + service_start('ceph-mon-all') + except: + raise + finally: + os.unlink(keyring) + + def get_ceph_version(): apt.init() cache = apt.Cache() @@ -237,3 +278,51 @@ def get_ceph_version(): def version_compare(a, b): return apt.version_compare(a, b) + + +def update_monfs(): + hostname = get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + upstart = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(upstart): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(upstart, 'w'): + pass + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if (is_osd_disk(dev) and not reformat_osd): + log('Looks like {} is already an OSD, skipping.'.format(dev)) + return + + if device_mounted(dev): + log('Looks like {} is in use, 
skipping.'.format(dev)) + return + + cmd = ['ceph-disk-prepare'] + # Later versions of ceph support more options + if get_ceph_version() >= "0.48.3": + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + cmd.append(dev) + if osd_journal and os.path.exists(osd_journal): + cmd.append(osd_journal) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + subprocess.call(cmd) + + +def device_mounted(dev): + return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 diff --git a/hooks/hooks.py b/hooks/hooks.py index 819219d..75ced03 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -10,37 +10,36 @@ import glob import os -import subprocess import shutil import sys import ceph #import utils from charmhelpers.core.hookenv import ( - log, - ERROR, - config, - relation_ids, - related_units, - relation_get, - relation_set, - remote_unit, - Hooks, - UnregisteredHookError - ) + log, + ERROR, + config, + relation_ids, + related_units, + relation_get, + relation_set, + remote_unit, + Hooks, + UnregisteredHookError +) from charmhelpers.core.host import ( - apt_install, - apt_update, - filter_installed_packages, - mkdir - ) + apt_install, + apt_update, + filter_installed_packages, + service_start, + umount +) from utils import ( - render_template, - configure_source, - get_host_ip, - get_unit_hostname - ) + render_template, + configure_source, + get_host_ip, +) hooks = Hooks() @@ -68,7 +67,7 @@ def emit_cephconf(): 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), 'version': ceph.get_ceph_version() - } + } with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) @@ -96,25 +95,23 @@ def config_changed(): emit_cephconf() e_mountpoint = config('ephemeral-unmount') - if (e_mountpoint and - filesystem_mounted(e_mountpoint)): - subprocess.call(['umount', e_mountpoint]) + if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): + umount(e_mountpoint) osd_journal = config('osd-journal') - if (osd_journal and - not os.path.exists(JOURNAL_ZAPPED) and - os.path.exists(osd_journal)): + if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) + and os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') for dev in config('osd-devices').split(' '): - osdize(dev) + ceph.osdize(dev, config('osd-format'), config('osd-journal'), + reformat_osd()) # Support use of single node ceph - if (not ceph.is_bootstrapped() and - int(config('monitor-count')) == 1): - bootstrap_monitor_cluster() + if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): + ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): @@ -130,64 +127,14 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): hosts.append( - '{}:6789'.format(get_host_ip( - relation_get('private-address', - unit, relid))) - ) + '{}:6789'.format(get_host_ip(relation_get('private-address', + unit, relid))) + ) hosts.sort() return hosts -def update_monfs(): - hostname = get_unit_hostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - upstart = '{}/upstart'.format(monfs) - if (os.path.exists(monfs) and - not os.path.exists(upstart)): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(upstart, 'w'): - pass - - -def 
bootstrap_monitor_cluster(): - hostname = get_unit_hostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - upstart = '{}/upstart'.format(path) - secret = config('monitor-secret') - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0755) - mkdir(path) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - - with open(done, 'w'): - pass - with open(upstart, 'w'): - pass - - subprocess.check_call(['start', 'ceph-mon-all-starter']) - except: - raise - finally: - os.unlink(keyring) - - def reformat_osd(): if config('osd-reformat'): return True @@ -195,48 +142,6 @@ def reformat_osd(): return False -def osdize(dev): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if (ceph.is_osd_disk(dev) and not - reformat_osd()): - log('Looks like {} is already an OSD, skipping.' - .format(dev)) - return - - if device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - cmd = ['ceph-disk-prepare'] - # Later versions of ceph support more options - if ceph.get_ceph_version() >= "0.48.3": - osd_format = config('osd-format') - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - cmd.append(dev) - osd_journal = config('osd-journal') - if (osd_journal and - os.path.exists(osd_journal)): - cmd.append(osd_journal) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - subprocess.call(cmd) - - -def device_mounted(dev): - return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - @hooks.hook('mon-relation-departed', 'mon-relation-joined') def mon_relation(): @@ -245,15 +150,15 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: - bootstrap_monitor_cluster() + ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() ceph.rescan_osd_devices() notify_osds() notify_radosgws() notify_client() else: - log('Not enough mons ({}), punting.'.format( - len(get_mon_hosts()))) + log('Not enough mons ({}), punting.' + .format(len(get_mon_hosts()))) log('End mon-relation hook.') @@ -316,7 +221,6 @@ def radosgw_relation(): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) - if ceph.is_quorum(): log('mon cluster in quorum - providing radosgw with keys') relation_set(radosgw_key=ceph.get_radosgw_key(), @@ -348,7 +252,7 @@ def upgrade_charm(): emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() - update_monfs() + ceph.update_monfs() log('End upgrade-charm hook.') @@ -356,7 +260,7 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
- subprocess.call(['start', 'ceph-mon-all']) + service_start('ceph-mon-all') ceph.rescan_osd_devices() diff --git a/hooks/utils.py b/hooks/utils.py index 48c1871..8069bbe 100644 --- a/hooks/utils.py +++ b/hooks/utils.py @@ -14,12 +14,12 @@ from charmhelpers.core.hookenv import ( config, unit_get, cached - ) +) from charmhelpers.core.host import ( apt_install, apt_update, filter_installed_packages - ) +) TEMPLATES_DIR = 'templates' @@ -40,14 +40,12 @@ except ImportError: def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir) - ) + loader=jinja2.FileSystemLoader(template_dir)) template = templates.get_template(template_name) return template.render(context) -CLOUD_ARCHIVE = \ -""" # Ubuntu Cloud Archive +CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -59,7 +57,7 @@ def configure_source(source=None): cmd = [ 'add-apt-repository', source - ] + ] subprocess.check_call(cmd) if source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), @@ -76,7 +74,7 @@ def configure_source(source=None): 'apt-key', 'adv', '--keyserver keyserver.ubuntu.com', '--recv-keys', key - ] + ] subprocess.check_call(cmd) apt_update(fatal=True) diff --git a/metadata.yaml b/metadata.yaml index 0d84f43..c62ea5c 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -1,7 +1,6 @@ name: ceph summary: Highly scalable distributed storage -maintainer: James Page , - Paul Collins +maintainer: James Page description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From 5f09a0fad13e06f5632d39012bccd553ccfbb591 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:14:42 +0100 Subject: [PATCH 08/20] Add sync target for charm-helpers --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 1e9ee8d..9e8e5c0 100644 --- a/Makefile +++ b/Makefile @@ -3,3 +3,6 @@ lint: flake8 --exclude hooks/charmhelpers hooks charm proof + +sync: + charm-helper-sync -c charm-helpers-sync.yaml From 6d02728d50d2a3fd882c026d6b852d99b6802a29 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:15:45 +0100 Subject: [PATCH 09/20] General tidy of README --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 7d5d3de..89ea580 100644 --- a/README.md +++ b/README.md @@ -15,28 +15,28 @@ are provided: fsid: uuid specific to a ceph cluster used to ensure that different clusters don't get mixed up - use `uuid` to generate one. - + monitor-secret: a ceph generated key used by the daemons that manage to cluster to control security. You can use the ceph-authtool command to generate one: - + ceph-authtool /dev/stdout --name=mon. --gen-key - + These two pieces of configuration must NOT be changed post bootstrap; attempting todo this will cause a reconfiguration error and new service units will not join the existing ceph cluster. - + The charm also supports specification of the storage devices to use in the ceph cluster. osd-devices: A list of devices that the charm will attempt to detect, initialise and activate as ceph storage. - + This this can be a superset of the actual storage devices presented to each service unit and can be changed post ceph bootstrap using `juju set`. 
- + At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): @@ -44,7 +44,7 @@ with the fsid and monitor-secret options (contents of cepy.yaml below): fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde - + Specifying the osd-devices to use is also a good idea. Boot things up by using: @@ -62,7 +62,7 @@ Author: Paul Collins , James Page Report bugs at: http://bugs.launchpad.net/charms/+source/ceph/+filebug Location: http://jujucharms.com/charms/ceph - + Technical Bootnotes =================== @@ -89,4 +89,4 @@ all OSDs run on nodes that also run mon, we don't need this and did not implement it. See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph -monitor cluster deployment strategies and pitfalls. +monitor cluster deployment strategies and pitfalls. From c04031f98e87f8f5e675ade08f18b8bef63f72da Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:24:12 +0100 Subject: [PATCH 10/20] Switch to using restart for ceph-mon-all --- hooks/ceph.py | 4 ++-- hooks/hooks.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hooks/ceph.py b/hooks/ceph.py index ec6fb01..8e239de 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -14,7 +14,7 @@ import os import apt_pkg as apt from charmhelpers.core.host import ( mkdir, - service_start, + service_restart, log ) from utils import ( @@ -259,7 +259,7 @@ def bootstrap_monitor_cluster(secret): with open(upstart, 'w'): pass - service_start('ceph-mon-all') + service_restart('ceph-mon-all') except: raise finally: diff --git a/hooks/hooks.py b/hooks/hooks.py index 75ced03..872e192 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -31,7 +31,7 @@ from charmhelpers.core.host import ( apt_install, apt_update, filter_installed_packages, - service_start, + service_restart, umount ) @@ -260,7 +260,7 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
- service_start('ceph-mon-all') + service_restart('ceph-mon-all') ceph.rescan_osd_devices() From d2354108d4b1871a04f368deeb24f4de7fa9a955 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 13:43:48 +0100 Subject: [PATCH 11/20] Use charm-helpers add_source --- charm-helpers-sync.yaml | 3 +- hooks/charmhelpers/fetch/__init__.py | 56 ++++++++++++++++++++++++++++ hooks/hooks.py | 4 +- 3 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 hooks/charmhelpers/fetch/__init__.py diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 7ee93b7..77afba7 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,5 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_redux destination: hooks/charmhelpers include: - core + - fetch diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 0000000..ef059e2 --- /dev/null +++ b/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,56 @@ +from yaml import safe_load +from charmhelpers.core.hookenv import config +from charmhelpers.core.host import ( + apt_install, apt_update, filter_installed_packages +) +import subprocess + +CLOUD_ARCHIVE = 'http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' + + +def add_source(source, key=None): + if ((source.startswith('ppa:') or + source.startswith('http:'))): + subprocess.check_call(['add-apt-repository', source]) + elif source.startswith('cloud:'): + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) + if key: + subprocess.check_call(['apt-key', 'import', key]) + + +class SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. 
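+ + For example, a charm's install hook might simply call (a sketch, assuming the install_sources/install_keys options above are defined in the charm's config.yaml): + + configure_sources(update=True)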
+ """ + sources = safe_load(config(sources_var)) + keys = safe_load(config(keys_var)) + if isinstance(sources, basestring) and isinstance(keys, basestring): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) diff --git a/hooks/hooks.py b/hooks/hooks.py index 872e192..cf62546 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -34,10 +34,10 @@ from charmhelpers.core.host import ( service_restart, umount ) +from charmhelpers.fetch import add_source from utils import ( render_template, - configure_source, get_host_ip, ) @@ -54,7 +54,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') - configure_source(config('source')) + add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() From 5cf64e374b49462f99c62495d69e513df4257c84 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 13:56:40 +0100 Subject: [PATCH 12/20] Resync fetch helper --- hooks/charmhelpers/fetch/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index ef059e2..7068dd8 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -5,7 +5,7 @@ from charmhelpers.core.host import ( ) import subprocess -CLOUD_ARCHIVE = 'http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' +CLOUD_ARCHIVE = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' def add_source(source, key=None): From ebc4f8e729b124933bd38f26f5051563fe854650 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:06:51 +0100 Subject: [PATCH 13/20] Resync helpers --- hooks/charmhelpers/fetch/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 7068dd8..19d83b8 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -5,7 +5,9 @@ from charmhelpers.core.host import ( ) import subprocess -CLOUD_ARCHIVE = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' +CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" def add_source(source, key=None): From b0bf0e66325101e15476f2b1908d921d4bd1a248 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:52:52 +0100 Subject: [PATCH 14/20] Zap disks if reformatting is requested --- hooks/ceph.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hooks/ceph.py b/hooks/ceph.py index 8e239de..e496820 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -108,6 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: @@ -317,7 +318,11 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): # Just provide the device - no other options # for older versions of ceph cmd.append(dev) - subprocess.call(cmd) + + if reformat_osd: + zap_disk(dev) + + subprocess.check_call(cmd) def device_mounted(dev): From d6e7d2031a60938171b5a8e2bfac5560bd574604 Mon Sep 17 00:00:00 2001 From: James 
Page Date: Tue, 25 Jun 2013 12:03:02 +0100 Subject: [PATCH 15/20] Move to using new storage charmhelpers --- charm-helpers-sync.yaml | 2 + hooks/ceph.py | 13 ++++--- hooks/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/utils.py | 25 +++++++++++++ hooks/hooks.py | 7 +--- hooks/utils.py | 37 ------------------- 8 files changed, 37 insertions(+), 47 deletions(-) create mode 100644 hooks/charmhelpers/contrib/__init__.py create mode 100644 hooks/charmhelpers/contrib/storage/__init__.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/__init__.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/utils.py diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 77afba7..032b9b6 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -3,3 +3,5 @@ destination: hooks/charmhelpers include: - core - fetch + - contrib.storage.linux: + - utils diff --git a/hooks/ceph.py b/hooks/ceph.py index e496820..7f9666e 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -17,6 +17,10 @@ from charmhelpers.core.host import ( service_restart, log ) +from charmhelpers.contrib.storage.linux.utils import ( + zap_disk, + is_block_device +) from utils import ( get_unit_hostname ) @@ -125,11 +129,6 @@ def rescan_osd_devices(): subprocess.call(cmd) -def zap_disk(dev): - cmd = ['sgdisk', '--zap-all', dev] - subprocess.check_call(cmd) - - _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" @@ -297,6 +296,10 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): log('Path {} does not exist - bailing'.format(dev)) return + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + if (is_osd_disk(dev) and not reformat_osd): log('Looks like {} is already an OSD, skipping.'.format(dev)) return diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 0000000..5b9b6d4 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as part of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean.
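+ + Example (a sketch): zap_disk('/dev/vdb') wipes any existing partition table from /dev/vdb; in this charm osdize() only calls it when an OSD reformat has been requested.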
+ ''' + check_call(['sgdisk', '--zap-all', block_device]) diff --git a/hooks/hooks.py b/hooks/hooks.py index cf62546..b61e04b 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -14,18 +14,15 @@ import shutil import sys import ceph -#import utils from charmhelpers.core.hookenv import ( - log, - ERROR, + log, ERROR, config, relation_ids, related_units, relation_get, relation_set, remote_unit, - Hooks, - UnregisteredHookError + Hooks, UnregisteredHookError ) from charmhelpers.core.host import ( apt_install, diff --git a/hooks/utils.py b/hooks/utils.py index 8069bbe..a8868b6 100644 --- a/hooks/utils.py +++ b/hooks/utils.py @@ -7,17 +7,14 @@ # Paul Collins # -import subprocess import socket import re from charmhelpers.core.hookenv import ( - config, unit_get, cached ) from charmhelpers.core.host import ( apt_install, - apt_update, filter_installed_packages ) @@ -45,40 +42,6 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" - - -def configure_source(source=None): - if not source: - return - if source.startswith('ppa:'): - cmd = [ - 'add-apt-repository', - source - ] - subprocess.check_call(cmd) - if source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) - if source.startswith('http:'): - with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: - apt.write("deb " + source + "\n") - key = config('key') - if key: - cmd = [ - 'apt-key', - 'adv', '--keyserver keyserver.ubuntu.com', - '--recv-keys', key - ] - subprocess.check_call(cmd) - apt_update(fatal=True) - - def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: From e563f959db875bcb584b7f17eb21e80c8db0747d Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 10:03:26 +0100 Subject: [PATCH 16/20] Resync helpers from trunk --- Makefile | 6 +- charm-helpers-sync.yaml | 2 +- hooks/charmhelpers/core/hookenv.py | 3 + hooks/charmhelpers/fetch/__init__.py | 92 +++++++++++++++++++++++++++- 4 files changed, 96 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 9e8e5c0..71dfd40 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ #!/usr/bin/make lint: - flake8 --exclude hooks/charmhelpers hooks - charm proof + @flake8 --exclude hooks/charmhelpers hooks + @charm proof sync: - charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 032b9b6..21c0bc6 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_redux +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index b28240c..b1ede91 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -333,3 +333,6 @@ class Hooks(object): self.register(decorated.__name__, decorated) return decorated return wrapper + +def charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 19d83b8..e3c4242 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ 
b/hooks/charmhelpers/fetch/__init__.py @@ -1,11 +1,21 @@ +import importlib from yaml import safe_load -from charmhelpers.core.hookenv import config from charmhelpers.core.host import ( - apt_install, apt_update, filter_installed_packages + apt_install, + apt_update, + filter_installed_packages +) +from urlparse import ( + urlparse, + urlunparse, ) import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,3 +66,79 @@ def configure_sources(update=False, add_source(sources[src_num], keys[src_num]) if update: apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this modules submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr(importlib.import_module(package), classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be omitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format(handler_name)) + return plugin_list From a276674c18fd36c0324e8f320c6cb6eb476bfdda Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 10:05:03 +0100 Subject: [PATCH 17/20] Add missing charmhelpers files --- hooks/charmhelpers/fetch/archiveurl.py | 43 ++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 hooks/charmhelpers/fetch/archiveurl.py diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 0000000..09ac69e --- /dev/null +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,43 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propagate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + + def install(self, source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + return UnhandledSource(e.reason) + except OSError as e: + return UnhandledSource(e.strerror) + finally: + if os.path.isfile(dld_file): + os.unlink(dld_file) + return extract(dld_file) From c06e8e4c23222abccb6e870f86f3d70edeabe15c Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 12:45:41 +0100 Subject: [PATCH 18/20] Resync with proposed enabled charmhelpers --- charm-helpers-sync.yaml | 2 +- hooks/charmhelpers/core/host.py | 22 ++++++++++++++++------ hooks/charmhelpers/fetch/__init__.py | 10 +++++++++- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 21c0bc6..45b8a15 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_proposed destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 19951ed..d60d982 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -48,13 +48,13 @@
def adduser(username, password=None, shell='/bin/bash', system_user=False): log('creating user {0}'.format(username)) cmd = ['useradd'] if system_user or password is None: - cmd.append('--system') + cmd.append('--system') else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) @@ -261,3 +261,13 @@ def restart_on_change(restart_map): service('restart', service_name) return wrapped_f return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index e3c4242..5a30625 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -3,7 +3,8 @@ from yaml import safe_load from charmhelpers.core.host import ( apt_install, apt_update, - filter_installed_packages + filter_installed_packages, + lsb_release ) from urlparse import ( urlparse, @@ -18,6 +19,9 @@ from charmhelpers.core.hookenv import ( CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" def add_source(source, key=None): @@ -30,6 +34,10 @@ def add_source(source, key=None): pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'import', key]) From 84251b0213925c7b0e5179d3fba3a3f7f42831c9 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 21:51:57 +0100 Subject: [PATCH 19/20] Resync with trunk of charm-helpers --- charm-helpers-sync.yaml | 2 +- hooks/charmhelpers/core/hookenv.py | 24 ++++++++++-------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 45b8a15..21c0bc6 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_proposed +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index b1ede91..e57ea25 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -108,11 +108,12 @@ def execution_environment(): """A convenient bundling of the current execution context""" context = {} context['conf'] = config() - context['reltype'] = relation_type() - context['relid'] = relation_id() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() context['unit'] = local_unit() context['rels'] = relations() - context['rel'] = relation_get() context['env'] = os.environ return context @@ -150,13 +151,9 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - value = json.loads(subprocess.check_output(config_cmd_line)) + return json.loads(subprocess.check_output(config_cmd_line)) except 
ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value @cached @@ -169,13 +166,9 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - value = json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args)) except ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value def relation_set(relation_id=None, relation_settings={}, **kwargs): @@ -222,7 +215,7 @@ def relation_for_unit(unit=None, rid=None): if key.endswith('-list'): relation[key] = relation[key].split() relation['__unit__'] = unit - return Serializable(relation) + return relation @cached @@ -331,6 +324,9 @@ class Hooks(object): self.register(hook_name, decorated) else: self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) return decorated return wrapper From f66d3d95008232b69952edab9e6384bf86e0573d Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Jul 2013 09:32:38 +0100 Subject: [PATCH 20/20] Fixup dodgy disk detection --- hooks/ceph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hooks/ceph.py b/hooks/ceph.py index 7f9666e..acdabe8 100644 --- a/hooks/ceph.py +++ b/hooks/ceph.py @@ -112,7 +112,6 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: