From 5eebc6c7871c8d3777458a8d912be3baa720dcd1 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 16 Jul 2013 21:27:06 -0700 Subject: [PATCH 01/20] Checkin initial python redux work. --- hooks/__init__.py | 0 hooks/charmhelpers/__init__.py | 0 hooks/charmhelpers/contrib/__init__.py | 0 .../contrib/hahelpers/__init__.py | 0 .../charmhelpers/contrib/hahelpers/apache.py | 58 +++ hooks/charmhelpers/contrib/hahelpers/ceph.py | 278 ++++++++++++++ .../charmhelpers/contrib/hahelpers/cluster.py | 180 ++++++++++ .../contrib/openstack/__init__.py | 0 .../charmhelpers/contrib/openstack/context.py | 271 ++++++++++++++ .../contrib/openstack/templates/__init__.py | 2 + .../contrib/openstack/templates/ceph.conf | 11 + .../contrib/openstack/templates/haproxy.cfg | 37 ++ .../templates/openstack_https_frontend | 23 ++ .../contrib/openstack/templating.py | 261 ++++++++++++++ hooks/charmhelpers/contrib/openstack/utils.py | 271 ++++++++++++++ .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/loopback.py | 59 +++ .../charmhelpers/contrib/storage/linux/lvm.py | 88 +++++ .../contrib/storage/linux/utils.py | 25 ++ hooks/charmhelpers/core/__init__.py | 0 hooks/charmhelpers/core/hookenv.py | 340 ++++++++++++++++++ hooks/charmhelpers/core/host.py | 272 ++++++++++++++ hooks/config-changed | 2 +- hooks/install | 2 +- hooks/start | 1 - hooks/stop | 1 - hooks/swift-storage-node-common | 153 -------- hooks/swift-storage-node-relations | 103 ------ hooks/swift-storage-relation-changed | 2 +- hooks/swift-storage-relation-joined | 2 +- hooks/swift_storage_relations.py | 83 +++++ hooks/swift_storage_utils.py | 102 ++++++ 33 files changed, 2365 insertions(+), 262 deletions(-) create mode 100644 hooks/__init__.py create mode 100644 hooks/charmhelpers/__init__.py create mode 100644 hooks/charmhelpers/contrib/__init__.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/__init__.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/apache.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/ceph.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/cluster.py create mode 100644 hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/context.py create mode 100644 hooks/charmhelpers/contrib/openstack/templates/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/templates/ceph.conf create mode 100644 hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg create mode 100644 hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend create mode 100644 hooks/charmhelpers/contrib/openstack/templating.py create mode 100644 hooks/charmhelpers/contrib/openstack/utils.py create mode 100644 hooks/charmhelpers/contrib/storage/__init__.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/__init__.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/loopback.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/lvm.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/utils.py create mode 100644 hooks/charmhelpers/core/__init__.py create mode 100644 hooks/charmhelpers/core/hookenv.py create mode 100644 hooks/charmhelpers/core/host.py delete mode 120000 hooks/start delete mode 120000 hooks/stop delete mode 100755 hooks/swift-storage-node-common delete mode 100755 hooks/swift-storage-node-relations create mode 100755 hooks/swift_storage_relations.py create mode 100644 hooks/swift_storage_utils.py diff --git a/hooks/__init__.py 
b/hooks/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 0000000..3208a85 --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,58 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(): + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get('ssl_cert', + rid=r_id, unit=unit) + if not key: + key = relation_get('ssl_key', + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = None + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not ca_cert: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def install_ca_cert(ca_cert): + if ca_cert: + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', + 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/hooks/charmhelpers/contrib/hahelpers/ceph.py b/hooks/charmhelpers/contrib/hahelpers/ceph.py new file mode 100644 index 0000000..fb1b8b9 --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/ceph.py @@ -0,0 +1,278 @@ +# +# Copyright 2012 Canonical Ltd. 
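Together these three apache helpers let a charm pick up an SSL certificate either from its own config or from the identity-service relation. A minimal sketch of a consuming hook (the target paths and the base64 assumption are illustrative; ApacheSSLContext later in this patch decodes the relation values the same way):

    from base64 import b64decode
    from charmhelpers.contrib.hahelpers.apache import (
        get_cert, get_ca_cert, install_ca_cert)

    def configure_https():
        cert, key = get_cert()
        if not (cert and key):
            return  # nothing usable in config or relation data yet
        # assume cert/key arrive base64-encoded, as on the keystone relation
        with open('/etc/apache2/ssl/cert', 'w') as out:
            out.write(b64decode(cert))
        with open('/etc/apache2/ssl/key', 'w') as out:
            out.write(b64decode(key))
        ca = get_ca_cert()
        if ca:
            install_ca_cert(b64decode(ca))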
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import commands +import os +import shutil + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, +) + +from charmhelpers.core.host import ( + apt_install, + mount, + mounts, + service_start, + service_stop, + umount, +) + +KEYRING = '/etc/ceph/ceph.client.%s.keyring' +KEYFILE = '/etc/ceph/ceph.client.%s.key' + +CEPH_CONF = """[global] + auth supported = %(auth)s + keyring = %(keyring)s + mon host = %(mon_hosts)s +""" + + +def running(service): + # this local util can be dropped as soon the following branch lands + # in lp:charm-helpers + # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/ + try: + output = check_output(['service', service, 'status']) + except CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def install(): + ceph_dir = "/etc/ceph" + if not os.path.isdir(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' % + (service, pool)) + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) + return name in out + + +def create_pool(service, name): + cmd = [ + 'rados', + '--id', + service, + 'mkpool', + name + ] + check_call(cmd) + + +def keyfile_path(service): + return KEYFILE % service + + +def keyring_path(service): + return KEYRING % service + + +def create_keyring(service, key): + keyring = keyring_path(service) + if os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=INFO) + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.%s' % service, + '--add-key=%s' % key + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + # create a file containing the key + keyfile = keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=INFO) + fd = open(keyfile, 'w') + fd.write(key) + fd.close() + log('ceph: Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(): + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth): + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + mon_hosts = ",".join(map(str, hosts)) + keyring = keyring_path(service) + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF % locals()) + modprobe_kernel_module('rbd') + + +def image_mapped(image_name): + (rc, out) = commands.getstatusoutput('rbd showmapped') + return image_name in out + + +def map_block_storage(service, pool, image): + cmd = [ + 'rbd', + 'map', + '%s/%s' % (pool, image), + '--user', + service, + '--secret', + keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + return fs in [f for m, f in mounts()] + + +def make_filesystem(blk_device, fstype='ext4'): + log('ceph: Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + cmd = ['mkfs', '-t', fstype, blk_device] + check_call(cmd) + + +def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): + # mount block device into /mnt + mount(blk_device, '/mnt') + + # copy data to /mnt + try: + copy_files(data_src_dst, '/mnt') + except: + pass + + # umount block device + umount('/mnt') + + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + + # re-mount where the data should originally be + mount(blk_device, data_src_dst, persist=True) + + # ensure original ownership of new mount. + cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] + check_call(cmd) + + +# TODO: re-use +def modprobe_kernel_module(module): + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + cmd = 'echo %s >> /etc/modules' % module + check_call(cmd, shell=True) + + +def copy_files(src, dst, symlinks=False, ignore=None): + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[]): + """ + To be called from the current cluster leader. + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being remounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool %s.' % pool, level=INFO) + create_pool(service, pool) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image as a Block Device.', level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! 
Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if running(svc): + log('Stopping services %s prior to migrating data.' % svc, + level=INFO) + service_stop(svc) + + place_data_on_ceph(service, blk_device, mount_point, fstype) + + for svc in system_services: + service_start(svc) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 0000000..dde6c9b --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,180 @@ +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess +import os + +from socket import gethostname as get_unit_hostname + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + ERROR, +) + + +class HAIncompleteConfig(Exception): + pass + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_leader(resource): + cmd = [ + "crm", "resource", + "show", resource + ] + try: + status = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + return False + else: + if get_unit_hostname() in status: + return True + else: + return False + + +def peer_units(): + peers = [] + for r_id in (relation_ids('cluster') or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def oldest_peer(peers): + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + if is_clustered(): + if not is_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + if config_get('use-https') == "yes": + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if None not in [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ssl_cert', rid=r_id, unit=unit), + relation_get('ssl_key', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ]: + return True + return False + + +def determine_api_port(public_port): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + returns: int: the correct listening port for the API service + ''' + i = 0 + if len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_haproxy_port(public_port): + ''' + Description: Determine correct proxy listening port based on public IP + + existence of HTTPS reverse proxy. 
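The intended call pattern for the ceph helpers is: write the client keyring and ceph.conf once credentials arrive on the ceph relation, then let the cluster leader create, map and mount the shared RBD device. A hypothetical consumer (the service name, pool, image, resource and paths are made up for illustration):

    from charmhelpers.core.hookenv import relation_get
    from charmhelpers.contrib.hahelpers import ceph
    from charmhelpers.contrib.hahelpers.cluster import eligible_leader

    SERVICE = 'myservice'   # hypothetical cephx client name

    def ceph_changed():
        key = relation_get('key')
        auth = relation_get('auth')
        if not (key and auth):
            return  # the ceph cluster has not handed out credentials yet
        ceph.install()
        ceph.configure(service=SERVICE, key=key, auth=auth)
        if eligible_leader('res_myservice_vip'):
            # only one unit should create/map the image and migrate data
            ceph.ensure_ceph_storage(service=SERVICE, pool='mypool',
                                     rbd_img='myimg', sizemb=1024,
                                     fstype='ext4',
                                     mount_point='/var/lib/myservice',
                                     blk_device='/dev/rbd/mypool/myimg',
                                     system_services=['myservice'])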
+ + public_port: int: standard public port for given service + + returns: int: the correct listening port for the HAProxy service + ''' + i = 0 + if https(): + i += 1 + return public_port - (i * 10) + + +def get_hacluster_config(): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr + + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing. + ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] + conf = {} + for setting in settings: + conf[setting] = config_get(setting) + missing = [] + [missing.append(s) for s, v in conf.iteritems() if v is None] + if missing: + log('Insufficient config data to configure hacluster.', level=ERROR) + raise HAIncompleteConfig + return conf + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + :vip_setting: str: Setting in charm config that specifies + VIP address. + ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = get_unit_hostname() + return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py new file mode 100644 index 0000000..f146e0b --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,271 @@ +import os + +from base64 import b64decode + +from subprocess import ( + check_call +) + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_get, + relation_ids, + related_units, + unit_get, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + determine_api_port, + determine_haproxy_port, + https, + is_clustered, + peer_units, +) + +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, +) + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' + + +class OSContextError(Exception): + pass + + +def context_complete(ctxt): + _missing = [] + for k, v in ctxt.iteritems(): + if v is None or v == '': + _missing.append(k) + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level='INFO') + return False + return True + + +class OSContextGenerator(object): + interfaces = [] + + def __call__(self): + raise NotImplementedError + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __call__(self): + log('Generating template context for shared-db') + conf = config() + try: + database = conf['database'] + username = conf['database-user'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' 
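The two port helpers above encode a fixed convention: each service placed in front of the API (haproxy for clustering, then the apache HTTPS frontend) takes over the public listener and pushes the backend down by 10. A worked example with a public port of 8080:

    from charmhelpers.contrib.hahelpers.cluster import (
        determine_api_port, determine_haproxy_port)

    # standalone, no TLS:       API listens on 8080 directly
    # TLS only:                 apache :8080 -> API :8070
    # peers/clustered, no TLS:  haproxy :8080 -> API :8070
    # peers/clustered + TLS:    apache :8080 -> haproxy :8070 -> API :8060
    determine_haproxy_port(8080)   # 8070 when https() is true, else 8080
    determine_api_port(8080)       # 8060, 8070 or 8080 depending on the above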
% e) + raise OSContextError + ctxt = {} + for rid in relation_ids('shared-db'): + for unit in related_units(rid): + ctxt = { + 'database_host': relation_get('db_host', rid=rid, + unit=unit), + 'database': database, + 'database_user': username, + 'database_password': relation_get('password', rid=rid, + unit=unit) + } + if not context_complete(ctxt): + return {} + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + interfaces = ['identity-service'] + + def __call__(self): + log('Generating template context for identity-service') + ctxt = {} + + for rid in relation_ids('identity-service'): + for unit in related_units(rid): + ctxt = { + 'service_port': relation_get('service_port', rid=rid, + unit=unit), + 'service_host': relation_get('service_host', rid=rid, + unit=unit), + 'auth_host': relation_get('auth_host', rid=rid, unit=unit), + 'auth_port': relation_get('auth_port', rid=rid, unit=unit), + 'admin_tenant_name': relation_get('service_tenant', + rid=rid, unit=unit), + 'admin_user': relation_get('service_username', rid=rid, + unit=unit), + 'admin_password': relation_get('service_password', rid=rid, + unit=unit), + # XXX: Hard-coded http. + 'service_protocol': 'http', + 'auth_protocol': 'http', + } + if not context_complete(ctxt): + return {} + return ctxt + + +class AMQPContext(OSContextGenerator): + interfaces = ['amqp'] + + def __call__(self): + log('Generating template context for amqp') + conf = config() + try: + username = conf['rabbit-user'] + vhost = conf['rabbit-vhost'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' % e) + raise OSContextError + + ctxt = {} + for rid in relation_ids('amqp'): + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + rabbitmq_host = relation_get('vip', rid=rid, unit=unit) + else: + rabbitmq_host = relation_get('private-address', + rid=rid, unit=unit) + ctxt = { + 'rabbitmq_host': rabbitmq_host, + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + } + if not context_complete(ctxt): + return {} + return ctxt + + +class CephContext(OSContextGenerator): + interfaces = ['ceph'] + + def __call__(self): + '''This generates context for /etc/ceph/ceph.conf templates''' + log('Generating tmeplate context for ceph') + mon_hosts = [] + auth = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + mon_hosts.append(relation_get('private-address', rid=rid, + unit=unit)) + auth = relation_get('auth', rid=rid, unit=unit) + + ctxt = { + 'mon_hosts': ' '.join(mon_hosts), + 'auth': auth, + } + if not context_complete(ctxt): + return {} + return ctxt + + +class HAProxyContext(OSContextGenerator): + interfaces = ['cluster'] + + def __call__(self): + ''' + Builds half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + ''' + if not relation_ids('cluster'): + return {} + + cluster_hosts = {} + l_unit = local_unit().replace('/', '-') + cluster_hosts[l_unit] = unit_get('private-address') + + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + addr = relation_get('private-address', rid=rid, unit=unit) + cluster_hosts[_unit] = addr + + ctxt = { + 'units': cluster_hosts, + } + if len(cluster_hosts.keys()) > 1: + # Enable haproxy when we have enough peers. 
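Every relation-driven context above follows the same shape: collect values from relation data and return {} until context_complete() is satisfied. A charm-local generator would mirror that pattern; a hypothetical example keyed on a 'swift-storage' style relation:

    class SwiftHashContext(OSContextGenerator):
        """Hypothetical charm-local context, shown only to illustrate
        the OSContextGenerator / context_complete() pattern."""
        interfaces = ['swift-storage']

        def __call__(self):
            ctxt = {}
            for rid in relation_ids('swift-storage'):
                for unit in related_units(rid):
                    ctxt = {
                        'swift_hash': relation_get('swift_hash',
                                                   rid=rid, unit=unit),
                    }
            if not context_complete(ctxt):
                return {}
            return ctxt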
+ log('Ensuring haproxy enabled in /etc/default/haproxy.') + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + return ctxt + log('HAProxy context is incomplete, this unit has no peers.') + return {} + + +class ApacheSSLContext(OSContextGenerator): + """ + Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like: + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. + external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self): + if not os.path.isdir('/etc/apache2/ssl'): + os.mkdir('/etc/apache2/ssl') + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + if not os.path.isdir(ssl_dir): + os.mkdir(ssl_dir) + cert, key = get_cert() + with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: + cert_out.write(b64decode(cert)) + with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: + key_out.write(b64decode(key)) + ca_cert = get_ca_cert() + if ca_cert: + with open(CA_CERT_PATH, 'w') as ca_out: + ca_out.write(b64decode(ca_cert)) + + def __call__(self): + if isinstance(self.external_ports, basestring): + self.external_ports = [self.external_ports] + if (not self.external_ports or not https()): + return {} + + self.configure_cert() + self.enable_modules() + + ctxt = { + 'namespace': self.service_namespace, + 'private_address': unit_get('private-address'), + 'endpoints': [] + } + for ext_port in self.external_ports: + if peer_units() or is_clustered(): + int_port = determine_haproxy_port(ext_port) + else: + int_port = determine_api_port(ext_port) + portmap = (int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 0000000..0b49ad2 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,2 @@ +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf new file mode 100644 index 0000000..1d8ca3b --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -0,0 +1,11 @@ +############################################################################### +# [ WARNING ] +# cinder configuration file maintained by Juju +# local changes may be overwritten. 
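ApacheSSLContext is explicitly meant to be subclassed per charm, supplying the external ports and the namespace used for the on-disk cert/key paths; the base class then derives the endpoint port pairs with determine_haproxy_port()/determine_api_port(). A hypothetical subclass (port and namespace are illustrative):

    class SwiftSSLContext(ApacheSSLContext):
        # values a swift-proxy style charm might choose
        external_ports = [8080]
        service_namespace = 'swift'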
+############################################################################### +{% if auth %} +[global] + auth_supported = {{ auth }} + keyring = /etc/ceph/$cluster.$name.keyring + mon host = {{ mon_hosts }} +{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg new file mode 100644 index 0000000..b184cd4 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -0,0 +1,37 @@ +global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 20000 + user haproxy + group haproxy + spread-checks 0 + +defaults + log global + mode http + option httplog + option dontlognull + retries 3 + timeout queue 1000 + timeout connect 1000 + timeout client 30000 + timeout server 30000 + +listen stats :8888 + mode http + stats enable + stats hide-version + stats realm Haproxy\ Statistics + stats uri / + stats auth admin:password + +{% if units %} +{% for service, ports in service_ports.iteritems() -%} +listen {{ service }} 0.0.0.0:{{ ports[0] }} + balance roundrobin + option tcplog + {% for unit, address in units.iteritems() -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endfor %} +{% endfor %} +{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend new file mode 100644 index 0000000..e833a71 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -0,0 +1,23 @@ +{% if endpoints %} +{% for ext, int in endpoints %} +Listen {{ ext }} +NameVirtualHost *:{{ ext }} + + ServerName {{ private_address }} + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + + + Order deny,allow + Allow from all + + + Order allow,deny + Allow from all + +{% endfor %} +{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 0000000..c555cc6 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,261 @@ +import os + +from charmhelpers.core.host import apt_install + +from charmhelpers.core.hookenv import ( + log, + ERROR, + INFO +) + +from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES + +try: + from jinja2 import FileSystemLoader, ChoiceLoader, Environment +except ImportError: + # python-jinja2 may not be installed yet, or we're running unittests. + FileSystemLoader = ChoiceLoader = Environment = None + + +class OSConfigException(Exception): + pass + + +def get_loader(templates_dir, os_release): + """ + Create a jinja2.ChoiceLoader containing template dirs up to + and including os_release. If directory template directory + is missing at templates_dir, it will be omitted from the loader. + templates_dir is added to the bottom of the search list as a base + loading dir. + + A charm may also ship a templates dir with this module + and it will be appended to the bottom of the search list, eg: + hooks/charmhelpers/contrib/openstack/templates. + + :param templates_dir: str: Base template directory containing release + sub-directories. + :param os_release : str: OpenStack release codename to construct template + loader. 
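The haproxy.cfg template above consumes two context keys: 'units' (the peer map produced by HAProxyContext) and 'service_ports', the per-charm half of the context mapping a listen stanza name to an [external, internal] port pair. A hypothetical companion generator:

    class HAProxyPortsContext(OSContextGenerator):
        """Hypothetical per-charm context supplying the port map that
        charmhelpers' HAProxyContext deliberately leaves to the charm."""
        interfaces = ['cluster']

        def __call__(self):
            # 8080 on the frontend forwards to 8070 on each unit (illustrative)
            return {'service_ports': {'swift_api': [8080, 8070]}}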
+ + :returns : jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. + """ + tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) + for rel in OPENSTACK_CODENAMES.itervalues()] + + if not os.path.isdir(templates_dir): + log('Templates directory not found @ %s.' % templates_dir, + level=ERROR) + raise OSConfigException + + # the bottom contains tempaltes_dir and possibly a common templates dir + # shipped with the helper. + loaders = [FileSystemLoader(templates_dir)] + helper_templates = os.path.join(os.path.dirname(__file__), 'templates') + if os.path.isdir(helper_templates): + loaders.append(FileSystemLoader(helper_templates)) + + for rel, tmpl_dir in tmpl_dirs: + if os.path.isdir(tmpl_dir): + loaders.insert(0, FileSystemLoader(tmpl_dir)) + if rel == os_release: + break + log('Creating choice loader with dirs: %s' % + [l.searchpath for l in loaders], level=INFO) + return ChoiceLoader(loaders) + + +class OSConfigTemplate(object): + """ + Associates a config file template with a list of context generators. + Responsible for constructing a template context based on those generators. + """ + def __init__(self, config_file, contexts): + self.config_file = config_file + + if hasattr(contexts, '__call__'): + self.contexts = [contexts] + else: + self.contexts = contexts + + self._complete_contexts = [] + + def context(self): + ctxt = {} + for context in self.contexts: + _ctxt = context() + if _ctxt: + ctxt.update(_ctxt) + # track interfaces for every complete context. + [self._complete_contexts.append(interface) + for interface in context.interfaces + if interface not in self._complete_contexts] + return ctxt + + def complete_contexts(self): + ''' + Return a list of interfaces that have atisfied contexts. + ''' + if self._complete_contexts: + return self._complete_contexts + self.context() + return self._complete_contexts + + +class OSConfigRenderer(object): + """ + This class provides a common templating system to be used by OpenStack + charms. It is intended to help charms share common code and templates, + and ease the burden of managing config templates across multiple OpenStack + releases. + + Basic usage: + # import some common context generates from charmhelpers + from charmhelpers.contrib.openstack import context + + # Create a renderer object for a specific OS release. + configs = OSConfigRenderer(templates_dir='/tmp/templates', + openstack_release='folsom') + # register some config files with context generators. + configs.register(config_file='/etc/nova/nova.conf', + contexts=[context.SharedDBContext(), + context.AMQPContext()]) + configs.register(config_file='/etc/nova/api-paste.ini', + contexts=[context.IdentityServiceContext()]) + configs.register(config_file='/etc/haproxy/haproxy.conf', + contexts=[context.HAProxyContext()]) + # write out a single config + configs.write('/etc/nova/nova.conf') + # write out all registered configs + configs.write_all() + + Details: + + OpenStack Releases and template loading + --------------------------------------- + When the object is instantiated, it is associated with a specific OS + release. This dictates how the template loader will be constructed. + + The constructed loader attempts to load the template from several places + in the following order: + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. 
+ + + For the example above, '/tmp/templates' contains the following structure: + /tmp/templates/nova.conf + /tmp/templates/api-paste.ini + /tmp/templates/grizzly/api-paste.ini + /tmp/templates/havana/api-paste.ini + + Since it was registered with the grizzly release, it first seraches + the grizzly directory for nova.conf, then the templates dir. + + When writing api-paste.ini, it will find the template in the grizzly + directory. + + If the object were created with folsom, it would fall back to the + base templates dir for its api-paste.ini template. + + This system should help manage changes in config files through + openstack releases, allowing charms to fall back to the most recently + updated config template for a given release + + The haproxy.conf, since it is not shipped in the templates dir, will + be loaded from the module directory's template directory, eg + $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows + us to ship common templates (haproxy, apache) with the helpers. + + Context generators + --------------------------------------- + Context generators are used to generate template contexts during hook + execution. Doing so may require inspecting service relations, charm + config, etc. When registered, a config file is associated with a list + of generators. When a template is rendered and written, all context + generates are called in a chain to generate the context dictionary + passed to the jinja2 template. See context.py for more info. + """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + apt_install('python-jinja2') + + def register(self, config_file, contexts): + """ + Register a config file with a list of context generators to be called + during rendering. + """ + self.templates[config_file] = OSConfigTemplate(config_file=config_file, + contexts=contexts) + log('Registered config file: %s' % config_file, level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from %s' % template.filename, level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + ctxt = self.templates[config_file].context() + _tmpl = os.path.basename(config_file) + log('Rendering from template: %s' % _tmpl, level=INFO) + template = self._get_template(_tmpl) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. + """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + with open(config_file, 'wb') as out: + out.write(self.render(config_file)) + log('Wrote template %s.' 
% config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in self.templates.iterkeys()] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in self.templates.itervalues()] + return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 0000000..ffe82e3 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,271 @@ +#!/usr/bin/python + +# Common python helper functions used for OpenStack charms. + +from collections import OrderedDict + +import apt_pkg as apt +import subprocess +import os +import sys + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, +) + +from charmhelpers.core.host import ( + lsb_release, + apt_install +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), +]) + +# The ugly duckling +SWIFT_CODENAMES = { + '1.4.3': 'diablo', + '1.4.8': 'essex', + '1.7.4': 'folsom', + '1.7.6': 'grizzly', + '1.7.7': 'grizzly', + '1.8.0': 'grizzly', + '1.9.0': 'havana', + '1.9.1': 'havana', +} + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src == 'distro': + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + apt.init() + cache = apt.Cache() + + try: + pkg = 
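complete_contexts() is what ties the renderer back to relation gating (and to hahelpers.cluster.canonical_url(), which checks it for 'https'): a charm can ask which interfaces have produced full contexts before acting on them. Roughly, with hypothetical relation keys:

    if 'identity-service' in configs.complete_contexts():
        # keystone data is complete; safe to advertise our endpoint
        relation_set(service='swift',
                     public_url='%s:8080' % canonical_url(configs))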
cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.UpstreamVersion(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + vers = vers[:5] + return SWIFT_CODENAMES[vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in vers_map.iteritems(): + if cname == codename: + return version + #e = "Could not determine OpenStack version for package: %s" % pkg + #error_out(e) + + +def import_key(keyid): + cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. 
+ pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-') + juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in env_vars.iteritems() if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + + """ + + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 0000000..9fb87a2 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,59 @@ + +import os +import re + +from subprocess import ( + check_call, + check_output, +) + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + devs = [d.strip().split(' ') for d in + check_output(cmd).splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. 
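A few worked values make the codename/version plumbing above concrete (all derived from the tables in this file; the 'distro' result assumes a precise host):

    from charmhelpers.contrib.openstack.utils import (
        get_os_codename_install_source, get_os_codename_version,
        get_os_version_codename)

    get_os_codename_install_source('distro')                 # 'essex' on precise
    get_os_codename_install_source('cloud:precise-grizzly')  # 'grizzly'
    get_os_codename_version('2013.1')                        # 'grizzly'
    get_os_version_codename('havana')                        # '2013.2'
    # swift versions map independently of the integrated release:
    # a swift 1.8.0 package resolves to 'grizzly' via SWIFT_CODENAMES.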
+ + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + cmd = ['losetup', '--find', file_path] + return check_output(cmd).strip() + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If it a loopback device is not mapped to file, a new one will be created. + + TODO: Confirm size of found loopback device. + + :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) + ''' + for d, f in loopback_devices().iteritems(): + if f == path: + return d + + if not os.path.exists(path): + cmd = ['truncate', '--size', size, path] + check_call(cmd) + + return create_loopback(path) diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py new file mode 100644 index 0000000..6e29181 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -0,0 +1,88 @@ +from subprocess import ( + CalledProcessError, + check_call, + check_output, + Popen, + PIPE, +) + + +################################################## +# LVM helpers. +################################################## +def deactivate_lvm_volume_group(block_device): + ''' + Deactivate any volume gruop associated with an LVM physical volume. + + :param block_device: str: Full path to LVM physical volume + ''' + vg = list_lvm_volume_group(block_device) + if vg: + cmd = ['vgchange', '-an', vg] + check_call(cmd) + + +def is_lvm_physical_volume(block_device): + ''' + Determine whether a block device is initialized as an LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: boolean: True if block device is a PV, False if not. + ''' + try: + check_output(['pvdisplay', block_device]) + return True + except CalledProcessError: + return False + + +def remove_lvm_physical_volume(block_device): + ''' + Remove LVM PV signatures from a given block device. + + :param block_device: str: Full path of block device to scrub. + ''' + p = Popen(['pvremove', '-ff', block_device], + stdin=PIPE) + p.communicate(input='y\n') + + +def list_lvm_volume_group(block_device): + ''' + List LVM volume group associated with a given block device. + + Assumes block device is a valid LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: str: Name of volume group associated with block device or None + ''' + vg = None + pvd = check_output(['pvdisplay', block_device]).splitlines() + for l in pvd: + if l.strip().startswith('VG Name'): + vg = ' '.join(l.split()).split(' ').pop() + return vg + + +def create_lvm_physical_volume(block_device): + ''' + Initialize a block device as an LVM physical volume. + + :param block_device: str: Full path of block device to initialize. + + ''' + check_call(['pvcreate', block_device]) + + +def create_lvm_volume_group(volume_group, block_device): + ''' + Create an LVM volume group backed by a given block device. + + Assumes block device has already been initialized as an LVM PV. + + :param volume_group: str: Name of volume group to create. + :block_device: str: Full path of PV-initialized block device. 
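ensure_loopback_device() above is idempotent: an existing mapping for the backing file is reused, otherwise the file is created at the requested size and mapped. For example:

    from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device

    # creates /srv/test.img (5G, sparse via truncate) if missing,
    # maps it and returns the device path, e.g. '/dev/loop0'
    dev = ensure_loopback_device('/srv/test.img', '5G')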
+ ''' + check_call(['vgcreate', volume_group, block_device]) diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 0000000..5b9b6d4 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as pat of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + check_call(['sgdisk', '--zap-all', block_device]) diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 0000000..2b06706 --- /dev/null +++ b/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,340 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
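Taken together, the storage helpers support a common "claim this block device" flow: verify the node, tear down any previous LVM state, then scrub the partition table before reuse. A hypothetical sequence (the device path is illustrative):

    from charmhelpers.contrib.storage.linux.utils import (
        is_block_device, zap_disk)
    from charmhelpers.contrib.storage.linux.lvm import (
        is_lvm_physical_volume, deactivate_lvm_volume_group,
        remove_lvm_physical_volume)

    dev = '/dev/vdb'                      # illustrative device
    if is_block_device(dev):
        if is_lvm_physical_volume(dev):
            # release any volume group using the device before scrubbing
            deactivate_lvm_volume_group(dev)
            remove_lvm_physical_volume(dev)
        zap_disk(dev)                     # destroys the partition table (sgdisk)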
+ self.data = state + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return json.loads(subprocess.check_output(config_cmd_line)) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + 
relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py new file mode 100644 index 0000000..6550b63 --- /dev/null +++ b/hooks/charmhelpers/core/host.py @@ -0,0 +1,272 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. 
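The Hooks class is the dispatch model for the new python hooks: one executable registers a function per hook name (the decorator also registers the function's own name, with underscores mapped to dashes). A minimal entry point built on it (hook names illustrative):

    import sys
    from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

    hooks = Hooks()

    @hooks.hook('install')
    def install():
        log('installing packages')

    @hooks.hook('config-changed', 'upgrade-charm')
    def config_changed():
        log('reconfiguring service')

    if __name__ == '__main__':
        try:
            hooks.execute(sys.argv)
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))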
+# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from collections import OrderedDict + +from hookenv import log, execution_environment + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + context = execution_environment() + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path.format(**context)) + cmd.append(to_path.format(**context)) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + context = execution_environment() + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source.format(**context), + destination.format(**context) + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + context = execution_environment() + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, content, owner='root', group='root', perms=0444): + """Create or overwrite a file with the contents of a string""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + 
apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. 
+ ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(OrderedDict.fromkeys(restarts)): + service('restart', service_name) + return wrapped_f + return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d diff --git a/hooks/config-changed b/hooks/config-changed index 68e4122..2b11074 120000 --- a/hooks/config-changed +++ b/hooks/config-changed @@ -1 +1 @@ -swift-storage-node-relations \ No newline at end of file +swift_storage_relations.py \ No newline at end of file diff --git a/hooks/install b/hooks/install index 68e4122..2b11074 120000 --- a/hooks/install +++ b/hooks/install @@ -1 +1 @@ -swift-storage-node-relations \ No newline at end of file +swift_storage_relations.py \ No newline at end of file diff --git a/hooks/start b/hooks/start deleted file mode 120000 index 68e4122..0000000 --- a/hooks/start +++ /dev/null @@ -1 +0,0 @@ -swift-storage-node-relations \ No newline at end of file diff --git a/hooks/stop b/hooks/stop deleted file mode 120000 index 68e4122..0000000 --- a/hooks/stop +++ /dev/null @@ -1 +0,0 @@ -swift-storage-node-relations \ No newline at end of file diff --git a/hooks/swift-storage-node-common b/hooks/swift-storage-node-common deleted file mode 100755 index e995964..0000000 --- a/hooks/swift-storage-node-common +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -set -ue - -CHARM="swift-storage" -HOOKS_DIR="$CHARM_DIR/hooks" - -CONF_DIR="/etc/swift" - -if [[ -e "$HOOKS_DIR/lib/openstack-common" ]] ; then - . $HOOKS_DIR/lib/openstack-common -else - juju-log "ERROR: Couldn't load $HOOKS_DIR/lib/openstack-common." && exit 1 -fi - -DEFAULT_ETH=$(ip route | grep default | awk '{ print $5 }') -IP=$(ifconfig $DEFAULT_ETH | grep 'inet addr' | awk '{ print $2 }' | cut -d: -f2) - -# TODO: Need to use different addresses for internal swift traffic -# as this the only security measure in place is network isolation -STORAGE_LOCAL_NET_IP=$IP - -PACKAGES="swift swift-account swift-container swift-object xfsprogs gdisk" - -OPENSTACK_ORIGIN="$(config-get openstack-origin)" - -src=$(get_os_codename_install_source "$OPENSTACK_ORIGIN") -# Install python-swiftclient for Folsom and beyond. -if dpkg --compare-versions $(get_os_version_codename "$src") gt \ - $(get_os_version_codename "essex") ; then - PACKAGES="$PACKAGES python-swiftclient" -fi - -function set_swift_hash { - # TODO: Do this with augeas and put in a utility function for use elsewhere - cat >/etc/swift/swift.conf </etc/rsyncd.conf </etc/swift/$1-server.conf <>/etc/swift/$1-server.conf -done -} - -function determine_block_devs { - juju-log "determine_block_devs: $@" - local devices=$(config-get block-device) - if [ "$devices" = "guess" ]; then - # This should be more smart - devices=$(awk '($4 ~ /^(sd[a-z]|vd[a-z]|cciss\/c[0-9]d[0-9])$/) && ($4 != "sda") && ($4 != "vda") && ($4 != "cciss/c0d0") {print $4}' >/etc/fstab - fi - mkdir -p /srv/node/$nodename - mount $dev /srv/node/$nodename - done - chown -R swift:swift /srv/node -} - -function do_openstack_upgrade { - # update openstack components to those provided by a new installation source - # it is assumed the calling hook has confirmed that the upgrade is sane. 
- local rel="$1" - shift - local packages=$@ - - # Backup the config directory. - local stamp=$(date +"%Y%m%d%M%S") - tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR - - configure_install_source "$rel" - apt-get update - DEBIAN_FRONTEND=noninteractive apt-get \ - --option Dpkg::Options::=--force-confnew -y \ - install --no-install-recommends $packages - swift-init all restart || true -} diff --git a/hooks/swift-storage-node-relations b/hooks/swift-storage-node-relations deleted file mode 100755 index e4b4433..0000000 --- a/hooks/swift-storage-node-relations +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash -# test -set -eu - -HOOKS_DIR="$CHARM_DIR/hooks" -ARG0=${0##*/} - -if [[ -e $HOOKS_DIR/swift-storage-node-common ]] ; then - . $HOOKS_DIR/swift-storage-node-common -else - echo "ERROR: Could not load swift-storage-node-common from $HOOKS_DIR" -fi - -function config_changed { - # perform openstack upgrade if openstack-origin has been bumped. - local install_src=$(config-get openstack-origin) - local cur=$(get_os_codename_package "python-swift") - local available=$(get_os_codename_install_source "$install_src") - if [[ "$available" != "unknown" ]] ; then - if dpkg --compare-versions $(get_os_version_codename "$cur") lt \ - $(get_os_version_codename "$available") ; then - juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available." - do_openstack_upgrade "$install_src" $PACKAGES - fi - fi - declare -a env_vars=() - for i in account container object ; do - port=$(config-get ${i}-server-port) - local url="http://$STORAGE_LOCAL_NET_IP:$port/recon/diskusage" - # append to env_vars - env_vars+=("OPENSTACK_PORT_${i^^}=$port") - env_vars+=("OPENSTACK_SWIFT_SERVICE_${i^^}=${i}-server") - - # Ensure we have at least one device mounted as reported by swift-recon - env_vars+=('OPENSTACK_URL_'${i^^}'="'$url'|\"mounted\":+true"') - - create_server_conf $i "$port" - done - - # Save our scriptrc env variables for health checks - save_script_rc ${env_vars[@]} -} - -function install_hook { - apt-get -y --force-yes install python-software-properties || exit 1 - - configure_install_source "$OPENSTACK_ORIGIN" - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -y \ - install --no-install-recommends $PACKAGES || exit 1 - - [[ ! -d /etc/swift ]] && mkdir /etc/swift - [[ ! 
-d /var/cache/swift ]] && mkdir /var/cache/swift - chown swift:swift /etc/swift - chown -R swift:swift /var/cache/swift - configure_rsyncd - swift-init all stop || true - setup_storage - config_changed -} - -function storage_joined { - local devs=$(determine_block_devs) - local reldevices="" - for dev in $devs ; do - local srvnodename=$(basename $dev) - if [[ -n "$reldevices" ]] ; then - reldevices="$reldevices:$srvnodename" - else - reldevices="$srvnodename" - fi - done - relation-set zone="$(config-get zone)" \ - device="$reldevices" \ - object_port="$(config-get object-server-port)" \ - container_port="$(config-get container-server-port)" \ - account_port="$(config-get account-server-port)" -} - -function storage_changed { - local rings_url=`relation-get rings_url` - local swift_hash=`relation-get swift_hash` - [[ -z $rings_url ]] || [[ -z $swift_hash ]] && exit 0 - - set_swift_hash $swift_hash - for i in account object container ; do - echo "Fetching $rings_url/$i.ring.gz" - wget "$rings_url/$i.ring.gz" -O /etc/swift/$i.ring.gz - done - set_swift_hash $swift_hash - chown swift -R /etc/swift - swift-init all start || true -} - -case $ARG0 in - "install") install_hook ;; - "start"|"stop") exit 0 ;; - "config-changed") config_changed ;; - "swift-storage-relation-joined") storage_joined ;; - "swift-storage-relation-changed") storage_changed ;; - "config-changed") config_changed ;; -esac diff --git a/hooks/swift-storage-relation-changed b/hooks/swift-storage-relation-changed index 68e4122..2b11074 120000 --- a/hooks/swift-storage-relation-changed +++ b/hooks/swift-storage-relation-changed @@ -1 +1 @@ -swift-storage-node-relations \ No newline at end of file +swift_storage_relations.py \ No newline at end of file diff --git a/hooks/swift-storage-relation-joined b/hooks/swift-storage-relation-joined index 68e4122..2b11074 120000 --- a/hooks/swift-storage-relation-joined +++ b/hooks/swift-storage-relation-joined @@ -1 +1 @@ -swift-storage-node-relations \ No newline at end of file +swift_storage_relations.py \ No newline at end of file diff --git a/hooks/swift_storage_relations.py b/hooks/swift_storage_relations.py new file mode 100755 index 0000000..3c5fcde --- /dev/null +++ b/hooks/swift_storage_relations.py @@ -0,0 +1,83 @@ +#!/usr/bin/python + +import os +import sys + +from swift_storage_utils import ( + PACKAGES, + determine_block_devices, + do_openstack_upgrade, + ensure_swift_directories, + fetch_swift_rings, + register_configs, + swift_init, # move to openstack utils + setup_storage, +) + +from charmhelpers.core.hookenv import ( + Hooks, + config, + log, + relation_get, + relation_set, +) + +from charmhelpers.core.host import ( + apt_install, + apt_update, +) + + +from charmhelpers.contrib.openstack.utils import ( + configure_installation_source, + openstack_upgrade_available, +) + +hooks = Hooks() +CONFIGS = register_configs() + + +@hooks.hook() +def install(): + conf = config() + src = conf['openstack-origin'] + configure_installation_source(src) + apt_update() + apt_install(PACKAGES) + CONFIGS.write('/etc/rsyncd.conf') + swift_init('all', 'stop') + setup_storage() + ensure_swift_directories() + + +@hooks.hook() +def config_changed(): + if openstack_upgrade_available('swift'): + do_openstack_upgrade(configs=CONFIGS) + CONFIGS.write_all() + # TODO: save landscape scriptrc + + +@hooks.hook() +def swift_storage_relation_joined(): + devs = [os.path.basename(dev) for dev in determine_block_devices()] + rel_settings = { + 'zone': config('zone'), + 'object_port': 
config('object-server-port'), + 'container_port': config('container-server-port'), + 'account_port': config('account-server-port'), + 'device': ':'.join(devs), + } + relation_set(**rel_settings) + + +@hooks.hook() +def swift_storage_relation_changed(): + rings_url = relation_get('rings_url') + swift_hash = relation_get('swift_hash') + if None in [rings_url, swift_hash]: + log('swift_storage_relation_changed: Peer not ready?') + sys.exit(0) + CONFIGS.write('/etc/swift/swift.conf') + fetch_swift_rings(rings_url) + swift_init('all', 'start') diff --git a/hooks/swift_storage_utils.py b/hooks/swift_storage_utils.py new file mode 100644 index 0000000..e3a09a3 --- /dev/null +++ b/hooks/swift_storage_utils.py @@ -0,0 +1,102 @@ +import os + +from subprocess import check_call, call + +from charmhelpers.core.host import ( + mkdir, + mount, + umount as ensure_block_device, + umount as clean_storage, +) + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +PACKAGES = [ + 'swift', 'swift-account', 'swift-container', + 'swift-object' 'xfsprogs' 'gdisk' +] + + +def ensure_swift_directories(): + ''' + Ensure all directories required for a swift storage node exist with + correct permissions. + ''' + dirs = [ + '/etc/swift', + '/var/cache/swift', + '/srv/node', + ] + [mkdir(d, owner='swift', group='swift') for d in dirs + if not os.path.isdir(d)] + + +def register_configs(): + return None + + +def swift_init(target, action, fatal=False): + ''' + Call swift-init on a specific target with given action, potentially + raising exception. + ''' + cmd = ['swift-init', target, action] + if fatal: + return check_call(cmd) + return call(cmd) + + +def do_openstack_upgrade(configs): + pass + + +def find_block_devices(): + pass + + +def determine_block_devices(): + block_device = config('block-device') + + if not block_device or block_device in ['None', 'none']: + log('No storage devices specified in config as block-device', + level=ERROR) + return None + + if block_device == 'guess': + bdevs = find_block_devices() + else: + bdevs = block_device.split(' ') + + return [ensure_block_device(bd) for bd in bdevs] + + +def mkfs_xfs(bdev): + cmd = ['mkfs.xfs', '-f', '-i', 'size=1024', bdev] + check_call(cmd) + + +def setup_storage(): + for dev in determine_block_devices(): + if config('overwrite') in ['True', 'true']: + clean_storage(dev) + # if not cleaned and in use, mkfs should fail. + mkfs_xfs(dev) + _dev = os.path.basename(dev) + _mp = os.path.join('/srv', 'node', _dev) + mkdir(_mp, owner='swift', group='swift') + mount(dev, '/srv/node/%s' % _dev, persist=True) + # TODO: chown again post-mount? + + +def fetch_swift_rings(rings_url): + log('swift-storage-node: Fetching all swift rings from proxy @ %s.' % + rings_url) + for server in ['account', 'object', 'container']: + url = '%s/%s.ring.gz' % (rings_url, server) + log('Fetching %s.' % url) + cmd = ['wget', url, '-O', '/etc/swift/%s.ring.gz' % server] + check_call(cmd) From 85501cbb641a6c377faf2f7ab62658afd4b12985 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 18 Jul 2013 16:07:22 -0700 Subject: [PATCH 02/20] Finish up: find_block_device(), save_script_rc(). Add restart map. 
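restart_on_change() hashes every file named in the map before the wrapped
hook runs and again afterwards, then restarts only the services mapped to
files whose contents actually changed. A minimal sketch of the pattern as
it is wired into the hooks (the map below is illustrative, not the charm's
full RESTART_MAP):

    from charmhelpers.core.host import restart_on_change

    EXAMPLE_MAP = {
        # rendered config file -> services to bounce when it changes
        '/etc/rsyncd.conf': ['rsync'],
        '/etc/swift/account-server.conf': ['swift-account'],
    }

    @restart_on_change(EXAMPLE_MAP)
    def config_changed():
        # re-render configs here; any file whose md5 differs by the time
        # this returns triggers a restart of its mapped services
        pass
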
--- hooks/charmhelpers/contrib/openstack/utils.py | 22 ++++ .../contrib/storage/linux/loopback.py | 7 +- hooks/swift_storage_relations.py | 28 ++--- hooks/swift_storage_utils.py | 102 ++++++++++++++++-- revision | 2 +- 5 files changed, 139 insertions(+), 22 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index ffe82e3..cff765d 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -7,11 +7,13 @@ from collections import OrderedDict import apt_pkg as apt import subprocess import os +import socket import sys from charmhelpers.core.hookenv import ( config, log as juju_log, + unit_get, ) from charmhelpers.core.host import ( @@ -269,3 +271,23 @@ def openstack_upgrade_available(package): available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 + + +def get_host_ip(hostname=None): + hostname = hostname or unit_get('private-address') + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 9fb87a2..38957ef 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -35,8 +35,11 @@ def create_loopback(file_path): :returns: str: Full path to new loopback device (eg, /dev/loop0) ''' - cmd = ['losetup', '--find', file_path] - return check_output(cmd).strip() + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in loopback_devices().iteritems(): + if f == file_path: + return d def ensure_loopback_device(path, size): diff --git a/hooks/swift_storage_relations.py b/hooks/swift_storage_relations.py index 3c5fcde..860b34b 100755 --- a/hooks/swift_storage_relations.py +++ b/hooks/swift_storage_relations.py @@ -5,12 +5,13 @@ import sys from swift_storage_utils import ( PACKAGES, + RESTART_MAP, determine_block_devices, do_openstack_upgrade, ensure_swift_directories, fetch_swift_rings, register_configs, - swift_init, # move to openstack utils + save_script_rc, setup_storage, ) @@ -25,6 +26,7 @@ from charmhelpers.core.hookenv import ( from charmhelpers.core.host import ( apt_install, apt_update, + restart_on_change, ) @@ -37,25 +39,24 @@ hooks = Hooks() CONFIGS = register_configs() -@hooks.hook() +@hooks.hook('install') +@restart_on_change(RESTART_MAP) def install(): - conf = config() - src = conf['openstack-origin'] - configure_installation_source(src) + configure_installation_source(config('openstack-origin')) apt_update() - apt_install(PACKAGES) + apt_install(PACKAGES, fatal=True) CONFIGS.write('/etc/rsyncd.conf') - swift_init('all', 'stop') setup_storage() ensure_swift_directories() -@hooks.hook() +@hooks.hook('config-changed') +@restart_on_change(RESTART_MAP) def config_changed(): if openstack_upgrade_available('swift'): do_openstack_upgrade(configs=CONFIGS) CONFIGS.write_all() - # TODO: save landscape scriptrc + save_script_rc() @hooks.hook() @@ -71,13 +72,16 @@ def swift_storage_relation_joined(): 
relation_set(**rel_settings) -@hooks.hook() +@hooks.hook('swift-storage-relation-changed') +@restart_on_change(RESTART_MAP) def swift_storage_relation_changed(): rings_url = relation_get('rings_url') swift_hash = relation_get('swift_hash') - if None in [rings_url, swift_hash]: + if '' in [rings_url, swift_hash] or None in [rings_url, swift_hash]: log('swift_storage_relation_changed: Peer not ready?') sys.exit(0) CONFIGS.write('/etc/swift/swift.conf') fetch_swift_rings(rings_url) - swift_init('all', 'start') + +if '/usr/bin/nosetests' not in sys.argv: + hooks.execute(sys.argv) diff --git a/hooks/swift_storage_utils.py b/hooks/swift_storage_utils.py index e3a09a3..94be901 100644 --- a/hooks/swift_storage_utils.py +++ b/hooks/swift_storage_utils.py @@ -1,12 +1,24 @@ +import re import os from subprocess import check_call, call +# Stuff copied from cinder py charm, needs to go somewhere +# common. +from misc_utils import ( + ensure_block_device, + clean_storage, +) + +from swift_storage_context import ( + SwiftStorageContext, + SwiftStorageServerContext, + RsyncContext, +) + from charmhelpers.core.host import ( mkdir, mount, - umount as ensure_block_device, - umount as clean_storage, ) from charmhelpers.core.hookenv import ( @@ -15,11 +27,50 @@ from charmhelpers.core.hookenv import ( ERROR, ) +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, +) + +from charmhelpers.contrib.openstack.utils import ( + get_host_ip, + get_os_codename_package, + save_script_rc as _save_script_rc, +) + +from charmhelpers.contrib.openstack import ( + templating, +) + PACKAGES = [ - 'swift', 'swift-account', 'swift-container', - 'swift-object' 'xfsprogs' 'gdisk' + 'swift', 'swift-account', 'swift-container', 'swift-object', + 'xfsprogs', 'gdisk', 'lvm2', 'python-jinja2', ] +TEMPLATES = 'templates/' + +ACCOUNT_SVCS = [ + 'swift-account', 'swift-account-auditor', + 'swift-account-reaper', 'swift-account-replicator' +] + +CONTAINER_SVCS = [ + 'swift-container', 'swift-container-auditor', + 'swift-container-updater', 'swift-container-replicator' +] + +OBJECT_SVCS = [ + 'swift-object', 'swift-object-auditor', + 'swift-object-updater', 'swift-object-replicator' +] + +RESTART_MAP = { + '/etc/rsyncd.conf': ['rsync'], + '/etc/swift/account-server.conf': ACCOUNT_SVCS, + '/etc/swift/container-server.conf': CONTAINER_SVCS, + '/etc/swift/object-server.conf': OBJECT_SVCS, + '/etc/swift/swift.conf': ACCOUNT_SVCS + CONTAINER_SVCS + OBJECT_SVCS +} + def ensure_swift_directories(): ''' @@ -36,7 +87,17 @@ def ensure_swift_directories(): def register_configs(): - return None + release = get_os_codename_package('python-swift', fatal=False) or 'essex' + configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, + openstack_release=release) + configs.register('/etc/swift/swift.conf', + [SwiftStorageContext()]) + configs.register('/etc/rsyncd.conf', + [RsyncContext()]) + for server in ['account', 'object', 'container']: + configs.register('/etc/swift/%s-server.conf' % server, + [SwiftStorageServerContext()]), + return configs def swift_init(target, action, fatal=False): @@ -55,7 +116,18 @@ def do_openstack_upgrade(configs): def find_block_devices(): - pass + found = [] + incl = ['sd[a-z]', 'vd[a-z]', 'cciss\/c[0-9]d[0-9]'] + blacklist = ['sda', 'vda', 'cciss/c0d0'] + + with open('/proc/partitions') as proc: + partitions = [p.split() for p in proc.readlines()[2:]] + for partition in [p[3] for p in partitions if p]: + for inc in incl: + _re = re.compile(r'^(%s)$' % inc) + if _re.match(partition) and partition not in 
blacklist: + found.append(os.path.join('/dev', partition)) + return [f for f in found if is_block_device(f)] def determine_block_devices(): @@ -89,7 +161,8 @@ def setup_storage(): _mp = os.path.join('/srv', 'node', _dev) mkdir(_mp, owner='swift', group='swift') mount(dev, '/srv/node/%s' % _dev, persist=True) - # TODO: chown again post-mount? + check_call(['chown', '-R', 'swift:swift', '/srv/node/']) + check_call(['chmod', '-R', '0750', '/srv/node/']) def fetch_swift_rings(rings_url): @@ -100,3 +173,18 @@ def fetch_swift_rings(rings_url): log('Fetching %s.' % url) cmd = ['wget', url, '-O', '/etc/swift/%s.ring.gz' % server] check_call(cmd) + + +def save_script_rc(): + env_vars = {} + ip = get_host_ip() + for server in ['account', 'container', 'object']: + port = config('%s-server-port' % server) + url = 'http://%s:%s/recon/diskusage|"mounted":true' % (ip, port) + svc = server.upper() + env_vars.update({ + 'OPENSTACK_PORT_%s' % svc: port, + 'OPENSTACK_SWIFT_SERVICE_%s' % svc: '%s-server' % server, + 'OPENSTACK_URL_%s' % svc: url, + }) + _save_script_rc(**env_vars) diff --git a/revision b/revision index 04f9fe4..84df352 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -59 +87 From b99f2cf010c240a273bbf365510883dffbaaba1a Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 19 Jul 2013 12:52:45 -0700 Subject: [PATCH 03/20] Fix up port handling, checkin Makefile+setup.cfg+.coveragerc, rename tests/ -> unit_tests/ --- charm-helpers.yaml | 10 ++ hooks/misc_utils.py | 84 +++++++++ hooks/swift_storage_context.py | 72 ++++++++ hooks/swift_storage_relations.py | 4 +- revision | 2 +- templates/account-server.conf | 21 +++ templates/container-server.conf | 23 +++ templates/object-server.conf | 23 +++ templates/rsyncd.conf | 23 +++ templates/swift.conf | 5 + unit_tests/__init__.py | 0 unit_tests/test_swift_storage_relations.py | 102 +++++++++++ unit_tests/test_swift_storage_utils.py | 187 +++++++++++++++++++++ unit_tests/test_utils.py | 99 +++++++++++ 14 files changed, 651 insertions(+), 4 deletions(-) create mode 100644 charm-helpers.yaml create mode 100644 hooks/misc_utils.py create mode 100644 hooks/swift_storage_context.py create mode 100644 templates/account-server.conf create mode 100644 templates/container-server.conf create mode 100644 templates/object-server.conf create mode 100644 templates/rsyncd.conf create mode 100644 templates/swift.conf create mode 100644 unit_tests/__init__.py create mode 100644 unit_tests/test_swift_storage_relations.py create mode 100644 unit_tests/test_swift_storage_utils.py create mode 100644 unit_tests/test_utils.py diff --git a/charm-helpers.yaml b/charm-helpers.yaml new file mode 100644 index 0000000..7d9aa62 --- /dev/null +++ b/charm-helpers.yaml @@ -0,0 +1,10 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core + - contrib.openstack|inc=* + - contrib.storage + - contrib.hahelpers: + - apache + - ceph + - cluster diff --git a/hooks/misc_utils.py b/hooks/misc_utils.py new file mode 100644 index 0000000..2b2e39c --- /dev/null +++ b/hooks/misc_utils.py @@ -0,0 +1,84 @@ +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + zap_disk, +) + +from charmhelpers.contrib.storage.linux.loopback import ( + ensure_loopback_device, +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.core.host import ( + mounts, + umount, +) + +from charmhelpers.core.hookenv import ( + log, + INFO, + ERROR, +) + 
+DEFAULT_LOOPBACK_SIZE = '5G' + + +def ensure_block_device(block_device): + ''' + Confirm block_device, create as loopback if necessary. + + :param block_device: str: Full path of block device to ensure. + + :returns: str: Full path of ensured block device. + ''' + _none = ['None', 'none', None] + if (block_device in _none): + log('prepare_storage(): Missing required input: ' + 'block_device=%s.' % block_device, level=ERROR) + raise + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + log('Failed to locate valid block device at %s' % bdev, level=ERROR) + raise + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + log('clean_storage(): Found %s mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) diff --git a/hooks/swift_storage_context.py b/hooks/swift_storage_context.py new file mode 100644 index 0000000..92e0bfa --- /dev/null +++ b/hooks/swift_storage_context.py @@ -0,0 +1,72 @@ +import re + +from charmhelpers.core.hookenv import ( + config, + log, + related_units, + relation_get, + relation_ids, +) + +from charmhelpers.contrib.openstack.utils import ( + get_host_ip, +) + +from charmhelpers.contrib.openstack.context import ( + OSContextGenerator, +) + + +class SwiftStorageContext(OSContextGenerator): + interfaces = ['swift-storage'] + + def __call__(self): + rids = relation_ids('swift-storage') + if not rids: + return {} + + swift_hash = None + for rid in rids: + for unit in related_units(rid): + if not swift_hash: + swift_hash = relation_get('swift_hash', rid=rid, + unit=unit) + if not swift_hash: + log('No swift_hash passed via swift-storage relation. 
' + 'Peer not ready?') + return {} + return {'swift_hash': swift_hash} + + +class RsyncContext(OSContextGenerator): + interfaces = [] + + def enable_rsyncd(self): + default = open('/etc/default/rsync').read() + _m = re.compile('^RSYNC_ENABLE=(.*)$', re.MULTILINE) + if not re.search(_m, default): + with open('/etc/default/rsync', 'a+') as out: + out.write('RSYNC_ENABLE=true\n') + else: + with open('/etc/default/rsync', 'w') as out: + out.write(_m.sub('RSYNC_ENABLE=true', default)) + + def __call__(self): + local_ip = get_host_ip() + self.enable_rsyncd() + return { + 'local_ip': local_ip + } + + +class SwiftStorageServerContext(OSContextGenerator): + interfaces = [] + + def __call__(self): + ctxt = { + 'local_ip': get_host_ip(), + 'account_server_port': config('account-server-port'), + 'container_server_port': config('container-server-port'), + 'object_server_port': config('object-server-port'), + } + return ctxt diff --git a/hooks/swift_storage_relations.py b/hooks/swift_storage_relations.py index 860b34b..af52a1d 100755 --- a/hooks/swift_storage_relations.py +++ b/hooks/swift_storage_relations.py @@ -39,13 +39,11 @@ hooks = Hooks() CONFIGS = register_configs() -@hooks.hook('install') -@restart_on_change(RESTART_MAP) +@hooks.hook() def install(): configure_installation_source(config('openstack-origin')) apt_update() apt_install(PACKAGES, fatal=True) - CONFIGS.write('/etc/rsyncd.conf') setup_storage() ensure_swift_directories() diff --git a/revision b/revision index 84df352..d22307c 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -87 +88 diff --git a/templates/account-server.conf b/templates/account-server.conf new file mode 100644 index 0000000..98fd13a --- /dev/null +++ b/templates/account-server.conf @@ -0,0 +1,21 @@ +[DEFAULT] +bind_ip = {{ local_ip }} +bind_port = {{ account_server_port }} +workers = 2 + +[pipeline:main] +pipeline = recon account-server + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift + +[app:account-server] +use = egg:swift#account + +[account-replicator] + +[account-auditor] + +[account-reaper] + diff --git a/templates/container-server.conf b/templates/container-server.conf new file mode 100644 index 0000000..76da700 --- /dev/null +++ b/templates/container-server.conf @@ -0,0 +1,23 @@ +[DEFAULT] +bind_ip = {{ local_ip }} +bind_port = {{ container_server_port }} +workers = 2 + +[pipeline:main] +pipeline = recon container-server + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift + +[app:container-server] +use = egg:swift#container + +[container-replicator] + +[container-updater] + +[container-auditor] + +[container-sync] + diff --git a/templates/object-server.conf b/templates/object-server.conf new file mode 100644 index 0000000..eac69d8 --- /dev/null +++ b/templates/object-server.conf @@ -0,0 +1,23 @@ +[DEFAULT] +bind_ip = {{ local_ip }} +bind_port = {{ object_server_port }} +workers = 2 + +[pipeline:main] +pipeline = recon object-server + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift + +[app:object-server] +use = egg:swift#object + +[object-replicator] + +[object-updater] + +[object-auditor] + +[object-sync] + diff --git a/templates/rsyncd.conf b/templates/rsyncd.conf new file mode 100644 index 0000000..6bb6aac --- /dev/null +++ b/templates/rsyncd.conf @@ -0,0 +1,23 @@ +uid = swift +gid = swift +log file = /var/log/rsyncd.log +pid file = /var/run/rsyncd.pid +address = {{ local_ip }} + +[account] +max connections = 2 +path = /srv/node/ +read only = false +lock file = 
/var/lock/account.lock + +[container] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/container.lock + +[object] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/object.lock diff --git a/templates/swift.conf b/templates/swift.conf new file mode 100644 index 0000000..58d943e --- /dev/null +++ b/templates/swift.conf @@ -0,0 +1,5 @@ +{% if swift_hash %} +[swift-hash] +# random unique string that can never change (DO NOT LOSE) +swift_hash_path_suffix = {{ swift_hash }} +{% endif %} diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unit_tests/test_swift_storage_relations.py b/unit_tests/test_swift_storage_relations.py new file mode 100644 index 0000000..698a0fb --- /dev/null +++ b/unit_tests/test_swift_storage_relations.py @@ -0,0 +1,102 @@ +from mock import patch, MagicMock + +from unit_tests.test_utils import CharmTestCase + +import hooks.swift_storage_utils as utils + +_reg = utils.register_configs +utils.register_configs = MagicMock() + +import hooks.swift_storage_relations as relations + +utils.register_configs = _reg + +from hooks.swift_storage_utils import PACKAGES + +TO_PATCH = [ + 'CONFIGS', + # charmhelpers.core.hookenv + 'Hooks', + 'config', + 'log', + 'relation_set', + 'relation_get', + # charmhelpers.core.host + 'apt_update', + 'apt_install', + # charmehelpers.contrib.openstack.utils + 'configure_installation_source', + 'openstack_upgrade_available', + # swift_storage_utils + 'determine_block_devices', + 'do_openstack_upgrade', + 'ensure_swift_directories', + 'fetch_swift_rings', + 'save_script_rc', + 'setup_storage', + 'register_configs', +] + + +class SwiftStorageRelationsTests(CharmTestCase): + def setUp(self): + super(SwiftStorageRelationsTests, self).setUp(relations, + TO_PATCH) + self.config.side_effect = self.test_config.get + self.relation_get.side_effect = self.test_relation.get + + def test_install_hook(self): + self.test_config.set('openstack-origin', 'cloud:precise-havana') + relations.install() + self.configure_installation_source.assert_called_with( + 'cloud:precise-havana', + ) + self.apt_update.assert_called() + self.apt_install.assert_called_with(PACKAGES, fatal=True) + + self.setup_storage.assert_called() + + def test_config_changed_no_upgrade_available(self): + self.openstack_upgrade_available.return_value = False + relations.config_changed() + self.assertFalse(self.do_openstack_upgrade.called) + self.assertTrue(self.CONFIGS.write_all.called) + + def test_config_changed_upgrade_available(self): + self.openstack_upgrade_available.return_value = True + relations.config_changed() + self.assertTrue(self.do_openstack_upgrade.called) + self.assertTrue(self.CONFIGS.write_all.called) + + def test_storage_joined_single_device(self): + self.determine_block_devices.return_value = ['/dev/vdb'] + relations.swift_storage_relation_joined() + self.relation_set.assert_called_with( + device='vdb', object_port=6000, account_port=6002, + zone=1, container_port=6001 + ) + + def test_storage_joined_multi_device(self): + self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc', + '/dev/vdd'] + relations.swift_storage_relation_joined() + self.relation_set.assert_called_with( + device='vdb:vdc:vdd', object_port=6000, account_port=6002, + zone=1, container_port=6001 + ) + + @patch('sys.exit') + def test_storage_changed_missing_relation_data(self, exit): + relations.swift_storage_relation_changed() + exit.assert_called_with(0) + + def 
test_storage_changed_with_relation_data(self): + self.test_relation.set({ + 'swift_hash': 'foo_hash', + 'rings_url': 'http://swift-proxy.com/rings/', + }) + relations.swift_storage_relation_changed() + self.CONFIGS.write.assert_called_with('/etc/swift/swift.conf') + self.fetch_swift_rings.assert_called_with( + 'http://swift-proxy.com/rings/' + ) diff --git a/unit_tests/test_swift_storage_utils.py b/unit_tests/test_swift_storage_utils.py new file mode 100644 index 0000000..2dfe3e6 --- /dev/null +++ b/unit_tests/test_swift_storage_utils.py @@ -0,0 +1,187 @@ +from mock import call, patch, MagicMock +from contextlib import contextmanager +from unit_tests.test_utils import CharmTestCase + + +import hooks.swift_storage_utils as swift_utils + + +TO_PATCH = [ + 'log', + 'config', + 'mkdir', + 'mount', + 'check_call', + 'call', + 'ensure_block_device', + 'clean_storage', + 'is_block_device', + 'get_os_codename_package', + 'get_host_ip', + '_save_script_rc', +] + + +PROC_PARTITIONS = """ +major minor #blocks name + + 8 0 732574584 sda + 8 1 102400 sda1 + 8 2 307097600 sda2 + 8 3 1 sda3 + 8 5 146483200 sda5 + 8 6 4881408 sda6 + 8 7 274004992 sda7 + 8 16 175825944 sdb + 9 0 732574584 vda + 10 0 732574584 vdb + 10 0 732574584 vdb1 + 104 0 1003393784 cciss/c0d0 + 105 0 1003393784 cciss/c1d0 + 105 1 86123689 cciss/c1d0p1 + 252 0 20971520 dm-0 + 252 1 15728640 dm-1 +""" + +SCRIPT_RC_ENV = { + 'OPENSTACK_PORT_ACCOUNT': 6002, + 'OPENSTACK_PORT_CONTAINER': 6001, + 'OPENSTACK_PORT_OBJECT': 6000, + 'OPENSTACK_SWIFT_SERVICE_ACCOUNT': 'account-server', + 'OPENSTACK_SWIFT_SERVICE_CONTAINER': 'container-server', + 'OPENSTACK_SWIFT_SERVICE_OBJECT': 'object-server', + 'OPENSTACK_URL_ACCOUNT': + 'http://10.0.0.1:6002/recon/diskusage|"mounted":true', + 'OPENSTACK_URL_CONTAINER': + 'http://10.0.0.1:6001/recon/diskusage|"mounted":true', + 'OPENSTACK_URL_OBJECT': + 'http://10.0.0.1:6000/recon/diskusage|"mounted":true' +} + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. 
+ + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file + + +class SwiftStorageUtilsTests(CharmTestCase): + def setUp(self): + super(SwiftStorageUtilsTests, self).setUp(swift_utils, TO_PATCH) + self.config.side_effect = self.test_config.get + + def test_ensure_swift_directories(self): + with patch('os.path.isdir') as isdir: + isdir.return_value = False + swift_utils.ensure_swift_directories() + ex_dirs = [ + call('/etc/swift', owner='swift', group='swift'), + call('/var/cache/swift', owner='swift', group='swift'), + call('/srv/node', owner='swift', group='swift') + ] + self.assertEquals(ex_dirs, self.mkdir.call_args_list) + + def test_swift_init_nonfatal(self): + swift_utils.swift_init('all', 'start') + self.call.assert_called_with(['swift-init', 'all', 'start']) + + def test_swift_init_fatal(self): + swift_utils.swift_init('all', 'start', fatal=True) + self.check_call.assert_called_with(['swift-init', 'all', 'start']) + + def test_fetch_swift_rings(self): + url = 'http://someproxynode/rings' + swift_utils.fetch_swift_rings(url) + wgets = [] + for s in ['account', 'object', 'container']: + _c = call(['wget', '%s/%s.ring.gz' % (url, s), + '-O', '/etc/swift/%s.ring.gz' % s]) + wgets.append(_c) + self.assertEquals(wgets, self.check_call.call_args_list) + + def test_determine_block_device_no_config(self): + self.test_config.set('block-device', None) + self.assertEquals(swift_utils.determine_block_devices(), None) + + def _fake_ensure(self, bdev): + return bdev.split('|').pop(0) + + @patch.object(swift_utils, 'ensure_block_device') + def test_determine_block_device_single_dev(self, _ensure): + _ensure.side_effect = self._fake_ensure + self.test_config.set('block-device', '/dev/vdb') + result = swift_utils.determine_block_devices() + self.assertEquals(['/dev/vdb'], result) + + @patch.object(swift_utils, 'ensure_block_device') + def test_determine_block_device_multi_dev(self, _ensure): + _ensure.side_effect = self._fake_ensure + bdevs = '/dev/vdb /dev/vdc /tmp/swift.img|1G' + self.test_config.set('block-device', bdevs) + result = swift_utils.determine_block_devices() + ex = ['/dev/vdb', '/dev/vdc', '/tmp/swift.img'] + self.assertEquals(ex, result) + + @patch.object(swift_utils, 'find_block_devices') + @patch.object(swift_utils, 'ensure_block_device') + def test_determine_block_device_guess_dev(self, _ensure, _find): + _ensure.side_effect = self._fake_ensure + self.test_config.set('block-device', 'guess') + _find.return_value = ['/dev/vdb', '/dev/sdb'] + result = swift_utils.determine_block_devices() + self.assertTrue(_find.called) + self.assertEquals(result, ['/dev/vdb', '/dev/sdb']) + + def test_mkfs_xfs(self): + swift_utils.mkfs_xfs('/dev/sdb') + self.check_call.assert_called_with( + ['mkfs.xfs', '-f', '-i', 'size=1024', '/dev/sdb'] + ) + + @patch.object(swift_utils, 'clean_storage') + @patch.object(swift_utils, 'mkfs_xfs') + @patch.object(swift_utils, 'determine_block_devices') + def test_setup_storage_no_overwrite(self, determine, mkfs, clean): + determine.return_value = ['/dev/vdb'] + self.test_config.set('overwrite', 'false') + swift_utils.setup_storage() + self.assertFalse(clean.called) + + @patch.object(swift_utils, 'clean_storage') + @patch.object(swift_utils, 'mkfs_xfs') + @patch.object(swift_utils, 'determine_block_devices') + def 
test_setup_storage_overwrite(self, determine, mkfs, clean): + determine.return_value = ['/dev/vdb'] + self.test_config.set('overwrite', 'True') + swift_utils.setup_storage() + clean.assert_called_with('/dev/vdb') + self.mkdir.assert_called_with('/srv/node/vdb', owner='swift', + group='swift') + self.mount.assert_called('/dev/vdb', '/srv/node/vdb', persist=True) + + def test_find_block_devices(self): + self.is_block_device.return_value = True + with patch_open() as (_open, _file): + _file.read.return_value = PROC_PARTITIONS + _file.readlines = MagicMock() + _file.readlines.return_value = PROC_PARTITIONS.split('\n') + result = swift_utils.find_block_devices() + ex = ['/dev/sdb', '/dev/vdb', '/dev/cciss/c1d0'] + self.assertEquals(ex, result) + + def test_save_script_rc(self): + self.get_host_ip.return_value = '10.0.0.1' + swift_utils.save_script_rc() + self._save_script_rc.assert_called_with(**SCRIPT_RC_ENV) diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py new file mode 100644 index 0000000..e40f83c --- /dev/null +++ b/unit_tests/test_utils.py @@ -0,0 +1,99 @@ +import logging +import unittest +import os +import yaml + +from mock import patch + + +def load_config(): + ''' + Walk backwords from __file__ looking for config.yaml, load and return the + 'options' section' + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % file) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + ''' + Load default charm config from config.yaml return as a dict. + If no default is set in config.yaml, its value is None. + ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attr=None, unit=None, rid=None): + if attr is None: + return self.relation_data + elif attr in self.relation_data: + return self.relation_data[attr] + return None From a9fb83be8a8509e24c9f1e03145c36f331714532 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 19 Jul 2013 13:44:37 -0700 Subject: [PATCH 04/20] Update to 100% coverage. 
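Unit tests now run through the new Makefile target (make test, which wraps
nosetests with coverage enabled via setup.cfg and .coveragerc). The shared
patch_open() helper in unit_tests/test_utils.py mocks the builtin open() so
tests never touch the filesystem. A small illustrative example of the
pattern (the function under test here is hypothetical):

    from unit_tests.test_utils import patch_open

    def read_rsync_default():
        # stand-in for charm code that reads a config file from disk
        with open('/etc/default/rsync') as f:
            return f.read()

    def test_read_rsync_default():
        with patch_open() as (mock_open, mock_file):
            mock_file.read.return_value = 'RSYNC_ENABLE=false'
            assert read_rsync_default() == 'RSYNC_ENABLE=false'
            mock_open.assert_called_with('/etc/default/rsync')
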
--- .coveragerc | 7 +++ Makefile | 14 +++++ hooks/swift_storage_context.py | 3 +- hooks/swift_storage_relations.py | 14 ++++- hooks/swift_storage_utils.py | 1 + setup.cfg | 6 ++ unit_tests/__init__.py | 2 + unit_tests/test_swift_storage_context.py | 73 ++++++++++++++++++++++ unit_tests/test_swift_storage_relations.py | 13 ++++ unit_tests/test_swift_storage_utils.py | 53 ++++++++++------ unit_tests/test_utils.py | 21 ++++++- 11 files changed, 182 insertions(+), 25 deletions(-) create mode 100644 .coveragerc create mode 100644 Makefile create mode 100644 setup.cfg create mode 100644 unit_tests/test_swift_storage_context.py diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..37585cd --- /dev/null +++ b/.coveragerc @@ -0,0 +1,7 @@ +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + if __name__ == .__main__.: +include= + hooks/swift_storage_* + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..77c481b --- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +#!/usr/bin/make +PYTHON := /usr/bin/env python + +lint: + @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers unit_tests + @charm proof + +test: + @echo Starting tests... + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + +sync: + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/hooks/swift_storage_context.py b/hooks/swift_storage_context.py index 92e0bfa..c6d5889 100644 --- a/hooks/swift_storage_context.py +++ b/hooks/swift_storage_context.py @@ -42,7 +42,8 @@ class RsyncContext(OSContextGenerator): interfaces = [] def enable_rsyncd(self): - default = open('/etc/default/rsync').read() + with open('/etc/default/rsync') as _in: + default = _in.read() _m = re.compile('^RSYNC_ENABLE=(.*)$', re.MULTILINE) if not re.search(_m, default): with open('/etc/default/rsync', 'a+') as out: diff --git a/hooks/swift_storage_relations.py b/hooks/swift_storage_relations.py index af52a1d..64f89c1 100755 --- a/hooks/swift_storage_relations.py +++ b/hooks/swift_storage_relations.py @@ -16,7 +16,7 @@ from swift_storage_utils import ( ) from charmhelpers.core.hookenv import ( - Hooks, + Hooks, UnregisteredHookError, config, log, relation_get, @@ -81,5 +81,13 @@ def swift_storage_relation_changed(): CONFIGS.write('/etc/swift/swift.conf') fetch_swift_rings(rings_url) -if '/usr/bin/nosetests' not in sys.argv: - hooks.execute(sys.argv) + +def main(): + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) + + +if __name__ == '__main__': + main() diff --git a/hooks/swift_storage_utils.py b/hooks/swift_storage_utils.py index 94be901..0af7b14 100644 --- a/hooks/swift_storage_utils.py +++ b/hooks/swift_storage_utils.py @@ -121,6 +121,7 @@ def find_block_devices(): blacklist = ['sda', 'vda', 'cciss/c0d0'] with open('/proc/partitions') as proc: + print proc partitions = [p.split() for p in proc.readlines()[2:]] for partition in [p[3] for p in partitions if p]: for inc in incl: diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..e5cf628 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,6 @@ +[nosesetests] +verbosity=2 +with-coverage=1 +cover-erase=1 +cover-package=hooks + diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index e69de29..f80aab3 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -0,0 +1,2 @@ +import sys +sys.path.append('hooks') diff --git a/unit_tests/test_swift_storage_context.py b/unit_tests/test_swift_storage_context.py new file mode 
100644 index 0000000..5b5710a --- /dev/null +++ b/unit_tests/test_swift_storage_context.py @@ -0,0 +1,73 @@ +from mock import MagicMock, patch + +from unit_tests.test_utils import CharmTestCase, patch_open + + +import hooks.swift_storage_context as swift_context + + +TO_PATCH = [ + 'config', + 'log', + 'related_units', + 'relation_get', + 'relation_ids', + 'get_host_ip', +] + + +class SwiftStorageContextTests(CharmTestCase): + def setUp(self): + super(SwiftStorageContextTests, self).setUp(swift_context, TO_PATCH) + self.config.side_effect = self.test_config.get + + def test_swift_storage_context_missing_data(self): + self.relation_ids.return_value = [] + ctxt = swift_context.SwiftStorageContext() + self.assertEquals(ctxt(), {}) + self.relation_ids.return_value = ['swift-proxy:0'] + self.related_units.return_value = ['swift-proxy/0'] + self.relation_get.return_value = '' + self.assertEquals(ctxt(), {}) + + def test_swift_storage_context_with_data(self): + self.relation_ids.return_value = [] + ctxt = swift_context.SwiftStorageContext() + self.assertEquals(ctxt(), {}) + self.relation_ids.return_value = ['swift-proxy:0'] + self.related_units.return_value = ['swift-proxy/0'] + self.relation_get.return_value = 'fooooo' + self.assertEquals(ctxt(), {'swift_hash': 'fooooo'}) + + def test_rsync_context(self): + self.get_host_ip.return_value = '10.0.0.5' + ctxt = swift_context.RsyncContext() + ctxt.enable_rsyncd = MagicMock() + ctxt.enable_rsyncd.return_value = True + self.assertEquals({'local_ip': '10.0.0.5'}, ctxt()) + self.assertTrue(ctxt.enable_rsyncd.called) + + def test_rsync_enale_rsync(self): + with patch_open() as (_open, _file): + ctxt = swift_context.RsyncContext() + _file.read.return_value = 'RSYNC_ENABLE=false' + ctxt.enable_rsyncd() + _file.write.assert_called_with('RSYNC_ENABLE=true') + _file.read.return_value = '#foo' + ctxt.enable_rsyncd() + _file.write.assert_called_with('RSYNC_ENABLE=true\n') + + def test_swift_storage_server_context(self): + self.get_host_ip.return_value = '10.0.0.5' + self.test_config.set('account-server-port', '500') + self.test_config.set('object-server-port', '501') + self.test_config.set('container-server-port', '502') + ctxt = swift_context.SwiftStorageServerContext() + result = ctxt() + ex = { + 'container_server_port': '502', + 'object_server_port': '501', + 'account_server_port': '500', + 'local_ip': '10.0.0.5' + } + self.assertEquals(ex, result) diff --git a/unit_tests/test_swift_storage_relations.py b/unit_tests/test_swift_storage_relations.py index 698a0fb..9deeb3e 100644 --- a/unit_tests/test_swift_storage_relations.py +++ b/unit_tests/test_swift_storage_relations.py @@ -100,3 +100,16 @@ class SwiftStorageRelationsTests(CharmTestCase): self.fetch_swift_rings.assert_called_with( 'http://swift-proxy.com/rings/' ) + + @patch('sys.argv') + @patch.object(relations, 'install') + def test_main_hook_exists(self, _install, _argv): + _argv = ['hooks/install'] + relations.main() + _install.assert_called() + + @patch('sys.argv') + def test_main_hook_missing(self, _argv): + _argv = ['hooks/start'] + relations.main() + self.log.assert_called() diff --git a/unit_tests/test_swift_storage_utils.py b/unit_tests/test_swift_storage_utils.py index 2dfe3e6..4f8de29 100644 --- a/unit_tests/test_swift_storage_utils.py +++ b/unit_tests/test_swift_storage_utils.py @@ -1,6 +1,5 @@ from mock import call, patch, MagicMock -from contextlib import contextmanager -from unit_tests.test_utils import CharmTestCase +from unit_tests.test_utils import CharmTestCase, patch_open import 
hooks.swift_storage_utils as swift_utils @@ -59,24 +58,6 @@ SCRIPT_RC_ENV = { } -@contextmanager -def patch_open(): - '''Patch open() to allow mocking both open() itself and the file that is - yielded. - - Yields the mock for "open" and "file", respectively.''' - mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) - - @contextmanager - def stub_open(*args, **kwargs): - mock_open(*args, **kwargs) - yield mock_file - - with patch('__builtin__.open', stub_open): - yield mock_open, mock_file - - class SwiftStorageUtilsTests(CharmTestCase): def setUp(self): super(SwiftStorageUtilsTests, self).setUp(swift_utils, TO_PATCH) @@ -185,3 +166,35 @@ class SwiftStorageUtilsTests(CharmTestCase): self.get_host_ip.return_value = '10.0.0.1' swift_utils.save_script_rc() self._save_script_rc.assert_called_with(**SCRIPT_RC_ENV) + + @patch('hooks.charmhelpers.contrib.openstack.templating.OSConfigRenderer') + def test_register_configs_pre_install(self, renderer): + self.get_os_codename_package.return_value = None + swift_utils.register_configs() + renderer.assert_called_with(templates_dir=swift_utils.TEMPLATES, + openstack_release='essex') + + @patch.object(swift_utils, 'SwiftStorageContext') + @patch.object(swift_utils, 'RsyncContext') + @patch.object(swift_utils, 'SwiftStorageServerContext') + @patch('hooks.charmhelpers.contrib.openstack.templating.OSConfigRenderer') + def test_register_configs_post_install(self, renderer, + swift, rsync, server): + swift.return_value = 'swift_context' + rsync.return_value = 'rsync_context' + server.return_value = 'swift_server_context' + self.get_os_codename_package.return_value = 'grizzly' + configs = MagicMock() + configs.register = MagicMock() + renderer.return_value = configs + swift_utils.register_configs() + renderer.assert_called_with(templates_dir=swift_utils.TEMPLATES, + openstack_release='grizzly') + ex = [ + call('/etc/swift/swift.conf', ['swift_server_context']), + call('/etc/rsyncd.conf', ['rsync_context']), + call('/etc/swift/account-server.conf', ['swift_context']), + call('/etc/swift/object-server.conf', ['swift_context']), + call('/etc/swift/container-server.conf', ['swift_context']) + ] + self.assertEquals(ex, configs.register.call_args_list) diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py index e40f83c..a21d761 100644 --- a/unit_tests/test_utils.py +++ b/unit_tests/test_utils.py @@ -3,7 +3,8 @@ import unittest import os import yaml -from mock import patch +from contextlib import contextmanager +from mock import MagicMock, patch def load_config(): @@ -97,3 +98,21 @@ class TestRelation(object): elif attr in self.relation_data: return self.relation_data[attr] return None + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. + + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file From 5740434491cb26f5143cc8b36d6a6b4a2d1f950f Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 19 Jul 2013 14:06:01 -0700 Subject: [PATCH 05/20] Add do_openstack_upgrade(). 
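do_openstack_upgrade() switches the configured installation source, then
reinstalls the swift packages with Dpkg::Options set to take the incoming
packaged conffiles (--force-confnew, with --force-confdef as the default
action), re-renders all templates for the new release and restarts every
swift daemon on the node. Roughly, the intended call pattern from the
config-changed hook (simplified from the hook module; CONFIGS is the
charm's OSConfigRenderer):

    from charmhelpers.contrib.openstack.utils import (
        openstack_upgrade_available,
    )
    from swift_storage_utils import do_openstack_upgrade, register_configs

    CONFIGS = register_configs()

    def config_changed():
        # only upgrade when openstack-origin points at a release newer
        # than the installed python-swift package
        if openstack_upgrade_available('swift'):
            do_openstack_upgrade(configs=CONFIGS)
        CONFIGS.write_all()
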
--- hooks/charmhelpers/contrib/openstack/utils.py | 1 + hooks/swift_storage_utils.py | 21 ++++++++++++++- unit_tests/test_swift_storage_utils.py | 27 +++++++++++++++++++ 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index cff765d..570fea1 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -62,6 +62,7 @@ def error_out(msg): def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' + import ipdb; ipdb.set_trace() ############################## Breakpoint ############################## ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] rel = '' if src == 'distro': diff --git a/hooks/swift_storage_utils.py b/hooks/swift_storage_utils.py index 0af7b14..84bbe21 100644 --- a/hooks/swift_storage_utils.py +++ b/hooks/swift_storage_utils.py @@ -17,8 +17,11 @@ from swift_storage_context import ( ) from charmhelpers.core.host import ( + apt_install, + apt_update, mkdir, mount, + service_restart, ) from charmhelpers.core.hookenv import ( @@ -32,7 +35,9 @@ from charmhelpers.contrib.storage.linux.utils import ( ) from charmhelpers.contrib.openstack.utils import ( + configure_installation_source, get_host_ip, + get_os_codename_install_source, get_os_codename_package, save_script_rc as _save_script_rc, ) @@ -112,7 +117,21 @@ def swift_init(target, action, fatal=False): def do_openstack_upgrade(configs): - pass + new_src = config('openstack-origin') + new_os_rel = get_os_codename_install_source(new_src) + + log('Performing OpenStack upgrade to %s.' % (new_os_rel)) + configure_installation_source(new_src) + dpkg_opts = [ + '--option', 'Dpkg::Options::=--force-confnew', + '--option', 'Dpkg::Options::=--force-confdef', + ] + apt_update() + apt_install(packages=PACKAGES, options=dpkg_opts, fatal=True) + configs.set_release(openstack_release=new_os_rel) + configs.write_all() + [service_restart(svc) for svc in + (ACCOUNT_SVCS + CONTAINER_SVCS + OBJECT_SVCS)] def find_block_devices(): diff --git a/unit_tests/test_swift_storage_utils.py b/unit_tests/test_swift_storage_utils.py index 4f8de29..ffc8483 100644 --- a/unit_tests/test_swift_storage_utils.py +++ b/unit_tests/test_swift_storage_utils.py @@ -6,8 +6,11 @@ import hooks.swift_storage_utils as swift_utils TO_PATCH = [ + 'apt_update', + 'apt_install', 'log', 'config', + 'configure_installation_source', 'mkdir', 'mount', 'check_call', @@ -16,7 +19,9 @@ TO_PATCH = [ 'clean_storage', 'is_block_device', 'get_os_codename_package', + 'get_os_codename_install_source', 'get_host_ip', + 'service_restart', '_save_script_rc', ] @@ -198,3 +203,25 @@ class SwiftStorageUtilsTests(CharmTestCase): call('/etc/swift/container-server.conf', ['swift_context']) ] self.assertEquals(ex, configs.register.call_args_list) + + def test_do_upgrade(self): + self.test_config.set('openstack-origin', 'cloud:precise-grizzly') + self.get_os_codename_install_source.return_value = 'grizzly' + swift_utils.do_openstack_upgrade(MagicMock()) + self.configure_installation_source.assert_called_with( + 'cloud:precise-grizzly' + ) + dpkg_opts = [ + '--option', 'Dpkg::Options::=--force-confnew', + '--option', 'Dpkg::Options::=--force-confdef', + ] + self.assertTrue(self.apt_update.called) + self.apt_install.assert_called_with( + packages=swift_utils.PACKAGES, + options=dpkg_opts, + fatal=True + ) + services = (swift_utils.ACCOUNT_SVCS + swift_utils.CONTAINER_SVCS + + 
swift_utils.OBJECT_SVCS) + for service in services: + self.assertIn(call(service), self.service_restart.call_args_list) From e5f219d4a6949229df0886997a04147c2cf87771 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 19 Jul 2013 14:13:59 -0700 Subject: [PATCH 06/20] Remove debug. --- hooks/charmhelpers/contrib/openstack/utils.py | 1 - revision | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 570fea1..cff765d 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -62,7 +62,6 @@ def error_out(msg): def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' - import ipdb; ipdb.set_trace() ############################## Breakpoint ############################## ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] rel = '' if src == 'distro': diff --git a/revision b/revision index d22307c..d61f00d 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -88 +90 From a6b66ff8f097594f9b34b24abfc64dea124eda54 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 19 Jul 2013 14:21:28 -0700 Subject: [PATCH 07/20] Rename _relations.py -> _hooks.py --- ...ge_relations.py => swift_storage_hooks.py} | 0 unit_tests/test_swift_storage_relations.py | 24 +++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) rename hooks/{swift_storage_relations.py => swift_storage_hooks.py} (100%) diff --git a/hooks/swift_storage_relations.py b/hooks/swift_storage_hooks.py similarity index 100% rename from hooks/swift_storage_relations.py rename to hooks/swift_storage_hooks.py diff --git a/unit_tests/test_swift_storage_relations.py b/unit_tests/test_swift_storage_relations.py index 9deeb3e..69b8a57 100644 --- a/unit_tests/test_swift_storage_relations.py +++ b/unit_tests/test_swift_storage_relations.py @@ -7,7 +7,7 @@ import hooks.swift_storage_utils as utils _reg = utils.register_configs utils.register_configs = MagicMock() -import hooks.swift_storage_relations as relations +import hooks.swift_storage_hooks as hooks utils.register_configs = _reg @@ -40,14 +40,14 @@ TO_PATCH = [ class SwiftStorageRelationsTests(CharmTestCase): def setUp(self): - super(SwiftStorageRelationsTests, self).setUp(relations, + super(SwiftStorageRelationsTests, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get self.relation_get.side_effect = self.test_relation.get def test_install_hook(self): self.test_config.set('openstack-origin', 'cloud:precise-havana') - relations.install() + hooks.install() self.configure_installation_source.assert_called_with( 'cloud:precise-havana', ) @@ -58,19 +58,19 @@ class SwiftStorageRelationsTests(CharmTestCase): def test_config_changed_no_upgrade_available(self): self.openstack_upgrade_available.return_value = False - relations.config_changed() + hooks.config_changed() self.assertFalse(self.do_openstack_upgrade.called) self.assertTrue(self.CONFIGS.write_all.called) def test_config_changed_upgrade_available(self): self.openstack_upgrade_available.return_value = True - relations.config_changed() + hooks.config_changed() self.assertTrue(self.do_openstack_upgrade.called) self.assertTrue(self.CONFIGS.write_all.called) def test_storage_joined_single_device(self): self.determine_block_devices.return_value = ['/dev/vdb'] - relations.swift_storage_relation_joined() + hooks.swift_storage_relation_joined() self.relation_set.assert_called_with( device='vdb', 
object_port=6000, account_port=6002, zone=1, container_port=6001 @@ -79,7 +79,7 @@ class SwiftStorageRelationsTests(CharmTestCase): def test_storage_joined_multi_device(self): self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc', '/dev/vdd'] - relations.swift_storage_relation_joined() + hooks.swift_storage_relation_joined() self.relation_set.assert_called_with( device='vdb:vdc:vdd', object_port=6000, account_port=6002, zone=1, container_port=6001 @@ -87,7 +87,7 @@ class SwiftStorageRelationsTests(CharmTestCase): @patch('sys.exit') def test_storage_changed_missing_relation_data(self, exit): - relations.swift_storage_relation_changed() + hooks.swift_storage_relation_changed() exit.assert_called_with(0) def test_storage_changed_with_relation_data(self): @@ -95,21 +95,21 @@ class SwiftStorageRelationsTests(CharmTestCase): 'swift_hash': 'foo_hash', 'rings_url': 'http://swift-proxy.com/rings/', }) - relations.swift_storage_relation_changed() + hooks.swift_storage_relation_changed() self.CONFIGS.write.assert_called_with('/etc/swift/swift.conf') self.fetch_swift_rings.assert_called_with( 'http://swift-proxy.com/rings/' ) @patch('sys.argv') - @patch.object(relations, 'install') + @patch.object(hooks, 'install') def test_main_hook_exists(self, _install, _argv): _argv = ['hooks/install'] - relations.main() + hooks.main() _install.assert_called() @patch('sys.argv') def test_main_hook_missing(self, _argv): _argv = ['hooks/start'] - relations.main() + hooks.main() self.log.assert_called() From a8b9dbee1909ec4c63c01f7f2d8bf19339b8daca Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 19 Jul 2013 14:26:50 -0700 Subject: [PATCH 08/20] Update symlinks. --- hooks/config-changed | 2 +- hooks/install | 2 +- hooks/swift-storage-relation-changed | 2 +- hooks/swift-storage-relation-joined | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hooks/config-changed b/hooks/config-changed index 2b11074..c5c04a7 120000 --- a/hooks/config-changed +++ b/hooks/config-changed @@ -1 +1 @@ -swift_storage_relations.py \ No newline at end of file +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/install b/hooks/install index 2b11074..c5c04a7 120000 --- a/hooks/install +++ b/hooks/install @@ -1 +1 @@ -swift_storage_relations.py \ No newline at end of file +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/swift-storage-relation-changed b/hooks/swift-storage-relation-changed index 2b11074..c5c04a7 120000 --- a/hooks/swift-storage-relation-changed +++ b/hooks/swift-storage-relation-changed @@ -1 +1 @@ -swift_storage_relations.py \ No newline at end of file +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/swift-storage-relation-joined b/hooks/swift-storage-relation-joined index 2b11074..c5c04a7 120000 --- a/hooks/swift-storage-relation-joined +++ b/hooks/swift-storage-relation-joined @@ -1 +1 @@ -swift_storage_relations.py \ No newline at end of file +swift_storage_hooks.py \ No newline at end of file From 047a89309f40f5aa166d3f84a6acda5c7e262225 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 19 Jul 2013 14:33:41 -0700 Subject: [PATCH 09/20] Clean charm lint. 
--- unit_tests/test_swift_storage_context.py | 2 +- unit_tests/test_swift_storage_relations.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/unit_tests/test_swift_storage_context.py b/unit_tests/test_swift_storage_context.py index 5b5710a..c222361 100644 --- a/unit_tests/test_swift_storage_context.py +++ b/unit_tests/test_swift_storage_context.py @@ -1,4 +1,4 @@ -from mock import MagicMock, patch +from mock import MagicMock from unit_tests.test_utils import CharmTestCase, patch_open diff --git a/unit_tests/test_swift_storage_relations.py b/unit_tests/test_swift_storage_relations.py index 69b8a57..c208c29 100644 --- a/unit_tests/test_swift_storage_relations.py +++ b/unit_tests/test_swift_storage_relations.py @@ -104,12 +104,10 @@ class SwiftStorageRelationsTests(CharmTestCase): @patch('sys.argv') @patch.object(hooks, 'install') def test_main_hook_exists(self, _install, _argv): - _argv = ['hooks/install'] hooks.main() _install.assert_called() @patch('sys.argv') def test_main_hook_missing(self, _argv): - _argv = ['hooks/start'] hooks.main() self.log.assert_called() From 9c546ad8764a1e669bdea0fd9dfc8c7725d9eae4 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 25 Jul 2013 18:02:52 -0700 Subject: [PATCH 10/20] Typo. --- unit_tests/test_swift_storage_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unit_tests/test_swift_storage_context.py b/unit_tests/test_swift_storage_context.py index 5b5710a..3e1fe59 100644 --- a/unit_tests/test_swift_storage_context.py +++ b/unit_tests/test_swift_storage_context.py @@ -47,7 +47,7 @@ class SwiftStorageContextTests(CharmTestCase): self.assertEquals({'local_ip': '10.0.0.5'}, ctxt()) self.assertTrue(ctxt.enable_rsyncd.called) - def test_rsync_enale_rsync(self): + def test_rsync_enable_rsync(self): with patch_open() as (_open, _file): ctxt = swift_context.RsyncContext() _file.read.return_value = 'RSYNC_ENABLE=false' From 8a10fcef6953ebc08d02c65c35e6a0783d32d28e Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 16 Aug 2013 13:34:18 -0700 Subject: [PATCH 11/20] Sync helpers. 
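
Pulls in the latest openstack charm-helpers: cluster.https() now treats empty relation values as unset (LP: #1203241), canonical_url() falls back to the unit's private-address, SharedDBContext grows database/user/relation_prefix arguments, new ImageServiceContext, NeutronContext and OSConfigFlagContext generators are added, templating gains a munged-path template lookup fallback, and host.py drops the execution_environment() formatting while adding pwgen().

As a quick illustration of the new OSConfigFlagContext (a sketch only; a 'config-flags' option is hypothetical for swift-storage and not added here):

    from charmhelpers.contrib.openstack.context import OSConfigFlagContext
    # with a charm config option such as:  config-flags: "workers=8,log_level=DEBUG"
    OSConfigFlagContext()()
    # => {'user_config_flags': {'workers': '8', 'log_level': 'DEBUG'}}
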
--- .../charmhelpers/contrib/hahelpers/cluster.py | 9 +- .../charmhelpers/contrib/openstack/context.py | 157 ++++++++++++++++-- .../contrib/openstack/templates/ceph.conf | 4 +- .../contrib/openstack/templates/haproxy.cfg | 6 +- .../templates/openstack_https_frontend | 8 +- .../contrib/openstack/templating.py | 27 ++- hooks/charmhelpers/contrib/openstack/utils.py | 49 +++--- hooks/charmhelpers/core/host.py | 31 ++-- 8 files changed, 229 insertions(+), 62 deletions(-) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index dde6c9b..074855f 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -19,6 +19,7 @@ from charmhelpers.core.hookenv import ( config as config_get, INFO, ERROR, + unit_get, ) @@ -96,12 +97,14 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): - if None not in [ + rel_state = [ relation_get('https_keystone', rid=r_id, unit=unit), relation_get('ssl_cert', rid=r_id, unit=unit), relation_get('ssl_key', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit), - ]: + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): return True return False @@ -176,5 +179,5 @@ def canonical_url(configs, vip_setting='vip'): if is_clustered(): addr = config_get(vip_setting) else: - addr = get_unit_hostname() + addr = unit_get('private-address') return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index f146e0b..2228c9b 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -6,6 +6,12 @@ from subprocess import ( check_call ) + +from charmhelpers.core.host import ( + apt_install, + filter_installed_packages, +) + from charmhelpers.core.hookenv import ( config, local_unit, @@ -14,6 +20,8 @@ from charmhelpers.core.hookenv import ( relation_ids, related_units, unit_get, + unit_private_ip, + WARNING, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -29,6 +37,10 @@ from charmhelpers.contrib.hahelpers.apache import ( get_ca_cert, ) +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, +) + CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -57,26 +69,39 @@ class OSContextGenerator(object): class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] + def __init__(self, database=None, user=None, relation_prefix=None): + ''' + Allows inspecting relation for settings prefixed with relation_prefix. + This is useful for parsing access for multiple databases returned via + the shared-db interface (eg, nova_password, quantum_password) + ''' + self.relation_prefix = relation_prefix + self.database = database + self.user = user + def __call__(self): - log('Generating template context for shared-db') - conf = config() - try: - database = conf['database'] - username = conf['database-user'] - except KeyError as e: + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: log('Could not generate shared_db context. ' - 'Missing required charm config options: %s.' % e) + 'Missing required charm config options. 
' + '(database name and user)') raise OSContextError ctxt = {} + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + for rid in relation_ids('shared-db'): for unit in related_units(rid): + passwd = relation_get(password_setting, rid=rid, unit=unit) ctxt = { 'database_host': relation_get('db_host', rid=rid, unit=unit), - 'database': database, - 'database_user': username, - 'database_password': relation_get('password', rid=rid, - unit=unit) + 'database': self.database, + 'database_user': self.user, + 'database_password': passwd, } if not context_complete(ctxt): return {} @@ -206,6 +231,29 @@ class HAProxyContext(OSContextGenerator): return {} +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + ''' + Obtains the glance API server from the image-service relation. Useful + in nova and cinder (currently). + ''' + log('Generating template context for image-service.') + rids = relation_ids('image-service') + if not rids: + return {} + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + log('ImageService context is incomplete. ' + 'Missing required relation data.') + return {} + + class ApacheSSLContext(OSContextGenerator): """ Generates a context for an apache vhost configuration that configures @@ -269,3 +317,90 @@ class ApacheSSLContext(OSContextGenerator): portmap = (int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) return ctxt + + +class NeutronContext(object): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute( + self.plugin, 'packages', self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + '''Install but do not upgrade required plugin packages''' + required = filter_installed_packages(self.packages) + if required: + apt_install(required, fatal=True) + + def _save_flag_file(self): + if self.network_manager == 'quantum': + _file = '/etc/nova/quantum_plugin.conf' + else: + _file = '/etc/nova/neutron_plugin.conf' + with open(_file, 'wb') as out: + out.write(self.plugin + '\n') + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + + ovs_ctxt = { + 'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + } + + return ovs_ctxt + + def __call__(self): + self._ensure_packages() + + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = {'network_manager': self.network_manager} + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + + self._save_flag_file() + return ctxt + + +class OSConfigFlagContext(OSContextGenerator): + ''' + Responsible adding user-defined config-flags in charm config to a + to a template context. 
+ ''' + def __call__(self): + config_flags = config('config-flags') + if not config_flags or config_flags in ['None', '']: + return {} + config_flags = config_flags.split(',') + flags = {} + for flag in config_flags: + if '=' not in flag: + log('Improperly formatted config-flag, expected k=v ' + 'got %s' % flag, level=WARNING) + continue + k, v = flag.split('=') + flags[k.strip()] = v + ctxt = {'user_config_flags': flags} + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index 1d8ca3b..49d07c8 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -3,9 +3,9 @@ # cinder configuration file maintained by Juju # local changes may be overwritten. ############################################################################### -{% if auth %} +{% if auth -%} [global] auth_supported = {{ auth }} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} -{% endif %} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index b184cd4..a1694e4 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -25,7 +25,7 @@ listen stats :8888 stats uri / stats auth admin:password -{% if units %} +{% if units -%} {% for service, ports in service_ports.iteritems() -%} listen {{ service }} 0.0.0.0:{{ ports[0] }} balance roundrobin @@ -33,5 +33,5 @@ listen {{ service }} 0.0.0.0:{{ ports[0] }} {% for unit, address in units.iteritems() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} -{% endfor %} -{% endif %} +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index e833a71..e02dc75 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -1,5 +1,5 @@ -{% if endpoints %} -{% for ext, int in endpoints %} +{% if endpoints -%} +{% for ext, int in endpoints -%} Listen {{ ext }} NameVirtualHost *:{{ ext }} @@ -19,5 +19,5 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor %} -{% endif %} +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index c555cc6..0b53443 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -11,10 +11,10 @@ from charmhelpers.core.hookenv import ( from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: - from jinja2 import FileSystemLoader, ChoiceLoader, Environment + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: # python-jinja2 may not be installed yet, or we're running unittests. 
- FileSystemLoader = ChoiceLoader = Environment = None + FileSystemLoader = ChoiceLoader = Environment = exceptions = None class OSConfigException(Exception): @@ -220,9 +220,24 @@ class OSConfigRenderer(object): log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException ctxt = self.templates[config_file].context() + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking for it + # using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from %s by %s or %s.' % + (self.templates_dir, os.path.basename(config_file), _tmpl), + level=ERROR) + raise e + log('Rendering from template: %s' % _tmpl, level=INFO) - template = self._get_template(_tmpl) return template.render(ctxt) def write(self, config_file): @@ -232,8 +247,12 @@ class OSConfigRenderer(object): if config_file not in self.templates: log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException + + _out = self.render(config_file) + with open(config_file, 'wb') as out: - out.write(self.render(config_file)) + out.write(_out) + log('Wrote template %s.' % config_file, level=INFO) def write_all(self): diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index cff765d..677fa1d 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -7,18 +7,17 @@ from collections import OrderedDict import apt_pkg as apt import subprocess import os -import socket import sys from charmhelpers.core.hookenv import ( config, log as juju_log, - unit_get, + charm_dir, ) from charmhelpers.core.host import ( lsb_release, - apt_install + apt_install, ) CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" @@ -164,6 +163,25 @@ def get_os_version_package(pkg, fatal=True): #error_out(e) +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. + ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + def import_key(keyid): cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ "--recv-keys %s" % keyid @@ -245,8 +263,9 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): updated config information necessary to perform health checks or service changes. 
""" - unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-') - juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path) + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wb') as rc_script: rc_script.write( "#!/bin/bash\n") @@ -271,23 +290,3 @@ def openstack_upgrade_available(package): available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 - - -def get_host_ip(hostname=None): - hostname = hostname or unit_get('private-address') - try: - import dns.resolver - except ImportError: - apt_install('python-dnspython') - import dns.resolver - - try: - # Test to see if already an IPv4 address - socket.inet_aton(hostname) - return hostname - except socket.error: - # This may throw an NXDOMAIN exception; in which case - # things are badly broken so just let it kill the hook - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 6550b63..4426d00 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -9,12 +9,14 @@ import apt_pkg import os import pwd import grp +import random +import string import subprocess import hashlib from collections import OrderedDict -from hookenv import log, execution_environment +from hookenv import log def service_start(service_name): @@ -86,36 +88,33 @@ def add_user_to_group(username, group): def rsync(from_path, to_path, flags='-r', options=None): """Replicate the contents of a path""" - context = execution_environment() options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] cmd.extend(options) - cmd.append(from_path.format(**context)) - cmd.append(to_path.format(**context)) + cmd.append(from_path) + cmd.append(to_path) log(" ".join(cmd)) return subprocess.check_output(cmd).strip() def symlink(source, destination): """Create a symbolic link""" - context = execution_environment() log("Symlinking {} as {}".format(source, destination)) cmd = [ 'ln', '-sf', - source.format(**context), - destination.format(**context) + source, + destination, ] subprocess.check_call(cmd) def mkdir(path, owner='root', group='root', perms=0555, force=False): """Create a directory""" - context = execution_environment() log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) - uid = pwd.getpwnam(owner.format(**context)).pw_uid - gid = grp.getgrnam(group.format(**context)).gr_gid + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) if os.path.exists(realpath): if force and not os.path.isdir(realpath): @@ -270,3 +269,15 @@ def lsb_release(): k, v = l.split('=') d[k.strip()] = v.strip() return d + + +def pwgen(length=None): + '''Generate a random pasword.''' + if length is None: + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + random_chars = [ + random.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) From 45617ff76f31955e1470e06a0c9a4d0be95a6afa Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 16 Aug 2013 13:38:32 -0700 Subject: [PATCH 12/20] Update tests post-helper sync. 
--- hooks/swift_storage_context.py | 9 +++------ hooks/swift_storage_utils.py | 4 ++-- unit_tests/test_swift_storage_context.py | 6 +++--- unit_tests/test_swift_storage_utils.py | 4 ++-- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/hooks/swift_storage_context.py b/hooks/swift_storage_context.py index c6d5889..9422234 100644 --- a/hooks/swift_storage_context.py +++ b/hooks/swift_storage_context.py @@ -6,10 +6,7 @@ from charmhelpers.core.hookenv import ( related_units, relation_get, relation_ids, -) - -from charmhelpers.contrib.openstack.utils import ( - get_host_ip, + unit_private_ip, ) from charmhelpers.contrib.openstack.context import ( @@ -53,7 +50,7 @@ class RsyncContext(OSContextGenerator): out.write(_m.sub('RSYNC_ENABLE=true', default)) def __call__(self): - local_ip = get_host_ip() + local_ip = unit_private_ip() self.enable_rsyncd() return { 'local_ip': local_ip @@ -65,7 +62,7 @@ class SwiftStorageServerContext(OSContextGenerator): def __call__(self): ctxt = { - 'local_ip': get_host_ip(), + 'local_ip': unit_private_ip(), 'account_server_port': config('account-server-port'), 'container_server_port': config('container-server-port'), 'object_server_port': config('object-server-port'), diff --git a/hooks/swift_storage_utils.py b/hooks/swift_storage_utils.py index 84bbe21..8250dc1 100644 --- a/hooks/swift_storage_utils.py +++ b/hooks/swift_storage_utils.py @@ -27,6 +27,7 @@ from charmhelpers.core.host import ( from charmhelpers.core.hookenv import ( config, log, + unit_private_ip, ERROR, ) @@ -36,7 +37,6 @@ from charmhelpers.contrib.storage.linux.utils import ( from charmhelpers.contrib.openstack.utils import ( configure_installation_source, - get_host_ip, get_os_codename_install_source, get_os_codename_package, save_script_rc as _save_script_rc, @@ -197,7 +197,7 @@ def fetch_swift_rings(rings_url): def save_script_rc(): env_vars = {} - ip = get_host_ip() + ip = unit_private_ip() for server in ['account', 'container', 'object']: port = config('%s-server-port' % server) url = 'http://%s:%s/recon/diskusage|"mounted":true' % (ip, port) diff --git a/unit_tests/test_swift_storage_context.py b/unit_tests/test_swift_storage_context.py index c222361..d4d11f0 100644 --- a/unit_tests/test_swift_storage_context.py +++ b/unit_tests/test_swift_storage_context.py @@ -12,7 +12,7 @@ TO_PATCH = [ 'related_units', 'relation_get', 'relation_ids', - 'get_host_ip', + 'unit_private_ip', ] @@ -40,7 +40,7 @@ class SwiftStorageContextTests(CharmTestCase): self.assertEquals(ctxt(), {'swift_hash': 'fooooo'}) def test_rsync_context(self): - self.get_host_ip.return_value = '10.0.0.5' + self.unit_private_ip.return_value = '10.0.0.5' ctxt = swift_context.RsyncContext() ctxt.enable_rsyncd = MagicMock() ctxt.enable_rsyncd.return_value = True @@ -58,7 +58,7 @@ class SwiftStorageContextTests(CharmTestCase): _file.write.assert_called_with('RSYNC_ENABLE=true\n') def test_swift_storage_server_context(self): - self.get_host_ip.return_value = '10.0.0.5' + self.unit_private_ip.return_value = '10.0.0.5' self.test_config.set('account-server-port', '500') self.test_config.set('object-server-port', '501') self.test_config.set('container-server-port', '502') diff --git a/unit_tests/test_swift_storage_utils.py b/unit_tests/test_swift_storage_utils.py index ffc8483..4152170 100644 --- a/unit_tests/test_swift_storage_utils.py +++ b/unit_tests/test_swift_storage_utils.py @@ -20,7 +20,7 @@ TO_PATCH = [ 'is_block_device', 'get_os_codename_package', 'get_os_codename_install_source', - 'get_host_ip', + 'unit_private_ip', 
'service_restart', '_save_script_rc', ] @@ -168,7 +168,7 @@ class SwiftStorageUtilsTests(CharmTestCase): self.assertEquals(ex, result) def test_save_script_rc(self): - self.get_host_ip.return_value = '10.0.0.1' + self.unit_private_ip.return_value = '10.0.0.1' swift_utils.save_script_rc() self._save_script_rc.assert_called_with(**SCRIPT_RC_ENV) From 96eb0830241f9ee3e880b42f44ff9c4ee560078d Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 16 Aug 2013 14:00:39 -0700 Subject: [PATCH 13/20] Checkin new helper. --- .../charmhelpers/contrib/openstack/neutron.py | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 hooks/charmhelpers/contrib/openstack/neutron.py diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py new file mode 100644 index 0000000..37b5a7b --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -0,0 +1,108 @@ +# Various utilies for dealing with Neutron and the renaming from Quantum. + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import os_release + + +# legacy +def quantum_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' + 'OVSQuantumPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron')], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': ['quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'services': [], + 'packages': ['quantum-plugin-nicira'], + } + } + + +def neutron_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron')], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': ['neutron-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' + 'NeutronPlugin.NvpPluginV2', + 'services': [], + 'packages': ['neutron-plugin-nicira'], + } + } + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log('Error: Network manager does not support plugins.') + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatability (eg, deploying H with network-manager=quantum, + upgrading from G). 
+ ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' From ecb87dd743db1b53b002890257c8795794f47b52 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 19 Aug 2013 16:42:47 -0700 Subject: [PATCH 14/20] Update charm-helpers sync source to temporary ~openstack-charmers helper branch. --- charm-helpers.yaml | 2 +- hooks/charmhelpers/contrib/hahelpers/ceph.py | 23 +++++++++++++++---- .../charmhelpers/contrib/openstack/context.py | 1 + 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/charm-helpers.yaml b/charm-helpers.yaml index 7d9aa62..ddc1575 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/to_upstream destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/hahelpers/ceph.py b/hooks/charmhelpers/contrib/hahelpers/ceph.py index fb1b8b9..8ff029a 100644 --- a/hooks/charmhelpers/contrib/hahelpers/ceph.py +++ b/hooks/charmhelpers/contrib/hahelpers/ceph.py @@ -11,6 +11,7 @@ import commands import os import shutil +import time from subprocess import ( check_call, @@ -24,6 +25,7 @@ from charmhelpers.core.hookenv import ( related_units, log, INFO, + ERROR ) from charmhelpers.core.host import ( @@ -179,11 +181,22 @@ def filesystem_mounted(fs): return fs in [f for m, f in mounts()] -def make_filesystem(blk_device, fstype='ext4'): - log('ceph: Formatting block device %s as filesystem %s.' % - (blk_device, fstype), level=INFO) - cmd = ['mkfs', '-t', fstype, blk_device] - check_call(cmd) +def make_filesystem(blk_device, fstype='ext4', timeout=10): + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('ceph: gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + log('ceph: waiting for block device %s to appear' % blk_device, + level=INFO) + count += 1 + time.sleep(1) + else: + log('ceph: Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 2228c9b..b2d4085 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -294,6 +294,7 @@ class ApacheSSLContext(OSContextGenerator): if ca_cert: with open(CA_CERT_PATH, 'w') as ca_out: ca_out.write(b64decode(ca_cert)) + check_call(['update-ca-certificates']) def __call__(self): if isinstance(self.external_ports, basestring): From df655714eb61e401f6390f3eba2cc7f2075cb18d Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 23 Sep 2013 12:01:06 -0700 Subject: [PATCH 15/20] Sync helpers, update accordingly. 
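
This sync moves the package helpers out of charmhelpers.core.host into the new charmhelpers.fetch module (now listed in charm-helpers.yaml), fixes the Makefile sync target to point at charm-helpers.yaml, and picks up the reworked AMQP/Ceph contexts plus the DNS helpers (is_ip, ns_query, get_host_ip, get_hostname) in openstack.utils.

Charm code only needs its imports moved; a sketch of the change callers make, mirroring the swift_storage_hooks.py hunk below:

    # previously:
    #   from charmhelpers.core.host import apt_install, apt_update
    from charmhelpers.fetch import apt_install, apt_update
    from charmhelpers.core.host import restart_on_change
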
--- Makefile | 2 +- charm-helpers.yaml | 1 + .../charmhelpers/contrib/openstack/context.py | 64 +++++++++++----- .../contrib/openstack/templating.py | 2 +- hooks/charmhelpers/contrib/openstack/utils.py | 73 ++++++++++++++++++- hooks/charmhelpers/core/host.py | 56 ++------------ hooks/swift_storage_hooks.py | 8 +- hooks/swift_storage_utils.py | 4 +- 8 files changed, 130 insertions(+), 80 deletions(-) diff --git a/Makefile b/Makefile index 77c481b..4e4a4f0 100644 --- a/Makefile +++ b/Makefile @@ -11,4 +11,4 @@ test: @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests sync: - @charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers.yaml diff --git a/charm-helpers.yaml b/charm-helpers.yaml index ddc1575..a91f23b 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -4,6 +4,7 @@ include: - core - contrib.openstack|inc=* - contrib.storage + - fetch - contrib.hahelpers: - apache - ceph diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index b2d4085..92924e3 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -7,7 +7,7 @@ from subprocess import ( ) -from charmhelpers.core.host import ( +from charmhelpers.fetch import ( apt_install, filter_installed_packages, ) @@ -48,6 +48,13 @@ class OSContextError(Exception): pass +def ensure_packages(packages): + '''Install but do not upgrade required plugin packages''' + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + def context_complete(ctxt): _missing = [] for k, v in ctxt.iteritems(): @@ -103,9 +110,9 @@ class SharedDBContext(OSContextGenerator): 'database_user': self.user, 'database_password': passwd, } - if not context_complete(ctxt): - return {} - return ctxt + if context_complete(ctxt): + return ctxt + return {} class IdentityServiceContext(OSContextGenerator): @@ -134,9 +141,9 @@ class IdentityServiceContext(OSContextGenerator): 'service_protocol': 'http', 'auth_protocol': 'http', } - if not context_complete(ctxt): - return {} - return ctxt + if context_complete(ctxt): + return ctxt + return {} class AMQPContext(OSContextGenerator): @@ -157,20 +164,30 @@ class AMQPContext(OSContextGenerator): for rid in relation_ids('amqp'): for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): - rabbitmq_host = relation_get('vip', rid=rid, unit=unit) + ctxt['clustered'] = True + ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, + unit=unit) else: - rabbitmq_host = relation_get('private-address', - rid=rid, unit=unit) - ctxt = { - 'rabbitmq_host': rabbitmq_host, + ctxt['rabbitmq_host'] = relation_get('private-address', + rid=rid, unit=unit) + ctxt.update({ 'rabbitmq_user': username, 'rabbitmq_password': relation_get('password', rid=rid, unit=unit), 'rabbitmq_virtual_host': vhost, - } + }) + if context_complete(ctxt): + # Sufficient information found = break out! 
+ break + # Used for active/active rabbitmq >= grizzly + ctxt['rabbitmq_hosts'] = [] + for unit in related_units(rid): + ctxt['rabbitmq_hosts'].append(relation_get('private-address', + rid=rid, unit=unit)) if not context_complete(ctxt): return {} - return ctxt + else: + return ctxt class CephContext(OSContextGenerator): @@ -178,21 +195,33 @@ class CephContext(OSContextGenerator): def __call__(self): '''This generates context for /etc/ceph/ceph.conf templates''' - log('Generating tmeplate context for ceph') + if not relation_ids('ceph'): + return {} + log('Generating template context for ceph') mon_hosts = [] auth = None + key = None for rid in relation_ids('ceph'): for unit in related_units(rid): mon_hosts.append(relation_get('private-address', rid=rid, unit=unit)) auth = relation_get('auth', rid=rid, unit=unit) + key = relation_get('key', rid=rid, unit=unit) ctxt = { 'mon_hosts': ' '.join(mon_hosts), 'auth': auth, + 'key': key, } + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + if not context_complete(ctxt): return {} + + ensure_packages(['ceph-common']) + return ctxt @@ -341,10 +370,7 @@ class NeutronContext(object): return None def _ensure_packages(self): - '''Install but do not upgrade required plugin packages''' - required = filter_installed_packages(self.packages) - if required: - apt_install(required, fatal=True) + ensure_packages(self.packages) def _save_flag_file(self): if self.network_manager == 'quantum': diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 0b53443..4595778 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,6 +1,6 @@ import os -from charmhelpers.core.host import apt_install +from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( log, diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 677fa1d..39f627d 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,12 +1,12 @@ #!/usr/bin/python # Common python helper functions used for OpenStack charms. - from collections import OrderedDict import apt_pkg as apt import subprocess import os +import socket import sys from charmhelpers.core.hookenv import ( @@ -17,6 +17,9 @@ from charmhelpers.core.hookenv import ( from charmhelpers.core.host import ( lsb_release, +) + +from charmhelpers.fetch import ( apt_install, ) @@ -130,7 +133,7 @@ def get_os_codename_package(package, fatal=True): e = 'Could not determine version of uninstalled package: %s' % package error_out(e) - vers = apt.UpstreamVersion(pkg.current_ver.ver_str) + vers = apt.upstream_version(pkg.current_ver.ver_str) try: if 'swift' in pkg.name: @@ -290,3 +293,69 @@ def openstack_upgrade_available(package): available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 + + +def is_ip(address): + """ + Returns True if address is a valid IP address. 
+ """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, basestring): + rtype = 'A' + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + return ns_query(hostname) + + +def get_hostname(address): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if not is_ip(address): + return address + + try: + import dns.reversename + except ImportError: + apt_install('python-dnspython') + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + + # strip trailing . + if result.endswith('.'): + return result[:-1] + return result diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 4426d00..1a63bf8 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -5,7 +5,6 @@ # Nick Moffitt # Matthew Wedgwood -import apt_pkg import os import pwd import grp @@ -20,20 +19,22 @@ from hookenv import log def service_start(service_name): - service('start', service_name) + return service('start', service_name) def service_stop(service_name): - service('stop', service_name) + return service('stop', service_name) def service_restart(service_name): - service('restart', service_name) + return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): - if not service('reload', service_name) and restart_on_failure: - service('restart', service_name) + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result def service(action, service_name): @@ -136,49 +137,6 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - apt_pkg.init() - cache = apt_pkg.Cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, basestring): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - def mount(device, mountpoint, options=None, persist=False): '''Mount a filesystem''' cmd_args = ['mount'] diff --git a/hooks/swift_storage_hooks.py b/hooks/swift_storage_hooks.py index 64f89c1..7ff7ebe 100755 --- a/hooks/swift_storage_hooks.py +++ 
b/hooks/swift_storage_hooks.py @@ -23,12 +23,8 @@ from charmhelpers.core.hookenv import ( relation_set, ) -from charmhelpers.core.host import ( - apt_install, - apt_update, - restart_on_change, -) - +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.host import restart_on_change from charmhelpers.contrib.openstack.utils import ( configure_installation_source, diff --git a/hooks/swift_storage_utils.py b/hooks/swift_storage_utils.py index 8250dc1..8f90158 100644 --- a/hooks/swift_storage_utils.py +++ b/hooks/swift_storage_utils.py @@ -16,9 +16,9 @@ from swift_storage_context import ( RsyncContext, ) +from charmhelpers.fetch import apt_install, apt_update + from charmhelpers.core.host import ( - apt_install, - apt_update, mkdir, mount, service_restart, From b0676f9f615045a3f1fb54c57f9f1b3f8db194ee Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 23 Sep 2013 12:04:09 -0700 Subject: [PATCH 16/20] Checkin new helpers. --- .../contrib/storage/linux/ceph.py | 358 ++++++++++++++++++ hooks/charmhelpers/fetch/__init__.py | 209 ++++++++++ hooks/charmhelpers/fetch/archiveurl.py | 48 +++ hooks/charmhelpers/fetch/bzrurl.py | 49 +++ 4 files changed, 664 insertions(+) create mode 100644 hooks/charmhelpers/contrib/storage/linux/ceph.py create mode 100644 hooks/charmhelpers/fetch/__init__.py create mode 100644 hooks/charmhelpers/fetch/archiveurl.py create mode 100644 hooks/charmhelpers/fetch/bzrurl.py diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 0000000..10e665d --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,358 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os +import shutil +import json +import time + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, + WARNING, + ERROR +) + +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) + +from charmhelpers.fetch import ( + apt_install, +) + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] + auth supported = {auth} + keyring = {keyring} + mon host = {mon_hosts} +""" + + +def install(): + ''' Basic Ceph client installation ''' + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + ''' Check to see if a RADOS block device exists ''' + try: + out = check_output(['rbd', 'list', '--id', service, + '--pool', pool]) + except CalledProcessError: + return False + else: + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + ''' Create a new RADOS block device ''' + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + ''' Check to see if a RADOS pool already exists ''' + try: + out = check_output(['rados', '--id', service, 'lspools']) + except CalledProcessError: + return False + else: + return name in out + + +def get_osds(): + ''' + Return a list of all Ceph Object Storage Daemons + currently in the cluster + ''' + return json.loads(check_output(['ceph', 'osd', 'ls', '--format=json'])) + 
+ +def create_pool(service, name, replicas=2): + ''' Create a new RADOS pool ''' + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + # Calculate the number of placement groups based + # on upstream recommended best practices. + pgnum = (len(get_osds()) * 100 / replicas) + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'create', + name, pgnum + ] + check_call(cmd) + cmd = [ + 'ceph', '--id', service, + 'osd', 'set', name, + 'size', replicas + ] + check_call(cmd) + + +def delete_pool(service, name): + ''' Delete a RADOS pool from ceph ''' + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'delete', + name, '--yes-i-really-really-mean-it' + ] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + ''' Create a new Ceph keyring containing key''' + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + return + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.{}'.format(service), + '--add-key={}'.format(key) + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + ''' Create a file containing key ''' + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + return + with open(keyfile, 'w') as fd: + fd.write(key) + log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + +def get_ceph_nodes(): + ''' Query named relation 'ceph' to detemine current nodes ''' + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth): + ''' Perform basic configuration of Ceph ''' + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)))) + modprobe('rbd') + + +def image_mapped(name): + ''' Determine whether a RADOS block device is mapped locally ''' + try: + out = check_output(['rbd', 'showmapped']) + except CalledProcessError: + return False + else: + return name in out + + +def map_block_storage(service, pool, image): + ''' Map a RADOS block device for local use ''' + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + ''' Determine whether a filesytems is already mounted ''' + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + ''' Make a new filesystem on the specified block device ''' + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('ceph: gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + log('ceph: waiting for block device %s to appear' % blk_device, + level=INFO) + count += 1 + time.sleep(1) + else: + log('ceph: Formatting block device %s as filesystem %s.' 
% + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + ''' Migrate data in data_src_dst to blk_device and then remount ''' + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +# TODO: re-use +def modprobe(module): + ''' Load a kernel module and configure for auto-load on reboot ''' + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def copy_files(src, dst, symlinks=False, ignore=None): + ''' Copy files from src to dst ''' + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[]): + """ + NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool {}.'.format(pool)) + create_pool(service, pool) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image ({}).'.format(rbd_img)) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('ceph: Stopping services {} prior to migrating data.' + .format(svc)) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('ceph: Starting service {} after migrating data.' + .format(svc)) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None): + ''' + Ensures a ceph keyring is created for a named service + and optionally ensures user and group ownership. + + Returns False if no ceph key is available in relation state. 
+ ''' + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + if not key: + return False + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + return True diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 0000000..b2f9646 --- /dev/null +++ b/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,209 @@ +import importlib +from yaml import safe_load +from charmhelpers.core.host import ( + lsb_release +) +from urlparse import ( + urlparse, + urlunparse, +) +import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) +import apt_pkg + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '-y', 'purge'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def add_source(source, key=None): + if ((source.startswith('ppa:') or + source.startswith('http:'))): + subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + if key: + subprocess.check_call(['apt-key', 'import', key]) + + +class SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. 
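(Editorial aside, not part of the patch: a sketch of the typical call site for configure_sources(), assuming the default config option names shown above; the package list is hypothetical.)

    # Illustrative only.
    from charmhelpers.fetch import (
        configure_sources,
        apt_install,
        filter_installed_packages,
    )

    def install():
        # Adds every entry from 'install_sources' (ppa:, http:, cloud:<pocket>
        # or 'proposed') with its matching 'install_keys' entry, then runs
        # apt-get update because update=True.
        configure_sources(update=True)
        apt_install(filter_installed_packages(['swift', 'xfsprogs']),
                    fatal=True)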
+ """ + sources = safe_load(config(sources_var)) + keys = safe_load(config(keys_var)) + if isinstance(sources, basestring) and isinstance(keys, basestring): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this modules submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr(importlib.import_module(package), classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be ommitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format(handler_name)) + return plugin_list diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 0000000..e35b8f1 --- /dev/null +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,48 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propogate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + def install(self, source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + raise UnhandledSource(e.reason) + except OSError as e: + raise UnhandledSource(e.strerror) + return extract(dld_file) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py new file mode 100644 index 0000000..c348b4b --- /dev/null +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -0,0 +1,49 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from bzrlib.branch import Branch +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + if url_parts.scheme == "lp": + from bzrlib.plugin import load_plugins + load_plugins() + try: + remote_branch = 
Branch.open(source) + remote_branch.bzrdir.sprout(dest).open_branch() + except Exception as e: + raise e + + def install(self, source): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir + From 8a8da35a05b6176668734216950fdfd44c046473 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 27 Sep 2013 17:33:06 +0100 Subject: [PATCH 17/20] Add execd preinstall, tidy unit testing --- charm-helpers.yaml | 2 +- .../templates/openstack_https_frontend.conf | 23 +++++++++ .../contrib/storage/linux/ceph.py | 13 ++--- hooks/charmhelpers/payload/__init__.py | 1 + hooks/charmhelpers/payload/execd.py | 50 +++++++++++++++++++ hooks/swift_storage_hooks.py | 2 + unit_tests/test_swift_storage_context.py | 6 +-- unit_tests/test_swift_storage_relations.py | 10 ++-- unit_tests/test_swift_storage_utils.py | 9 ++-- 9 files changed, 96 insertions(+), 20 deletions(-) create mode 100644 hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf create mode 100644 hooks/charmhelpers/payload/__init__.py create mode 100644 hooks/charmhelpers/payload/execd.py diff --git a/charm-helpers.yaml b/charm-helpers.yaml index a91f23b..868885c 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -7,5 +7,5 @@ include: - fetch - contrib.hahelpers: - apache - - ceph - cluster + - payload.execd diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf new file mode 100644 index 0000000..e02dc75 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -0,0 +1,23 @@ +{% if endpoints -%} +{% for ext, int in endpoints -%} +Listen {{ ext }} +NameVirtualHost *:{{ ext }} + + ServerName {{ private_address }} + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + + + Order deny,allow + Allow from all + + + Order allow,deny + Allow from all + +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 10e665d..9bb9530 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -97,12 +97,13 @@ def pool_exists(service, name): return name in out -def get_osds(): +def get_osds(service): ''' Return a list of all Ceph Object Storage Daemons currently in the cluster ''' - return json.loads(check_output(['ceph', 'osd', 'ls', '--format=json'])) + return json.loads(check_output(['ceph', '--id', service, + 'osd', 'ls', '--format=json'])) def create_pool(service, name, replicas=2): @@ -113,17 +114,17 @@ def create_pool(service, name, replicas=2): return # Calculate the number of placement groups based # on upstream recommended best practices. 
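(Editorial aside, not part of the hunk below: a worked example of the placement-group heuristic, using a hypothetical cluster size.)

    # With 6 OSDs reported by get_osds(service) and the default of 2 replicas:
    #   pgnum = 6 * 100 / 2 = 300
    # The hunk that follows also wraps pgnum and replicas in str(), since
    # subprocess arguments must be strings, and corrects the replica-count
    # command to 'ceph osd pool set <name> size <n>'.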
- pgnum = (len(get_osds()) * 100 / replicas) + pgnum = (len(get_osds(service)) * 100 / replicas) cmd = [ 'ceph', '--id', service, 'osd', 'pool', 'create', - name, pgnum + name, str(pgnum) ] check_call(cmd) cmd = [ 'ceph', '--id', service, - 'osd', 'set', name, - 'size', replicas + 'osd', 'pool', 'set', name, + 'size', str(replicas) ] check_call(cmd) diff --git a/hooks/charmhelpers/payload/__init__.py b/hooks/charmhelpers/payload/__init__.py new file mode 100644 index 0000000..fc9fbc0 --- /dev/null +++ b/hooks/charmhelpers/payload/__init__.py @@ -0,0 +1 @@ +"Tools for working with files injected into a charm just before deployment." diff --git a/hooks/charmhelpers/payload/execd.py b/hooks/charmhelpers/payload/execd.py new file mode 100644 index 0000000..6476a75 --- /dev/null +++ b/hooks/charmhelpers/payload/execd.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_call(submodule_path, shell=True, stderr=stderr) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. 
Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hooks/swift_storage_hooks.py b/hooks/swift_storage_hooks.py index 7ff7ebe..47f7368 100755 --- a/hooks/swift_storage_hooks.py +++ b/hooks/swift_storage_hooks.py @@ -25,6 +25,7 @@ from charmhelpers.core.hookenv import ( from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.host import restart_on_change +from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.utils import ( configure_installation_source, @@ -37,6 +38,7 @@ CONFIGS = register_configs() @hooks.hook() def install(): + execd_preinstall() configure_installation_source(config('openstack-origin')) apt_update() apt_install(PACKAGES, fatal=True) diff --git a/unit_tests/test_swift_storage_context.py b/unit_tests/test_swift_storage_context.py index 7c75dcd..6d165fa 100644 --- a/unit_tests/test_swift_storage_context.py +++ b/unit_tests/test_swift_storage_context.py @@ -1,9 +1,7 @@ from mock import MagicMock +from test_utils import CharmTestCase, patch_open -from unit_tests.test_utils import CharmTestCase, patch_open - - -import hooks.swift_storage_context as swift_context +import swift_storage_context as swift_context TO_PATCH = [ diff --git a/unit_tests/test_swift_storage_relations.py b/unit_tests/test_swift_storage_relations.py index c208c29..a373c29 100644 --- a/unit_tests/test_swift_storage_relations.py +++ b/unit_tests/test_swift_storage_relations.py @@ -1,17 +1,17 @@ from mock import patch, MagicMock -from unit_tests.test_utils import CharmTestCase +from test_utils import CharmTestCase -import hooks.swift_storage_utils as utils +import swift_storage_utils as utils _reg = utils.register_configs utils.register_configs = MagicMock() -import hooks.swift_storage_hooks as hooks +import swift_storage_hooks as hooks utils.register_configs = _reg -from hooks.swift_storage_utils import PACKAGES +from swift_storage_utils import PACKAGES TO_PATCH = [ 'CONFIGS', @@ -35,6 +35,7 @@ TO_PATCH = [ 'save_script_rc', 'setup_storage', 'register_configs', + 'execd_preinstall' ] @@ -55,6 +56,7 @@ class SwiftStorageRelationsTests(CharmTestCase): self.apt_install.assert_called_with(PACKAGES, fatal=True) self.setup_storage.assert_called() + self.execd_preinstall.assert_called() def test_config_changed_no_upgrade_available(self): self.openstack_upgrade_available.return_value = False diff --git a/unit_tests/test_swift_storage_utils.py b/unit_tests/test_swift_storage_utils.py index 4152170..04d0587 100644 --- a/unit_tests/test_swift_storage_utils.py +++ b/unit_tests/test_swift_storage_utils.py @@ -1,8 +1,7 @@ from mock import call, patch, MagicMock -from unit_tests.test_utils import CharmTestCase, patch_open +from test_utils import CharmTestCase, patch_open - -import hooks.swift_storage_utils as swift_utils +import swift_storage_utils as swift_utils TO_PATCH = [ @@ -172,7 +171,7 @@ class SwiftStorageUtilsTests(CharmTestCase): swift_utils.save_script_rc() self._save_script_rc.assert_called_with(**SCRIPT_RC_ENV) - @patch('hooks.charmhelpers.contrib.openstack.templating.OSConfigRenderer') + @patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer') def test_register_configs_pre_install(self, renderer): self.get_os_codename_package.return_value = None swift_utils.register_configs() @@ -182,7 +181,7 @@ class 
SwiftStorageUtilsTests(CharmTestCase): @patch.object(swift_utils, 'SwiftStorageContext') @patch.object(swift_utils, 'RsyncContext') @patch.object(swift_utils, 'SwiftStorageServerContext') - @patch('hooks.charmhelpers.contrib.openstack.templating.OSConfigRenderer') + @patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer') def test_register_configs_post_install(self, renderer, swift, rsync, server): swift.return_value = 'swift_context' From 464bc4bc658fff02aea285b0f7a6d0dc1f79f31a Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 30 Sep 2013 14:29:32 +0100 Subject: [PATCH 18/20] bind to all network interfaces by default --- templates/account-server.conf | 2 +- templates/container-server.conf | 2 +- templates/object-server.conf | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/account-server.conf b/templates/account-server.conf index 98fd13a..0aac9b6 100644 --- a/templates/account-server.conf +++ b/templates/account-server.conf @@ -1,5 +1,5 @@ [DEFAULT] -bind_ip = {{ local_ip }} +bind_ip = 0.0.0.0 bind_port = {{ account_server_port }} workers = 2 diff --git a/templates/container-server.conf b/templates/container-server.conf index 76da700..ba14048 100644 --- a/templates/container-server.conf +++ b/templates/container-server.conf @@ -1,5 +1,5 @@ [DEFAULT] -bind_ip = {{ local_ip }} +bind_ip = 0.0.0.0 bind_port = {{ container_server_port }} workers = 2 diff --git a/templates/object-server.conf b/templates/object-server.conf index eac69d8..237be63 100644 --- a/templates/object-server.conf +++ b/templates/object-server.conf @@ -1,5 +1,5 @@ [DEFAULT] -bind_ip = {{ local_ip }} +bind_ip = 0.0.0.0 bind_port = {{ object_server_port }} workers = 2 From cf6e8b81df754d43f828f033c55fb985f916f113 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Oct 2013 14:39:58 +0100 Subject: [PATCH 19/20] Resync with charmhelper for new swift version --- .../charmhelpers/contrib/openstack/context.py | 2 +- .../charmhelpers/contrib/openstack/neutron.py | 12 ++++---- hooks/charmhelpers/contrib/openstack/utils.py | 28 +++++++++++-------- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 92924e3..ccd5526 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -370,7 +370,7 @@ class NeutronContext(object): return None def _ensure_packages(self): - ensure_packages(self.packages) + [ensure_packages(pkgs) for pkgs in self.packages] def _save_flag_file(self): if self.network_manager == 'quantum': diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 37b5a7b..d18d41e 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,15 +23,15 @@ def quantum_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': ['quantum-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms'], + 'packages': [['openvswitch-datapath-dkms'], + ['quantum-plugin-openvswitch-agent']], }, 'nvp': { 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 
'QuantumPlugin.NvpPluginV2', 'services': [], - 'packages': ['quantum-plugin-nicira'], + 'packages': [], } } @@ -49,15 +49,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': ['neutron-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms'], + 'packages': [['openvswitch-datapath-dkms'], + ['quantum-plugin-openvswitch-agent']], }, 'nvp': { 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'NeutronPlugin.NvpPluginV2', 'services': [], - 'packages': ['neutron-plugin-nicira'], + 'packages': [], } } diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 39f627d..62d207f 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -45,16 +45,17 @@ OPENSTACK_CODENAMES = OrderedDict([ ]) # The ugly duckling -SWIFT_CODENAMES = { - '1.4.3': 'diablo', - '1.4.8': 'essex', - '1.7.4': 'folsom', - '1.7.6': 'grizzly', - '1.7.7': 'grizzly', - '1.8.0': 'grizzly', - '1.9.0': 'havana', - '1.9.1': 'havana', -} +SWIFT_CODENAMES = OrderedDict([ + ('1.4.3', 'diablo'), + ('1.4.8', 'essex'), + ('1.7.4', 'folsom'), + ('1.8.0', 'grizzly'), + ('1.7.7', 'grizzly'), + ('1.7.6', 'grizzly'), + ('1.10.0', 'havana'), + ('1.9.1', 'havana'), + ('1.9.0', 'havana'), +]) def error_out(msg): @@ -137,8 +138,11 @@ def get_os_codename_package(package, fatal=True): try: if 'swift' in pkg.name: - vers = vers[:5] - return SWIFT_CODENAMES[vers] + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] else: vers = vers[:6] return OPENSTACK_CODENAMES[vers] From 888380abaf3f920e81d867729a087b220e183012 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 14 Oct 2013 18:32:42 -0700 Subject: [PATCH 20/20] Update charm-helpers config to point to upstream repo, re-sync helpers. --- charm-helpers.yaml | 2 +- .../charmhelpers/contrib/openstack/context.py | 89 +++++++++++++++++++ .../charmhelpers/contrib/openstack/neutron.py | 17 +++- 3 files changed, 103 insertions(+), 5 deletions(-) diff --git a/charm-helpers.yaml b/charm-helpers.yaml index 868885c..a43b2bf 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/to_upstream +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index ccd5526..13fdd65 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,3 +1,4 @@ +import json import os from base64 import b64decode @@ -21,6 +22,7 @@ from charmhelpers.core.hookenv import ( related_units, unit_get, unit_private_ip, + ERROR, WARNING, ) @@ -431,3 +433,90 @@ class OSConfigFlagContext(OSContextGenerator): flags[k.strip()] = v ctxt = {'user_config_flags': flags} return ctxt + + +class SubordinateConfigContext(OSContextGenerator): + """ + Responsible for inspecting relations to subordinates that + may be exporting required config via a json blob. + + The subordinate interface allows subordinates to export their + configuration requirements to the principle for multiple config + files and multiple serivces. 
Ie, a subordinate that has interfaces + to both glance and nova may export to following yaml blob as json: + + glance: + /etc/glance/glance-api.conf: + sections: + DEFAULT: + - [key1, value1] + /etc/glance/glance-registry.conf: + MYSECTION: + - [key2, value2] + nova: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [key3, value3] + + + It is then up to the principle charms to subscribe this context to + the service+config file it is interestd in. Configuration data will + be available in the template context, in glance's case, as: + ctxt = { + ... other context ... + 'subordinate_config': { + 'DEFAULT': { + 'key1': 'value1', + }, + 'MYSECTION': { + 'key2': 'value2', + }, + } + } + + """ + def __init__(self, service, config_file, interface): + """ + :param service : Service name key to query in any subordinate + data found + :param config_file : Service's config file to query sections + :param interface : Subordinate interface to inspect + """ + self.service = service + self.config_file = config_file + self.interface = interface + + def __call__(self): + ctxt = {} + for rid in relation_ids(self.interface): + for unit in related_units(rid): + sub_config = relation_get('subordinate_configuration', + rid=rid, unit=unit) + if sub_config and sub_config != '': + try: + sub_config = json.loads(sub_config) + except: + log('Could not parse JSON from subordinate_config ' + 'setting from %s' % rid, level=ERROR) + continue + + if self.service not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s service' % (rid, self.service)) + continue + + sub_config = sub_config[self.service] + if self.config_file not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s' % (rid, self.config_file)) + continue + + sub_config = sub_config[self.config_file] + for k, v in sub_config.iteritems(): + ctxt[k] = v + + if not ctxt: + ctxt['sections'] = {} + + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index d18d41e..a27ce95 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,5 +1,7 @@ # Various utilies for dealing with Neutron and the renaming from Quantum. 
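(Editorial aside, not part of the patch: a sketch of how a principal charm might use the SubordinateConfigContext added above; the interface name is hypothetical and the data mirrors the docstring's glance example.)

    # Illustrative only.
    from charmhelpers.contrib.openstack.context import SubordinateConfigContext

    sub_ctxt = SubordinateConfigContext(
        service='glance',
        config_file='/etc/glance/glance-api.conf',
        interface='glance-plugin',   # assumed interface name
    )
    # If a related subordinate publishes, on that interface,
    #   subordinate_configuration = json.dumps(
    #       {'glance': {'/etc/glance/glance-api.conf':
    #                   {'sections': {'DEFAULT': [['key1', 'value1']]}}}})
    # then sub_ctxt() returns roughly
    #   {'sections': {'DEFAULT': [['key1', 'value1']]}}
    # ready to be merged into the template context for that config file.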
+from subprocess import check_output + from charmhelpers.core.hookenv import ( config, log, @@ -9,6 +11,13 @@ from charmhelpers.core.hookenv import ( from charmhelpers.contrib.openstack.utils import os_release +def headers_package(): + """Ensures correct linux-headers for running kernel are installed, + for building DKMS package""" + kver = check_output(['uname', '-r']).strip() + return 'linux-headers-%s' % kver + + # legacy def quantum_plugins(): from charmhelpers.contrib.openstack import context @@ -23,7 +32,7 @@ def quantum_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [['openvswitch-datapath-dkms'], + 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], ['quantum-plugin-openvswitch-agent']], }, 'nvp': { @@ -49,7 +58,7 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [['openvswitch-datapath-dkms'], + 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], ['quantum-plugin-openvswitch-agent']], }, 'nvp': { @@ -76,7 +85,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): _plugin = plugins[plugin] except KeyError: log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) - raise + raise Exception try: return _plugin[attr] @@ -99,7 +108,7 @@ def network_manager(): if release in ['essex']: # E does not support neutron log('Neutron networking not supported in Essex.', level=ERROR) - raise + raise Exception elif release in ['folsom', 'grizzly']: # neutron is named quantum in F and G return 'quantum'
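
(Closing editorial aside, not part of the patch series: why 'packages' became a list of lists. NeutronContext._ensure_packages() now runs one install per inner list, so the kernel headers and the DKMS datapath module can be installed ahead of the agent package. A minimal sketch, with illustrative package handling only:)

    from subprocess import check_output

    def headers_package():
        # Matches the helper added above: headers for the running kernel.
        kver = check_output(['uname', '-r']).strip()
        return 'linux-headers-%s' % kver

    packages = [
        [headers_package(), 'openvswitch-datapath-dkms'],  # pass 1: kernel bits
        ['neutron-plugin-openvswitch-agent'],              # pass 2: the agent
    ]

    for pkgs in packages:
        # Stand-in for ensure_packages()/apt_install(); printed rather than
        # executed so the sketch stays side-effect free.
        print('apt-get install -y %s' % ' '.join(pkgs))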