From 9e3898ffd0458f8949feca3951d15ef516b22ddc Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 10 Dec 2014 15:45:40 +0000 Subject: [PATCH 1/4] Add contrib.python.packages to charm-helpers sync. --- charm-helpers.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charm-helpers.yaml b/charm-helpers.yaml index 7567bd0..ddc4939 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -7,3 +7,4 @@ include: - contrib.hahelpers - contrib.storage.linux - contrib.network.ip + - contrib.python.packages From 50b3d609b540e1debfcac31c6c75ac7cf1011740 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 10 Dec 2014 20:28:40 +0000 Subject: [PATCH 2/4] Sync charm-helpers. --- hooks/charmhelpers/__init__.pyc | Bin 0 -> 117 bytes .../charmhelpers/contrib/hahelpers/apache.py | 13 +- .../charmhelpers/contrib/hahelpers/cluster.py | 26 +- hooks/charmhelpers/contrib/network/ip.py | 241 ++++++- .../contrib/openstack/amulet/deployment.py | 49 +- .../contrib/openstack/amulet/utils.py | 13 +- .../charmhelpers/contrib/openstack/context.py | 652 ++++++++++++------ hooks/charmhelpers/contrib/openstack/ip.py | 68 +- .../charmhelpers/contrib/openstack/neutron.py | 24 +- .../contrib/openstack/templating.py | 10 +- hooks/charmhelpers/contrib/openstack/utils.py | 185 ++++- hooks/charmhelpers/contrib/python/__init__.py | 0 hooks/charmhelpers/contrib/python/packages.py | 77 +++ .../contrib/storage/linux/ceph.py | 187 +++-- .../contrib/storage/linux/loopback.py | 8 +- .../charmhelpers/contrib/storage/linux/lvm.py | 1 + .../contrib/storage/linux/utils.py | 5 +- hooks/charmhelpers/core/fstab.py | 18 +- hooks/charmhelpers/core/hookenv.py | 94 ++- hooks/charmhelpers/core/host.py | 106 ++- hooks/charmhelpers/core/services/__init__.py | 4 +- hooks/charmhelpers/core/services/base.py | 3 + hooks/charmhelpers/core/services/helpers.py | 130 +++- hooks/charmhelpers/core/sysctl.py | 34 + hooks/charmhelpers/core/templating.py | 3 +- hooks/charmhelpers/fetch/__init__.py | 54 +- hooks/charmhelpers/fetch/archiveurl.py | 116 +++- hooks/charmhelpers/fetch/bzrurl.py | 6 +- hooks/charmhelpers/fetch/giturl.py | 51 ++ 29 files changed, 1651 insertions(+), 527 deletions(-) create mode 100644 hooks/charmhelpers/__init__.pyc create mode 100644 hooks/charmhelpers/contrib/python/__init__.py create mode 100644 hooks/charmhelpers/contrib/python/packages.py create mode 100644 hooks/charmhelpers/core/sysctl.py create mode 100644 hooks/charmhelpers/fetch/giturl.py diff --git a/hooks/charmhelpers/__init__.pyc b/hooks/charmhelpers/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf0369a1c6f6cd8c4b01af51f1d7233fc94d1a93 GIT binary patch literal 117 zcmZSn%*(}^)De=*00oRd+5w1*S%5?e14FO|NW@PANHCxg#d1KgjQsrUV*TWd#G>4c o)SQCUqGJ8{_{_Y_lK6PNg31yOpc0$h{FKt1R6CGC#X!se01}B6f&c&j literal 0 HcmV?d00001 diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 8d5fb8b..6616fff 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -20,20 +20,27 @@ from charmhelpers.core.hookenv import ( ) -def get_cert(): +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config cert = config_get('ssl_cert') key = config_get('ssl_key') if not (cert and key): log("Inspecting identity-service relations for SSL certificate.", level=INFO) cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 
'ssl_key' for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): if not cert: - cert = relation_get('ssl_cert', + cert = relation_get(ssl_cert_attr, rid=r_id, unit=unit) if not key: - key = relation_get('ssl_key', + key = relation_get(ssl_key_attr, rid=r_id, unit=unit) return (cert, key) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 7151b1d..52ce4b7 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,9 +13,10 @@ clustering-related helpers. import subprocess import os - from socket import gethostname as get_unit_hostname +import six + from charmhelpers.core.hookenv import ( log, relation_ids, @@ -77,7 +78,7 @@ def is_crm_leader(resource): "show", resource ] try: - status = subprocess.check_output(cmd) + status = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -139,10 +140,9 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN rel_state = [ relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ssl_cert', rid=r_id, unit=unit), - relation_get('ssl_key', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit), ] # NOTE: works around (LP: #1203241) @@ -151,34 +151,42 @@ def https(): return False -def determine_api_port(public_port): +def determine_api_port(public_port, singlenode_mode=False): ''' Determine correct API server listening port based on existence of HTTPS reverse proxy and/or haproxy. public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the API service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 if https(): i += 1 return public_port - (i * 10) -def determine_apache_port(public_port): +def determine_apache_port(public_port, singlenode_mode=False): ''' Description: Determine correct apache listening port based on public IP + state of the cluster. 
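As a quick sanity check on the port arithmetic these two helpers encode (a sketch with assumed values, not code from this sync):

# Assumed: public port 9696 (e.g. neutron-server) on a clustered,
# https-enabled unit; each proxy layer steps the listener down by 10.
public_port = 9696
haproxy_port = public_port        # haproxy binds the public port
apache_port = public_port - 10    # determine_apache_port(9696) -> 9686
api_port = public_port - 20       # determine_api_port(9696) -> 9676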
public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the HAProxy service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 return public_port - (i * 10) @@ -198,7 +206,7 @@ def get_hacluster_config(): for setting in settings: conf[setting] = config_get(setting) missing = [] - [missing.append(s) for s, v in conf.iteritems() if v is None] + [missing.append(s) for s, v in six.iteritems(conf) if v is None] if missing: log('Insufficient config data to configure hacluster.', level=ERROR) raise HAIncompleteConfig diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 7edbcc4..8dc8316 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,13 @@ -import sys +import glob +import re +import subprocess from functools import partial +from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + log ) try: @@ -28,29 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). - """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -62,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -74,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! 
return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -105,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -132,43 +137,215 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): +def format_ipv6_addr(address): + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. + """ + if is_ipv6(address): + return "[%s]" % address + + return None + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """Return the assigned IP address for a given interface, if any.""" + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + + if not exc_list: + exc_list = [] + try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception("Unknown inet type '%s'" % str(inet_type)) - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) - return ipv6_addr[0] + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("Interface '%s' not found " % (iface)) + else: + return [] + + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) + + return sorted(addresses) + + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. 
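A usage sketch for the address helpers in this hunk (the interface name and CIDR are assumptions; results depend on the host's configuration):

from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    get_iface_addr,
    get_ipv6_addr,
)

# First local address inside the CIDR, else the fallback.
addr = get_address_in_network('192.168.1.0/24', fallback='127.0.0.1')

# All IPv4 addresses on eth0; with fatal=False an empty list is
# returned instead of raising.
v4_addrs = get_iface_addr(iface='eth0', inet_type='AF_INET', fatal=False)

# Scope-global, non-temporary IPv6 addresses on eth0, [] if none.
v6_addrs = get_ipv6_addr(iface='eth0', fatal=False)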
+ """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd).decode('UTF-8') + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' does not have a scope global " + "non-temporary ipv6 address." % iface) + + return [] + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] + + +def is_bridge_member(nic): + """Check if a given nic is a member of a bridge.""" + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + + return False diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 9179eeb..f3fee07 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -10,36 +11,66 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None): + def __init__(self, series=None, openstack=None, source=None, stable=True): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) self.openstack = openstack self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come + # out. + self.current_next = "trusty" + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. 
+
+        Determine if the local branch being tested is derived from its
+        stable or next (dev) branch, and based on this, use the corresponding
+        stable or next branches for the other_services."""
+        base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
+
+        if self.stable:
+            for svc in other_services:
+                temp = 'lp:charms/{}'
+                svc['location'] = temp.format(svc['name'])
+        else:
+            for svc in other_services:
+                if svc['name'] in base_charms:
+                    temp = 'lp:charms/{}'
+                    svc['location'] = temp.format(svc['name'])
+                else:
+                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
+                    svc['location'] = temp.format(self.current_next,
+                                                  svc['name'])
+        return other_services
 
     def _add_services(self, this_service, other_services):
-        """Add services to the deployment and set openstack-origin."""
+        """Add services to the deployment and set openstack-origin/source."""
+        other_services = self._determine_branch_locations(other_services)
+
         super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                              other_services)
-        name = 0
+
         services = other_services
         services.append(this_service)
-        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
+        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
+                      'ceph-osd', 'ceph-radosgw']
 
         if self.openstack:
             for svc in services:
-                if svc[name] not in use_source:
+                if svc['name'] not in use_source:
                     config = {'openstack-origin': self.openstack}
-                    self.d.configure(svc[name], config)
+                    self.d.configure(svc['name'], config)
 
         if self.source:
             for svc in services:
-                if svc[name] in use_source:
+                if svc['name'] in use_source:
                     config = {'source': self.source}
-                    self.d.configure(svc[name], config)
+                    self.d.configure(svc['name'], config)
 
     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index bd327bd..3e0cc61 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
 
+import six
+
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
            expected service catalog endpoints.

""" self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: @@ -187,15 +189,16 @@ class OpenStackAmuletUtils(AmuletUtils): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index d41b74a..eebe8c0 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,21 +1,18 @@ import json import os import time - from base64 import b64decode +from subprocess import check_call -from subprocess import ( - check_call -) - +import six from charmhelpers.fetch import ( apt_install, filter_installed_packages, ) - from charmhelpers.core.hookenv import ( config, + is_relation_made, local_unit, log, relation_get, @@ -24,32 +21,40 @@ from charmhelpers.core.hookenv import ( relation_set, unit_get, unit_private_ip, + DEBUG, + INFO, + WARNING, ERROR, - INFO ) - +from charmhelpers.core.host import ( + mkdir, + write_file, +) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, https, - is_clustered + is_clustered, ) - from charmhelpers.contrib.hahelpers.apache import ( get_cert, get_ca_cert, + install_ca_cert, ) - from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, ) - from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_address_in_network, ) +from charmhelpers.contrib.openstack.utils import get_host_ip CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] class OSContextError(Exception): @@ -57,7 +62,7 @@ class OSContextError(Exception): def ensure_packages(packages): - '''Install but do not upgrade required plugin packages''' + """Install but do not upgrade required plugin packages.""" required = filter_installed_packages(packages) if required: apt_install(required, fatal=True) @@ -65,20 +70,27 @@ def ensure_packages(packages): def context_complete(ctxt): _missing = [] - for k, v in ctxt.iteritems(): + for k, v in six.iteritems(ctxt): if v is None or v == '': _missing.append(k) + if _missing: - log('Missing required data: %s' % ' '.join(_missing), level='INFO') + log('Missing required data: %s' % ' '.join(_missing), level=INFO) return False + return True def config_flags_parser(config_flags): + """Parses config flags string into dict. + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. 
+ """ if config_flags.find('==') >= 0: - log("config_flags is not in expected format (key=value)", - level=ERROR) + log("config_flags is not in expected format (key=value)", level=ERROR) raise OSContextError + # strip the following from each value. post_strippers = ' ,' # we strip any leading/trailing '=' or ' ' from the string then @@ -86,7 +98,7 @@ def config_flags_parser(config_flags): split = config_flags.strip(' =').split('=') limit = len(split) flags = {} - for i in xrange(0, limit - 1): + for i in range(0, limit - 1): current = split[i] next = split[i + 1] vindex = next.rfind(',') @@ -101,17 +113,18 @@ def config_flags_parser(config_flags): # if this not the first entry, expect an embedded key. index = current.rfind(',') if index < 0: - log("invalid config value(s) at index %s" % (i), - level=ERROR) + log("Invalid config value(s) at index %s" % (i), level=ERROR) raise OSContextError key = current[index + 1:] # Add to collection. flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + return flags class OSContextGenerator(object): + """Base class for all context generators.""" interfaces = [] def __call__(self): @@ -123,11 +136,11 @@ class SharedDBContext(OSContextGenerator): def __init__(self, database=None, user=None, relation_prefix=None, ssl_dir=None): - ''' - Allows inspecting relation for settings prefixed with relation_prefix. - This is useful for parsing access for multiple databases returned via - the shared-db interface (eg, nova_password, quantum_password) - ''' + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ self.relation_prefix = relation_prefix self.database = database self.user = user @@ -137,9 +150,8 @@ class SharedDBContext(OSContextGenerator): self.database = self.database or config('database') self.user = self.user or config('database-user') if None in [self.database, self.user]: - log('Could not generate shared_db context. ' - 'Missing required charm config options. ' - '(database name and user)') + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) raise OSContextError ctxt = {} @@ -168,8 +180,10 @@ class SharedDBContext(OSContextGenerator): for rid in relation_ids('shared-db'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host ctxt = { - 'database_host': rdata.get('db_host'), + 'database_host': host, 'database': self.database, 'database_user': self.user, 'database_password': rdata.get(password_setting), @@ -190,23 +204,24 @@ class PostgresqlDBContext(OSContextGenerator): def __call__(self): self.database = self.database or config('database') if self.database is None: - log('Could not generate postgresql_db context. ' - 'Missing required charm config options. ' - '(database name)') + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. 
(database name)', level=ERROR) raise OSContextError - ctxt = {} + ctxt = {} for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): - ctxt = { - 'database_host': relation_get('host', rid=rid, unit=unit), - 'database': self.database, - 'database_user': relation_get('user', rid=rid, unit=unit), - 'database_password': relation_get('password', rid=rid, unit=unit), - 'database_type': 'postgresql', - } + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} if context_complete(ctxt): return ctxt + return {} @@ -215,23 +230,29 @@ def db_ssl(rdata, ctxt, ssl_dir): ca_path = os.path.join(ssl_dir, 'db-client.ca') with open(ca_path, 'w') as fh: fh.write(b64decode(rdata['ssl_ca'])) + ctxt['database_ssl_ca'] = ca_path elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found") + log("Charm not setup for ssl support but ssl ca found", level=INFO) return ctxt + if 'ssl_cert' in rdata: cert_path = os.path.join( ssl_dir, 'db-client.cert') if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity") + log("Waiting 1m for ssl client cert validity", level=INFO) time.sleep(60) + with open(cert_path, 'w') as fh: fh.write(b64decode(rdata['ssl_cert'])) + ctxt['database_ssl_cert'] = cert_path key_path = os.path.join(ssl_dir, 'db-client.key') with open(key_path, 'w') as fh: fh.write(b64decode(rdata['ssl_key'])) + ctxt['database_ssl_key'] = key_path + return ctxt @@ -239,31 +260,33 @@ class IdentityServiceContext(OSContextGenerator): interfaces = ['identity-service'] def __call__(self): - log('Generating template context for identity-service') + log('Generating template context for identity-service', level=DEBUG) ctxt = {} - for rid in relation_ids('identity-service'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) - ctxt = { - 'service_port': rdata.get('service_port'), - 'service_host': rdata.get('service_host'), - 'auth_host': rdata.get('auth_host'), - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - } + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + ctxt = {'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol} if context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') return ctxt + return {} @@ -276,32 +299,37 @@ class AMQPContext(OSContextGenerator): self.interfaces = [rel_name] def __call__(self): - 
log('Generating template context for amqp')
+        log('Generating template context for amqp', level=DEBUG)
         conf = config()
-        user_setting = 'rabbit-user'
-        vhost_setting = 'rabbit-vhost'
         if self.relation_prefix:
-            user_setting = self.relation_prefix + '-rabbit-user'
-            vhost_setting = self.relation_prefix + '-rabbit-vhost'
+            user_setting = '%s-rabbit-user' % (self.relation_prefix)
+            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
+        else:
+            user_setting = 'rabbit-user'
+            vhost_setting = 'rabbit-vhost'
 
         try:
             username = conf[user_setting]
             vhost = conf[vhost_setting]
         except KeyError as e:
-            log('Could not generate shared_db context. '
-                'Missing required charm config options: %s.' % e)
+            log('Could not generate amqp context. Missing required charm '
+                'config options: %s.' % e, level=ERROR)
             raise OSContextError
+
         ctxt = {}
         for rid in relation_ids(self.rel_name):
             ha_vip_only = False
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
                     ctxt['clustered'] = True
-                    ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
-                                                         unit=unit)
+                    vip = relation_get('vip', rid=rid, unit=unit)
+                    vip = format_ipv6_addr(vip) or vip
+                    ctxt['rabbitmq_host'] = vip
                 else:
-                    ctxt['rabbitmq_host'] = relation_get('private-address',
-                                                         rid=rid, unit=unit)
+                    host = relation_get('private-address', rid=rid, unit=unit)
+                    host = format_ipv6_addr(host) or host
+                    ctxt['rabbitmq_host'] = host
+
                 ctxt.update({
                     'rabbitmq_user': username,
                     'rabbitmq_password': relation_get('password', rid=rid,
@@ -312,6 +340,7 @@
                 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                 if ssl_port:
                     ctxt['rabbit_ssl_port'] = ssl_port
+
                 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                 if ssl_ca:
                     ctxt['rabbit_ssl_ca'] = ssl_ca
@@ -325,40 +354,45 @@
                 if context_complete(ctxt):
                     if 'rabbit_ssl_ca' in ctxt:
                         if not self.ssl_dir:
-                            log(("Charm not setup for ssl support "
-                                 "but ssl ca found"))
+                            log("Charm not setup for ssl support but ssl ca "
+                                "found", level=INFO)
                             break
+
                         ca_path = os.path.join(
                             self.ssl_dir, 'rabbit-client-ca.pem')
                         with open(ca_path, 'w') as fh:
                             fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                             ctxt['rabbit_ssl_ca'] = ca_path
+
                     # Sufficient information found = break out!
break + # Used for active/active rabbitmq >= grizzly - if ('clustered' not in ctxt or ha_vip_only) \ - and len(related_units(rid)) > 1: + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): rabbitmq_hosts = [] for unit in related_units(rid): - rabbitmq_hosts.append(relation_get('private-address', - rid=rid, unit=unit)) - ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + if not context_complete(ctxt): return {} - else: - return ctxt + + return ctxt class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" interfaces = ['ceph'] def __call__(self): - '''This generates context for /etc/ceph/ceph.conf templates''' if not relation_ids('ceph'): return {} - log('Generating template context for ceph') - + log('Generating template context for ceph', level=DEBUG) mon_hosts = [] auth = None key = None @@ -367,17 +401,18 @@ class CephContext(OSContextGenerator): for unit in related_units(rid): auth = relation_get('auth', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit) - ceph_addr = \ - relation_get('ceph-public-address', rid=rid, unit=unit) or \ - relation_get('private-address', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = { - 'mon_hosts': ' '.join(mon_hosts), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog - } + ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), + 'auth': auth, + 'key': key, + 'use_syslog': use_syslog} if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -386,40 +421,70 @@ class CephContext(OSContextGenerator): return {} ensure_packages(['ceph-common']) - return ctxt class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + """ interfaces = ['cluster'] + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode + def __call__(self): - ''' - Builds half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. 
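The other half is charm-specific; a minimal sketch of a port-mapping generator a charm might pair with HAProxyContext (the service name, ports, and the 'service_ports' key follow common haproxy template conventions and are assumptions, not part of this sync):

from charmhelpers.contrib.openstack.context import OSContextGenerator


class ServicePortContext(OSContextGenerator):
    """Hypothetical charm-side companion to HAProxyContext."""

    def __call__(self):
        # haproxy frontend on 9696, backend service listener on 9686
        return {'service_ports': {'neutron-server': [9696, 9686]}}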
- ''' - if not relation_ids('cluster'): + if not relation_ids('cluster') and not self.singlenode_mode: return {} - cluster_hosts = {} - l_unit = local_unit().replace('/', '-') if config('prefer-ipv6'): - addr = get_ipv6_addr() + addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: - addr = unit_get('private-address') - cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), - addr) + addr = get_host_ip(unit_get('private-address')) - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _unit = unit.replace('/', '-') - addr = relation_get('private-address', rid=rid, unit=unit) - cluster_hosts[_unit] = addr + l_unit = local_unit().replace('/', '-') + cluster_hosts = {} - ctxt = { - 'units': cluster_hosts, - } + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in ADDRESS_TYPES: + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) no split configurations found, just use + # private addresses + if not cluster_hosts: + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = {'frontends': cluster_hosts} + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') if config('prefer-ipv6'): ctxt['local_host'] = 'ip6-localhost' @@ -430,13 +495,19 @@ class HAProxyContext(OSContextGenerator): ctxt['haproxy_host'] = '0.0.0.0' ctxt['stat_port'] = ':8888' - if len(cluster_hosts.keys()) > 1: - # Enable haproxy when we have enough peers. - log('Ensuring haproxy enabled in /etc/default/haproxy.') - with open('/etc/default/haproxy', 'w') as out: - out.write('ENABLED=1\n') - return ctxt - log('HAProxy context is incomplete, this unit has no peers.') + for frontend in cluster_hosts: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + + return ctxt + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) return {} @@ -444,29 +515,28 @@ class ImageServiceContext(OSContextGenerator): interfaces = ['image-service'] def __call__(self): - ''' - Obtains the glance API server from the image-service relation. Useful - in nova and cinder (currently). - ''' - log('Generating template context for image-service.') + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). 
+ """ + log('Generating template context for image-service.', level=DEBUG) rids = relation_ids('image-service') if not rids: return {} + for rid in rids: for unit in related_units(rid): api_server = relation_get('glance-api-server', rid=rid, unit=unit) if api_server: return {'glance_api_servers': api_server} - log('ImageService context is incomplete. ' - 'Missing required relation data.') + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) return {} class ApacheSSLContext(OSContextGenerator): - - """ - Generates a context for an apache vhost configuration that configures + """Generates a context for an apache vhost configuration that configures HTTPS reverse proxying for one or many endpoints. Generated context looks something like:: @@ -490,44 +560,111 @@ class ApacheSSLContext(OSContextGenerator): cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] check_call(cmd) - def configure_cert(self): - if not os.path.isdir('/etc/apache2/ssl'): - os.mkdir('/etc/apache2/ssl') + def configure_cert(self, cn=None): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) - if not os.path.isdir(ssl_dir): - os.mkdir(ssl_dir) - cert, key = get_cert() - with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: - cert_out.write(b64decode(cert)) - with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: - key_out.write(b64decode(key)) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert)) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key)) + + def configure_ca(self): ca_cert = get_ca_cert() if ca_cert: - with open(CA_CERT_PATH, 'w') as ca_out: - ca_out.write(b64decode(ca_cert)) - check_call(['update-ca-certificates']) + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + """Figure out which canonical names clients will access this service. + """ + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + if config('vip'): + vips = config('vip').split() + else: + vips = [] + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." 
% net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) def __call__(self): - if isinstance(self.external_ports, basestring): + if isinstance(self.external_ports, six.string_types): self.external_ports = [self.external_ports] - if (not self.external_ports or not https()): + + if not self.external_ports or not https(): return {} - self.configure_cert() + self.configure_ca() self.enable_modules() - ctxt = { - 'namespace': self.service_namespace, - 'private_address': unit_get('private-address'), - 'endpoints': [] - } - if is_clustered(): - ctxt['private_address'] = config('vip') - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port) - int_port = determine_api_port(api_port) - portmap = (int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + for cn in self.canonical_names(): + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in sorted(set(addresses)): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port) + int_port = determine_api_port(api_port) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) return ctxt @@ -544,21 +681,23 @@ class NeutronContext(OSContextGenerator): @property def packages(self): - return neutron_plugin_attribute( - self.plugin, 'packages', self.network_manager) + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) @property def neutron_security_groups(self): return None def _ensure_packages(self): - [ensure_packages(pkgs) for pkgs in self.packages] + for pkgs in self.packages: + ensure_packages(pkgs) def _save_flag_file(self): if self.network_manager == 'quantum': _file = '/etc/nova/quantum_plugin.conf' else: _file = '/etc/nova/neutron_plugin.conf' + with open(_file, 'wb') as out: out.write(self.plugin + '\n') @@ -567,13 +706,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - ovs_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return ovs_ctxt @@ -582,13 +719,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - nvp_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return nvp_ctxt @@ -597,35 +732,50 @@ class NeutronContext(OSContextGenerator): self.network_manager) n1kv_config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - n1kv_ctxt = { - 
'core_plugin': driver,
-            'neutron_plugin': 'n1kv',
-            'neutron_security_groups': self.neutron_security_groups,
-            'local_ip': unit_private_ip(),
-            'config': n1kv_config,
-            'vsm_ip': config('n1kv-vsm-ip'),
-            'vsm_username': config('n1kv-vsm-username'),
-            'vsm_password': config('n1kv-vsm-password'),
-            'restrict_policy_profiles': config(
-                'n1kv_restrict_policy_profiles'),
-        }
+        n1kv_user_config_flags = config('n1kv-config-flags')
+        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
+        n1kv_ctxt = {'core_plugin': driver,
+                     'neutron_plugin': 'n1kv',
+                     'neutron_security_groups': self.neutron_security_groups,
+                     'local_ip': unit_private_ip(),
+                     'config': n1kv_config,
+                     'vsm_ip': config('n1kv-vsm-ip'),
+                     'vsm_username': config('n1kv-vsm-username'),
+                     'vsm_password': config('n1kv-vsm-password'),
+                     'restrict_policy_profiles': restrict_policy_profiles}
+
+        if n1kv_user_config_flags:
+            flags = config_flags_parser(n1kv_user_config_flags)
+            n1kv_ctxt['user_config_flags'] = flags
 
         return n1kv_ctxt
 
+    def calico_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        config = neutron_plugin_attribute(self.plugin, 'config',
+                                          self.network_manager)
+        calico_ctxt = {'core_plugin': driver,
+                       'neutron_plugin': 'Calico',
+                       'neutron_security_groups': self.neutron_security_groups,
+                       'local_ip': unit_private_ip(),
+                       'config': config}
+
+        return calico_ctxt
+
     def neutron_ctxt(self):
         if https():
             proto = 'https'
         else:
             proto = 'http'
+
         if is_clustered():
             host = config('vip')
         else:
             host = unit_get('private-address')
-        url = '%s://%s:%s' % (proto, host, '9696')
-        ctxt = {
-            'network_manager': self.network_manager,
-            'neutron_url': url,
-        }
+
+        ctxt = {'network_manager': self.network_manager,
+                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
         return ctxt
 
     def __call__(self):
@@ -645,6 +795,8 @@ class NeutronContext(OSContextGenerator):
             ctxt.update(self.nvp_ctxt())
         elif self.plugin == 'n1kv':
             ctxt.update(self.n1kv_ctxt())
+        elif self.plugin == 'Calico':
+            ctxt.update(self.calico_ctxt())
 
         alchemy_flags = config('neutron-alchemy-flags')
         if alchemy_flags:
@@ -656,23 +808,40 @@ class NeutronContext(OSContextGenerator):
 
 
 class OSConfigFlagContext(OSContextGenerator):
+    """Provides support for user-defined config flags.
+
+    Users can define a comma-separated list of key=value pairs
+    in the charm configuration and apply them at any point in
+    any file by using a template flag.
+
+    Sometimes users might want config flags inserted within a
+    specific section so this class allows users to specify the
+    template flag name, allowing for multiple template flags
+    (sections) within the same context.
+
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some OpenStack config files support
+          comma-separated lists as values.
+    """
+
+    def __init__(self, charm_flag='config-flags',
+                 template_flag='user_config_flags'):
         """
-        Responsible for adding user-defined config-flags in charm config to a
-        template context.
-
-        NOTE: the value of config-flags may be a comma-separated list of
-              key=value pairs and some Openstack config files support
-              comma-separated lists as values.
+        :param charm_flag: config flags in charm configuration.
+        :param template_flag: insert point for user-defined flags in template
+                              file.
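A usage sketch for this parameterised form (the option and flag names here are hypothetical):

# Render an 'api-config-flags' charm option into templates that
# reference the api_config_flags variable.
ctxt_gen = OSConfigFlagContext(charm_flag='api-config-flags',
                               template_flag='api_config_flags')
ctxt = ctxt_gen()  # {} until the operator sets the option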
""" + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag - def __call__(self): - config_flags = config('config-flags') - if not config_flags: - return {} + def __call__(self): + config_flags = config(self._charm_flag) + if not config_flags: + return {} - flags = config_flags_parser(config_flags) - return {'user_config_flags': flags} + return {self._template_flag: + config_flags_parser(config_flags)} class SubordinateConfigContext(OSContextGenerator): @@ -716,7 +885,6 @@ class SubordinateConfigContext(OSContextGenerator): }, } } - """ def __init__(self, service, config_file, interface): @@ -746,26 +914,28 @@ class SubordinateConfigContext(OSContextGenerator): if self.service not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service)) + 'nothing for %s service' % (rid, self.service), + level=INFO) continue sub_config = sub_config[self.service] if self.config_file not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file)) + 'nothing for %s' % (rid, self.config_file), + level=INFO) continue sub_config = sub_config[self.config_file] - for k, v in sub_config.iteritems(): + for k, v in six.iteritems(sub_config): if k == 'sections': - for section, config_dict in v.iteritems(): - log("adding section '%s'" % (section)) + for section, config_dict in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) ctxt[k][section] = config_dict else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) - + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt @@ -777,13 +947,71 @@ class LogLevelContext(OSContextGenerator): False if config('debug') is None else config('debug') ctxt['verbose'] = \ False if config('verbose') is None else config('verbose') + return ctxt class SyslogContext(OSContextGenerator): def __call__(self): - ctxt = { - 'use_syslog': config('use-syslog') - } + ctxt = {'use_syslog': config('use-syslog')} + return ctxt + + +class BindHostContext(OSContextGenerator): + + def __call__(self): + if config('prefer-ipv6'): + return {'bind_host': '::'} + else: + return {'bind_host': '0.0.0.0'} + + +class WorkerConfigContext(OSContextGenerator): + + @property + def num_cpus(self): + try: + from psutil import NUM_CPUS + except ImportError: + apt_install('python-psutil', fatal=True) + from psutil import NUM_CPUS + + return NUM_CPUS + + def __call__(self): + multiplier = config('worker-multiplier') or 0 + ctxt = {"workers": self.num_cpus * multiplier} + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/ip.py 
b/hooks/charmhelpers/contrib/openstack/ip.py
index affe8cd..f062c80 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -2,21 +2,19 @@ from charmhelpers.core.hookenv import (
     config,
     unit_get,
 )
-
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
 )
-
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
 PUBLIC = 'public'
 INTERNAL = 'int'
 ADMIN = 'admin'
 
-_address_map = {
+ADDRESS_MAP = {
     PUBLIC: {
         'config': 'os-public-network',
         'fallback': 'public-address'
@@ -33,16 +31,14 @@ def canonical_url(configs, endpoint_type=PUBLIC):
-    '''
-    Returns the correct HTTP URL to this host given the state of HTTPS
+    """Returns the correct HTTP URL to this host given the state of HTTPS
     configuration, hacluster and charm configuration.
 
-    :configs OSTemplateRenderer: A config tempating object to inspect for
-               a complete https context.
-    :endpoint_type str: The endpoint type to resolve.
-
-    :returns str: Base URL for services on the current service unit.
-    '''
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :param endpoint_type: str endpoint type to resolve.
+    :returns: str base URL for services on the current service unit.
+    """
     scheme = 'http'
     if 'https' in configs.complete_contexts():
         scheme = 'https'
@@ -53,27 +49,45 @@ def canonical_url(configs, endpoint_type=PUBLIC):
 
 
 def resolve_address(endpoint_type=PUBLIC):
+    """Return unit address depending on net config.
+
+    If unit is clustered with vip(s) and has net splits defined, return vip on
+    correct network. If clustered with no nets defined, return primary vip.
+
+    If not clustered, return unit address ensuring address is on configured net
+    split if one is configured.
+
+    :param endpoint_type: Network endpoint type
+    """
    resolved_address = None
-    if is_clustered():
-        if config(_address_map[endpoint_type]['config']) is None:
-            # Assume vip is simple and pass back directly
-            resolved_address = config('vip')
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+
+    net_type = ADDRESS_MAP[endpoint_type]['config']
+    net_addr = config(net_type)
+    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    clustered = is_clustered()
+    if clustered:
+        if not net_addr:
+            # If no net-splits defined, we expect a single vip
+            resolved_address = vips[0]
         else:
-            for vip in config('vip').split():
-                if is_address_in_network(
-                        config(_address_map[endpoint_type]['config']),
-                        vip):
+            for vip in vips:
+                if is_address_in_network(net_addr, vip):
                     resolved_address = vip
+                    break
     else:
         if config('prefer-ipv6'):
-            fallback_addr = get_ipv6_addr()
+            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
-            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
-        resolved_address = get_address_in_network(
-            config(_address_map[endpoint_type]['config']), fallback_addr)
+            fallback_addr = unit_get(net_fallback)
+
+        resolved_address = get_address_in_network(net_addr, fallback_addr)
 
     if resolved_address is None:
-        raise ValueError('Unable to resolve a suitable IP address'
-                         ' based on charm state and configuration')
-    else:
-        return resolved_address
+        raise ValueError("Unable to resolve a suitable IP address based on "
+                         "charm state and configuration. 
(net_type=%s, " + "clustered=%s)" % (net_type, clustered)) + + return resolved_address diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 84d97bc..1446f63 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release def headers_package(): """Ensures correct linux-headers for running kernel are installed, for building DKMS package""" - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() return 'linux-headers-%s' % kver QUANTUM_CONF_DIR = '/etc/quantum' @@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum' def kernel_version(): """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() kver = kver.split('.') return (int(kver[0]), int(kver[1])) @@ -138,10 +138,25 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [['neutron-plugin-cisco']], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-cisco']], 'server_packages': ['neutron-server', 'neutron-plugin-cisco'], 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['calico-compute', 'bird', 'neutron-dhcp-agent']], + 'server_packages': ['neutron-server', 'calico-control'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -162,7 +177,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): elif manager == 'neutron': plugins = neutron_plugins() else: - log('Error: Network manager does not support plugins.') + log("Network manager '%s' does not support plugins." % (manager), + level=ERROR) raise Exception try: diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index f544271..33df067 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,13 +1,13 @@ import os -from charmhelpers.fetch import apt_install +import six +from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( log, ERROR, INFO ) - from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: @@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release): order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in OPENSTACK_CODENAMES.itervalues()] + for rel in six.itervalues(OPENSTACK_CODENAMES)] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' % templates_dir, @@ -258,7 +258,7 @@ class OSConfigRenderer(object): """ Write out all registered config files. 
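For context, the typical renderer flow looks roughly like this (the path and contexts are examples, not taken from this patch):

from charmhelpers.contrib.openstack import context, templating

configs = templating.OSConfigRenderer(templates_dir='templates/',
                                      openstack_release='icehouse')
configs.register('/etc/neutron/neutron.conf', [context.AMQPContext()])
configs.write_all()  # renders every registered config file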
""" - [self.write(k) for k in self.templates.iterkeys()] + [self.write(k) for k in six.iterkeys(self.templates)] def set_release(self, openstack_release): """ @@ -275,5 +275,5 @@ class OSConfigRenderer(object): ''' interfaces = [] [interfaces.extend(i.complete_contexts()) - for i in self.templates.itervalues()] + for i in six.itervalues(self.templates)] return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 20943c2..4417967 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -2,18 +2,24 @@ # Common python helper functions used for OpenStack charms. from collections import OrderedDict +from functools import wraps import subprocess +import json import os import socket import sys +import six +import yaml + from charmhelpers.core.hookenv import ( config, log as juju_log, charm_dir, - ERROR, - INFO + INFO, + relation_ids, + relation_set ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -22,8 +28,13 @@ from charmhelpers.contrib.storage.linux.lvm import ( remove_lvm_physical_volume, ) +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr +) + from charmhelpers.core.host import lsb_release, mounts, umount -from charmhelpers.fetch import apt_install, apt_cache +from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.contrib.python.packages import pip_install from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -70,6 +81,9 @@ SWIFT_CODENAMES = OrderedDict([ ('1.13.0', 'icehouse'), ('1.12.0', 'icehouse'), ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -102,7 +116,7 @@ def get_os_codename_install_source(src): # Best guess match based on deb string provided if src.startswith('deb') or src.startswith('ppa'): - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v in src: return v @@ -123,7 +137,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename): '''Determine OpenStack version number from codename.''' - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -183,7 +197,7 @@ def get_os_version_package(pkg, fatal=True): else: vers_map = OPENSTACK_CODENAMES - for version, cname in vers_map.iteritems(): + for version, cname in six.iteritems(vers_map): if cname == codename: return version # e = "Could not determine OpenStack version for package: %s" % pkg @@ -307,7 +321,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in env_vars.iteritems() if u != "script_path"] + for u, p in six.iteritems(env_vars) if u != "script_path"] def openstack_upgrade_available(package): @@ -340,8 +354,8 @@ def ensure_block_device(block_device): ''' _none = ['None', 'none', None] if (block_device in _none): - error_out('prepare_storage(): Missing required input: ' - 'block_device=%s.' % block_device, level=ERROR) + error_out('prepare_storage(): Missing required input: block_device=%s.' 
+ % block_device) if block_device.startswith('/dev/'): bdev = block_device @@ -357,8 +371,7 @@ def ensure_block_device(block_device): bdev = '/dev/%s' % block_device if not is_block_device(bdev): - error_out('Failed to locate valid block device at %s' % bdev, - level=ERROR) + error_out('Failed to locate valid block device at %s' % bdev) return bdev @@ -407,7 +420,7 @@ def ns_query(address): if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, basestring): + elif isinstance(address, six.string_types): rtype = 'A' else: return None @@ -456,3 +469,151 @@ def get_hostname(address, fqdn=True): return result else: return result.split('.')[0] + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def git_install_requested(): + """Returns true if openstack-origin-git is specified.""" + return config('openstack-origin-git') != "None" + + +requirements_dir = None + + +def git_clone_and_install(file_name, core_project): + """Clone/install all OpenStack repos specified in yaml config file.""" + global requirements_dir + + if file_name == "None": + return + + yaml_file = os.path.join(charm_dir(), file_name) + + # clone/install the requirements project first + installed = _git_clone_and_install_subset(yaml_file, + whitelist=['requirements']) + if 'requirements' not in installed: + error_out('requirements git repository must be specified') + + # clone/install all other projects except requirements and the core project + blacklist = ['requirements', core_project] + _git_clone_and_install_subset(yaml_file, blacklist=blacklist, + update_requirements=True) + + # clone/install the core project + whitelist = [core_project] + installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist, + update_requirements=True) + if core_project not in installed: + error_out('{} git repository must be specified'.format(core_project)) + + +def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[], + update_requirements=False): + """Clone/install subset of OpenStack repos specified in yaml config file.""" + global requirements_dir + installed = [] + + with open(yaml_file, 'r') as fd: + projects = yaml.load(fd) + for proj, val in projects.items(): + # The project subset is chosen based on the following 3 rules: + # 1) If project is in blacklist, we don't clone/install it, period. + # 2) If whitelist is empty, we clone/install everything else. + # 3) If whitelist is not empty, we clone/install everything in the + # whitelist. 
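+            # For example, given projects {requirements, neutron, nova}
+            # (hypothetical names), whitelist=['requirements'] installs
+            # only requirements, while blacklist=['requirements'] installs
+            # neutron and nova.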
+ if proj in blacklist: + continue + if whitelist and proj not in whitelist: + continue + repo = val['repository'] + branch = val['branch'] + repo_dir = _git_clone_and_install_single(repo, branch, + update_requirements) + if proj == 'requirements': + requirements_dir = repo_dir + installed.append(proj) + return installed + + +def _git_clone_and_install_single(repo, branch, update_requirements=False): + """Clone and install a single git repository.""" + dest_parent_dir = "/mnt/openstack-git/" + dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo)) + + if not os.path.exists(dest_parent_dir): + juju_log('Host dir not mounted at {}. ' + 'Creating directory there instead.'.format(dest_parent_dir)) + os.mkdir(dest_parent_dir) + + if not os.path.exists(dest_dir): + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch) + else: + repo_dir = dest_dir + + if update_requirements: + if not requirements_dir: + error_out('requirements repo must be cloned before ' + 'updating from global requirements.') + _git_update_requirements(repo_dir, requirements_dir) + + juju_log('Installing git repo from dir: {}'.format(repo_dir)) + pip_install(repo_dir) + + return repo_dir + + +def _git_update_requirements(package_dir, reqs_dir): + """Update from global requirements. + + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt.""" + orig_dir = os.getcwd() + os.chdir(reqs_dir) + cmd = "python update.py {}".format(package_dir) + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + package = os.path.basename(package_dir) + error_out("Error updating {} from global-requirements.txt".format(package)) + os.chdir(orig_dir) diff --git a/hooks/charmhelpers/contrib/python/__init__.py b/hooks/charmhelpers/contrib/python/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py new file mode 100644 index 0000000..78162b1 --- /dev/null +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# coding: utf-8 + +__author__ = "Jorge Niedbalski " + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import log + +try: + from pip import main as pip_execute +except ImportError: + apt_update() + apt_install('python-pip') + from pip import main as pip_execute + + +def parse_options(given, available): + """Given a set of options, check if available""" + for key, value in sorted(given.items()): + if key in available: + yield "--{0}={1}".format(key, value) + + +def pip_install_requirements(requirements, **options): + """Install a requirements file """ + command = ["install"] + + available_options = ('proxy', 'src', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + command.append("-r {0}".format(requirements)) + log("Installing from file: {} with options: {}".format(requirements, + command)) + pip_execute(command) + + +def pip_install(package, fatal=False, **options): + """Install a python package""" + command = ["install"] + + available_options = ('proxy', 'src', 'log', "index-url", ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Installing {} package with options: {}".format(package, + command)) + 
pip_execute(command) + + +def pip_uninstall(package, **options): + """Uninstall a python package""" + command = ["uninstall", "-q", "-y"] + + available_options = ('proxy', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Uninstalling {} package with options: {}".format(package, + command)) + pip_execute(command) + + +def pip_list(): + """Returns the list of current python installed packages + """ + return pip_execute(["list"]) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 768438a..d47dc22 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -16,19 +16,18 @@ import time from subprocess import ( check_call, check_output, - CalledProcessError + CalledProcessError, ) - from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, log, + DEBUG, INFO, WARNING, - ERROR + ERROR, ) - from charmhelpers.core.host import ( mount, mounts, @@ -37,7 +36,6 @@ from charmhelpers.core.host import ( service_running, umount, ) - from charmhelpers.fetch import ( apt_install, ) @@ -56,99 +54,85 @@ CEPH_CONF = """[global] def install(): - ''' Basic Ceph client installation ''' + """Basic Ceph client installation.""" ceph_dir = "/etc/ceph" if not os.path.exists(ceph_dir): os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) def rbd_exists(service, pool, rbd_img): - ''' Check to see if a RADOS block device exists ''' + """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, - '--pool', pool]) + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') except CalledProcessError: return False - else: - return rbd_img in out + + return rbd_img in out def create_rbd_image(service, pool, image, sizemb): - ''' Create a new RADOS block device ''' - cmd = [ - 'rbd', - 'create', - image, - '--size', - str(sizemb), - '--id', - service, - '--pool', - pool - ] + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] check_call(cmd) def pool_exists(service, name): - ''' Check to see if a RADOS pool already exists ''' + """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def get_osds(service): - ''' - Return a list of all Ceph Object Storage Daemons - currently in the cluster - ''' + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', '--format=json'])) - else: - return None + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None -def create_pool(service, name, replicas=2): - ''' Create a new RADOS pool ''' +def create_pool(service, name, replicas=3): + """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return + # Calculate the number of placement groups based # on upstream recommended best practices. 
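+    # For example, 6 OSDs with the default of 3 replicas gives
+    # (6 * 100) // 3 = 200 placement groups.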
osds = get_osds(service) if osds: - pgnum = (len(osds) * 100 / replicas) + pgnum = (len(osds) * 100 // replicas) else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli pgnum = 200 - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'create', - name, str(pgnum) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] check_call(cmd) - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', name, - 'size', str(replicas) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', + str(replicas)] check_call(cmd) def delete_pool(service, name): - ''' Delete a RADOS pool from ceph ''' - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'delete', - name, '--yes-i-really-really-mean-it' - ] + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] check_call(cmd) @@ -161,44 +145,43 @@ def _keyring_path(service): def create_keyring(service, key): - ''' Create a new Ceph keyring containing key''' + """Create a new Ceph keyring containing key.""" keyring = _keyring_path(service) if os.path.exists(keyring): - log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + log('Ceph keyring exists at %s.' % keyring, level=WARNING) return - cmd = [ - 'ceph-authtool', - keyring, - '--create-keyring', - '--name=client.{}'.format(service), - '--add-key={}'.format(key) - ] + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] check_call(cmd) - log('ceph: Created new ring at %s.' % keyring, level=INFO) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) def create_key_file(service, key): - ''' Create a file containing key ''' + """Create a file containing key.""" keyfile = _keyfile_path(service) if os.path.exists(keyfile): - log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + log('Keyfile exists at %s.' % keyfile, level=WARNING) return + with open(keyfile, 'w') as fd: fd.write(key) - log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) def get_ceph_nodes(): - ''' Query named relation 'ceph' to detemine current nodes ''' + """Query named relation 'ceph' to determine current nodes.""" hosts = [] for r_id in relation_ids('ceph'): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts def configure(service, key, auth, use_syslog): - ''' Perform basic configuration of Ceph ''' + """Perform basic configuration of Ceph.""" create_keyring(service, key) create_key_file(service, key) hosts = get_ceph_nodes() @@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): - ''' Determine whether a RADOS block device is mapped locally ''' + """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) + out = check_output(['rbd', 'showmapped']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def map_block_storage(service, pool, image): - ''' Map a RADOS block device for local use ''' + """Map a RADOS block device for local use.""" cmd = [ 'rbd', 'map', @@ -235,31 +218,32 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - ''' Determine whether a filesytems is already mounted ''' + """Determine whether a filesytems is already mounted.""" return fs in [f for f, m in mounts()] def make_filesystem(blk_device, fstype='ext4', timeout=10): - ''' Make a new filesystem on the specified block device ''' + """Make a new filesystem on the specified block device.""" count = 0 e_noent = os.errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: - log('ceph: gave up waiting on block device %s' % blk_device, + log('Gave up waiting on block device %s' % blk_device, level=ERROR) raise IOError(e_noent, os.strerror(e_noent), blk_device) - log('ceph: waiting for block device %s to appear' % blk_device, - level=INFO) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) count += 1 time.sleep(1) else: - log('ceph: Formatting block device %s as filesystem %s.' % + log('Formatting block device %s as filesystem %s.' 
% (blk_device, fstype), level=INFO) check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_block_device(blk_device, data_src_dst): - ''' Migrate data in data_src_dst to blk_device and then remount ''' + """Migrate data in data_src_dst to blk_device and then remount.""" # mount block device into /mnt mount(blk_device, '/mnt') # copy data to /mnt @@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst): # TODO: re-use def modprobe(module): - ''' Load a kernel module and configure for auto-load on reboot ''' - log('ceph: Loading kernel module', level=INFO) + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) cmd = ['modprobe', module] check_call(cmd) with open('/etc/modules', 'r+') as modules: @@ -289,7 +273,7 @@ def modprobe(module): def copy_files(src, dst, symlinks=False, ignore=None): - ''' Copy files from src to dst ''' + """Copy files from src to dst.""" for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) @@ -300,9 +284,9 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[]): - """ - NOTE: This function must only be called from a single service unit for + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, @@ -316,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, """ # Ensure pool, RBD image, RBD mappings are in place. if not pool_exists(service, pool): - log('ceph: Creating new pool {}.'.format(pool)) - create_pool(service, pool) + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): - log('ceph: Creating RBD image ({}).'.format(rbd_img)) + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) create_rbd_image(service, pool, rbd_img, sizemb) if not image_mapped(rbd_img): - log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) map_block_storage(service, pool, rbd_img) # make file system @@ -339,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, for svc in system_services: if service_running(svc): - log('ceph: Stopping services {} prior to migrating data.' - .format(svc)) + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) service_stop(svc) place_data_on_block_device(blk_device, mount_point) for svc in system_services: - log('ceph: Starting service {} after migrating data.' - .format(svc)) + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) service_start(svc) def ensure_ceph_keyring(service, user=None, group=None): - ''' - Ensures a ceph keyring is created for a named service - and optionally ensures user and group ownership. + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. Returns False if no ceph key is available in relation state. 
- ''' + """ key = None for rid in relation_ids('ceph'): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: break + if not key: return False + create_keyring(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) + return True def ceph_version(): - ''' Retrieve the local version of ceph ''' + """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd) + output = check_output(cmd).decode('US-ASCII') output = output.split() if len(output) > 3: return output[2] diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 38957ef..a22c3d7 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,12 +1,12 @@ - import os import re - from subprocess import ( check_call, check_output, ) +import six + ################################################## # loopback device helpers. @@ -37,7 +37,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == file_path: return d @@ -51,7 +51,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == path: return d diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py index 8ac7fec..0aa65f4 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device): vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() for l in pvd: + l = l.decode('UTF-8') if l.strip().startswith('VG Name'): vg = ' '.join(l.strip().split()[2:]) return vg diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b95871..c6a15e1 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. ''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py index cfaf0a6..0adf0db 100644 --- a/hooks/charmhelpers/core/fstab.py +++ b/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. 
' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ class Fstab(file): options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ class Fstab(file): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ class Fstab(file): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ class Fstab(file): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ class Fstab(file): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index eb4aa09..07d1f69 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,14 @@ import json import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -63,16 +68,18 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) command += [message] subprocess.call(command) -class Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -156,12 +163,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. 
Example usage:: @@ -170,8 +180,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +198,40 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +246,8 @@ class Config(dict): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +256,7 @@ class Config(dict): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,11 +266,17 @@ class Config(dict): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. 
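+
+        Example::
+
+            config = hookenv.config()
+            config.implicit_save = False
+            # ... make changes, then persist explicitly:
+            config.save()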
""" if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -257,7 +291,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -276,10 +311,10 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -291,7 +326,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: @@ -308,7 +343,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -319,7 +355,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -428,7 +465,7 @@ def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None @@ -465,9 +502,10 @@ class Hooks(object): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -478,6 +516,10 @@ class Hooks(object): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index b85b028..c6f1680 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -6,19 +6,20 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv import log +from .fstab import Fstab def service_start(service_name): @@ -54,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = 
subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -67,9 +70,11 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -96,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def add_group(group_name, system_group=False): + """Add a group to the system""" + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + except KeyError: + log('creating group {0}'.format(group_name)) + cmd = ['addgroup'] + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + group_info = grp.getgrnam(group_name) + return group_info + + def add_user_to_group(username, group): """Add a user to a group""" cmd = [ @@ -115,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def symlink(source, destination): @@ -130,7 +155,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +171,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +202,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +216,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -209,17 +234,42 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
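+
+    For example::
+
+        file_hash('/etc/fstab', hash_type='sha256')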
+ """ if os.path.exists(path): - h = hashlib.md5() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + h = getattr(hashlib, hash_type)() + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing @@ -272,7 +322,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -281,18 +331,24 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces @@ -304,7 +360,7 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -315,7 +371,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: @@ -332,8 +388,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg - from charmhelpers.fetch import apt_cache if not pkgcache: + from charmhelpers.fetch import apt_cache pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py index e8039a8..69dde79 100644 --- a/hooks/charmhelpers/core/services/__init__.py +++ b/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py index 6b5a1b9..87ecb13 100644 --- 
a/hooks/charmhelpers/core/services/base.py +++ b/hooks/charmhelpers/core/services/base.py @@ -118,6 +118,9 @@ class ServiceManager(object): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): """ diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 4b90589..163a793 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,11 +109,121 @@ class RelationContext(dict): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. 
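+
+    Example (hypothetical option names)::
+
+        required = RequiredConfig('vip', 'ssl_cert')
+        if required:
+            # both options have been changed from their defaults
+            ...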
+ """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. + + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 0000000..0f29963 --- /dev/null +++ b/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 2c63885..83133fa 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import host from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 8e9d380..0a126fc 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from yaml import safe_load from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,12 @@ from charmhelpers.core.hookenv import ( ) import os +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -72,6 +74,7 @@ CLOUD_ARCHIVE_POCKETS = { FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -181,7 +184,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -208,7 +211,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main @@ -217,6 +221,7 @@ def add_source(source, key=None): pocket for the release. 
'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. Ideally, this should be an @@ -250,12 +255,14 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -292,14 +299,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -311,22 +318,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. 
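    # For example, a handler may return the string "Wrong source type",
    # which is truthy, so only an exact True counts as a match here.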
handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: @@ -383,7 +403,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 87e7071..8a4624b 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,23 @@ import os -import urllib2 -import urlparse +import hashlib +import re + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -10,11 +27,37 @@ from charmhelpers.payload.archive import ( get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash + + +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -24,22 +67,28 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. 
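+
+        Example::
+
+            handler.download('http://example.com/archive.tgz',
+                             '/tmp/archive.tgz')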
+ """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: dest_file.write(response.read()) @@ -48,16 +97,49 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
+    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
+        """
+        Download and install an archive file, with optional checksum validation.
+
+        The checksum can also be given on the `source` URL's fragment.
+        For example::
+
+            handler.install('http://example.com/file.tgz#sha1=deadbeef')
+
+        :param str source: URL pointing to an archive file.
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/archives/archive_file_name`.
+        :param str checksum: If given, validate the archive file after download.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
+        """
         url_parts = self.parse_url(source)
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
         try:
             self.download(source, dld_file)
-        except urllib2.URLError as e:
+        except URLError as e:
             raise UnhandledSource(e.reason)
         except OSError as e:
             raise UnhandledSource(e.strerror)
-        return extract(dld_file)
+        options = parse_qs(url_parts.fragment)
+        for key, value in options.items():
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
+                check_hash(dld_file, value, key)
+        if checksum:
+            check_hash(dld_file, checksum, hash_type)
+        return extract(dld_file, dest)
diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py
index 0e580e4..8ef48f3 100644
--- a/hooks/charmhelpers/fetch/bzrurl.py
+++ b/hooks/charmhelpers/fetch/bzrurl.py
@@ -5,6 +5,10 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.core.host import mkdir
 
+import six
+if six.PY3:
+    raise ImportError('bzrlib does not support Python3')
+
 try:
     from bzrlib.branch import Branch
 except ImportError:
@@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                 branch_name)
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         try:
             self.branch(source, dest_dir)
         except OSError as e:
diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py
new file mode 100644
index 0000000..f3aa282
--- /dev/null
+++ b/hooks/charmhelpers/fetch/giturl.py
@@ -0,0 +1,51 @@
+import os
+from charmhelpers.fetch import (
+    BaseFetchHandler,
+    UnhandledSource
+)
+from charmhelpers.core.host import mkdir
+
+import six
+if six.PY3:
+    raise ImportError('GitPython does not support Python 3')
+
+try:
+    from git import Repo
+except ImportError:
+    from charmhelpers.fetch import apt_install
+    apt_install("python-git")
+    from git import Repo
+
+
+class GitUrlFetchHandler(BaseFetchHandler):
+    """Handler for git branches via generic and github URLs"""
+    def can_handle(self, source):
+        url_parts = self.parse_url(source)
+        # TODO (mattyw) no support for ssh git@ yet
+        if url_parts.scheme not in ('http', 'https', 'git'):
+            return False
+        else:
+            return True
+
+    def clone(self, source, dest, branch):
+        if not self.can_handle(source):
+            raise UnhandledSource("Cannot handle {}".format(source))
+
+        repo = Repo.clone_from(source, dest)
+        repo.git.checkout(branch)
+
+    def install(self, source, branch="master", dest=None):
+        url_parts = self.parse_url(source)
+        branch_name = url_parts.path.strip("/").split("/")[-1]
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
+        if not os.path.exists(dest_dir):
+            mkdir(dest_dir, perms=0o755)
+        try:
+            self.clone(source, dest_dir, branch)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        return dest_dir
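
A minimal sketch of the new git handler above, assuming GitPython is available
and CHARM_DIR is set as it is during a hook run; the repository URL is a
placeholder::

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    source = 'https://github.com/example/repo'   # placeholder URL
    if handler.can_handle(source) is True:
        # Clones into $CHARM_DIR/fetched/repo and checks out 'master'.
        path = handler.install(source, branch='master')
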
From aa774c05ecea1db65edbc015b17f79a5450e99c2 Mon Sep 17 00:00:00 2001
From: Corey Bryant
Date: Thu, 11 Dec 2014 02:46:41 +0000
Subject: [PATCH 3/4] Mock out config().

---
 unit_tests/test_ceilometer_hooks.py | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/unit_tests/test_ceilometer_hooks.py b/unit_tests/test_ceilometer_hooks.py
index 8523827..1870e7f 100644
--- a/unit_tests/test_ceilometer_hooks.py
+++ b/unit_tests/test_ceilometer_hooks.py
@@ -32,13 +32,15 @@ class CeilometerHooksTest(CharmTestCase):
         super(CeilometerHooksTest, self).setUp(hooks, TO_PATCH)
         self.config.side_effect = self.test_config.get
 
-    def test_configure_source(self):
+    @patch('charmhelpers.core.hookenv.config')
+    def test_configure_source(self, mock_config):
         self.test_config.set('openstack-origin', 'cloud:precise-havana')
         hooks.hooks.execute(['hooks/install'])
         self.configure_installation_source.\
             assert_called_with('cloud:precise-havana')
 
-    def test_install_hook(self):
+    @patch('charmhelpers.core.hookenv.config')
+    def test_install_hook(self, mock_config):
         self.filter_installed_packages.return_value = \
             hooks.CEILOMETER_AGENT_PACKAGES
         hooks.hooks.execute(['hooks/install'])
@@ -47,16 +49,19 @@ class CeilometerHooksTest(CharmTestCase):
         self.apt_install.assert_called_with(hooks.CEILOMETER_AGENT_PACKAGES,
                                             fatal=True)
 
-    def test_ceilometer_changed(self):
+    @patch('charmhelpers.core.hookenv.config')
+    def test_ceilometer_changed(self, mock_config):
         hooks.hooks.execute(['hooks/ceilometer-service-relation-changed'])
         self.assertTrue(self.CONFIGS.write_all.called)
 
-    def test_nova_ceilometer_joined(self):
+    @patch('charmhelpers.core.hookenv.config')
+    def test_nova_ceilometer_joined(self, mock_config):
         hooks.hooks.execute(['hooks/nova-ceilometer-relation-joined'])
         self.relation_set.assert_called_with(
             subordinate_configuration=json.dumps(ceilometer_utils.NOVA_SETTINGS))
 
-    def test_config_changed_no_upgrade(self):
+    @patch('charmhelpers.core.hookenv.config')
+    def test_config_changed_no_upgrade(self, mock_config):
         self.openstack_upgrade_available.return_value = False
         hooks.hooks.execute(['hooks/config-changed'])
         self.openstack_upgrade_available.\
@@ -64,7 +69,8 @@
         self.assertFalse(self.do_openstack_upgrade.called)
         self.assertTrue(self.CONFIGS.write_all.called)
 
-    def test_config_changed_upgrade(self):
+    @patch('charmhelpers.core.hookenv.config')
+    def test_config_changed_upgrade(self, mock_config):
         self.openstack_upgrade_available.return_value = True
         hooks.hooks.execute(['hooks/config-changed'])
         self.openstack_upgrade_available.\
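
A minimal sketch of the mocking pattern these test changes apply, assuming the
mock library already used by this test suite; the hook name is illustrative.
Patching charmhelpers.core.hookenv.config keeps hook execution from shelling
out to Juju's config-get::

    from mock import patch   # python-mock on Python 2

    @patch('charmhelpers.core.hookenv.config')
    def test_example_hook(self, mock_config):
        # While patched, any call to hookenv.config() returns a MagicMock
        # rather than invoking config-get against a real Juju environment.
        hooks.hooks.execute(['hooks/config-changed'])
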
From 0e598ab57a2c4689bce2bb28c6d5fc5e51a714cf Mon Sep 17 00:00:00 2001
From: Corey Bryant
Date: Thu, 11 Dec 2014 17:46:56 +0000
Subject: [PATCH 4/4] Sync charm-helpers and actually pick up charmhelpers/__init__.py this time.

---
 hooks/charmhelpers/__init__.py     | 22 ++++++++++++++++++++++
 hooks/charmhelpers/core/hookenv.py | 18 ++++++++++++++----
 2 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
index e69de29..b46e2e2 100644
--- a/hooks/charmhelpers/__init__.py
+++ b/hooks/charmhelpers/__init__.py
@@ -0,0 +1,22 @@
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+    import six  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # flake8: noqa
+
+try:
+    import yaml  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # flake8: noqa
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 07d1f69..69ae456 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -395,21 +395,31 @@ def relations_of_type(reltype=None):
     return relation_data
 
 
+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a Python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
 @cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
-    charmdir = os.environ.get('CHARM_DIR', '')
-    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
-    md = yaml.safe_load(mdf)
     rel_types = []
+    md = metadata()
     for key in ('provides', 'requires', 'peers'):
         section = md.get(key)
         if section:
             rel_types.extend(section.keys())
-    mdf.close()
     return rel_types
 
 
+@cached
+def charm_name():
+    """Get the name of the current charm, as specified in metadata.yaml"""
+    return metadata().get('name')
+
+
 @cached
 def relations():
     """Get a nested dictionary of relation data for all related units"""
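
A minimal sketch of the new hookenv helpers, assuming a hook context where
charm_dir() resolves to a directory containing metadata.yaml; both functions
are @cached, so the file is parsed at most once per hook invocation::

    from charmhelpers.core.hookenv import metadata, charm_name

    md = metadata()                    # parsed metadata.yaml as a dict
    name = charm_name()                # shorthand for metadata().get('name')
    provides = md.get('provides', {})  # e.g. relation definitions
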