diff --git a/.bzrignore b/.bzrignore new file mode 100644 index 0000000..ba077a4 --- /dev/null +++ b/.bzrignore @@ -0,0 +1 @@ +bin diff --git a/Makefile b/Makefile index 17f7bee..588a962 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,17 @@ #!/usr/bin/make +PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers hooks @charm proof -sync: - @charm-helper-sync -c charm-helpers.yaml +bin/charm_helpers_sync.py: + @mkdir -p bin + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ > bin/charm_helpers_sync.py + +sync: bin/charm_helpers_sync.py + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml test: @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index bf832f7..7151b1d 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -6,6 +6,11 @@ # Adam Gandelman # +""" +Helpers for clustering and determining "cluster leadership", along with +other clustering-related utilities. +""" + import subprocess import os @@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import ( config as config_get, INFO, ERROR, + WARNING, unit_get, ) @@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception): pass +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 2. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit number". In + other words, the oldest surviving unit. + """ + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + def is_clustered(): for r_id in (relation_ids('ha') or []): for unit in (relation_list(r_id) or []): @@ -38,7 +67,11 @@ def is_clustered(): return False -def is_leader(resource): +def is_crm_leader(resource): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + """ cmd = [ "crm", "resource", "show", resource ] @@ -54,15 +87,31 @@ return False
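+
+# A minimal usage sketch (illustrative only; the resource name and the
+# migrate_database() helper are invented for this example, not part of the
+# patch). Charms typically gate one-off work, such as database migrations,
+# on is_elected_leader():
+#
+#     from charmhelpers.contrib.hahelpers.cluster import is_elected_leader
+#     if is_elected_leader('res_ks_vip'):
+#         migrate_database()
-def peer_units(): +def is_leader(resource): + log("is_leader is deprecated. 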
Please consider using is_crm_leader " "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): peers = [] - for r_id in (relation_ids('cluster') or []): + for r_id in (relation_ids(peer_relation) or []): for unit in (relation_list(r_id) or []): peers.append(unit) return peers +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) for peer in peers: remote_unit_no = int(peer.split('/')[1]) @@ -72,16 +121,9 @@ def eligible_leader(resource): - if is_clustered(): - if not is_leader(resource): - log('Deferring action to CRM leader.', level=INFO) - return False - else: - peers = peer_units() - if peers and not oldest_peer(peers): - log('Deferring action to oldest service unit.', level=INFO) - return False - return True + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) def https(): @@ -146,12 +188,12 @@ def get_hacluster_config(): Obtains all relevant configuration from charm configuration required for initiating a relation to hacluster: - ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr + ha-bindiface, ha-mcastport, vip returns: dict: A dict containing settings keyed by setting name. raises: HAIncompleteConfig if settings are missing. ''' - settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] + settings = ['ha-bindiface', 'ha-mcastport', 'vip'] conf = {} for setting in settings: conf[setting] = config_get(setting) @@ -170,6 +212,7 @@ def canonical_url(configs, vip_setting='vip'): :configs : OSTemplateRenderer: A config templating object to inspect for a complete https context. + :vip_setting: str: Setting in charm config that specifies VIP address. ''' diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 0000000..9179eeb --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,61 @@ +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms.
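+
+    A minimal sketch of intended usage (service names are illustrative;
+    the exact service-list format is defined by AmuletDeployment)::
+
+        class MyCharmDeployment(OpenStackAmuletDeployment):
+            def __init__(self, series, openstack=None, source=None):
+                super(MyCharmDeployment, self).__init__(series, openstack,
+                                                        source)
+                self._add_services(['my-charm'], [['mysql']])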
+ """ + + def __init__(self, series=None, openstack=None, source=None): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin.""" + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + name = 0 + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + + if self.openstack: + for svc in services: + if svc[name] not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc[name], config) + + if self.source: + for svc in services: + if svc[name] in use_source: + config = {'source': self.source} + self.d.configure(svc[name], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 0000000..bd327bd --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,275 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+ """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in expected.iteritems(): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + + if not os.path.exists(cirros_img): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, cirros_img) + f.close() + + with open(cirros_img) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = 
nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 474d51e..d41b74a 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -21,9 +21,11 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, + relation_set, unit_get, unit_private_ip, ERROR, + INFO ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -42,6 +44,11 @@ from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, ) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv6_addr, +) + CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -134,8 +141,26 @@ class SharedDBContext(OSContextGenerator): 'Missing required charm config options. ' '(database name and user)') raise OSContextError + ctxt = {} + # NOTE(jamespage) if mysql charm provides a network upon which + # access to the database should be made, reconfigure relation + # with the service units local address and defer execution + access_network = relation_get('access-network') + if access_network is not None: + if self.relation_prefix is not None: + hostname_key = "{}_hostname".format(self.relation_prefix) + else: + hostname_key = "hostname" + access_hostname = get_address_in_network(access_network, + unit_get('private-address')) + set_hostname = relation_get(attribute=hostname_key, + unit=local_unit()) + if set_hostname != access_hostname: + relation_set(relation_settings={hostname_key: access_hostname}) + return ctxt # Defer any further hook execution for now.... + password_setting = 'password' if self.relation_prefix: password_setting = self.relation_prefix + '_password' @@ -243,23 +268,31 @@ class IdentityServiceContext(OSContextGenerator): class AMQPContext(OSContextGenerator): - interfaces = ['amqp'] - def __init__(self, ssl_dir=None): + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): self.ssl_dir = ssl_dir + self.rel_name = rel_name + self.relation_prefix = relation_prefix + self.interfaces = [rel_name] def __call__(self): log('Generating template context for amqp') conf = config() + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' + if self.relation_prefix: + user_setting = self.relation_prefix + '-rabbit-user' + vhost_setting = self.relation_prefix + '-rabbit-vhost' + try: - username = conf['rabbit-user'] - vhost = conf['rabbit-vhost'] + username = conf[user_setting] + vhost = conf[vhost_setting] except KeyError as e: log('Could not generate shared_db context. 
' 'Missing required charm config options: %s.' % e) raise OSContextError ctxt = {} - for rid in relation_ids('amqp'): + for rid in relation_ids(self.rel_name): ha_vip_only = False for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): @@ -332,10 +365,12 @@ class CephContext(OSContextGenerator): use_syslog = str(config('use-syslog')).lower() for rid in relation_ids('ceph'): for unit in related_units(rid): - mon_hosts.append(relation_get('private-address', rid=rid, - unit=unit)) auth = relation_get('auth', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit) + ceph_addr = \ + relation_get('ceph-public-address', rid=rid, unit=unit) or \ + relation_get('private-address', rid=rid, unit=unit) + mon_hosts.append(ceph_addr) ctxt = { 'mon_hosts': ' '.join(mon_hosts), @@ -369,7 +404,12 @@ class HAProxyContext(OSContextGenerator): cluster_hosts = {} l_unit = local_unit().replace('/', '-') - cluster_hosts[l_unit] = unit_get('private-address') + if config('prefer-ipv6'): + addr = get_ipv6_addr() + else: + addr = unit_get('private-address') + cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), + addr) for rid in relation_ids('cluster'): for unit in related_units(rid): @@ -380,6 +420,16 @@ class HAProxyContext(OSContextGenerator): ctxt = { 'units': cluster_hosts, } + + if config('prefer-ipv6'): + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + ctxt['stat_port'] = ':::8888' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + ctxt['stat_port'] = ':8888' + if len(cluster_hosts.keys()) > 1: # Enable haproxy when we have enough peers. log('Ensuring haproxy enabled in /etc/default/haproxy.') @@ -418,12 +468,13 @@ class ApacheSSLContext(OSContextGenerator): """ Generates a context for an apache vhost configuration that configures HTTPS reverse proxying for one or many endpoints. Generated context - looks something like: - { - 'namespace': 'cinder', - 'private_address': 'iscsi.mycinderhost.com', - 'endpoints': [(8776, 8766), (8777, 8767)] - } + looks something like:: + + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } The endpoints list consists of tuples mapping external ports to internal ports. @@ -541,6 +592,26 @@ class NeutronContext(OSContextGenerator): return nvp_ctxt + def n1kv_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_ctxt = { + 'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': config( + 'n1kv_restrict_policy_profiles'), + } + + return n1kv_ctxt + def neutron_ctxt(self): if https(): proto = 'https' @@ -572,6 +643,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.ovs_ctxt()) elif self.plugin in ['nvp', 'nsx']: ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -611,7 +684,7 @@ class SubordinateConfigContext(OSContextGenerator): The subordinate interface allows subordinates to export their configuration requirements to the principal for multiple config files and multiple services.
I.e., a subordinate that has interfaces - to both glance and nova may export to following yaml blob as json: + to both glance and nova may export the following yaml blob as json:: glance: /etc/glance/glance-api.conf: @@ -630,7 +703,8 @@ It is then up to the principal charms to subscribe this context to the service+config file it is interested in. Configuration data will - be available in the template context, in glance's case, as: + be available in the template context, in glance's case, as:: + ctxt = { ... other context ... 'subordinate_config': { @@ -657,7 +731,7 @@ self.interface = interface def __call__(self): - ctxt = {} + ctxt = {'sections': {}} for rid in relation_ids(self.interface): for unit in related_units(rid): sub_config = relation_get('subordinate_configuration', @@ -683,14 +757,29 @@ sub_config = sub_config[self.config_file] for k, v in sub_config.iteritems(): - ctxt[k] = v + if k == 'sections': + for section, config_dict in v.iteritems(): + log("adding section '%s'" % (section)) + ctxt[k][section] = config_dict + else: + ctxt[k] = v - if not ctxt: - ctxt['sections'] = {} + log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) return ctxt +class LogLevelContext(OSContextGenerator): + + def __call__(self): + ctxt = {} + ctxt['debug'] = \ + False if config('debug') is None else config('debug') + ctxt['verbose'] = \ + False if config('verbose') is None else config('verbose') + return ctxt + + class SyslogContext(OSContextGenerator): def __call__(self): diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 0000000..affe8cd --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,79 @@ +from charmhelpers.core.hookenv import ( + config, + unit_get, +) + +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, +) + +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' + +_address_map = { + PUBLIC: { + 'config': 'os-public-network', + 'fallback': 'public-address' + }, + INTERNAL: { + 'config': 'os-internal-network', + 'fallback': 'private-address' + }, + ADMIN: { + 'config': 'os-admin-network', + 'fallback': 'private-address' + } +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :configs OSTemplateRenderer: A config templating object to inspect for + a complete https context. + :endpoint_type str: The endpoint type to resolve. + + :returns str: Base URL for services on the current service unit.
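+
+    A minimal usage sketch (CONFIGS stands for whatever OSConfigRenderer
+    object the charm already maintains; the return value is illustrative)::
+
+        url = canonical_url(CONFIGS, PUBLIC)
+        # e.g. 'https://10.0.0.10' or 'http://[2001:db8::1]'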
+ ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + return '%s://%s' % (scheme, address) + + +def resolve_address(endpoint_type=PUBLIC): + resolved_address = None + if is_clustered(): + if config(_address_map[endpoint_type]['config']) is None: + # Assume vip is simple and pass back directly + resolved_address = config('vip') + else: + for vip in config('vip').split(): + if is_address_in_network( + config(_address_map[endpoint_type]['config']), + vip): + resolved_address = vip + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr() + else: + fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) + resolved_address = get_address_in_network( + config(_address_map[endpoint_type]['config']), fallback_addr) + + if resolved_address is None: + raise ValueError('Unable to resolve a suitable IP address' + ' based on charm state and configuration') + else: + return resolved_address diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index ba97622..84d97bc 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -128,6 +128,20 @@ def neutron_plugins(): 'server_packages': ['neutron-server', 'neutron-plugin-vmware'], 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 4595778..f544271 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -30,17 +30,17 @@ def get_loader(templates_dir, os_release): loading dir. A charm may also ship a templates dir with this module - and it will be appended to the bottom of the search list, eg: - hooks/charmhelpers/contrib/openstack/templates. + and it will be appended to the bottom of the search list, eg:: - :param templates_dir: str: Base template directory containing release - sub-directories. - :param os_release : str: OpenStack release codename to construct template - loader. + hooks/charmhelpers/contrib/openstack/templates - :returns : jinja2.ChoiceLoader constructed with a list of - jinja2.FilesystemLoaders, ordered in descending - order by OpenStack release. + :param templates_dir (str): Base template directory containing release + sub-directories. + :param os_release (str): OpenStack release codename to construct template + loader. + :returns: jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) for rel in OPENSTACK_CODENAMES.itervalues()] @@ -111,7 +111,8 @@ class OSConfigRenderer(object): and ease the burden of managing config templates across multiple OpenStack releases. 
- Basic usage: + Basic usage:: + # import some common context generates from charmhelpers from charmhelpers.contrib.openstack import context @@ -131,21 +132,19 @@ class OSConfigRenderer(object): # write out all registered configs configs.write_all() - Details: + **OpenStack Releases and template loading** - OpenStack Releases and template loading - --------------------------------------- When the object is instantiated, it is associated with a specific OS release. This dictates how the template loader will be constructed. The constructed loader attempts to load the template from several places in the following order: - - from the most recent OS release-specific template dir (if one exists) - - the base templates_dir - - a template directory shipped in the charm with this helper file. + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. + For the example above, '/tmp/templates' contains the following structure:: - For the example above, '/tmp/templates' contains the following structure: /tmp/templates/nova.conf /tmp/templates/api-paste.ini /tmp/templates/grizzly/api-paste.ini @@ -169,8 +168,8 @@ class OSConfigRenderer(object): $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows us to ship common templates (haproxy, apache) with the helpers. - Context generators - --------------------------------------- + **Context generators** + Context generators are used to generate template contexts during hook execution. Doing so may require inspecting service relations, charm config, etc. When registered, a config file is associated with a list diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 1a44ab1..127b03f 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -3,7 +3,6 @@ # Common python helper functions used for OpenStack charms. 
from collections import OrderedDict -import apt_pkg as apt import subprocess import os import socket @@ -85,6 +84,8 @@ def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] rel = '' + if src is None: + return rel if src in ['distro', 'distro-proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] @@ -132,6 +133,7 @@ def get_os_version_codename(codename): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt apt.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -189,7 +191,7 @@ def get_os_version_package(pkg, fatal=True): for version, cname in vers_map.iteritems(): if cname == codename: return version - #e = "Could not determine OpenStack version for package: %s" % pkg + # e = "Could not determine OpenStack version for package: %s" % pkg # error_out(e) @@ -325,6 +327,7 @@ def openstack_upgrade_available(package): """ + import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) available_vers = get_os_version_install_source(src) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1241741..768438a 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -303,7 +303,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, blk_device, fstype, system_services=[]): """ NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. + the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, and the device is formatted and mounted at the given mount_point. diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26..1b95871 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. @@ -45,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py new file mode 100644 index 0000000..cfaf0a6 --- /dev/null +++ b/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index c2e66f6..c953043 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ cache = {} def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... 
+ pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 186147f..ca7780d 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -12,11 +12,13 @@ import random import string import subprocess import hashlib -import apt_pkg +import shutil +from contextlib import contextmanager from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +37,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -51,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -61,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -144,7 +157,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +180,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +194,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True @@ -198,13 +223,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... 
+ pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -300,12 +325,40 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. + apt_pkg.config.set("Dir::Cache::pkgcache", "") pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 0000000..e8039a8 --- /dev/null +++ b/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py new file mode 100644 index 0000000..f08e6d7 --- /dev/null +++ b/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,305 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Traditional charm authoring is focused on implementing hooks. That is, + the charm author is thinking in terms of "What hook am I handling; what + does this hook need to do?" However, in most cases, the real question + should be "Do I have the information I need to configure and start this + piece of software and, if so, what are the steps for doing so?" The + ServiceManager framework tries to bring the focus to the data and the + setup tasks, in the most declarative way possible. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": <service name>, + "required_data": <list of required data contexts>, + "data_ready": <one or more callbacks>, + "data_lost": <one or more callbacks>, + "start": <one or more callbacks>, + "stop": <one or more callbacks>, + "ports": <list of ports to manage>, + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated is the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+ Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + + def provide_data(self): + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + if provider._is_ready(data): + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. 
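+
+        Charms normally reach this via manage() rather than calling it
+        directly; a sketch, assuming SERVICE_DEFINITIONS is the charm's own
+        list of service definitions::
+
+            manager = ServiceManager(SERVICE_DEFINITIONS)
+            manager.manage()  # dispatches based on the current hook name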
+ """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. 
+ """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 0000000..4b90589 --- /dev/null +++ b/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. 
+ + The units are sorted lexicographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a template, for use as a ready action. + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py new file mode 100644 index 0000000..2c63885 --- /dev/null +++ b/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it.
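+
+    A minimal usage sketch (paths and context values are illustrative)::
+
+        render('haproxy.cfg', '/etc/haproxy/haproxy.cfg',
+               {'units': units}, perms=0644)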
+ """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index e8e837a..d73cc34 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ from charmhelpers.core.hookenv import ( config, log, ) -import apt_pkg import os @@ -117,11 +116,13 @@ class BaseFetchHandler(object): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if # another process is already building the cache). apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") cache = apt_pkg.Cache() _pkgs = [] @@ -235,31 +236,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The frament needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. 
""" - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a..0e580e4 100644 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ class BzrUrlFetchHandler(BaseFetchHandler): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: