From 7702073099e6661342c09d2e9cfa2377d6f79c8f Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 24 Oct 2014 10:20:36 -0300 Subject: [PATCH 01/35] Add relation with memcached to use it to store nova-authconsole tokens Fix bug #989337 --- hooks/cache-relation-broken | 1 + hooks/cache-relation-changed | 1 + hooks/cache-relation-departed | 1 + hooks/cache-relation-joined | 1 + hooks/nova_cc_context.py | 21 ++++++++++- hooks/nova_cc_hooks.py | 9 +++++ metadata.yaml | 2 + templates/folsom/nova.conf | 5 +++ templates/grizzly/nova.conf | 5 +++ templates/havana/nova.conf | 4 ++ templates/icehouse/nova.conf | 4 ++ unit_tests/test_nova_cc_contexts.py | 58 +++++++++++++++++++++++++++++ 12 files changed, 111 insertions(+), 1 deletion(-) create mode 120000 hooks/cache-relation-broken create mode 120000 hooks/cache-relation-changed create mode 120000 hooks/cache-relation-departed create mode 120000 hooks/cache-relation-joined create mode 100644 unit_tests/test_nova_cc_contexts.py diff --git a/hooks/cache-relation-broken b/hooks/cache-relation-broken new file mode 120000 index 00000000..f6702415 --- /dev/null +++ b/hooks/cache-relation-broken @@ -0,0 +1 @@ +nova_cc_hooks.py \ No newline at end of file diff --git a/hooks/cache-relation-changed b/hooks/cache-relation-changed new file mode 120000 index 00000000..f6702415 --- /dev/null +++ b/hooks/cache-relation-changed @@ -0,0 +1 @@ +nova_cc_hooks.py \ No newline at end of file diff --git a/hooks/cache-relation-departed b/hooks/cache-relation-departed new file mode 120000 index 00000000..f6702415 --- /dev/null +++ b/hooks/cache-relation-departed @@ -0,0 +1 @@ +nova_cc_hooks.py \ No newline at end of file diff --git a/hooks/cache-relation-joined b/hooks/cache-relation-joined new file mode 120000 index 00000000..f6702415 --- /dev/null +++ b/hooks/cache-relation-joined @@ -0,0 +1 @@ +nova_cc_hooks.py \ No newline at end of file diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index bb3db4fb..94557ffd 100644 --- 
a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -1,6 +1,6 @@ from charmhelpers.core.hookenv import ( config, relation_ids, relation_set, log, ERROR, - unit_get, related_units, relation_get) + unit_get, related_units, relation_get, relations_for_id) from charmhelpers.fetch import apt_install, filter_installed_packages from charmhelpers.contrib.openstack import context, neutron, utils @@ -281,3 +281,22 @@ class NovaIPv6Context(context.BindHostContext): ctxt = super(NovaIPv6Context, self).__call__() ctxt['use_ipv6'] = config('prefer-ipv6') return ctxt + + +class InstanceConsoleContext(context.OSContextGenerator): + interfaces = [] + + def __call__(self): + ctxt = {} + servers = [] + try: + for rid in relation_ids('cache'): + for rel in relations_for_id(rid): + servers.append({'private-address': rel['private-address'], + 'port': rel['port']}) + except Exception as ex: + log(str(ex)) + servers = [] + + ctxt['memcached_servers'] = servers + return ctxt diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 7fd18916..5fe9eea7 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -852,6 +852,15 @@ def neutron_api_relation_broken(): quantum_joined(rid=rid) +@hooks.hook('cache-relation-joined', + 'cache-relation-departed', + 'cache-relation-changed', + 'cache-relation-broken') +@restart_on_change(restart_map()) +def memcached_joined(): + CONFIGS.write(NOVA_CONF) + + def main(): try: hooks.execute(sys.argv) diff --git a/metadata.yaml b/metadata.yaml index 573e5232..be2440de 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -40,6 +40,8 @@ requires: nova-vmware: interface: nova-vmware scope: container + cache: + interface: memcache peers: cluster: interface: nova-ha diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index ed0f13ac..bbc5d832 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -21,6 +21,11 @@ volumes_path=/var/lib/nova/volumes enabled_apis=ec2,osapi_compute,metadata 
auth_strategy=keystone compute_driver=libvirt.LibvirtDriver + +{% if memcached_servers %} +memcached_servers = {%for s in memcached_servers %}{% if loop.index0 != 0 %},{% endif %}{{s['private-address']}}:{{s['port']}}{% endfor %} +{% endif %} + {% if keystone_ec2_url -%} keystone_ec2_url = {{ keystone_ec2_url }} {% endif -%} diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf index 9c308fae..e67c32fc 100644 --- a/templates/grizzly/nova.conf +++ b/templates/grizzly/nova.conf @@ -20,6 +20,11 @@ volumes_path=/var/lib/nova/volumes enabled_apis=ec2,osapi_compute,metadata auth_strategy=keystone compute_driver=libvirt.LibvirtDriver + +{% if memcached_servers %} +memcached_servers = {%for s in memcached_servers %}{% if loop.index0 != 0 %},{% endif %}{{s['private-address']}}:{{s['port']}}{% endfor %} +{% endif %} + {% if keystone_ec2_url -%} keystone_ec2_url = {{ keystone_ec2_url }} {% endif -%} diff --git a/templates/havana/nova.conf b/templates/havana/nova.conf index 24a0fcfd..22cd8ed9 100644 --- a/templates/havana/nova.conf +++ b/templates/havana/nova.conf @@ -26,6 +26,10 @@ scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFil cpu_allocation_ratio = {{ cpu_allocation_ratio }} use_syslog={{ use_syslog }} +{% if memcached_servers %} +memcached_servers = {%for s in memcached_servers %}{% if loop.index0 != 0 %},{% endif %}{{s['private-address']}}:{{s['port']}}{% endfor %} +{% endif %} + {% if keystone_ec2_url -%} keystone_ec2_url = {{ keystone_ec2_url }} {% endif -%} diff --git a/templates/icehouse/nova.conf b/templates/icehouse/nova.conf index c2b62e42..a0e19da0 100644 --- a/templates/icehouse/nova.conf +++ b/templates/icehouse/nova.conf @@ -38,6 +38,10 @@ ram_allocation_ratio = {{ ram_allocation_ratio }} use_syslog={{ use_syslog }} +{% if memcached_servers %} +memcached_servers = {%for s in memcached_servers %}{% if loop.index0 != 0 %},{% endif %}{{s['private-address']}}:{{s['port']}}{% endfor %} +{% endif %} + {% if 
keystone_ec2_url -%} keystone_ec2_url = {{ keystone_ec2_url }} {% endif -%} diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py new file mode 100644 index 00000000..1945d949 --- /dev/null +++ b/unit_tests/test_nova_cc_contexts.py @@ -0,0 +1,58 @@ +from __future__ import print_function + +import mock +import nova_cc_context as context + +from charmhelpers.contrib.openstack import utils + +from test_utils import CharmTestCase + + +TO_PATCH = [ + 'apt_install', + 'filter_installed_packages', + 'relation_ids', + 'relation_get', + 'related_units', + 'config', + 'log', + 'unit_get', + 'relations_for_id', +] + + +def fake_log(msg, level=None): + level = level or 'INFO' + print('[juju test log (%s)] %s' % (level, msg)) + + +class NovaComputeContextTests(CharmTestCase): + def setUp(self): + super(NovaComputeContextTests, self).setUp(context, TO_PATCH) + self.relation_get.side_effect = self.test_relation.get + self.config.side_effect = self.test_config.get + self.log.side_effect = fake_log + + @mock.patch.object(utils, 'os_release') + def test_instance_console_context_without_memcache(self, os_release): + self.unit_get.return_value = '127.0.0.1' + self.relation_ids.return_value = 'cache:0' + self.related_units.return_value = 'memcached/0' + instance_console = context.InstanceConsoleContext() + os_release.return_value = 'icehouse' + self.assertEqual({'memcached_servers': []}, + instance_console()) + + @mock.patch.object(utils, 'os_release') + def test_instance_console_context_with_memcache(self, os_release): + memcached_servers = [{'private-address': '127.0.1.1', + 'port': '11211'}] + self.unit_get.return_value = '127.0.0.1' + self.relation_ids.return_value = ['cache:0'] + self.relations_for_id.return_value = memcached_servers + self.related_units.return_value = 'memcached/0' + instance_console = context.InstanceConsoleContext() + os_release.return_value = 'icehouse' + self.maxDiff = None + self.assertEqual({'memcached_servers': 
memcached_servers}, + instance_console()) From 641bbc776ebdd1014574599d41c13a52b621ae46 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 24 Oct 2014 12:00:23 -0300 Subject: [PATCH 02/35] Add InstanceConsoleContext to the resource map --- hooks/nova_cc_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 88112039..5f23866b 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -123,7 +123,8 @@ BASE_RESOURCE_MAP = OrderedDict([ nova_cc_context.VolumeServiceContext(), nova_cc_context.NovaIPv6Context(), nova_cc_context.NeutronCCContext(), - nova_cc_context.NovaConfigContext()], + nova_cc_context.NovaConfigContext(), + nova_cc_context.InstanceConsoleContext()], }), (NOVA_API_PASTE, { 'services': [s for s in BASE_SERVICES if 'api' in s], From d291474335d592bdfe85c46b23e1e943e06ce8ab Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 24 Oct 2014 17:47:02 -0300 Subject: [PATCH 03/35] Add python-memcache to be installed --- hooks/nova_cc_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 5f23866b..9a2e5d12 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -68,6 +68,7 @@ BASE_PACKAGES = [ 'python-psycopg2', 'python-psutil', 'uuid', + 'python-memcached', ] BASE_SERVICES = [ From 547660045d1883da9b3cd0e4fb254c4cbc9d3a1f Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 24 Oct 2014 18:04:26 -0300 Subject: [PATCH 04/35] Fix typo in memcache package --- hooks/nova_cc_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 9a2e5d12..ea1dae5d 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -68,7 +68,7 @@ BASE_PACKAGES = [ 'python-psycopg2', 'python-psutil', 'uuid', - 'python-memcached', + 'python-memcache', ] BASE_SERVICES = [ From 131188db7142e290b943736ac4fbd56772880b07 Mon Sep 17 00:00:00 2001 From: root 
Date: Wed, 29 Oct 2014 22:30:36 -0500 Subject: [PATCH 05/35] [bradm] initial nrpe checks --- config.yaml | 11 + files/nrpe-external-master/check_upstart_job | 72 ++++++ .../contrib/charmsupport/__init__.py | 0 .../charmhelpers/contrib/charmsupport/nrpe.py | 218 ++++++++++++++++++ .../contrib/charmsupport/volumes.py | 156 +++++++++++++ hooks/nova_cc_hooks.py | 25 ++ hooks/nrpe-external-master-relation-changed | 1 + hooks/nrpe-external-master-relation-joined | 1 + metadata.yaml | 3 + 9 files changed, 487 insertions(+) create mode 100755 files/nrpe-external-master/check_upstart_job create mode 100644 hooks/charmhelpers/contrib/charmsupport/__init__.py create mode 100644 hooks/charmhelpers/contrib/charmsupport/nrpe.py create mode 100644 hooks/charmhelpers/contrib/charmsupport/volumes.py create mode 120000 hooks/nrpe-external-master-relation-changed create mode 120000 hooks/nrpe-external-master-relation-joined diff --git a/config.yaml b/config.yaml index b74ed243..475acd9d 100644 --- a/config.yaml +++ b/config.yaml @@ -292,3 +292,14 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + nagios_context: + default: "juju" + type: string + description: | + Used by the nrpe-external-master subordinate charm. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. + diff --git a/files/nrpe-external-master/check_upstart_job b/files/nrpe-external-master/check_upstart_job new file mode 100755 index 00000000..94efb95e --- /dev/null +++ b/files/nrpe-external-master/check_upstart_job @@ -0,0 +1,72 @@ +#!/usr/bin/python + +# +# Copyright 2012, 2013 Canonical Ltd. 
+# +# Author: Paul Collins +# +# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html +# + +import sys + +import dbus + + +class Upstart(object): + def __init__(self): + self._bus = dbus.SystemBus() + self._upstart = self._bus.get_object('com.ubuntu.Upstart', + '/com/ubuntu/Upstart') + def get_job(self, job_name): + path = self._upstart.GetJobByName(job_name, + dbus_interface='com.ubuntu.Upstart0_6') + return self._bus.get_object('com.ubuntu.Upstart', path) + + def get_properties(self, job): + path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job') + instance = self._bus.get_object('com.ubuntu.Upstart', path) + return instance.GetAll('com.ubuntu.Upstart0_6.Instance', + dbus_interface=dbus.PROPERTIES_IFACE) + + def get_job_instances(self, job_name): + job = self.get_job(job_name) + paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job') + return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths] + + def get_job_instance_properties(self, job): + return job.GetAll('com.ubuntu.Upstart0_6.Instance', + dbus_interface=dbus.PROPERTIES_IFACE) + +try: + upstart = Upstart() + try: + job = upstart.get_job(sys.argv[1]) + props = upstart.get_properties(job) + + if props['state'] == 'running': + print 'OK: %s is running' % sys.argv[1] + sys.exit(0) + else: + print 'CRITICAL: %s is not running' % sys.argv[1] + sys.exit(2) + + except dbus.DBusException as e: + instances = upstart.get_job_instances(sys.argv[1]) + propses = [upstart.get_job_instance_properties(instance) for instance in instances] + states = dict([(props['name'], props['state']) for props in propses]) + if len(states) != states.values().count('running'): + not_running = [] + for name in states.keys(): + if states[name] != 'running': + not_running.append(name) + print 'CRITICAL: %d instances of %s not running: %s' % \ + (len(not_running), sys.argv[1], not_running.join(', ')) + sys.exit(2) + else: + print 'OK: %d instances of %s 
running' % (len(states), sys.argv[1]) + +except dbus.DBusException as e: + print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1] + sys.exit(2) + diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 00000000..f3bfe3f3 --- /dev/null +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,218 @@ +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_ids, + relation_set, +) + +from charmhelpers.core.host import service + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) +# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) 
+# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/', + os.path.join(os.environ['CHARM_DIR'], + 'files/nrpe-external-master'), + '/usr/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if 
os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def write(self, nagios_context, hostname): + nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( + self.command) + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname) + + def write_service_config(self, nagios_context, hostname): + for f in os.listdir(NRPE.nagios_exportdir): + if re.search('.*{}.cfg'.format(self.command), f): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_context, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = '{}/service__{}_{}.cfg'.format( + NRPE.nagios_exportdir, hostname, self.command) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + + def __init__(self): + super(NRPE, self).__init__() + self.config = config() + self.nagios_context = self.config['nagios_context'] + self.unit_name = local_unit().replace('/', '-') + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + + def add_check(self, *args, **kwargs): + self.checks.append(Check(*args, **kwargs)) 
+ + def write(self): + try: + nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except: + log("Nagios user not set up, nrpe checks not updated") + return + + if not os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + service('restart', 'nagios-nrpe-server') + + for rid in relation_ids("local-monitors"): + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py new file mode 100644 index 00000000..0f905dff --- /dev/null +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -0,0 +1,156 @@ +''' +Functions for managing volumes in juju units. One volume is supported per unit. +Subordinates may have their own storage, provided it is on its own partition. + +Configuration stanzas: + volume-ephemeral: + type: boolean + default: true + description: > + If false, a volume is mounted as sepecified in "volume-map" + If true, ephemeral storage will be used, meaning that log data + will only exist as long as the machine. YOU HAVE BEEN WARNED. + volume-map: + type: string + default: {} + description: > + YAML map of units to device names, e.g: + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" + Service units will raise a configure-error if volume-ephemeral + is 'true' and no volume-map value is set. Use 'juju set' to set a + value and 'juju resolved' to complete configuration. 
+ +Usage: + from charmsupport.volumes import configure_volume, VolumeConfigurationError + from charmsupport.hookenv import log, ERROR + def post_mount_hook(): + stop_service('myservice') + def post_mount_hook(): + start_service('myservice') + + if __name__ == '__main__': + try: + configure_volume(before_change=pre_mount_hook, + after_change=post_mount_hook) + except VolumeConfigurationError: + log('Storage could not be configured', ERROR) +''' + +# XXX: Known limitations +# - fstab is neither consulted nor updated + +import os +from charmhelpers.core import hookenv +from charmhelpers.core import host +import yaml + + +MOUNT_BASE = '/srv/juju/volumes' + + +class VolumeConfigurationError(Exception): + '''Volume configuration data is missing or invalid''' + pass + + +def get_config(): + '''Gather and sanity-check volume configuration data''' + volume_config = {} + config = hookenv.config() + + errors = False + + if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): + volume_config['ephemeral'] = True + else: + volume_config['ephemeral'] = False + + try: + volume_map = yaml.safe_load(config.get('volume-map', '{}')) + except yaml.YAMLError as e: + hookenv.log("Error parsing YAML volume-map: {}".format(e), + hookenv.ERROR) + errors = True + if volume_map is None: + # probably an empty string + volume_map = {} + elif not isinstance(volume_map, dict): + hookenv.log("Volume-map should be a dictionary, not {}".format( + type(volume_map))) + errors = True + + volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) + if volume_config['device'] and volume_config['ephemeral']: + # asked for ephemeral storage but also defined a volume ID + hookenv.log('A volume is defined for this unit, but ephemeral ' + 'storage was requested', hookenv.ERROR) + errors = True + elif not volume_config['device'] and not volume_config['ephemeral']: + # asked for permanent storage but did not define volume ID + hookenv.log('Ephemeral storage was requested, but 
there is no volume ' + 'defined for this unit.', hookenv.ERROR) + errors = True + + unit_mount_name = hookenv.local_unit().replace('/', '-') + volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) + + if errors: + return None + return volume_config + + +def mount_volume(config): + if os.path.exists(config['mountpoint']): + if not os.path.isdir(config['mountpoint']): + hookenv.log('Not a directory: {}'.format(config['mountpoint'])) + raise VolumeConfigurationError() + else: + host.mkdir(config['mountpoint']) + if os.path.ismount(config['mountpoint']): + unmount_volume(config) + if not host.mount(config['device'], config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def unmount_volume(config): + if os.path.ismount(config['mountpoint']): + if not host.umount(config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def managed_mounts(): + '''List of all mounted managed volumes''' + return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) + + +def configure_volume(before_change=lambda: None, after_change=lambda: None): + '''Set up storage (or don't) according to the charm's volume configuration. + Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes. 
+ ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 6e53a903..516fd89b 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -116,6 +116,8 @@ from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.openstack.context import ADDRESS_TYPES +from charmhelpers.contrib.charmsupport.nrpe import NRPE + hooks = Hooks() CONFIGS = register_configs() @@ -166,6 +168,7 @@ def config_changed(): for r_id in relation_ids('identity-service'): identity_joined(rid=r_id) [cluster_joined(rid) for rid in relation_ids('cluster')] + update_nrpe_config() @hooks.hook('amqp-relation-joined') @@ -775,6 +778,7 @@ def upgrade_charm(): for r_id in relation_ids('cloud-compute'): for unit in related_units(r_id): compute_changed(r_id, unit) + update_nrpe_config() # remote_restart is defaulted to true as nova-cells may have started the @@ -848,6 +852,27 @@ def neutron_api_relation_broken(): for rid in relation_ids('quantum-network-service'): quantum_joined(rid=rid) +@hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') +def update_nrpe_config(): + SERVICES = [ + 'nova-api-os-compute', + 'nova-api-ec2', + 'nova-cert', + 'nova-objectstore', + 'nova-scheduler', + 'nova-conductor', + ] + nrpe = NRPE() + apt_install('python-dbus') + + for service in SERVICES: + nrpe.add_check( + 
shortname=service, + description='%s process' % service, + check_cmd = 'check_upstart_job %s' % service, + ) + + nrpe.write() def main(): try: diff --git a/hooks/nrpe-external-master-relation-changed b/hooks/nrpe-external-master-relation-changed new file mode 120000 index 00000000..f6702415 --- /dev/null +++ b/hooks/nrpe-external-master-relation-changed @@ -0,0 +1 @@ +nova_cc_hooks.py \ No newline at end of file diff --git a/hooks/nrpe-external-master-relation-joined b/hooks/nrpe-external-master-relation-joined new file mode 120000 index 00000000..f6702415 --- /dev/null +++ b/hooks/nrpe-external-master-relation-joined @@ -0,0 +1 @@ +nova_cc_hooks.py \ No newline at end of file diff --git a/metadata.yaml b/metadata.yaml index 573e5232..02fbc226 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -7,6 +7,9 @@ description: | categories: - openstack provides: + nrpe-external-master: + interface: nrpe-external-master + scope: container cloud-controller: interface: nova requires: From f95e4eb0339be841015b12753d97e98825b7ae15 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 30 Oct 2014 16:03:48 +1000 Subject: [PATCH 06/35] [bradm] Added charmsupport to charmhelpers --- charm-helpers-hooks.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index 8211c8dd..2fa6f30a 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -10,3 +10,4 @@ include: - payload.execd - contrib.network.ip - contrib.peerstorage + - contrib.charmsupport From 999118550f784980f3494bf88712c022ecbe7696 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Fri, 31 Oct 2014 14:52:27 +1000 Subject: [PATCH 07/35] [bradm] Added support to get nagios hostname from nrpe relation --- hooks/charmhelpers/contrib/charmsupport/nrpe.py | 8 ++++++-- hooks/nova_cc_hooks.py | 9 ++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py 
index f3bfe3f3..51b62d39 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -129,6 +129,7 @@ define service {{ os.path.join(os.environ['CHARM_DIR'], 'files/nrpe-external-master'), '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', ) parts = shlex.split(check_cmd) for path in search_path: @@ -181,12 +182,15 @@ class NRPE(object): nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' - def __init__(self): + def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] self.unit_name = local_unit().replace('/', '-') - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + if hostname: + self.hostname = hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 516fd89b..94be2465 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -20,6 +20,7 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, relation_set, + relations_of_type, related_units, open_port, unit_get, @@ -862,7 +863,13 @@ def update_nrpe_config(): 'nova-scheduler', 'nova-conductor', ] - nrpe = NRPE() + # Find out if nrpe set nagios_hostname + hostname = None + for rel in relations_of_type('nrpe-external-master'): + if 'nagios_hostname' in rel: + hostname = rel['nagios_hostname'] + break + nrpe = NRPE(hostname=hostname) apt_install('python-dbus') for service in SERVICES: From 25da383c0fbc502093f8b006cfe8d60d19dbe5e6 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 4 Nov 2014 17:16:45 +1000 Subject: [PATCH 08/35] [bradm] Tweaked check to include host context and unit name --- hooks/nova_cc_hooks.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 
94be2465..80dfc4e0 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -868,14 +868,17 @@ def update_nrpe_config(): for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] + host_context = rel['nagios_host_context'] break nrpe = NRPE(hostname=hostname) apt_install('python-dbus') - + + current_unit = "%s:%s" % (host_context, local_unit()) + for service in SERVICES: nrpe.add_check( shortname=service, - description='%s process' % service, + description='process check {%s}' % current_unit, check_cmd = 'check_upstart_job %s' % service, ) From a37379fef5f6d5e1b4f9aaf5437154ad2dc7be81 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 4 Nov 2014 15:11:18 -0300 Subject: [PATCH 09/35] Changed the way charmhelpers.core.hookenv.config is imported This change allows to easily monkey patch the function with a mock, otherwise the 'config' function is left imported at the module level. --- hooks/nova_cc_context.py | 32 ++++++++++++++--------------- unit_tests/test_nova_cc_contexts.py | 2 -- unit_tests/test_nova_cc_utils.py | 13 ++++++++---- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 94557ffd..b190dbd5 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -1,7 +1,7 @@ from charmhelpers.core.hookenv import ( - config, relation_ids, relation_set, log, ERROR, + relation_ids, relation_set, log, ERROR, unit_get, related_units, relation_get, relations_for_id) - +from charmhelpers.core import hookenv from charmhelpers.fetch import apt_install, filter_installed_packages from charmhelpers.contrib.openstack import context, neutron, utils @@ -181,14 +181,14 @@ def canonical_url(vip_setting='vip'): if https(): scheme = 'https' - if config('prefer-ipv6'): + if hookenv.config('prefer-ipv6'): if is_clustered(): - addr = '[%s]' % config(vip_setting) + addr = '[%s]' % hookenv.config(vip_setting) else: - addr = '[%s]' % 
get_ipv6_addr(exc_list=[config('vip')])[0] + addr = '[%s]' % get_ipv6_addr(exc_list=[hookenv.config('vip')])[0] else: if is_clustered(): - addr = config(vip_setting) + addr = hookenv.config(vip_setting) else: addr = unit_get('private-address') @@ -209,8 +209,8 @@ class NeutronCCContext(context.NeutronContext): @property def neutron_security_groups(self): - sec_groups = (config('neutron-security-groups') or - config('quantum-security-groups')) + sec_groups = (hookenv.config('neutron-security-groups') or + hookenv.config('quantum-security-groups')) return sec_groups.lower() == 'yes' def _ensure_packages(self): @@ -220,9 +220,9 @@ class NeutronCCContext(context.NeutronContext): def __call__(self): ctxt = super(NeutronCCContext, self).__call__() - ctxt['external_network'] = config('neutron-external-network') - if config('quantum-plugin') in ['nvp', 'nsx']: - _config = config() + ctxt['external_network'] = hookenv.config('neutron-external-network') + if hookenv.config('quantum-plugin') in ['nvp', 'nsx']: + _config = hookenv.config() for k, v in _config.iteritems(): if k.startswith('nvp'): ctxt[k.replace('-', '_')] = v @@ -251,7 +251,7 @@ class IdentityServiceContext(context.IdentityServiceContext): ctxt['service_port'] ) ctxt['keystone_ec2_url'] = ec2_tokens - ctxt['region'] = config('region') + ctxt['region'] = hookenv.config('region') return ctxt @@ -265,21 +265,21 @@ class NeutronPostgresqlDBContext(context.PostgresqlDBContext): def __init__(self): super(NeutronPostgresqlDBContext, - self).__init__(config('neutron-database')) + self).__init__(hookenv.config('neutron-database')) class NovaConfigContext(context.WorkerConfigContext): def __call__(self): ctxt = super(NovaConfigContext, self).__call__() - ctxt['cpu_allocation_ratio'] = config('cpu-allocation-ratio') - ctxt['ram_allocation_ratio'] = config('ram-allocation-ratio') + ctxt['cpu_allocation_ratio'] = hookenv.config('cpu-allocation-ratio') + ctxt['ram_allocation_ratio'] = hookenv.config('ram-allocation-ratio') 
return ctxt class NovaIPv6Context(context.BindHostContext): def __call__(self): ctxt = super(NovaIPv6Context, self).__call__() - ctxt['use_ipv6'] = config('prefer-ipv6') + ctxt['use_ipv6'] = hookenv.config('prefer-ipv6') return ctxt diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index 1945d949..1f4660ff 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -14,7 +14,6 @@ TO_PATCH = [ 'relation_ids', 'relation_get', 'related_units', - 'config', 'log', 'unit_get', 'relations_for_id', @@ -30,7 +29,6 @@ class NovaComputeContextTests(CharmTestCase): def setUp(self): super(NovaComputeContextTests, self).setUp(context, TO_PATCH) self.relation_get.side_effect = self.test_relation.get - self.config.side_effect = self.test_config.get self.log.side_effect = fake_log @mock.patch.object(utils, 'os_release') diff --git a/unit_tests/test_nova_cc_utils.py b/unit_tests/test_nova_cc_utils.py index 7351a2ec..6c9864c5 100644 --- a/unit_tests/test_nova_cc_utils.py +++ b/unit_tests/test_nova_cc_utils.py @@ -155,8 +155,9 @@ class NovaCCUtilsTests(CharmTestCase): _map = utils.resource_map() return _map + @patch('charmhelpers.core.hookenv.config') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_resource_map_quantum(self, subcontext): + def test_resource_map_quantum(self, subcontext, config_): self.is_relation_made.return_value = False self._resource_map(network_manager='quantum') _map = utils.resource_map() @@ -167,8 +168,9 @@ class NovaCCUtilsTests(CharmTestCase): ] [self.assertIn(q_conf, _map.keys()) for q_conf in confs] + @patch('charmhelpers.core.hookenv.config') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_resource_map_neutron(self, subcontext): + def test_resource_map_neutron(self, subcontext, config_): self.is_relation_made.return_value = False self._resource_map(network_manager='neutron') _map = utils.resource_map() @@ -259,9 
+261,11 @@ class NovaCCUtilsTests(CharmTestCase): for service in console_services: self.assertIn(service, _map['/etc/nova/nova.conf']['services']) + @patch('charmhelpers.core.hookenv.config') @patch('os.path.exists') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_restart_map_api_before_frontends(self, subcontext, _exists): + def test_restart_map_api_before_frontends(self, subcontext, _exists, + config_): self.is_relation_made.return_value = False _exists.return_value = False self._resource_map(network_manager='neutron') @@ -303,8 +307,9 @@ class NovaCCUtilsTests(CharmTestCase): pkgs = utils.determine_packages() self.assertIn('quantum-server', pkgs) + @patch('charmhelpers.core.hookenv.config') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_determine_packages_neutron(self, subcontext): + def test_determine_packages_neutron(self, subcontext, config_): self.is_relation_made.return_value = False self._resource_map(network_manager='neutron') pkgs = utils.determine_packages() From 3af2f8fdd123e05dbcdb1bac8844e607195cc44c Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 6 Nov 2014 17:32:28 +1000 Subject: [PATCH 10/35] [bradm] Check if host_context is defined before using it, checking if files before installing --- hooks/nova_cc_hooks.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 80dfc4e0..686cc282 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -134,8 +134,9 @@ def install(): if os.path.isdir(_files): for f in os.listdir(_files): f = os.path.join(_files, f) - log('Installing %s to /usr/bin' % f) - shutil.copy2(f, '/usr/bin') + if os.path.isfile(f): + log('Installing %s to /usr/bin' % f) + shutil.copy2(f, '/usr/bin') [open_port(port) for port in determine_ports()] log('Disabling services into db relation joined') disable_services() @@ -865,6 +866,7 @@ def update_nrpe_config(): ] # Find 
out if nrpe set nagios_hostname hostname = None + host_context = None for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] @@ -873,7 +875,10 @@ def update_nrpe_config(): nrpe = NRPE(hostname=hostname) apt_install('python-dbus') - current_unit = "%s:%s" % (host_context, local_unit()) + if host_context: + current_unit = "%s:%s" % (host_context, local_unit()) + else: + current_unit = local_unit() for service in SERVICES: nrpe.add_check( From c62ada400b8c1322b432f94007aeacf41f3e81e8 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Mon, 17 Nov 2014 13:43:24 +1000 Subject: [PATCH 11/35] [bradm] Add monitoring for sysvinit daemons, use services() instead of hard coded daemon list, pep8 fixes --- .../nrpe-external-master/check_exit_status.pl | 189 ++++++++++++++++++ .../nrpe-external-master/check_status_file.py | 60 ++++++ files/nrpe-external-master/nagios_plugin.py | 78 ++++++++ hooks/nova_cc_hooks.py | 37 ++-- 4 files changed, 350 insertions(+), 14 deletions(-) create mode 100755 files/nrpe-external-master/check_exit_status.pl create mode 100755 files/nrpe-external-master/check_status_file.py create mode 100755 files/nrpe-external-master/nagios_plugin.py diff --git a/files/nrpe-external-master/check_exit_status.pl b/files/nrpe-external-master/check_exit_status.pl new file mode 100755 index 00000000..49df22d8 --- /dev/null +++ b/files/nrpe-external-master/check_exit_status.pl @@ -0,0 +1,189 @@ +#!/usr/bin/perl +################################################################################ +# # +# Copyright (C) 2011 Chad Columbus # +# # +# This program is free software; you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation; either version 2 of the License, or # +# (at your option) any later version. 
# +# # +# This program is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with this program; if not, write to the Free Software # +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # +# # +################################################################################ + +use strict; +use Getopt::Std; +$| = 1; + +my %opts; +getopts('heronp:s:', \%opts); + +my $VERSION = "Version 1.0"; +my $AUTHOR = '(c) 2011 Chad Columbus '; + +# Default values: +my $script_to_check; +my $pattern = 'is running'; +my $cmd; +my $message; +my $error; + +# Exit codes +my $STATE_OK = 0; +my $STATE_WARNING = 1; +my $STATE_CRITICAL = 2; +my $STATE_UNKNOWN = 3; + +# Parse command line options +if ($opts{'h'} || scalar(%opts) == 0) { + &print_help(); + exit($STATE_OK); +} + +# Make sure scipt is provided: +if ($opts{'s'} eq '') { + # Script to run not provided + print "\nYou must provide a script to run. Example: -s /etc/init.d/httpd\n"; + exit($STATE_UNKNOWN); +} else { + $script_to_check = $opts{'s'}; +} + +# Make sure only a-z, 0-9, /, _, and - are used in the script. +if ($script_to_check =~ /[^a-z0-9\_\-\/\.]/) { + # Script contains illegal characters exit. + print "\nScript to check can only contain Letters, Numbers, Periods, Underscores, Hyphens, and/or Slashes\n"; + exit($STATE_UNKNOWN); +} + +# See if script is executable +if (! -x "$script_to_check") { + print "\nIt appears you can't execute $script_to_check, $!\n"; + exit($STATE_UNKNOWN); +} + +# If a pattern is provided use it: +if ($opts{'p'} ne '') { + $pattern = $opts{'p'}; +} + +# If -r run command via sudo as root: +if ($opts{'r'}) { + $cmd = "sudo -n $script_to_check status" . ' 2>&1'; +} else { + $cmd = "$script_to_check status" . 
' 2>&1'; +} + +my $cmd_result = `$cmd`; +chomp($cmd_result); +if ($cmd_result =~ /sudo/i) { + # This means it could not run the sudo command + $message = "$script_to_check CRITICAL - Could not run: 'sudo -n $script_to_check status'. Result is $cmd_result"; + $error = $STATE_UNKNOWN; +} else { + # Check exitstatus instead of output: + if ($opts{'e'} == 1) { + if ($? != 0) { + # error + $message = "$script_to_check CRITICAL - Exit code: $?\."; + if ($opts{'o'} == 0) { + $message .= " $cmd_result"; + } + $error = $STATE_CRITICAL; + } else { + # success + $message = "$script_to_check OK - Exit code: $?\."; + if ($opts{'o'} == 0) { + $message .= " $cmd_result"; + } + $error = $STATE_OK; + } + } else { + my $not_check = 1; + if ($opts{'n'} == 1) { + $not_check = 0; + } + if (($cmd_result =~ /$pattern/i) == $not_check) { + $message = "$script_to_check OK"; + if ($opts{'o'} == 0) { + $message .= " - $cmd_result"; + } + $error = $STATE_OK; + } else { + $message = "$script_to_check CRITICAL"; + if ($opts{'o'} == 0) { + $message .= " - $cmd_result"; + } + $error = $STATE_CRITICAL; + } + } +} + +if ($message eq '') { + print "Error: program failed in an unknown way\n"; + exit($STATE_UNKNOWN); +} + +if ($error) { + print "$message\n"; + exit($error); +} else { + # If we get here we are OK + print "$message\n"; + exit($STATE_OK); +} + +#################################### +# Start Subs: +#################################### +sub print_help() { + print << "EOF"; +Check the output or exit status of a script. +$VERSION +$AUTHOR + +Options: +-h + Print detailed help screen + +-s + 'FULL PATH TO SCRIPT' (required) + This is the script to run, the script is designed to run scripts in the + /etc/init.d dir (but can run any script) and will call the script with + a 'status' argument. 
So if you use another script make sure it will + work with /path/script status, example: /etc/init.d/httpd status + +-e + This is the "exitstaus" flag, it means check the exit status + code instead of looking for a pattern in the output of the script. + +-p 'REGEX' + This is a pattern to look for in the output of the script to confirm it + is running, default is 'is running', but not all init.d scripts output + (iptables), so you can specify an arbitrary pattern. + All patterns are case insensitive. + +-n + This is the "NOT" flag, it means not the -p pattern, so if you want to + make sure the output of the script does NOT contain -p 'REGEX' + +-r + This is the "ROOT" flag, it means run as root via sudo. You will need a + line in your /etc/sudoers file like: + nagios ALL=(root) NOPASSWD: /etc/init.d/* status + +-o + This is the "SUPPRESS OUTPUT" flag. Some programs have a long output + (like iptables), this flag suppresses that output so it is not printed + as a part of the nagios message. +EOF +} + diff --git a/files/nrpe-external-master/check_status_file.py b/files/nrpe-external-master/check_status_file.py new file mode 100755 index 00000000..ba828087 --- /dev/null +++ b/files/nrpe-external-master/check_status_file.py @@ -0,0 +1,60 @@ +#!/usr/bin/python + +# m +# mmmm m m mmmm mmmm mmm mm#mm +# #" "# # # #" "# #" "# #" # # +# # # # # # # # # #"""" # +# ##m#" "mm"# ##m#" ##m#" "#mm" "mm +# # # # +# " " " +# This file is managed by puppet. Do not make local changes. + +# +# Copyright 2014 Canonical Ltd. 
+# +# Author: Jacek Nykis +# + +import re +import nagios_plugin + + +def parse_args(): + import argparse + + parser = argparse.ArgumentParser( + description='Read file and return nagios status based on its content', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-f', '--status-file', required=True, + help='Status file path') + parser.add_argument('-c', '--critical-text', default='CRITICAL', + help='String indicating critical status') + parser.add_argument('-w', '--warning-text', default='WARNING', + help='String indicating warning status') + parser.add_argument('-o', '--ok-text', default='OK', + help='String indicating OK status') + parser.add_argument('-u', '--unknown-text', default='UNKNOWN', + help='String indicating unknown status') + return parser.parse_args() + + +def check_status(args): + nagios_plugin.check_file_freshness(args.status_file, 43200) + + with open(args.status_file, "r") as f: + content = [l.strip() for l in f.readlines()] + + for line in content: + if re.search(args.critical_text, line): + raise nagios_plugin.CriticalError(line) + elif re.search(args.warning_text, line): + raise nagios_plugin.WarnError(line) + elif re.search(args.unknown_text, line): + raise nagios_plugin.UnknownError(line) + else: + print line + + +if __name__ == '__main__': + args = parse_args() + nagios_plugin.try_check(check_status, args) diff --git a/files/nrpe-external-master/nagios_plugin.py b/files/nrpe-external-master/nagios_plugin.py new file mode 100755 index 00000000..f0f8e7b5 --- /dev/null +++ b/files/nrpe-external-master/nagios_plugin.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# m +# mmmm m m mmmm mmmm mmm mm#mm +# #" "# # # #" "# #" "# #" # # +# # # # # # # # # #"""" # +# ##m#" "mm"# ##m#" ##m#" "#mm" "mm +# # # # +# " " " +# This file is managed by puppet. Do not make local changes. 
+ +# Copyright (C) 2005, 2006, 2007, 2012 James Troup + +import os +import stat +import time +import traceback +import sys + + +################################################################################ + +class CriticalError(Exception): + """This indicates a critical error.""" + pass + + +class WarnError(Exception): + """This indicates a warning condition.""" + pass + + +class UnknownError(Exception): + """This indicates a unknown error was encountered.""" + pass + + +def try_check(function, *args, **kwargs): + """Perform a check with error/warn/unknown handling.""" + try: + function(*args, **kwargs) + except UnknownError, msg: + print msg + sys.exit(3) + except CriticalError, msg: + print msg + sys.exit(2) + except WarnError, msg: + print msg + sys.exit(1) + except: + print "%s raised unknown exception '%s'" % (function, sys.exc_info()[0]) + print '=' * 60 + traceback.print_exc(file=sys.stdout) + print '=' * 60 + sys.exit(3) + + +################################################################################ + +def check_file_freshness(filename, newer_than=600): + """Check a file exists, is readable and is newer than seconds (where defaults to 600).""" + # First check the file exists and is readable + if not os.path.exists(filename): + raise CriticalError("%s: does not exist." % (filename)) + if os.access(filename, os.R_OK) == 0: + raise CriticalError("%s: is not readable." % (filename)) + + # Then ensure the file is up-to-date enough + mtime = os.stat(filename)[stat.ST_MTIME] + last_modified = time.time() - mtime + if last_modified > newer_than: + raise CriticalError("%s: was last modified on %s and is too old (> %s seconds)." + % (filename, time.ctime(mtime), newer_than)) + if last_modified < 0: + raise CriticalError("%s: was last modified on %s which is in the future." 
+ % (filename, time.ctime(mtime))) + +################################################################################ diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 686cc282..c5e7c415 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -856,14 +856,6 @@ def neutron_api_relation_broken(): @hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') def update_nrpe_config(): - SERVICES = [ - 'nova-api-os-compute', - 'nova-api-ec2', - 'nova-cert', - 'nova-objectstore', - 'nova-scheduler', - 'nova-conductor', - ] # Find out if nrpe set nagios_hostname hostname = None host_context = None @@ -880,12 +872,29 @@ def update_nrpe_config(): else: current_unit = local_unit() - for service in SERVICES: - nrpe.add_check( - shortname=service, - description='process check {%s}' % current_unit, - check_cmd = 'check_upstart_job %s' % service, - ) + services_to_monitor = services() + for service in services_to_monitor: + upstart_init = '/etc/init/%s.conf' % service + sysv_init = '/etc/init.d/%s' % service + + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=service, + description='process check {%s}' % current_unit, + check_cmd = 'check_upstart_job %s' % service, + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % service + checkpath = os.path.join(os.environ['CHARM_DIR'], 'files/nrpe-external-master', 'check_exit_status.pl'), + cron_template = '*/5 * * * * root %s -s /etc/init.d/%s status > /var/lib/nagios/service-check-%s.txt\n' % (checkpath[0], service, service) + f = open(cronpath, 'w') + f.write(cron_template) + f.close() + nrpe.add_check( + shortname=service, + description='process check {%s}' % current_unit, + check_cmd = 'check_status_file.py -f /var/lib/nagios/service-check-%s.txt' % service, + ) nrpe.write() From cf09ab2dd8eeba9dab6fe1ab3f7dc0281eba3552 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Mon, 17 Nov 2014 15:07:48 +1000 Subject: [PATCH 
12/35] [bradm] Removed puppet header from nagios_plugin module --- files/nrpe-external-master/nagios_plugin.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/files/nrpe-external-master/nagios_plugin.py b/files/nrpe-external-master/nagios_plugin.py index f0f8e7b5..fc0d7b7b 100755 --- a/files/nrpe-external-master/nagios_plugin.py +++ b/files/nrpe-external-master/nagios_plugin.py @@ -1,13 +1,4 @@ #!/usr/bin/env python -# m -# mmmm m m mmmm mmmm mmm mm#mm -# #" "# # # #" "# #" "# #" # # -# # # # # # # # # #"""" # -# ##m#" "mm"# ##m#" ##m#" "#mm" "mm -# # # # -# " " " -# This file is managed by puppet. Do not make local changes. - # Copyright (C) 2005, 2006, 2007, 2012 James Troup import os From d71fef9bc00f340c40e33adc1633d3fbe8c99902 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 18 Nov 2014 11:21:15 +1000 Subject: [PATCH 13/35] [bradm] Removed nagios check files that were moved to nrpe-external-master charm --- .../nrpe-external-master/check_exit_status.pl | 189 ------------------ .../nrpe-external-master/check_status_file.py | 60 ------ files/nrpe-external-master/check_upstart_job | 72 ------- files/nrpe-external-master/nagios_plugin.py | 69 ------- hooks/nova_cc_hooks.py | 13 +- 5 files changed, 8 insertions(+), 395 deletions(-) delete mode 100755 files/nrpe-external-master/check_exit_status.pl delete mode 100755 files/nrpe-external-master/check_status_file.py delete mode 100755 files/nrpe-external-master/check_upstart_job delete mode 100755 files/nrpe-external-master/nagios_plugin.py diff --git a/files/nrpe-external-master/check_exit_status.pl b/files/nrpe-external-master/check_exit_status.pl deleted file mode 100755 index 49df22d8..00000000 --- a/files/nrpe-external-master/check_exit_status.pl +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/perl -################################################################################ -# # -# Copyright (C) 2011 Chad Columbus # -# # -# This program is free software; you can redistribute it and/or modify # 
-# it under the terms of the GNU General Public License as published by # -# the Free Software Foundation; either version 2 of the License, or # -# (at your option) any later version. # -# # -# This program is distributed in the hope that it will be useful, # -# but WITHOUT ANY WARRANTY; without even the implied warranty of # -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # -# GNU General Public License for more details. # -# # -# You should have received a copy of the GNU General Public License # -# along with this program; if not, write to the Free Software # -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # -# # -################################################################################ - -use strict; -use Getopt::Std; -$| = 1; - -my %opts; -getopts('heronp:s:', \%opts); - -my $VERSION = "Version 1.0"; -my $AUTHOR = '(c) 2011 Chad Columbus '; - -# Default values: -my $script_to_check; -my $pattern = 'is running'; -my $cmd; -my $message; -my $error; - -# Exit codes -my $STATE_OK = 0; -my $STATE_WARNING = 1; -my $STATE_CRITICAL = 2; -my $STATE_UNKNOWN = 3; - -# Parse command line options -if ($opts{'h'} || scalar(%opts) == 0) { - &print_help(); - exit($STATE_OK); -} - -# Make sure scipt is provided: -if ($opts{'s'} eq '') { - # Script to run not provided - print "\nYou must provide a script to run. Example: -s /etc/init.d/httpd\n"; - exit($STATE_UNKNOWN); -} else { - $script_to_check = $opts{'s'}; -} - -# Make sure only a-z, 0-9, /, _, and - are used in the script. -if ($script_to_check =~ /[^a-z0-9\_\-\/\.]/) { - # Script contains illegal characters exit. - print "\nScript to check can only contain Letters, Numbers, Periods, Underscores, Hyphens, and/or Slashes\n"; - exit($STATE_UNKNOWN); -} - -# See if script is executable -if (! 
-x "$script_to_check") { - print "\nIt appears you can't execute $script_to_check, $!\n"; - exit($STATE_UNKNOWN); -} - -# If a pattern is provided use it: -if ($opts{'p'} ne '') { - $pattern = $opts{'p'}; -} - -# If -r run command via sudo as root: -if ($opts{'r'}) { - $cmd = "sudo -n $script_to_check status" . ' 2>&1'; -} else { - $cmd = "$script_to_check status" . ' 2>&1'; -} - -my $cmd_result = `$cmd`; -chomp($cmd_result); -if ($cmd_result =~ /sudo/i) { - # This means it could not run the sudo command - $message = "$script_to_check CRITICAL - Could not run: 'sudo -n $script_to_check status'. Result is $cmd_result"; - $error = $STATE_UNKNOWN; -} else { - # Check exitstatus instead of output: - if ($opts{'e'} == 1) { - if ($? != 0) { - # error - $message = "$script_to_check CRITICAL - Exit code: $?\."; - if ($opts{'o'} == 0) { - $message .= " $cmd_result"; - } - $error = $STATE_CRITICAL; - } else { - # success - $message = "$script_to_check OK - Exit code: $?\."; - if ($opts{'o'} == 0) { - $message .= " $cmd_result"; - } - $error = $STATE_OK; - } - } else { - my $not_check = 1; - if ($opts{'n'} == 1) { - $not_check = 0; - } - if (($cmd_result =~ /$pattern/i) == $not_check) { - $message = "$script_to_check OK"; - if ($opts{'o'} == 0) { - $message .= " - $cmd_result"; - } - $error = $STATE_OK; - } else { - $message = "$script_to_check CRITICAL"; - if ($opts{'o'} == 0) { - $message .= " - $cmd_result"; - } - $error = $STATE_CRITICAL; - } - } -} - -if ($message eq '') { - print "Error: program failed in an unknown way\n"; - exit($STATE_UNKNOWN); -} - -if ($error) { - print "$message\n"; - exit($error); -} else { - # If we get here we are OK - print "$message\n"; - exit($STATE_OK); -} - -#################################### -# Start Subs: -#################################### -sub print_help() { - print << "EOF"; -Check the output or exit status of a script. 
-$VERSION -$AUTHOR - -Options: --h - Print detailed help screen - --s - 'FULL PATH TO SCRIPT' (required) - This is the script to run, the script is designed to run scripts in the - /etc/init.d dir (but can run any script) and will call the script with - a 'status' argument. So if you use another script make sure it will - work with /path/script status, example: /etc/init.d/httpd status - --e - This is the "exitstaus" flag, it means check the exit status - code instead of looking for a pattern in the output of the script. - --p 'REGEX' - This is a pattern to look for in the output of the script to confirm it - is running, default is 'is running', but not all init.d scripts output - (iptables), so you can specify an arbitrary pattern. - All patterns are case insensitive. - --n - This is the "NOT" flag, it means not the -p pattern, so if you want to - make sure the output of the script does NOT contain -p 'REGEX' - --r - This is the "ROOT" flag, it means run as root via sudo. You will need a - line in your /etc/sudoers file like: - nagios ALL=(root) NOPASSWD: /etc/init.d/* status - --o - This is the "SUPPRESS OUTPUT" flag. Some programs have a long output - (like iptables), this flag suppresses that output so it is not printed - as a part of the nagios message. -EOF -} - diff --git a/files/nrpe-external-master/check_status_file.py b/files/nrpe-external-master/check_status_file.py deleted file mode 100755 index ba828087..00000000 --- a/files/nrpe-external-master/check_status_file.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/python - -# m -# mmmm m m mmmm mmmm mmm mm#mm -# #" "# # # #" "# #" "# #" # # -# # # # # # # # # #"""" # -# ##m#" "mm"# ##m#" ##m#" "#mm" "mm -# # # # -# " " " -# This file is managed by puppet. Do not make local changes. - -# -# Copyright 2014 Canonical Ltd. 
-# -# Author: Jacek Nykis -# - -import re -import nagios_plugin - - -def parse_args(): - import argparse - - parser = argparse.ArgumentParser( - description='Read file and return nagios status based on its content', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-f', '--status-file', required=True, - help='Status file path') - parser.add_argument('-c', '--critical-text', default='CRITICAL', - help='String indicating critical status') - parser.add_argument('-w', '--warning-text', default='WARNING', - help='String indicating warning status') - parser.add_argument('-o', '--ok-text', default='OK', - help='String indicating OK status') - parser.add_argument('-u', '--unknown-text', default='UNKNOWN', - help='String indicating unknown status') - return parser.parse_args() - - -def check_status(args): - nagios_plugin.check_file_freshness(args.status_file, 43200) - - with open(args.status_file, "r") as f: - content = [l.strip() for l in f.readlines()] - - for line in content: - if re.search(args.critical_text, line): - raise nagios_plugin.CriticalError(line) - elif re.search(args.warning_text, line): - raise nagios_plugin.WarnError(line) - elif re.search(args.unknown_text, line): - raise nagios_plugin.UnknownError(line) - else: - print line - - -if __name__ == '__main__': - args = parse_args() - nagios_plugin.try_check(check_status, args) diff --git a/files/nrpe-external-master/check_upstart_job b/files/nrpe-external-master/check_upstart_job deleted file mode 100755 index 94efb95e..00000000 --- a/files/nrpe-external-master/check_upstart_job +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python - -# -# Copyright 2012, 2013 Canonical Ltd. 
-# -# Author: Paul Collins -# -# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html -# - -import sys - -import dbus - - -class Upstart(object): - def __init__(self): - self._bus = dbus.SystemBus() - self._upstart = self._bus.get_object('com.ubuntu.Upstart', - '/com/ubuntu/Upstart') - def get_job(self, job_name): - path = self._upstart.GetJobByName(job_name, - dbus_interface='com.ubuntu.Upstart0_6') - return self._bus.get_object('com.ubuntu.Upstart', path) - - def get_properties(self, job): - path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job') - instance = self._bus.get_object('com.ubuntu.Upstart', path) - return instance.GetAll('com.ubuntu.Upstart0_6.Instance', - dbus_interface=dbus.PROPERTIES_IFACE) - - def get_job_instances(self, job_name): - job = self.get_job(job_name) - paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job') - return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths] - - def get_job_instance_properties(self, job): - return job.GetAll('com.ubuntu.Upstart0_6.Instance', - dbus_interface=dbus.PROPERTIES_IFACE) - -try: - upstart = Upstart() - try: - job = upstart.get_job(sys.argv[1]) - props = upstart.get_properties(job) - - if props['state'] == 'running': - print 'OK: %s is running' % sys.argv[1] - sys.exit(0) - else: - print 'CRITICAL: %s is not running' % sys.argv[1] - sys.exit(2) - - except dbus.DBusException as e: - instances = upstart.get_job_instances(sys.argv[1]) - propses = [upstart.get_job_instance_properties(instance) for instance in instances] - states = dict([(props['name'], props['state']) for props in propses]) - if len(states) != states.values().count('running'): - not_running = [] - for name in states.keys(): - if states[name] != 'running': - not_running.append(name) - print 'CRITICAL: %d instances of %s not running: %s' % \ - (len(not_running), sys.argv[1], not_running.join(', ')) - sys.exit(2) - else: - print 'OK: %d instances of %s 
running' % (len(states), sys.argv[1]) - -except dbus.DBusException as e: - print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1] - sys.exit(2) - diff --git a/files/nrpe-external-master/nagios_plugin.py b/files/nrpe-external-master/nagios_plugin.py deleted file mode 100755 index fc0d7b7b..00000000 --- a/files/nrpe-external-master/nagios_plugin.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2005, 2006, 2007, 2012 James Troup - -import os -import stat -import time -import traceback -import sys - - -################################################################################ - -class CriticalError(Exception): - """This indicates a critical error.""" - pass - - -class WarnError(Exception): - """This indicates a warning condition.""" - pass - - -class UnknownError(Exception): - """This indicates a unknown error was encountered.""" - pass - - -def try_check(function, *args, **kwargs): - """Perform a check with error/warn/unknown handling.""" - try: - function(*args, **kwargs) - except UnknownError, msg: - print msg - sys.exit(3) - except CriticalError, msg: - print msg - sys.exit(2) - except WarnError, msg: - print msg - sys.exit(1) - except: - print "%s raised unknown exception '%s'" % (function, sys.exc_info()[0]) - print '=' * 60 - traceback.print_exc(file=sys.stdout) - print '=' * 60 - sys.exit(3) - - -################################################################################ - -def check_file_freshness(filename, newer_than=600): - """Check a file exists, is readable and is newer than seconds (where defaults to 600).""" - # First check the file exists and is readable - if not os.path.exists(filename): - raise CriticalError("%s: does not exist." % (filename)) - if os.access(filename, os.R_OK) == 0: - raise CriticalError("%s: is not readable." 
% (filename)) - - # Then ensure the file is up-to-date enough - mtime = os.stat(filename)[stat.ST_MTIME] - last_modified = time.time() - mtime - if last_modified > newer_than: - raise CriticalError("%s: was last modified on %s and is too old (> %s seconds)." - % (filename, time.ctime(mtime), newer_than)) - if last_modified < 0: - raise CriticalError("%s: was last modified on %s which is in the future." - % (filename, time.ctime(mtime))) - -################################################################################ diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index c5e7c415..9630370d 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -854,7 +854,8 @@ def neutron_api_relation_broken(): for rid in relation_ids('quantum-network-service'): quantum_joined(rid=rid) -@hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') +@hooks.hook('nrpe-external-master-relation-joined', + 'nrpe-external-master-relation-changed') def update_nrpe_config(): # Find out if nrpe set nagios_hostname hostname = None @@ -881,19 +882,21 @@ def update_nrpe_config(): nrpe.add_check( shortname=service, description='process check {%s}' % current_unit, - check_cmd = 'check_upstart_job %s' % service, + check_cmd='check_upstart_job %s' % service, ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % service - checkpath = os.path.join(os.environ['CHARM_DIR'], 'files/nrpe-external-master', 'check_exit_status.pl'), - cron_template = '*/5 * * * * root %s -s /etc/init.d/%s status > /var/lib/nagios/service-check-%s.txt\n' % (checkpath[0], service, service) + cron_template = '*/5 * * * * root \ +/usr/local/lib/nagios/plugins/check_exit_status.pl -s /etc/init.d/%s \ +status > /var/lib/nagios/service-check-%s.txt\n' % (service, service) f = open(cronpath, 'w') f.write(cron_template) f.close() nrpe.add_check( shortname=service, description='process check {%s}' % current_unit, - check_cmd = 
'check_status_file.py -f /var/lib/nagios/service-check-%s.txt' % service, + check_cmd='check_status_file.py -f \ +/var/lib/nagios/service-check-%s.txt' % service, ) nrpe.write() From 85562f5ef4b91704946e718e308855155958efb9 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Thu, 20 Nov 2014 11:37:06 -0300 Subject: [PATCH 14/35] Revert "Changed the way charmhelpers.core.hookenv.config is imported" --- hooks/nova_cc_context.py | 32 ++++++++++++++--------------- unit_tests/test_nova_cc_contexts.py | 2 ++ unit_tests/test_nova_cc_utils.py | 13 ++++-------- 3 files changed, 22 insertions(+), 25 deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index b190dbd5..94557ffd 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -1,7 +1,7 @@ from charmhelpers.core.hookenv import ( - relation_ids, relation_set, log, ERROR, + config, relation_ids, relation_set, log, ERROR, unit_get, related_units, relation_get, relations_for_id) -from charmhelpers.core import hookenv + from charmhelpers.fetch import apt_install, filter_installed_packages from charmhelpers.contrib.openstack import context, neutron, utils @@ -181,14 +181,14 @@ def canonical_url(vip_setting='vip'): if https(): scheme = 'https' - if hookenv.config('prefer-ipv6'): + if config('prefer-ipv6'): if is_clustered(): - addr = '[%s]' % hookenv.config(vip_setting) + addr = '[%s]' % config(vip_setting) else: - addr = '[%s]' % get_ipv6_addr(exc_list=[hookenv.config('vip')])[0] + addr = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0] else: if is_clustered(): - addr = hookenv.config(vip_setting) + addr = config(vip_setting) else: addr = unit_get('private-address') @@ -209,8 +209,8 @@ class NeutronCCContext(context.NeutronContext): @property def neutron_security_groups(self): - sec_groups = (hookenv.config('neutron-security-groups') or - hookenv.config('quantum-security-groups')) + sec_groups = (config('neutron-security-groups') or + config('quantum-security-groups')) return 
sec_groups.lower() == 'yes' def _ensure_packages(self): @@ -220,9 +220,9 @@ class NeutronCCContext(context.NeutronContext): def __call__(self): ctxt = super(NeutronCCContext, self).__call__() - ctxt['external_network'] = hookenv.config('neutron-external-network') - if hookenv.config('quantum-plugin') in ['nvp', 'nsx']: - _config = hookenv.config() + ctxt['external_network'] = config('neutron-external-network') + if config('quantum-plugin') in ['nvp', 'nsx']: + _config = config() for k, v in _config.iteritems(): if k.startswith('nvp'): ctxt[k.replace('-', '_')] = v @@ -251,7 +251,7 @@ class IdentityServiceContext(context.IdentityServiceContext): ctxt['service_port'] ) ctxt['keystone_ec2_url'] = ec2_tokens - ctxt['region'] = hookenv.config('region') + ctxt['region'] = config('region') return ctxt @@ -265,21 +265,21 @@ class NeutronPostgresqlDBContext(context.PostgresqlDBContext): def __init__(self): super(NeutronPostgresqlDBContext, - self).__init__(hookenv.config('neutron-database')) + self).__init__(config('neutron-database')) class NovaConfigContext(context.WorkerConfigContext): def __call__(self): ctxt = super(NovaConfigContext, self).__call__() - ctxt['cpu_allocation_ratio'] = hookenv.config('cpu-allocation-ratio') - ctxt['ram_allocation_ratio'] = hookenv.config('ram-allocation-ratio') + ctxt['cpu_allocation_ratio'] = config('cpu-allocation-ratio') + ctxt['ram_allocation_ratio'] = config('ram-allocation-ratio') return ctxt class NovaIPv6Context(context.BindHostContext): def __call__(self): ctxt = super(NovaIPv6Context, self).__call__() - ctxt['use_ipv6'] = hookenv.config('prefer-ipv6') + ctxt['use_ipv6'] = config('prefer-ipv6') return ctxt diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index 1f4660ff..1945d949 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -14,6 +14,7 @@ TO_PATCH = [ 'relation_ids', 'relation_get', 'related_units', + 'config', 'log', 'unit_get', 
'relations_for_id', @@ -29,6 +30,7 @@ class NovaComputeContextTests(CharmTestCase): def setUp(self): super(NovaComputeContextTests, self).setUp(context, TO_PATCH) self.relation_get.side_effect = self.test_relation.get + self.config.side_effect = self.test_config.get self.log.side_effect = fake_log @mock.patch.object(utils, 'os_release') diff --git a/unit_tests/test_nova_cc_utils.py b/unit_tests/test_nova_cc_utils.py index 6c9864c5..7351a2ec 100644 --- a/unit_tests/test_nova_cc_utils.py +++ b/unit_tests/test_nova_cc_utils.py @@ -155,9 +155,8 @@ class NovaCCUtilsTests(CharmTestCase): _map = utils.resource_map() return _map - @patch('charmhelpers.core.hookenv.config') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_resource_map_quantum(self, subcontext, config_): + def test_resource_map_quantum(self, subcontext): self.is_relation_made.return_value = False self._resource_map(network_manager='quantum') _map = utils.resource_map() @@ -168,9 +167,8 @@ class NovaCCUtilsTests(CharmTestCase): ] [self.assertIn(q_conf, _map.keys()) for q_conf in confs] - @patch('charmhelpers.core.hookenv.config') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_resource_map_neutron(self, subcontext, config_): + def test_resource_map_neutron(self, subcontext): self.is_relation_made.return_value = False self._resource_map(network_manager='neutron') _map = utils.resource_map() @@ -261,11 +259,9 @@ class NovaCCUtilsTests(CharmTestCase): for service in console_services: self.assertIn(service, _map['/etc/nova/nova.conf']['services']) - @patch('charmhelpers.core.hookenv.config') @patch('os.path.exists') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_restart_map_api_before_frontends(self, subcontext, _exists, - config_): + def test_restart_map_api_before_frontends(self, subcontext, _exists): self.is_relation_made.return_value = False _exists.return_value = False 
self._resource_map(network_manager='neutron') @@ -307,9 +303,8 @@ class NovaCCUtilsTests(CharmTestCase): pkgs = utils.determine_packages() self.assertIn('quantum-server', pkgs) - @patch('charmhelpers.core.hookenv.config') @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') - def test_determine_packages_neutron(self, subcontext, config_): + def test_determine_packages_neutron(self, subcontext): self.is_relation_made.return_value = False self._resource_map(network_manager='neutron') pkgs = utils.determine_packages() From b61604b4d90dd8622331b0ece85a60e28da6753a Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Thu, 20 Nov 2014 12:07:29 -0300 Subject: [PATCH 15/35] Patch nova_cc_utils config() before it's imported by nova_cc_context --- unit_tests/test_nova_cc_contexts.py | 14 ++++++++++++++ unit_tests/test_nova_cc_hooks.py | 1 + 2 files changed, 15 insertions(+) diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index 1945d949..c777b7d3 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -1,6 +1,20 @@ from __future__ import print_function import mock + +##### +# NOTE(freyes): this is a workaround to patch config() function imported by +# nova_cc_utils before it gets a reference to the actual config() provided by +# hookenv module. 
+from charmhelpers.core import hookenv +_conf = hookenv.config +hookenv.config = mock.MagicMock() +import nova_cc_utils as _utils +# this assert is a double check + to avoid pep8 warning +assert _utils.config == hookenv.config +hookenv.config = _conf +##### + import nova_cc_context as context from charmhelpers.contrib.openstack import utils diff --git a/unit_tests/test_nova_cc_hooks.py b/unit_tests/test_nova_cc_hooks.py index a2dc4d84..1aef9ffe 100644 --- a/unit_tests/test_nova_cc_hooks.py +++ b/unit_tests/test_nova_cc_hooks.py @@ -1,6 +1,7 @@ from mock import MagicMock, patch, call from test_utils import CharmTestCase, patch_open import os + with patch('charmhelpers.core.hookenv.config') as config: config.return_value = 'neutron' import nova_cc_utils as utils From b4e6cf2f5b226e8d35fc2f03b4f0824c8759e342 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Thu, 20 Nov 2014 12:13:57 -0300 Subject: [PATCH 16/35] Format message, set proper level and include exception --- hooks/nova_cc_context.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 94557ffd..bbbcc88d 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -295,7 +295,8 @@ class InstanceConsoleContext(context.OSContextGenerator): servers.append({'private-address': rel['private-address'], 'port': rel['port']}) except Exception as ex: - log(str(ex)) + log("Couldn't get caching servers: {}".format(str(ex)), + level='WARNING') servers = [] ctxt['memcached_servers'] = servers From 73cd15bebf003c2ea93e6883a611d92632c25826 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Thu, 20 Nov 2014 13:49:23 -0300 Subject: [PATCH 17/35] Renamed relation from 'cache' to 'memcache' --- hooks/{cache-relation-broken => memcache-relation-broken} | 0 .../{cache-relation-changed => memcache-relation-changed} | 0 ...cache-relation-departed => memcache-relation-departed} | 0 hooks/{cache-relation-joined => memcache-relation-joined} | 0 
hooks/nova_cc_hooks.py | 8 ++++---- metadata.yaml | 2 +- 6 files changed, 5 insertions(+), 5 deletions(-) rename hooks/{cache-relation-broken => memcache-relation-broken} (100%) rename hooks/{cache-relation-changed => memcache-relation-changed} (100%) rename hooks/{cache-relation-departed => memcache-relation-departed} (100%) rename hooks/{cache-relation-joined => memcache-relation-joined} (100%) diff --git a/hooks/cache-relation-broken b/hooks/memcache-relation-broken similarity index 100% rename from hooks/cache-relation-broken rename to hooks/memcache-relation-broken diff --git a/hooks/cache-relation-changed b/hooks/memcache-relation-changed similarity index 100% rename from hooks/cache-relation-changed rename to hooks/memcache-relation-changed diff --git a/hooks/cache-relation-departed b/hooks/memcache-relation-departed similarity index 100% rename from hooks/cache-relation-departed rename to hooks/memcache-relation-departed diff --git a/hooks/cache-relation-joined b/hooks/memcache-relation-joined similarity index 100% rename from hooks/cache-relation-joined rename to hooks/memcache-relation-joined diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 5fe9eea7..cd6fa6f7 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -852,10 +852,10 @@ def neutron_api_relation_broken(): quantum_joined(rid=rid) -@hooks.hook('cache-relation-joined', - 'cache-relation-departed', - 'cache-relation-changed', - 'cache-relation-broken') +@hooks.hook('memcache-relation-joined', + 'memcache-relation-departed', + 'memcache-relation-changed', + 'memcache-relation-broken') @restart_on_change(restart_map()) def memcached_joined(): CONFIGS.write(NOVA_CONF) diff --git a/metadata.yaml b/metadata.yaml index be2440de..f277b1cd 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -40,7 +40,7 @@ requires: nova-vmware: interface: nova-vmware scope: container - cache: + memcache: interface: memcache peers: cluster: From 3482bf647a0210bc1f9bf79664fb899868150e9e Mon Sep 
17 00:00:00 2001 From: Felipe Reyes Date: Thu, 20 Nov 2014 13:51:44 -0300 Subject: [PATCH 18/35] Renamed relation from 'cache' to 'memcache' --- hooks/{cache-relation-broken => memcache-relation-broken} | 0 .../{cache-relation-changed => memcache-relation-changed} | 0 ...cache-relation-departed => memcache-relation-departed} | 0 hooks/{cache-relation-joined => memcache-relation-joined} | 0 hooks/nova_cc_context.py | 2 +- hooks/nova_cc_hooks.py | 8 ++++---- metadata.yaml | 2 +- 7 files changed, 6 insertions(+), 6 deletions(-) rename hooks/{cache-relation-broken => memcache-relation-broken} (100%) rename hooks/{cache-relation-changed => memcache-relation-changed} (100%) rename hooks/{cache-relation-departed => memcache-relation-departed} (100%) rename hooks/{cache-relation-joined => memcache-relation-joined} (100%) diff --git a/hooks/cache-relation-broken b/hooks/memcache-relation-broken similarity index 100% rename from hooks/cache-relation-broken rename to hooks/memcache-relation-broken diff --git a/hooks/cache-relation-changed b/hooks/memcache-relation-changed similarity index 100% rename from hooks/cache-relation-changed rename to hooks/memcache-relation-changed diff --git a/hooks/cache-relation-departed b/hooks/memcache-relation-departed similarity index 100% rename from hooks/cache-relation-departed rename to hooks/memcache-relation-departed diff --git a/hooks/cache-relation-joined b/hooks/memcache-relation-joined similarity index 100% rename from hooks/cache-relation-joined rename to hooks/memcache-relation-joined diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index bbbcc88d..75c97e1b 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -290,7 +290,7 @@ class InstanceConsoleContext(context.OSContextGenerator): ctxt = {} servers = [] try: - for rid in relation_ids('cache'): + for rid in relation_ids('memcache'): for rel in relations_for_id(rid): servers.append({'private-address': rel['private-address'], 'port': rel['port']}) 
diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 5fe9eea7..cd6fa6f7 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -852,10 +852,10 @@ def neutron_api_relation_broken(): quantum_joined(rid=rid) -@hooks.hook('cache-relation-joined', - 'cache-relation-departed', - 'cache-relation-changed', - 'cache-relation-broken') +@hooks.hook('memcache-relation-joined', + 'memcache-relation-departed', + 'memcache-relation-changed', + 'memcache-relation-broken') @restart_on_change(restart_map()) def memcached_joined(): CONFIGS.write(NOVA_CONF) diff --git a/metadata.yaml b/metadata.yaml index be2440de..f277b1cd 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -40,7 +40,7 @@ requires: nova-vmware: interface: nova-vmware scope: container - cache: + memcache: interface: memcache peers: cluster: From 45dd25939cb1ee67de30e02311e6cfa433149adb Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 25 Nov 2014 10:19:08 +0000 Subject: [PATCH 19/35] Enable haproxy for when there is a single unit in a deployment --- charm-helpers-hooks.yaml | 2 +- .../charmhelpers/contrib/hahelpers/cluster.py | 21 +- hooks/charmhelpers/contrib/network/ip.py | 100 ++-- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 544 ++++++++++-------- .../charmhelpers/contrib/openstack/neutron.py | 20 +- .../contrib/openstack/templates/haproxy.cfg | 4 +- .../contrib/openstack/templating.py | 10 +- hooks/charmhelpers/contrib/openstack/utils.py | 39 +- .../contrib/peerstorage/__init__.py | 7 +- .../contrib/storage/linux/ceph.py | 176 +++--- .../contrib/storage/linux/loopback.py | 8 +- hooks/charmhelpers/core/fstab.py | 18 +- hooks/charmhelpers/core/hookenv.py | 20 +- hooks/charmhelpers/core/host.py | 33 +- hooks/charmhelpers/core/services/__init__.py | 4 +- hooks/charmhelpers/core/services/helpers.py | 12 +- hooks/charmhelpers/core/templating.py | 3 +- hooks/charmhelpers/fetch/__init__.py | 40 +- 
hooks/charmhelpers/fetch/archiveurl.py | 60 +- hooks/charmhelpers/fetch/bzrurl.py | 6 +- hooks/charmhelpers/fetch/giturl.py | 48 ++ hooks/nova_cc_context.py | 30 +- hooks/nova_cc_utils.py | 2 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- tests/charmhelpers/contrib/amulet/utils.py | 10 +- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- 29 files changed, 748 insertions(+), 489 deletions(-) create mode 100644 hooks/charmhelpers/fetch/giturl.py diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index 8211c8dd..9ae5e6a2 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~gnuoy/charm-helpers/haproxy-singlenode-mode destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 6d972007..3e51986d 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,9 +13,10 @@ clustering-related helpers. import subprocess import os - from socket import gethostname as get_unit_hostname +import six + from charmhelpers.core.hookenv import ( log, relation_ids, @@ -150,34 +151,42 @@ def https(): return False -def determine_api_port(public_port): +def determine_api_port(public_port, singlenode_mode=False): ''' Determine correct API server listening port based on existence of HTTPS reverse proxy and/or haproxy. 
public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the API service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 if https(): i += 1 return public_port - (i * 10) -def determine_apache_port(public_port): +def determine_apache_port(public_port, singlenode_mode=False): ''' Description: Determine correct apache listening port based on public IP + state of the cluster. public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the HAProxy service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 return public_port - (i * 10) @@ -197,7 +206,7 @@ def get_hacluster_config(): for setting in settings: conf[setting] = config_get(setting) missing = [] - [missing.append(s) for s, v in conf.iteritems() if v is None] + [missing.append(s) for s, v in six.iteritems(conf) if v is None] if missing: log('Insufficient config data to configure hacluster.', level=ERROR) raise HAIncompleteConfig diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index e62e5655..b9a9815c 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,15 +1,12 @@ import glob import re import subprocess -import sys from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, - ERROR, log ) @@ -34,31 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def 
get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). - """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() - else: - return None + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -70,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -82,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! 
return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -113,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -147,6 +144,7 @@ def _get_for_address(address, key): return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -160,41 +158,42 @@ def _get_for_address(address, key): return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') def format_ipv6_addr(address): - """ - IPv6 needs to be wrapped with [] in url link to parse correctly. + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. """ if is_ipv6(address): - address = "[%s]" % address - else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) - address = None + return "[%s]" % address - return address + return None def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IP address for a given interface, if any, or []. 
- """ + """Return the assigned IP address for a given interface, if any.""" # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] + if not exc_list: exc_list = [] + try: inet_num = getattr(netifaces, inet_type) except AttributeError: - raise Exception('Unknown inet type ' + str(inet_type)) + raise Exception("Unknown inet type '%s'" % str(inet_type)) interfaces = netifaces.interfaces() if inc_aliases: @@ -202,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for _iface in interfaces: if iface == _iface or _iface.split(':')[0] == iface: ifaces.append(_iface) + if fatal and not ifaces: raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() else: if iface not in interfaces: if fatal: - raise Exception("%s not found " % (iface)) + raise Exception("Interface '%s' not found " % (iface)) else: return [] + else: ifaces = [iface] @@ -221,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for entry in net_info[inet_num]: if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) + if fatal and not addresses: raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) - return addresses + + return sorted(addresses) + get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') @@ -241,6 +246,7 @@ def get_iface_from_addr(addr): raw = re.match(ll_key, _addr) if raw: _addr = raw.group(1) + if _addr == addr: log("Address '%s' is configured on iface '%s'" % (addr, iface)) @@ -251,8 +257,9 @@ def get_iface_from_addr(addr): def sniff_iface(f): - """If no iface provided, inject net iface inferred from unit private - address. + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. 
""" def iface_sniffer(*args, **kwargs): if not kwargs.get('iface', None): @@ -317,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, return addrs if fatal: - raise Exception("Interface '%s' doesn't have a scope global " + raise Exception("Interface '%s' does not have a scope global " "non-temporary ipv6 address." % iface) return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of bridges on the system or [] - """ - b_rgex = vnic_dir + '/*/bridge' - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of nics comprising a given bridge on the system or [] - """ - brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] def is_bridge_member(nic): - """ - Check if a given nic is a member of a bridge - """ + """Check if a given nic is a member of a bridge.""" for bridge in get_bridges(): if nic in get_bridge_nics(bridge): return True + return False diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in 
configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils): expected service catalog endpoints. """ self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 538dc913..355e6e05 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,20 +1,19 @@ import json import os import time - from base64 import b64decode +from subprocess import check_call -from subprocess import ( - check_call -) +import six +from six.moves import xrange from charmhelpers.fetch import ( apt_install, filter_installed_packages, ) - from charmhelpers.core.hookenv import ( config, + is_relation_made, local_unit, log, relation_get, @@ -23,43 +22,40 @@ from charmhelpers.core.hookenv import ( relation_set, unit_get, unit_private_ip, + DEBUG, + INFO, + WARNING, ERROR, - INFO ) - from charmhelpers.core.host import ( mkdir, - write_file + write_file, ) - from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, https, - is_clustered + is_clustered, ) - from charmhelpers.contrib.hahelpers.apache import ( get_cert, 
get_ca_cert, install_ca_cert, ) - from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, ) - from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, get_netmask_for_address, format_ipv6_addr, - is_address_in_network + is_address_in_network, ) - from charmhelpers.contrib.openstack.utils import get_host_ip CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] class OSContextError(Exception): @@ -67,7 +63,7 @@ class OSContextError(Exception): def ensure_packages(packages): - '''Install but do not upgrade required plugin packages''' + """Install but do not upgrade required plugin packages.""" required = filter_installed_packages(packages) if required: apt_install(required, fatal=True) @@ -75,20 +71,27 @@ def ensure_packages(packages): def context_complete(ctxt): _missing = [] - for k, v in ctxt.iteritems(): + for k, v in six.iteritems(ctxt): if v is None or v == '': _missing.append(k) + if _missing: - log('Missing required data: %s' % ' '.join(_missing), level='INFO') + log('Missing required data: %s' % ' '.join(_missing), level=INFO) return False + return True def config_flags_parser(config_flags): + """Parses config flags string into dict. + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ if config_flags.find('==') >= 0: - log("config_flags is not in expected format (key=value)", - level=ERROR) + log("config_flags is not in expected format (key=value)", level=ERROR) raise OSContextError + # strip the following from each value. post_strippers = ' ,' # we strip any leading/trailing '=' or ' ' from the string then @@ -111,17 +114,18 @@ def config_flags_parser(config_flags): # if this not the first entry, expect an embedded key. 
index = current.rfind(',') if index < 0: - log("invalid config value(s) at index %s" % (i), - level=ERROR) + log("Invalid config value(s) at index %s" % (i), level=ERROR) raise OSContextError key = current[index + 1:] # Add to collection. flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + return flags class OSContextGenerator(object): + """Base class for all context generators.""" interfaces = [] def __call__(self): @@ -133,11 +137,11 @@ class SharedDBContext(OSContextGenerator): def __init__(self, database=None, user=None, relation_prefix=None, ssl_dir=None): - ''' - Allows inspecting relation for settings prefixed with relation_prefix. - This is useful for parsing access for multiple databases returned via - the shared-db interface (eg, nova_password, quantum_password) - ''' + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ self.relation_prefix = relation_prefix self.database = database self.user = user @@ -147,9 +151,8 @@ class SharedDBContext(OSContextGenerator): self.database = self.database or config('database') self.user = self.user or config('database-user') if None in [self.database, self.user]: - log('Could not generate shared_db context. ' - 'Missing required charm config options. ' - '(database name and user)') + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) raise OSContextError ctxt = {} @@ -202,23 +205,24 @@ class PostgresqlDBContext(OSContextGenerator): def __call__(self): self.database = self.database or config('database') if self.database is None: - log('Could not generate postgresql_db context. ' - 'Missing required charm config options. ' - '(database name)') + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. 
(database name)', level=ERROR) raise OSContextError - ctxt = {} + ctxt = {} for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): - ctxt = { - 'database_host': relation_get('host', rid=rid, unit=unit), - 'database': self.database, - 'database_user': relation_get('user', rid=rid, unit=unit), - 'database_password': relation_get('password', rid=rid, unit=unit), - 'database_type': 'postgresql', - } + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} if context_complete(ctxt): return ctxt + return {} @@ -227,23 +231,29 @@ def db_ssl(rdata, ctxt, ssl_dir): ca_path = os.path.join(ssl_dir, 'db-client.ca') with open(ca_path, 'w') as fh: fh.write(b64decode(rdata['ssl_ca'])) + ctxt['database_ssl_ca'] = ca_path elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found") + log("Charm not setup for ssl support but ssl ca found", level=INFO) return ctxt + if 'ssl_cert' in rdata: cert_path = os.path.join( ssl_dir, 'db-client.cert') if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity") + log("Waiting 1m for ssl client cert validity", level=INFO) time.sleep(60) + with open(cert_path, 'w') as fh: fh.write(b64decode(rdata['ssl_cert'])) + ctxt['database_ssl_cert'] = cert_path key_path = os.path.join(ssl_dir, 'db-client.key') with open(key_path, 'w') as fh: fh.write(b64decode(rdata['ssl_key'])) + ctxt['database_ssl_key'] = key_path + return ctxt @@ -251,9 +261,8 @@ class IdentityServiceContext(OSContextGenerator): interfaces = ['identity-service'] def __call__(self): - log('Generating template context for identity-service') + log('Generating template context for identity-service', level=DEBUG) ctxt = {} - for rid in 
relation_ids('identity-service'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) @@ -261,26 +270,24 @@ class IdentityServiceContext(OSContextGenerator): serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host - - ctxt = { - 'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - } + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + ctxt = {'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol} if context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') return ctxt + return {} @@ -293,21 +300,23 @@ class AMQPContext(OSContextGenerator): self.interfaces = [rel_name] def __call__(self): - log('Generating template context for amqp') + log('Generating template context for amqp', level=DEBUG) conf = config() - user_setting = 'rabbit-user' - vhost_setting = 'rabbit-vhost' if self.relation_prefix: - user_setting = self.relation_prefix + '-rabbit-user' - vhost_setting = self.relation_prefix + '-rabbit-vhost' + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % 
(self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' try: username = conf[user_setting] vhost = conf[vhost_setting] except KeyError as e: - log('Could not generate shared_db context. ' - 'Missing required charm config options: %s.' % e) + log('Could not generate shared_db context. Missing required charm ' + 'config options: %s.' % e, level=ERROR) raise OSContextError + ctxt = {} for rid in relation_ids(self.rel_name): ha_vip_only = False @@ -321,6 +330,7 @@ class AMQPContext(OSContextGenerator): host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host ctxt['rabbitmq_host'] = host + ctxt.update({ 'rabbitmq_user': username, 'rabbitmq_password': relation_get('password', rid=rid, @@ -331,6 +341,7 @@ class AMQPContext(OSContextGenerator): ssl_port = relation_get('ssl_port', rid=rid, unit=unit) if ssl_port: ctxt['rabbit_ssl_port'] = ssl_port + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) if ssl_ca: ctxt['rabbit_ssl_ca'] = ssl_ca @@ -344,41 +355,45 @@ class AMQPContext(OSContextGenerator): if context_complete(ctxt): if 'rabbit_ssl_ca' in ctxt: if not self.ssl_dir: - log(("Charm not setup for ssl support " - "but ssl ca found")) + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) break + ca_path = os.path.join( self.ssl_dir, 'rabbit-client-ca.pem') with open(ca_path, 'w') as fh: fh.write(b64decode(ctxt['rabbit_ssl_ca'])) ctxt['rabbit_ssl_ca'] = ca_path + # Sufficient information found = break out! 
break + # Used for active/active rabbitmq >= grizzly - if ('clustered' not in ctxt or ha_vip_only) \ - and len(related_units(rid)) > 1: + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): rabbitmq_hosts = [] for unit in related_units(rid): host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) - ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + if not context_complete(ctxt): return {} - else: - return ctxt + + return ctxt class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" interfaces = ['ceph'] def __call__(self): - '''This generates context for /etc/ceph/ceph.conf templates''' if not relation_ids('ceph'): return {} - log('Generating template context for ceph') - + log('Generating template context for ceph', level=DEBUG) mon_hosts = [] auth = None key = None @@ -387,18 +402,18 @@ class CephContext(OSContextGenerator): for unit in related_units(rid): auth = relation_get('auth', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit) - ceph_addr = \ - relation_get('ceph-public-address', rid=rid, unit=unit) or \ - relation_get('private-address', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = { - 'mon_hosts': ' '.join(mon_hosts), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog - } + ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), + 'auth': auth, + 'key': key, + 'use_syslog': use_syslog} if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -407,79 +422,68 @@ class CephContext(OSContextGenerator): return {} ensure_packages(['ceph-common']) - return ctxt -ADDRESS_TYPES = ['admin', 
'internal', 'public'] - - class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + """ interfaces = ['cluster'] - def __call__(self): - ''' - Builds half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. - ''' - if not relation_ids('cluster'): - return {} + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode - l_unit = local_unit().replace('/', '-') + def __call__(self): + if not relation_ids('cluster') and not self.singlenode_mode: + return {} if config('prefer-ipv6'): addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: addr = get_host_ip(unit_get('private-address')) + l_unit = local_unit().replace('/', '-') cluster_hosts = {} # NOTE(jamespage): build out map of configured network endpoints # and associated backends for addr_type in ADDRESS_TYPES: - laddr = get_address_in_network( - config('os-{}-network'.format(addr_type))) + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) if laddr: - cluster_hosts[laddr] = {} - cluster_hosts[laddr]['network'] = "{}/{}".format( - laddr, - get_netmask_for_address(laddr) - ) - cluster_hosts[laddr]['backends'] = {} - cluster_hosts[laddr]['backends'][l_unit] = laddr + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} for rid in relation_ids('cluster'): for unit in related_units(rid): - _unit = unit.replace('/', '-') _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: + _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr # NOTE(jamespage) no split configurations found, just use # 
private addresses if not cluster_hosts: - cluster_hosts[addr] = {} - cluster_hosts[addr]['network'] = "{}/{}".format( - addr, - get_netmask_for_address(addr) - ) - cluster_hosts[addr]['backends'] = {} - cluster_hosts[addr]['backends'][l_unit] = addr + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} for rid in relation_ids('cluster'): for unit in related_units(rid): - _unit = unit.replace('/', '-') _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: + _unit = unit.replace('/', '-') cluster_hosts[addr]['backends'][_unit] = _laddr - ctxt = { - 'frontends': cluster_hosts, - } + ctxt = {'frontends': cluster_hosts} if config('haproxy-server-timeout'): ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + if config('haproxy-client-timeout'): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') @@ -493,13 +497,18 @@ class HAProxyContext(OSContextGenerator): ctxt['stat_port'] = ':8888' for frontend in cluster_hosts: - if len(cluster_hosts[frontend]['backends']) > 1: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): # Enable haproxy when we have enough peers. - log('Ensuring haproxy enabled in /etc/default/haproxy.') + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) with open('/etc/default/haproxy', 'w') as out: out.write('ENABLED=1\n') + return ctxt - log('HAProxy context is incomplete, this unit has no peers.') + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) return {} @@ -507,29 +516,28 @@ class ImageServiceContext(OSContextGenerator): interfaces = ['image-service'] def __call__(self): - ''' - Obtains the glance API server from the image-service relation. Useful - in nova and cinder (currently). - ''' - log('Generating template context for image-service.') + """Obtains the glance API server from the image-service relation. 
+ Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) rids = relation_ids('image-service') if not rids: return {} + for rid in rids: for unit in related_units(rid): api_server = relation_get('glance-api-server', rid=rid, unit=unit) if api_server: return {'glance_api_servers': api_server} - log('ImageService context is incomplete. ' - 'Missing required relation data.') + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) return {} class ApacheSSLContext(OSContextGenerator): - - """ - Generates a context for an apache vhost configuration that configures + """Generates a context for an apache vhost configuration that configures HTTPS reverse proxying for one or many endpoints. Generated context looks something like:: @@ -563,6 +571,7 @@ class ApacheSSLContext(OSContextGenerator): else: cert_filename = 'cert' key_filename = 'key' + write_file(path=os.path.join(ssl_dir, cert_filename), content=b64decode(cert)) write_file(path=os.path.join(ssl_dir, key_filename), @@ -574,7 +583,8 @@ class ApacheSSLContext(OSContextGenerator): install_ca_cert(b64decode(ca_cert)) def canonical_names(self): - '''Figure out which canonical names clients will access this service''' + """Figure out which canonical names clients will access this service. + """ cns = [] for r_id in relation_ids('identity-service'): for unit in related_units(r_id): @@ -582,55 +592,80 @@ class ApacheSSLContext(OSContextGenerator): for k in rdata: if k.startswith('ssl_key_'): cns.append(k.lstrip('ssl_key_')) - return list(set(cns)) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] 
+ + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + if config('vip'): + vips = config('vip').split() + else: + vips = [] + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." % net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) def __call__(self): - if isinstance(self.external_ports, basestring): + if isinstance(self.external_ports, six.string_types): self.external_ports = [self.external_ports] - if (not self.external_ports or not https()): + + if not self.external_ports or not https(): return {} self.configure_ca() self.enable_modules() - ctxt = { - 'namespace': self.service_namespace, - 'endpoints': [], - 'ext_ports': [] - } + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} for cn in self.canonical_names(): self.configure_cert(cn) - addresses = [] - vips = [] - if config('vip'): - vips = config('vip').split() - - for network_type in ['os-internal-network', - 'os-admin-network', - 'os-public-network']: - address = get_address_in_network(config(network_type), - unit_get('private-address')) - if len(vips) > 0 and is_clustered(): - for vip in vips: - if is_address_in_network(config(network_type), - vip): - addresses.append((address, vip)) - break - elif is_clustered(): - addresses.append((address, config('vip'))) - else: - addresses.append((address, address)) - - for address, endpoint in set(addresses): + addresses = self.get_network_addresses() + 
for address, endpoint in sorted(set(addresses)): for api_port in self.external_ports: ext_port = determine_apache_port(api_port) int_port = determine_api_port(api_port) portmap = (address, endpoint, int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) ctxt['ext_ports'].append(int(ext_port)) - ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) return ctxt @@ -647,21 +682,23 @@ class NeutronContext(OSContextGenerator): @property def packages(self): - return neutron_plugin_attribute( - self.plugin, 'packages', self.network_manager) + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) @property def neutron_security_groups(self): return None def _ensure_packages(self): - [ensure_packages(pkgs) for pkgs in self.packages] + for pkgs in self.packages: + ensure_packages(pkgs) def _save_flag_file(self): if self.network_manager == 'quantum': _file = '/etc/nova/quantum_plugin.conf' else: _file = '/etc/nova/neutron_plugin.conf' + with open(_file, 'wb') as out: out.write(self.plugin + '\n') @@ -670,13 +707,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - ovs_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return ovs_ctxt @@ -685,13 +720,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - nvp_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + nvp_ctxt = 
{'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return nvp_ctxt @@ -700,35 +733,50 @@ class NeutronContext(OSContextGenerator): self.network_manager) n1kv_config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - n1kv_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'n1kv', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': n1kv_config, - 'vsm_ip': config('n1kv-vsm-ip'), - 'vsm_username': config('n1kv-vsm-username'), - 'vsm_password': config('n1kv-vsm-password'), - 'restrict_policy_profiles': config( - 'n1kv_restrict_policy_profiles'), - } + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags return n1kv_ctxt + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + def neutron_ctxt(self): if https(): proto = 'https' else: proto = 'http' + if is_clustered(): host = config('vip') else: host = unit_get('private-address') - url = '%s://%s:%s' % (proto, host, '9696') - ctxt = { - 
'network_manager': self.network_manager, - 'neutron_url': url, - } + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt def __call__(self): @@ -748,6 +796,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.nvp_ctxt()) elif self.plugin == 'n1kv': ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -759,23 +809,40 @@ class NeutronContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator): + """Provides support for user-defined config flags. - """ - Responsible for adding user-defined config-flags in charm config to a - template context. + Users can define a comma-seperated list of key=value pairs + in the charm configuration and apply them at any point in + any file by using a template flag. + + Sometimes users might want config flags inserted within a + specific section so this class allows users to specify the + template flag name, allowing for multiple template flags + (sections) within the same context. NOTE: the value of config-flags may be a comma-separated list of key=value pairs and some Openstack config files support comma-separated lists as values. """ + def __init__(self, charm_flag='config-flags', + template_flag='user_config_flags'): + """ + :param charm_flag: config flags in charm configuration. + :param template_flag: insert point for user-defined flags in template + file. 
+ """ + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag + def __call__(self): - config_flags = config('config-flags') + config_flags = config(self._charm_flag) if not config_flags: return {} - flags = config_flags_parser(config_flags) - return {'user_config_flags': flags} + return {self._template_flag: + config_flags_parser(config_flags)} class SubordinateConfigContext(OSContextGenerator): @@ -819,7 +886,6 @@ class SubordinateConfigContext(OSContextGenerator): }, } } - """ def __init__(self, service, config_file, interface): @@ -849,26 +915,28 @@ class SubordinateConfigContext(OSContextGenerator): if self.service not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service)) + 'nothing for %s service' % (rid, self.service), + level=INFO) continue sub_config = sub_config[self.service] if self.config_file not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file)) + 'nothing for %s' % (rid, self.config_file), + level=INFO) continue sub_config = sub_config[self.config_file] - for k, v in sub_config.iteritems(): + for k, v in six.iteritems(sub_config): if k == 'sections': - for section, config_dict in v.iteritems(): - log("adding section '%s'" % (section)) + for section, config_dict in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) ctxt[k][section] = config_dict else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) - + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt @@ -880,15 +948,14 @@ class LogLevelContext(OSContextGenerator): False if config('debug') is None else config('debug') ctxt['verbose'] = \ False if config('verbose') is None else config('verbose') + return ctxt class SyslogContext(OSContextGenerator): def __call__(self): - ctxt = { - 'use_syslog': config('use-syslog') - } + ctxt = 
{'use_syslog': config('use-syslog')} return ctxt @@ -896,13 +963,9 @@ class BindHostContext(OSContextGenerator): def __call__(self): if config('prefer-ipv6'): - return { - 'bind_host': '::' - } + return {'bind_host': '::'} else: - return { - 'bind_host': '0.0.0.0' - } + return {'bind_host': '0.0.0.0'} class WorkerConfigContext(OSContextGenerator): @@ -914,11 +977,42 @@ class WorkerConfigContext(OSContextGenerator): except ImportError: apt_install('python-psutil', fatal=True) from psutil import NUM_CPUS + return NUM_CPUS def __call__(self): - multiplier = config('worker-multiplier') or 1 - ctxt = { - "workers": self.num_cpus * multiplier - } + multiplier = config('worker-multiplier') or 0 + ctxt = {"workers": self.num_cpus * multiplier} + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 84d97bca..8390d135 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -138,10 +138,25 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [['neutron-plugin-cisco']], + 
'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-cisco']], 'server_packages': ['neutron-server', 'neutron-plugin-cisco'], 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['calico-compute', 'bird', 'neutron-dhcp-agent']], + 'server_packages': ['neutron-server', 'calico-control'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -162,7 +177,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): elif manager == 'neutron': plugins = neutron_plugins() else: - log('Error: Network manager does not support plugins.') + log("Network manager '%s' does not support plugins." 
% (manager), + level=ERROR) raise Exception try: diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 19c9b856..0229f9d4 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -35,7 +35,7 @@ listen stats {{ stat_port }} stats auth admin:password {% if frontends -%} -{% for service, ports in service_ports.iteritems() -%} +{% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} bind :::{{ ports[0] }} @@ -46,7 +46,7 @@ frontend tcp-in_{{ service }} {% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn - {% for unit, address in frontends[frontend]['backends'].iteritems() -%} + {% for unit, address in frontends[frontend]['backends'].items() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} {% endfor -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index f5442712..33df0675 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,13 +1,13 @@ import os -from charmhelpers.fetch import apt_install +import six +from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( log, ERROR, INFO ) - from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: @@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release): order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in OPENSTACK_CODENAMES.itervalues()] + for rel in six.itervalues(OPENSTACK_CODENAMES)] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' % templates_dir, @@ -258,7 +258,7 @@ class OSConfigRenderer(object): """ Write out all registered config files. 
""" - [self.write(k) for k in self.templates.iterkeys()] + [self.write(k) for k in six.iterkeys(self.templates)] def set_release(self, openstack_release): """ @@ -275,5 +275,5 @@ class OSConfigRenderer(object): ''' interfaces = [] [interfaces.extend(i.complete_contexts()) - for i in self.templates.itervalues()] + for i in six.itervalues(self.templates)] return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index b0d1b03a..6447ce9a 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -2,6 +2,7 @@ # Common python helper functions used for OpenStack charms. from collections import OrderedDict +from functools import wraps import subprocess import json @@ -9,6 +10,8 @@ import os import socket import sys +import six + from charmhelpers.core.hookenv import ( config, log as juju_log, @@ -112,7 +115,7 @@ def get_os_codename_install_source(src): # Best guess match based on deb string provided if src.startswith('deb') or src.startswith('ppa'): - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v in src: return v @@ -133,7 +136,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename): '''Determine OpenStack version number from codename.''' - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -193,7 +196,7 @@ def get_os_version_package(pkg, fatal=True): else: vers_map = OPENSTACK_CODENAMES - for version, cname in vers_map.iteritems(): + for version, cname in six.iteritems(vers_map): if cname == codename: return version # e = "Could not determine OpenStack version for package: %s" % pkg @@ -317,7 +320,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in 
env_vars.iteritems() if u != "script_path"] + for u, p in six.iteritems(env_vars) if u != "script_path"] def openstack_upgrade_available(package): @@ -417,7 +420,7 @@ def ns_query(address): if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, basestring): + elif isinstance(address, six.string_types): rtype = 'A' else: return None @@ -468,6 +471,14 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + def sync_db_with_multi_ipv6_addresses(database, database_user, relation_prefix=None): hosts = get_ipv6_addr(dynamic_only=False) @@ -477,10 +488,24 @@ def sync_db_with_multi_ipv6_addresses(database, database_user, 'hostname': json.dumps(hosts)} if relation_prefix: - keys = kwargs.keys() - for key in keys: + for key in list(kwargs.keys()): kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] del kwargs[key] for rid in relation_ids('shared-db'): relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap diff --git a/hooks/charmhelpers/contrib/peerstorage/__init__.py b/hooks/charmhelpers/contrib/peerstorage/__init__.py index 11e4fea8..773d72a2 100644 --- a/hooks/charmhelpers/contrib/peerstorage/__init__.py +++ b/hooks/charmhelpers/contrib/peerstorage/__init__.py @@ -1,3 +1,4 @@ +import six from charmhelpers.core.hookenv import relation_id as current_relation_id from charmhelpers.core.hookenv import ( is_relation_made, @@ -93,7 +94,7 @@ def peer_echo(includes=None): if ex in echo_data: echo_data.pop(ex) else: - for attribute, 
value in rdata.iteritems(): + for attribute, value in six.iteritems(rdata): for include in includes: if include in attribute: echo_data[attribute] = value @@ -119,8 +120,8 @@ def peer_store_and_set(relation_id=None, peer_relation_name='cluster', relation_settings=relation_settings, **kwargs) if is_relation_made(peer_relation_name): - for key, value in dict(kwargs.items() + - relation_settings.items()).iteritems(): + for key, value in six.iteritems(dict(list(kwargs.items()) + + list(relation_settings.items()))): key_prefix = relation_id or current_relation_id() peer_store(key_prefix + delimiter + key, value, diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 768438a4..5d907c02 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -16,19 +16,18 @@ import time from subprocess import ( check_call, check_output, - CalledProcessError + CalledProcessError, ) - from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, log, + DEBUG, INFO, WARNING, - ERROR + ERROR, ) - from charmhelpers.core.host import ( mount, mounts, @@ -37,7 +36,6 @@ from charmhelpers.core.host import ( service_running, umount, ) - from charmhelpers.fetch import ( apt_install, ) @@ -56,99 +54,82 @@ CEPH_CONF = """[global] def install(): - ''' Basic Ceph client installation ''' + """Basic Ceph client installation.""" ceph_dir = "/etc/ceph" if not os.path.exists(ceph_dir): os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) def rbd_exists(service, pool, rbd_img): - ''' Check to see if a RADOS block device exists ''' + """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, - '--pool', pool]) + out = check_output(['rbd', 'list', '--id', service, '--pool', pool]) except CalledProcessError: return False - else: - return rbd_img in out + + return rbd_img in out def create_rbd_image(service, 
pool, image, sizemb): - ''' Create a new RADOS block device ''' - cmd = [ - 'rbd', - 'create', - image, - '--size', - str(sizemb), - '--id', - service, - '--pool', - pool - ] + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] check_call(cmd) def pool_exists(service, name): - ''' Check to see if a RADOS pool already exists ''' + """Check to see if a RADOS pool already exists.""" try: out = check_output(['rados', '--id', service, 'lspools']) except CalledProcessError: return False - else: - return name in out + + return name in out def get_osds(service): - ''' - Return a list of all Ceph Object Storage Daemons - currently in the cluster - ''' + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json'])) - else: - return None + + return None -def create_pool(service, name, replicas=2): - ''' Create a new RADOS pool ''' +def create_pool(service, name, replicas=3): + """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return + # Calculate the number of placement groups based # on upstream recommended best practices. 
osds = get_osds(service) if osds: - pgnum = (len(osds) * 100 / replicas) + pgnum = (len(osds) * 100 // replicas) else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli pgnum = 200 - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'create', - name, str(pgnum) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] check_call(cmd) - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', name, - 'size', str(replicas) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', + str(replicas)] check_call(cmd) def delete_pool(service, name): - ''' Delete a RADOS pool from ceph ''' - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'delete', - name, '--yes-i-really-really-mean-it' - ] + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] check_call(cmd) @@ -161,44 +142,43 @@ def _keyring_path(service): def create_keyring(service, key): - ''' Create a new Ceph keyring containing key''' + """Create a new Ceph keyring containing key.""" keyring = _keyring_path(service) if os.path.exists(keyring): - log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + log('Ceph keyring exists at %s.' % keyring, level=WARNING) return - cmd = [ - 'ceph-authtool', - keyring, - '--create-keyring', - '--name=client.{}'.format(service), - '--add-key={}'.format(key) - ] + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] check_call(cmd) - log('ceph: Created new ring at %s.' % keyring, level=INFO) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) def create_key_file(service, key): - ''' Create a file containing key ''' + """Create a file containing key.""" keyfile = _keyfile_path(service) if os.path.exists(keyfile): - log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + log('Keyfile exists at %s.' 
% keyfile, level=WARNING) return + with open(keyfile, 'w') as fd: fd.write(key) - log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + log('Created new keyfile at %s.' % keyfile, level=INFO) def get_ceph_nodes(): - ''' Query named relation 'ceph' to detemine current nodes ''' + """Query named relation 'ceph' to determine current nodes.""" hosts = [] for r_id in relation_ids('ceph'): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts def configure(service, key, auth, use_syslog): - ''' Perform basic configuration of Ceph ''' + """Perform basic configuration of Ceph.""" create_keyring(service, key) create_key_file(service, key) hosts = get_ceph_nodes() @@ -211,17 +191,17 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): - ''' Determine whether a RADOS block device is mapped locally ''' + """Determine whether a RADOS block device is mapped locally.""" try: out = check_output(['rbd', 'showmapped']) except CalledProcessError: return False - else: - return name in out + + return name in out def map_block_storage(service, pool, image): - ''' Map a RADOS block device for local use ''' + """Map a RADOS block device for local use.""" cmd = [ 'rbd', 'map', @@ -235,31 +215,32 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - ''' Determine whether a filesytems is already mounted ''' + """Determine whether a filesytems is already mounted.""" return fs in [f for f, m in mounts()] def make_filesystem(blk_device, fstype='ext4', timeout=10): - ''' Make a new filesystem on the specified block device ''' + """Make a new filesystem on the specified block device.""" count = 0 e_noent = os.errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: - log('ceph: gave up waiting on block device %s' % blk_device, + log('Gave up waiting on block device %s' % blk_device, level=ERROR) raise IOError(e_noent, os.strerror(e_noent), blk_device) - log('ceph: 
waiting for block device %s to appear' % blk_device, - level=INFO) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) count += 1 time.sleep(1) else: - log('ceph: Formatting block device %s as filesystem %s.' % + log('Formatting block device %s as filesystem %s.' % (blk_device, fstype), level=INFO) check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_block_device(blk_device, data_src_dst): - ''' Migrate data in data_src_dst to blk_device and then remount ''' + """Migrate data in data_src_dst to blk_device and then remount.""" # mount block device into /mnt mount(blk_device, '/mnt') # copy data to /mnt @@ -279,8 +260,8 @@ def place_data_on_block_device(blk_device, data_src_dst): # TODO: re-use def modprobe(module): - ''' Load a kernel module and configure for auto-load on reboot ''' - log('ceph: Loading kernel module', level=INFO) + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) cmd = ['modprobe', module] check_call(cmd) with open('/etc/modules', 'r+') as modules: @@ -289,7 +270,7 @@ def modprobe(module): def copy_files(src, dst, symlinks=False, ignore=None): - ''' Copy files from src to dst ''' + """Copy files from src to dst.""" for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) @@ -300,9 +281,9 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[]): - """ - NOTE: This function must only be called from a single service unit for + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, @@ -316,15 +297,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, """ # Ensure pool, RBD image, RBD mappings are in place. 
if not pool_exists(service, pool): - log('ceph: Creating new pool {}.'.format(pool)) - create_pool(service, pool) + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): - log('ceph: Creating RBD image ({}).'.format(rbd_img)) + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) create_rbd_image(service, pool, rbd_img, sizemb) if not image_mapped(rbd_img): - log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) map_block_storage(service, pool, rbd_img) # make file system @@ -339,42 +321,44 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, for svc in system_services: if service_running(svc): - log('ceph: Stopping services {} prior to migrating data.' - .format(svc)) + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) service_stop(svc) place_data_on_block_device(blk_device, mount_point) for svc in system_services: - log('ceph: Starting service {} after migrating data.' - .format(svc)) + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) service_start(svc) def ensure_ceph_keyring(service, user=None, group=None): - ''' - Ensures a ceph keyring is created for a named service - and optionally ensures user and group ownership. + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. Returns False if no ceph key is available in relation state. 
- ''' + """ key = None for rid in relation_ids('ceph'): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: break + if not key: return False + create_keyring(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) + return True def ceph_version(): - ''' Retrieve the local version of ceph ''' + """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] output = check_output(cmd) diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 38957ef0..a22c3d7b 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,12 +1,12 @@ - import os import re - from subprocess import ( check_call, check_output, ) +import six + ################################################## # loopback device helpers. @@ -37,7 +37,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == file_path: return d @@ -51,7 +51,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == path: return d diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py index cfaf0a65..0adf0db3 100644 --- a/hooks/charmhelpers/core/fstab.py +++ b/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. 
' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ class Fstab(file): options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ class Fstab(file): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ class Fstab(file): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ class Fstab(file): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ class Fstab(file): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index af8fe2db..90623667 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,11 @@ import json import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six +from six.moves import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -67,12 +69,12 @@ def log(message, level=None): subprocess.call(command) -class 
Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -214,6 +216,12 @@ class Config(dict): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + def load_previous(self, path=None): """Load previous copy of config from disk. @@ -263,7 +271,7 @@ class Config(dict): """ if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -300,7 +308,7 @@ def relation_get(attribute=None, unit=None, rid=None): return json.loads(subprocess.check_output(_args)) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -312,7 +320,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d7ce1e4c..a3cb996b 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -6,19 +6,20 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv 
import log +from .fstab import Fstab def service_start(service_name): @@ -130,7 +131,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +147,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +178,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +192,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -218,8 +219,8 @@ def file_hash(path, hash_type='md5'): """ if os.path.exists(path): h = getattr(hashlib, hash_type)() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None @@ -297,7 +298,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l 
not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -306,7 +307,7 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type @@ -317,7 +318,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/hooks/charmhelpers/core/services/__init__.py +++ b/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 7067b94b..163a7932 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -196,7 +196,7 @@ class StoredContext(dict): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0600) + os.fchmod(file_stream.fileno(), 0o600) yaml.dump(config_data, file_stream) def read_context(self, file_name): @@ -211,15 +211,19 @@ class StoredContext(dict): class TemplateCallback(ManagerCallback): """ - Callback class that will render a Jinja2 template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. 
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` - :param str source: The template source file, relative to `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 2c638853..83133fa4 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import host from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 32a673d6..4a27d2cc 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from yaml import safe_load from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,9 @@ from charmhelpers.core.hookenv import ( ) import os +import six +from six.moves.urllib.parse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -69,10 +68,16 @@ CLOUD_ARCHIVE_POCKETS = { # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. 
-FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', -) +if six.PY2: + FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', + ) +else: + FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. @@ -148,7 +153,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -181,7 +186,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -192,7 +197,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -218,6 +223,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,12 +257,14 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -293,14 +301,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -397,7 +405,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 8c045650..613ea90f 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,8 +1,14 @@ import os -import urllib2 -from urllib import urlretrieve -import urlparse import hashlib +import re + +import six +from six.moves.urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from six.moves.urllib.parse import urlparse, urlunparse, parse_qs +from six.moves.urllib.error import URLError from 
charmhelpers.fetch import ( BaseFetchHandler, @@ -15,6 +21,24 @@ from charmhelpers.payload.archive import ( from charmhelpers.core.host import mkdir, check_hash +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. @@ -42,20 +66,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: 
dest_file.write(response.read()) @@ -91,17 +115,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) - except urllib2.URLError as e: + except URLError as e: raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - options = urlparse.parse_qs(url_parts.fragment) + options = parse_qs(url_parts.fragment) for key, value in options.items(): - if key in hashlib.algorithms: + if six.PY2: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: check_hash(dld_file, value, key) if checksum: check_hash(dld_file, checksum, hash_type) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py index 0e580e47..8ef48f30 100644 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -5,6 +5,10 @@ from charmhelpers.fetch import ( ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + try: from bzrlib.branch import Branch except ImportError: @@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.branch(source, dest_dir) except OSError as e: diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..61684cb6 --- /dev/null +++ b/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,48 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +import six +if six.PY3: + raise 
ImportError('GitPython does not support Python 3') + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index bb3db4fb..a58ebfcc 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -118,18 +118,28 @@ class HAProxyContext(context.HAProxyContext): # determine which port api processes should bind to, depending # on existence of haproxy + apache frontends - compute_api = determine_api_port(api_port('nova-api-os-compute')) - ec2_api = determine_api_port(api_port('nova-api-ec2')) - s3_api = determine_api_port(api_port('nova-objectstore')) - nvol_api = determine_api_port(api_port('nova-api-os-volume')) - neutron_api = determine_api_port(api_port('neutron-server')) + compute_api = determine_api_port(api_port('nova-api-os-compute'), + singlenode_mode=True) + ec2_api = determine_api_port(api_port('nova-api-ec2'), + singlenode_mode=True) + s3_api = 
determine_api_port(api_port('nova-objectstore'), + singlenode_mode=True) + nvol_api = determine_api_port(api_port('nova-api-os-volume'), + singlenode_mode=True) + neutron_api = determine_api_port(api_port('neutron-server'), + singlenode_mode=True) # Apache ports - a_compute_api = determine_apache_port(api_port('nova-api-os-compute')) - a_ec2_api = determine_apache_port(api_port('nova-api-ec2')) - a_s3_api = determine_apache_port(api_port('nova-objectstore')) - a_nvol_api = determine_apache_port(api_port('nova-api-os-volume')) - a_neutron_api = determine_apache_port(api_port('neutron-server')) + a_compute_api = determine_apache_port(api_port('nova-api-os-compute'), + singlenode_mode=True) + a_ec2_api = determine_apache_port(api_port('nova-api-ec2'), + singlenode_mode=True) + a_s3_api = determine_apache_port(api_port('nova-objectstore'), + singlenode_mode=True) + a_nvol_api = determine_apache_port(api_port('nova-api-os-volume'), + singlenode_mode=True) + a_neutron_api = determine_apache_port(api_port('neutron-server'), + singlenode_mode=True) # to be set in nova.conf accordingly. 
listen_ports = { diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 554dd74a..e8d7c897 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -172,7 +172,7 @@ BASE_RESOURCE_MAP = OrderedDict([ 'contexts': [nova_cc_context.NeutronCCContext()], }), (HAPROXY_CONF, { - 'contexts': [context.HAProxyContext(), + 'contexts': [context.HAProxyContext(singlenode_mode=True), nova_cc_context.HAProxyContext()], 'services': ['haproxy'], }), diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py index d859d367..3d3ef339 100644 --- a/tests/charmhelpers/contrib/amulet/deployment.py +++ b/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,6 +1,6 @@ import amulet - import os +import six class AmuletDeployment(object): @@ -52,12 +52,12 @@ class AmuletDeployment(object): def _add_relations(self, relations): """Add all of the relations for the services.""" - for k, v in relations.iteritems(): + for k, v in six.iteritems(relations): self.d.relate(k, v) def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _deploy(self): diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py index c843333f..d333e63b 100644 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -5,6 +5,8 @@ import re import sys import time +import six + class AmuletUtils(object): """Amulet utilities. @@ -58,7 +60,7 @@ class AmuletUtils(object): Verify the specified services are running on the corresponding service units. """ - for k, v in commands.iteritems(): + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) if code != 0: @@ -100,11 +102,11 @@ class AmuletUtils(object): longs, or can be a function that evaluate a variable and returns a bool. 
""" - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: - if (isinstance(v, basestring) or + if (isinstance(v, six.string_types) or isinstance(v, bool) or - isinstance(v, (int, long))): + isinstance(v, six.integer_types)): if v != actual[k]: return "{}:{}".format(k, actual[k]) elif not v(actual[k]): diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils): expected service catalog endpoints. 
""" self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: From 588f8bb6e43ee94f270ab20c6534490f273a8be3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Nov 2014 08:52:09 +0000 Subject: [PATCH 20/35] Sync charmhelpers --- .../charmhelpers/contrib/hahelpers/cluster.py | 2 +- hooks/charmhelpers/contrib/network/ip.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 3 +-- .../charmhelpers/contrib/openstack/neutron.py | 4 ++-- .../contrib/storage/linux/ceph.py | 13 +++++++---- .../charmhelpers/contrib/storage/linux/lvm.py | 1 + .../contrib/storage/linux/utils.py | 5 ++-- hooks/charmhelpers/core/hookenv.py | 18 ++++++++++----- hooks/charmhelpers/core/host.py | 16 ++++++++----- hooks/charmhelpers/fetch/__init__.py | 20 ++++++++-------- hooks/charmhelpers/fetch/archiveurl.py | 23 +++++++++++++------ 11 files changed, 64 insertions(+), 43 deletions(-) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 3e51986d..52ce4b7c 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -78,7 +78,7 @@ def is_crm_leader(resource): "show", resource ] try: - status = subprocess.check_output(cmd) + status = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return False else: diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index b9a9815c..8dc83165 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -302,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd) + out = 
subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") else: diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 355e6e05..eebe8c03 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -5,7 +5,6 @@ from base64 import b64decode from subprocess import check_call import six -from six.moves import xrange from charmhelpers.fetch import ( apt_install, @@ -99,7 +98,7 @@ def config_flags_parser(config_flags): split = config_flags.strip(' =').split('=') limit = len(split) flags = {} - for i in xrange(0, limit - 1): + for i in range(0, limit - 1): current = split[i] next = split[i + 1] vindex = next.rfind(',') diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 8390d135..1446f637 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release def headers_package(): """Ensures correct linux-headers for running kernel are installed, for building DKMS package""" - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() return 'linux-headers-%s' % kver QUANTUM_CONF_DIR = '/etc/quantum' @@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum' def kernel_version(): """ Retrieve the current major kernel version as a tuple e.g. 
(3, 13) """ - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() kver = kver.split('.') return (int(kver[0]), int(kver[1])) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 5d907c02..d47dc228 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -65,7 +65,8 @@ def install(): def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, '--pool', pool]) + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') except CalledProcessError: return False @@ -82,7 +83,8 @@ def create_rbd_image(service, pool, image, sizemb): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') except CalledProcessError: return False @@ -96,7 +98,8 @@ def get_osds(service): version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', '--format=json'])) + 'osd', 'ls', + '--format=json']).decode('UTF-8')) return None @@ -193,7 +196,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) + out = check_output(['rbd', 'showmapped']).decode('UTF-8') except CalledProcessError: return False @@ -361,7 +364,7 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd) + output = check_output(cmd).decode('US-ASCII') output = output.split() if len(output) > 3: return output[2] diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py 
b/hooks/charmhelpers/contrib/storage/linux/lvm.py index 8ac7fecc..0aa65f4f 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device): vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() for l in pvd: + l = l.decode('UTF-8') if l.strip().startswith('VG Name'): vg = ' '.join(l.strip().split()[2:]) return vg diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b958712..c6a15e14 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. 
''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 90623667..99e5d208 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -12,7 +12,10 @@ import sys from subprocess import CalledProcessError import six -from six.moves import UserDict +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -286,7 +289,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -305,7 +309,7 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None except CalledProcessError as e: @@ -337,7 +341,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -348,7 +353,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -457,7 +463,7 @@ def 
unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index a3cb996b..e6783d9b 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -55,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -68,7 +70,9 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: return 'unrecognized service' not in e.output else: @@ -116,7 +120,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def symlink(source, destination): @@ -314,7 +318,7 @@ def list_nics(nic_type): interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): @@ -336,7 +340,7 @@ def set_nic_mtu(nic, 
mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -347,7 +351,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 4a27d2cc..0a126fc3 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,10 @@ from charmhelpers.core.hookenv import ( import os import six -from six.moves.urllib.parse import urlparse, urlunparse +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse CLOUD_ARCHIVE = """# Ubuntu Cloud Archive @@ -68,16 +71,11 @@ CLOUD_ARCHIVE_POCKETS = { # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. -if six.PY2: - FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', - 'charmhelpers.fetch.giturl.GitUrlFetchHandler', - ) -else: - FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - ) +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', +) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. 
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 613ea90f..8a4624b2 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -3,12 +3,21 @@ import hashlib import re import six -from six.moves.urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, -) -from six.moves.urllib.parse import urlparse, urlunparse, parse_qs -from six.moves.urllib.error import URLError +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -125,7 +134,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if six.PY2: + if not six.PY3: algorithms = hashlib.algorithms else: algorithms = hashlib.algorithms_available From 084ca5c89a17fd0d55af7dbc74905bbfa3164164 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Nov 2014 10:36:26 +0000 Subject: [PATCH 21/35] Fix charmhelper source and resync --- charm-helpers-hooks.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index 9ae5e6a2..8211c8dd 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~gnuoy/charm-helpers/haproxy-singlenode-mode +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 2bffd104ebb46954ad8de106142a63660c65c19d Mon Sep 17 00:00:00 2001 
From: Felipe Reyes Date: Wed, 26 Nov 2014 18:24:02 -0300 Subject: [PATCH 22/35] Added IPv6 support Added test to check that IPv6 addresses are properly formatted. --- hooks/nova_cc_context.py | 8 ++++++-- unit_tests/test_nova_cc_contexts.py | 23 +++++++++++++++++++---- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 75c97e1b..25f4784e 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -13,7 +13,8 @@ from charmhelpers.contrib.hahelpers.cluster import ( ) from charmhelpers.contrib.network.ip import ( - get_ipv6_addr + get_ipv6_addr, + format_ipv6_addr ) @@ -292,7 +293,10 @@ class InstanceConsoleContext(context.OSContextGenerator): try: for rid in relation_ids('memcache'): for rel in relations_for_id(rid): - servers.append({'private-address': rel['private-address'], + priv_addr = rel['private-address'] + # format it as IPv6 address if neeeded + priv_addr = format_ipv6_addr(priv_addr) or priv_addr + servers.append({'private-address': priv_addr, 'port': rel['port']}) except Exception as ex: log("Couldn't get caching servers: {}".format(str(ex)), diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index c777b7d3..33ea3d44 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -48,7 +48,8 @@ class NovaComputeContextTests(CharmTestCase): self.log.side_effect = fake_log @mock.patch.object(utils, 'os_release') - def test_instance_console_context_without_memcache(self, os_release): + @mock.patch('charmhelpers.contrib.network.ip.log') + def test_instance_console_context_without_memcache(self, os_release, log_): self.unit_get.return_value = '127.0.0.1' self.relation_ids.return_value = 'cache:0' self.related_units.return_value = 'memcached/0' @@ -58,10 +59,24 @@ class NovaComputeContextTests(CharmTestCase): instance_console()) @mock.patch.object(utils, 'os_release') - def 
test_instance_console_context_with_memcache(self, os_release): - memcached_servers = [{'private-address': '127.0.1.1', + @mock.patch('charmhelpers.contrib.network.ip.log') + def test_instance_console_context_with_memcache(self, os_release, log_): + self.check_instance_console_context_with_memcache(os_release, + '127.0.1.1', + '127.0.1.1') + + @mock.patch.object(utils, 'os_release') + @mock.patch('charmhelpers.contrib.network.ip.log') + def test_instance_console_context_with_memcache_ipv6(self, os_release, + log_): + self.check_instance_console_context_with_memcache(os_release, '::1', + '[::1]') + + def check_instance_console_context_with_memcache(self, os_release, ip, + formated_ip): + memcached_servers = [{'private-address': formated_ip, 'port': '11211'}] - self.unit_get.return_value = '127.0.0.1' + self.unit_get.return_value = ip self.relation_ids.return_value = ['cache:0'] self.relations_for_id.return_value = memcached_servers self.related_units.return_value = 'memcached/0' From fd40c349b2e3e4d42ac0aed0f20300b6716cc236 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Mon, 15 Dec 2014 13:47:17 -0300 Subject: [PATCH 23/35] Replace "Couldn't get caching servers" with "Couldn't get memcache servers" --- hooks/nova_cc_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 508a3865..19a35320 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -317,7 +317,7 @@ class InstanceConsoleContext(context.OSContextGenerator): servers.append({'private-address': priv_addr, 'port': rel['port']}) except Exception as ex: - log("Couldn't get caching servers: {}".format(str(ex)), + log("Couldn't get memcache servers: {}".format(str(ex)), level='WARNING') servers = [] From ec00658edf7f94b2a2c79a6b43a693e49dc22a12 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 16 Dec 2014 11:08:58 -0300 Subject: [PATCH 24/35] Refactor the way memcached_servers config is put in the templates 
Instead of formatting the string with Jinja2, it's formatted with python and passed to the template as a string. --- hooks/nova_cc_context.py | 3 ++- templates/grizzly/nova.conf | 2 +- templates/havana/nova.conf | 2 +- templates/icehouse/nova.conf | 2 +- unit_tests/test_nova_cc_contexts.py | 4 ++-- 5 files changed, 7 insertions(+), 6 deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 19a35320..d61bf273 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -321,5 +321,6 @@ class InstanceConsoleContext(context.OSContextGenerator): level='WARNING') servers = [] - ctxt['memcached_servers'] = servers + ctxt['memcached_servers'] = ','.join( + ["%s:%s" % (s['private-address'], s['port']) for s in servers]) return ctxt diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf index e67c32fc..4921d893 100644 --- a/templates/grizzly/nova.conf +++ b/templates/grizzly/nova.conf @@ -22,7 +22,7 @@ auth_strategy=keystone compute_driver=libvirt.LibvirtDriver {% if memcached_servers %} -memcached_servers = {%for s in memcached_servers %}{% if loop.index0 != 0 %},{% endif %}{{s['private-address']}}:{{s['port']}}{% endfor %} +memcached_servers = {{ memcached_servers }} {% endif %} {% if keystone_ec2_url -%} diff --git a/templates/havana/nova.conf b/templates/havana/nova.conf index 7cbf0ef0..77b88110 100644 --- a/templates/havana/nova.conf +++ b/templates/havana/nova.conf @@ -28,7 +28,7 @@ use_syslog={{ use_syslog }} my_ip = {{ host_ip }} {% if memcached_servers %} -memcached_servers = {%for s in memcached_servers %}{% if loop.index0 != 0 %},{% endif %}{{s['private-address']}}:{{s['port']}}{% endfor %} +memcached_servers = {{ memcached_servers }} {% endif %} {% if keystone_ec2_url -%} diff --git a/templates/icehouse/nova.conf b/templates/icehouse/nova.conf index 3e0d0883..67385124 100644 --- a/templates/icehouse/nova.conf +++ b/templates/icehouse/nova.conf @@ -39,7 +39,7 @@ use_syslog={{ use_syslog }} my_ip = {{ host_ip 
}} {% if memcached_servers %} -memcached_servers = {%for s in memcached_servers %}{% if loop.index0 != 0 %},{% endif %}{{s['private-address']}}:{{s['port']}}{% endfor %} +memcached_servers = {{ memcached_servers }} {% endif %} {% if keystone_ec2_url -%} diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index 33ea3d44..76486f34 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -55,7 +55,7 @@ class NovaComputeContextTests(CharmTestCase): self.related_units.return_value = 'memcached/0' instance_console = context.InstanceConsoleContext() os_release.return_value = 'icehouse' - self.assertEqual({'memcached_servers': []}, + self.assertEqual({'memcached_servers': ''}, instance_console()) @mock.patch.object(utils, 'os_release') @@ -83,5 +83,5 @@ class NovaComputeContextTests(CharmTestCase): instance_console = context.InstanceConsoleContext() os_release.return_value = 'icehouse' self.maxDiff = None - self.assertEqual({'memcached_servers': memcached_servers}, + self.assertEqual({'memcached_servers': "%s:11211" % (formated_ip, )}, instance_console()) From 2b2f2c96858cc7229a8393493ef11e41a1317d95 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 16 Dec 2014 19:31:29 +0000 Subject: [PATCH 25/35] add python-six install re: bug 1403114 --- hooks/nova_cc_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 554dd74a..6a42dd81 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -67,6 +67,7 @@ BASE_PACKAGES = [ 'python-mysqldb', 'python-psycopg2', 'python-psutil', + 'python-six', 'uuid', ] From 25c0fdc70fe1918d97e47654008a18ee3a1b6a9c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 18 Dec 2014 11:26:04 +0000 Subject: [PATCH 26/35] charmhelpers sync to get fix for LP #1396246 --- .../charmhelpers/contrib/hahelpers/cluster.py | 38 ++++++++++------ .../contrib/storage/linux/ceph.py | 43 +++++++++++++++++++ 
hooks/charmhelpers/core/decorators.py | 41 ++++++++++++++++++ hooks/charmhelpers/core/host.py | 11 +++-- 4 files changed, 117 insertions(+), 16 deletions(-) create mode 100644 hooks/charmhelpers/core/decorators.py diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 52ce4b7c..912b2fe3 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,6 +13,7 @@ clustering-related helpers. import subprocess import os + from socket import gethostname as get_unit_hostname import six @@ -28,12 +29,19 @@ from charmhelpers.core.hookenv import ( WARNING, unit_get, ) +from charmhelpers.core.decorators import ( + retry_on_exception, +) class HAIncompleteConfig(Exception): pass +class CRMResourceNotFound(Exception): + pass + + def is_elected_leader(resource): """ Returns True if the charm executing this is the elected cluster leader. @@ -68,24 +76,30 @@ def is_clustered(): return False -def is_crm_leader(resource): +@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) +def is_crm_leader(resource, retry=False): """ Returns True if the charm calling this is the elected corosync leader, as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. 
""" - cmd = [ - "crm", "resource", - "show", resource - ] + cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd).decode('UTF-8') + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") except subprocess.CalledProcessError: - return False - else: - if get_unit_hostname() in status: - return True - else: - return False + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False def is_leader(resource): diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index d47dc228..1479f4f3 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -372,3 +372,46 @@ def ceph_version(): return None else: return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. 
+ """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py new file mode 100644 index 00000000..029a4ef4 --- /dev/null +++ b/hooks/charmhelpers/core/decorators.py @@ -0,0 +1,41 @@ +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index c6f1680a..5221120c 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -162,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + 
os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): From 1e83b905973ad655a5278748ba14ee8fc3db599f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 19 Dec 2014 10:25:26 +0000 Subject: [PATCH 27/35] Sync charmhelpers --- hooks/charmhelpers/contrib/openstack/neutron.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 1446f637..095cc24b 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -152,9 +152,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], - 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata'], 'packages': [[headers_package()] + determine_dkms_package(), - ['calico-compute', 'bird', 'neutron-dhcp-agent']], + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata']], 'server_packages': ['neutron-server', 'calico-control'], 'server_services': ['neutron-server'] } From 2fcbbedeebd1f5a176cc4cf5a07c9519815f7383 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Dec 2014 17:10:35 +0000 Subject: [PATCH 28/35] charmhelpers sync to get fix for precise haproxy ipv6 --- hooks/charmhelpers/contrib/openstack/context.py | 1 + hooks/charmhelpers/contrib/openstack/neutron.py | 10 ++++++++-- .../contrib/openstack/templates/haproxy.cfg | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index eb108910..180bfad2 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ 
b/hooks/charmhelpers/contrib/openstack/context.py @@ -491,6 +491,7 @@ class HAProxyContext(OSContextGenerator): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') if config('prefer-ipv6'): + ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' ctxt['stat_port'] = ':::8888' diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 1446f637..095cc24b 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -152,9 +152,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], - 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata'], 'packages': [[headers_package()] + determine_dkms_package(), - ['calico-compute', 'bird', 'neutron-dhcp-agent']], + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata']], 'server_packages': ['neutron-server', 'calico-control'], 'server_services': ['neutron-server'] } diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 0229f9d4..9ae1efb9 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -38,7 +38,9 @@ listen stats {{ stat_port }} {% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} + {% if ipv6 -%} bind :::{{ ports[0] }} + {% endif -%} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} From 1059962fbc19a48d1de080cf5bb787377fa53575 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 5 Jan 2015 09:31:38 +0000 Subject: [PATCH 29/35] charmhelper sync --- 
hooks/charmhelpers/contrib/openstack/context.py | 1 + hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg | 2 ++ 2 files changed, 3 insertions(+) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index eb108910..180bfad2 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -491,6 +491,7 @@ class HAProxyContext(OSContextGenerator): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') if config('prefer-ipv6'): + ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' ctxt['stat_port'] = ':::8888' diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 0229f9d4..9ae1efb9 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -38,7 +38,9 @@ listen stats {{ stat_port }} {% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} + {% if ipv6 -%} bind :::{{ ports[0] }} + {% endif -%} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} From 7291b2eff6d82e39b735116b3e367574050e7363 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 5 Jan 2015 13:59:35 +0000 Subject: [PATCH 30/35] Fix port in amulet tests --- tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index acb6405b..7eb5fdc1 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -467,7 +467,7 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment): 'rabbit_host': rabbitmq_relation['hostname'], 'glance_api_servers': glance_relation['glance-api-server'], 'network_manager': 'nova.network.manager.FlatDHCPManager', - 
's3_listen_port': '3333', + 's3_listen_port': '3323', 'osapi_compute_listen_port': '8774', 'ec2_listen_port': '8773'} From 347eaa5dbb6ee8e3e80383bdf50bbde70bff09a3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 5 Jan 2015 14:40:20 +0000 Subject: [PATCH 31/35] Actually fix amulet tests --- tests/basic_deployment.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 7eb5fdc1..907ea7be 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -468,8 +468,8 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment): 'glance_api_servers': glance_relation['glance-api-server'], 'network_manager': 'nova.network.manager.FlatDHCPManager', 's3_listen_port': '3323', - 'osapi_compute_listen_port': '8774', - 'ec2_listen_port': '8773'} + 'osapi_compute_listen_port': '8764', + 'ec2_listen_port': '8763'} ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) if ret: From 9d02924da5c8d51f5c4124fba37a8af8ed76c575 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 9 Jan 2015 15:50:01 +0000 Subject: [PATCH 32/35] Fix lint --- hooks/nova_cc_hooks.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index eb315f4d..5c31368c 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -854,6 +854,7 @@ def neutron_api_relation_broken(): for rid in relation_ids('quantum-network-service'): quantum_joined(rid=rid) + @hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') def update_nrpe_config(): @@ -883,24 +884,28 @@ def update_nrpe_config(): shortname=service, description='process check {%s}' % current_unit, check_cmd='check_upstart_job %s' % service, - ) + ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % service - cron_template = '*/5 * * * * root \ -/usr/local/lib/nagios/plugins/check_exit_status.pl -s /etc/init.d/%s 
\
-status > /var/lib/nagios/service-check-%s.txt\n' % (service, service) + cron_entry = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (service, + service) + ) f = open(cronpath, 'w') - f.write(cron_template) + f.write(cron_entry) f.close() nrpe.add_check( shortname=service, description='process check {%s}' % current_unit, - check_cmd='check_status_file.py -f \
-/var/lib/nagios/service-check-%s.txt' % service, - ) + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % service, + ) nrpe.write() + + @hooks.hook('memcache-relation-joined', 'memcache-relation-departed', 'memcache-relation-changed', From b333d4155ad2e4aff426e9ad96b9409434af7821 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:04:00 +0000 Subject: [PATCH 33/35] Use nrpe functions from charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 102 ++++++++++++++++-- .../contrib/charmsupport/volumes.py | 7 +- hooks/charmhelpers/contrib/openstack/utils.py | 6 ++ hooks/charmhelpers/fetch/__init__.py | 9 +- hooks/nova_cc_hooks.py | 54 ++-------- 5 files changed, 120 insertions(+), 58 deletions(-) diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 51b62d39..f3a936d0 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,6 +18,7 @@ from charmhelpers.core.hookenv import ( log, relation_ids, relation_set, + relations_of_type, ) from charmhelpers.core.host import service @@ -54,6 +55,12 @@ from charmhelpers.core.host import service # juju-myservice-0 # If you're running multiple environments with the same services in them # this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. 
+# If left empty, the nagios_context will be used as the servicegroup # # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master # @@ -125,9 +132,6 @@ define service {{ def _locate_cmd(self, check_cmd): search_path = ( - '/', - os.path.join(os.environ['CHARM_DIR'], - 'files/nrpe-external-master'), '/usr/lib/nagios/plugins', '/usr/local/lib/nagios/plugins', ) @@ -141,7 +145,7 @@ define service {{ log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname): + def write(self, nagios_context, hostname, nagios_servicegroups=None): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -153,16 +157,21 @@ define service {{ log('Not writing service config as {} is not accessible'.format( NRPE.nagios_exportdir)) else: - self.write_service_config(nagios_context, hostname) + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) - def write_service_config(self, nagios_context, hostname): + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups=None): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) + if not nagios_servicegroups: + nagios_servicegroups = nagios_context + templ_vars = { 'nagios_hostname': hostname, - 'nagios_servicegroup': nagios_context, + 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, @@ -186,6 +195,10 @@ class NRPE(object): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = 'juju' self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -211,7 +224,8 @@ class NRPE(object): 
nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} for nrpecheck in self.checks: - nrpecheck.write(self.nagios_context, self.hostname) + nrpecheck.write(self.nagios_context, self.hostname, + self.nagios_servicegroups) nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } @@ -220,3 +234,75 @@ class NRPE(object): for rid in relation_ids("local-monitors"): relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + +def get_nagios_hostcontext(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_host_context + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_host_context'] + + +def get_nagios_hostname(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_hostname + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_hostname'] + + +def get_nagios_unit_name(relation_name='nrpe-external-master'): + """ + Return the nagios unit name prepended with host_context if needed + + :param str relation_name: Name of relation nrpe sub joined to + """ + host_context = get_nagios_hostcontext(relation_name) + if host_context: + unit = "%s:%s" % (host_context, local_unit()) + else: + unit = local_unit() + return unit + + +def add_init_service_checks(nrpe, services, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param list services: List of services to check + :param str unit_name: Unit name to use in check description + """ + for svc in services: + upstart_init = '/etc/init/%s.conf' % svc + sysv_init = '/etc/init.d/%s' % svc + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + 
check_cmd='check_upstart_job %s' % svc + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc + cron_file = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (svc, + svc) + ) + f = open(cronpath, 'w') + f.write(cron_file) + f.close() + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % svc, + ) diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py index 0f905dff..d61aa47f 100644 --- a/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -2,7 +2,8 @@ Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. -Configuration stanzas: +Configuration stanzas:: + volume-ephemeral: type: boolean default: true @@ -20,7 +21,8 @@ Configuration stanzas: is 'true' and no volume-map value is set. Use 'juju set' to set a value and 'juju resolved' to complete configuration. 
-Usage: +Usage:: + from charmsupport.volumes import configure_volume, VolumeConfigurationError from charmsupport.hookenv import log, ERROR def post_mount_hook(): @@ -34,6 +36,7 @@ Usage: after_change=post_mount_hook) except VolumeConfigurationError: log('Storage could not be configured', ERROR) + ''' # XXX: Known limitations diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 44179679..ddd40ce5 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -53,6 +53,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('saucy', 'havana'), ('trusty', 'icehouse'), ('utopic', 'juno'), + ('vivid', 'kilo'), ]) @@ -64,6 +65,7 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2013.2', 'havana'), ('2014.1', 'icehouse'), ('2014.2', 'juno'), + ('2015.1', 'kilo'), ]) # The ugly duckling @@ -84,6 +86,7 @@ SWIFT_CODENAMES = OrderedDict([ ('2.0.0', 'juno'), ('2.1.0', 'juno'), ('2.2.0', 'juno'), + ('2.2.1', 'kilo'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -289,6 +292,9 @@ def configure_installation_source(rel): 'juno': 'trusty-updates/juno', 'juno/updates': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', } try: diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 0a126fc3..aceadea4 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -64,9 +64,16 @@ CLOUD_ARCHIVE_POCKETS = { 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', - 'juno/proposed': 'trusty-proposed/juno', 'trusty-juno/proposed': 'trusty-proposed/juno', 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 
'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', } # The order of this list is very important. Handlers should be listed in from diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 5c31368c..6dda1ab3 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -20,7 +20,6 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, relation_set, - relations_of_type, related_units, open_port, unit_get, @@ -117,7 +116,7 @@ from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.openstack.context import ADDRESS_TYPES -from charmhelpers.contrib.charmsupport.nrpe import NRPE +from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() CONFIGS = register_configs() @@ -858,52 +857,13 @@ def neutron_api_relation_broken(): @hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') def update_nrpe_config(): - # Find out if nrpe set nagios_hostname - hostname = None - host_context = None - for rel in relations_of_type('nrpe-external-master'): - if 'nagios_hostname' in rel: - hostname = rel['nagios_hostname'] - host_context = rel['nagios_host_context'] - break - nrpe = NRPE(hostname=hostname) + # python-dbus is used by check_upstart_job apt_install('python-dbus') - - if host_context: - current_unit = "%s:%s" % (host_context, local_unit()) - else: - current_unit = local_unit() - - services_to_monitor = services() - for service in services_to_monitor: - upstart_init = '/etc/init/%s.conf' % service - sysv_init = '/etc/init.d/%s' % service - - if os.path.exists(upstart_init): - nrpe.add_check( - shortname=service, - description='process check {%s}' % current_unit, - check_cmd='check_upstart_job %s' % service, - ) - elif os.path.exists(sysv_init): - cronpath = '/etc/cron.d/nagios-service-check-%s' % service - cron_entry = ('*/5 * * * * root ' - 
'/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status > ' - '/var/lib/nagios/service-check-%s.txt\n' % (service, - service) - ) - f = open(cronpath, 'w') - f.write(cron_entry) - f.close() - nrpe.add_check( - shortname=service, - description='process check {%s}' % current_unit, - check_cmd='check_status_file.py -f ' - '/var/lib/nagios/service-check-%s.txt' % service, - ) - - nrpe.write() + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe.add_init_service_checks(nrpe_setup, services(), current_unit) + nrpe_setup.write() @hooks.hook('memcache-relation-joined', From c7dd3ee9367fad3e5993a156fdcfc08919d8de33 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 12 Jan 2015 12:40:02 +0000 Subject: [PATCH 34/35] charmhelpers sync to get fix for apache ssl port selection --- hooks/charmhelpers/contrib/openstack/context.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 180bfad2..8ab61bf0 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -663,8 +663,9 @@ class ApacheSSLContext(OSContextGenerator): addresses = self.get_network_addresses() for address, endpoint in sorted(set(addresses)): for api_port in self.external_ports: - ext_port = determine_apache_port(api_port) - int_port = determine_api_port(api_port) + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) portmap = (address, endpoint, int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) ctxt['ext_ports'].append(int(ext_port)) From 19f3dbed0002c1f5630cef86793c662bd5c1d418 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 14:23:34 +0000 Subject: [PATCH 35/35] Fix unit tests --- unit_tests/test_nova_cc_hooks.py 
| 1 + 1 file changed, 1 insertion(+) diff --git a/unit_tests/test_nova_cc_hooks.py b/unit_tests/test_nova_cc_hooks.py index d0ff0e70..0c606c2e 100644 --- a/unit_tests/test_nova_cc_hooks.py +++ b/unit_tests/test_nova_cc_hooks.py @@ -64,6 +64,7 @@ TO_PATCH = [ 'migrate_nova_database', 'migrate_neutron_database', 'uuid', + 'update_nrpe_config', ]