From 7c5831be1cd5a8c69974409a059651965a84bc2b Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Aug 2015 13:34:34 -0400 Subject: [PATCH 01/16] [corey.bryant,r=trivial] Sync charm-helpers to pick up Liberty support. --- hooks/charmhelpers/cli/__init__.py | 6 +- hooks/charmhelpers/cli/commands.py | 8 +- hooks/charmhelpers/cli/hookenv.py | 23 ++++ .../contrib/openstack/amulet/deployment.py | 4 +- hooks/charmhelpers/contrib/openstack/utils.py | 65 ++++++++--- .../contrib/storage/linux/utils.py | 5 +- hooks/charmhelpers/core/hookenv.py | 21 +--- hooks/charmhelpers/core/host.py | 25 ++++- hooks/charmhelpers/core/services/helpers.py | 20 +++- hooks/charmhelpers/fetch/__init__.py | 8 ++ tests/charmhelpers/contrib/amulet/utils.py | 105 ++++++++++++++---- .../contrib/openstack/amulet/deployment.py | 4 +- 12 files changed, 220 insertions(+), 74 deletions(-) create mode 100644 hooks/charmhelpers/cli/hookenv.py diff --git a/hooks/charmhelpers/cli/__init__.py b/hooks/charmhelpers/cli/__init__.py index 7118daf..16d52cc 100644 --- a/hooks/charmhelpers/cli/__init__.py +++ b/hooks/charmhelpers/cli/__init__.py @@ -152,15 +152,11 @@ class CommandLine(object): arguments = self.argument_parser.parse_args() argspec = inspect.getargspec(arguments.func) vargs = [] - kwargs = {} for arg in argspec.args: vargs.append(getattr(arguments, arg)) if argspec.varargs: vargs.extend(getattr(arguments, argspec.varargs)) - if argspec.keywords: - for kwarg in argspec.keywords.items(): - kwargs[kwarg] = getattr(arguments, kwarg) - output = arguments.func(*vargs, **kwargs) + output = arguments.func(*vargs) if getattr(arguments.func, '_cli_test_command', False): self.exit_code = 0 if output else 1 output = '' diff --git a/hooks/charmhelpers/cli/commands.py b/hooks/charmhelpers/cli/commands.py index 443ff05..7e91db0 100644 --- a/hooks/charmhelpers/cli/commands.py +++ b/hooks/charmhelpers/cli/commands.py @@ -26,7 +26,7 @@ from . import CommandLine # noqa """ Import the sub-modules which have decorated subcommands to register with chlp. """ -import host # noqa -import benchmark # noqa -import unitdata # noqa -from charmhelpers.core import hookenv # noqa +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . import hookenv # noqa diff --git a/hooks/charmhelpers/cli/hookenv.py b/hooks/charmhelpers/cli/hookenv.py new file mode 100644 index 0000000..265c816 --- /dev/null +++ b/hooks/charmhelpers/cli/hookenv.py @@ -0,0 +1,23 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb..07ee2ef 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + base_charms = ['mysql', 'mongodb', 'nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series @@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): 'ceph-osd', 'ceph-radosgw'] # Most OpenStack subordinate charms do not expose an origin option # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 4dd000c..c9fd68f 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -24,6 +24,7 @@ import subprocess import json import os import sys +import re import six import yaml @@ -69,7 +70,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') - UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -118,6 +118,34 @@ SWIFT_CODENAMES = OrderedDict([ ('2.3.0', 'liberty'), ]) +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12.0.0', 'liberty'), + ]), + 'neutron-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'cinder-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'keystone': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'horizon-common': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'ceilometer-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'heat-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'glance-common': OrderedDict([ + ('11.0.0', 'liberty'), + ]), +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -201,20 +229,29 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) + match = re.match('^(\d)\.(\d)\.(\d)', vers) + if match: + vers = match.group(0) - try: - if 'swift' in pkg.name: - swift_vers = vers[:5] - if swift_vers not in SWIFT_CODENAMES: - # Deal with 1.10.0 upward - swift_vers = vers[:6] - return SWIFT_CODENAMES[swift_vers] - else: - vers = vers[:6] - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + 
return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) def get_os_version_package(pkg, fatal=True): diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index e2769e4..1e57941 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 18860f5..a35d006 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -34,23 +34,6 @@ import errno import tempfile from subprocess import CalledProcessError -try: - from charmhelpers.cli import cmdline -except ImportError as e: - # due to the anti-pattern of partially synching charmhelpers directly - # into charms, it's possible that charmhelpers.cli is not available; - # if that's the case, they don't really care about using the cli anyway, - # so mock it out - if str(e) == 'No module named cli': - class cmdline(object): - @classmethod - def subcommand(cls, *args, **kwargs): - def _wrap(func): - return func - return _wrap - else: - raise - import six if not six.PY3: from UserDict import UserDict @@ -91,6 +74,7 @@ def cached(func): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -190,7 +174,6 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -@cmdline.subcommand() @cached def relation_id(relation_name=None, service_or_unit=None): """The relation ID for the current or a specified relation""" @@ -216,13 +199,11 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] -@cmdline.subcommand() @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 8ae8ef8..ec659ee 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -72,7 +72,7 @@ def service_pause(service_name, init_dir=None): stopped = service_stop(service_name) # XXX: Support systemd too override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: fh.write("manual\n") return stopped @@ -86,7 +86,7 @@ def service_resume(service_name, init_dir=None): if init_dir is None: init_dir = "/etc/init" override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): os.unlink(override_path) started = service_start(service_name) @@ -148,6 +148,16 @@ def adduser(username, password=None, 
shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +290,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 8005c41..3f67783 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -16,7 +16,9 @@ import os import yaml + from charmhelpers.core import hookenv +from charmhelpers.core import host from charmhelpers.core import templating from charmhelpers.core.services.base import ManagerCallback @@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file - + :param partial on_change_action: functools partial to be executed when + rendered file changes """ def __init__(self, source, target, - owner='root', group='root', perms=0o444): + owner='root', group='root', perms=0o444, + on_change_action=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms + self.on_change_action = on_change_action def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) context = {} for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, self.owner, self.group, self.perms) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() # Convenience aliases for templates diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 0a3bb96..cd0b783 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -90,6 +90,14 @@ CLOUD_ARCHIVE_POCKETS = { 'kilo/proposed': 'trusty-proposed/kilo', 'trusty-kilo/proposed': 'trusty-proposed/kilo', 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', } # The order of this list is very important. 
Handlers should be listed in from diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py index 3de26af..7816c93 100644 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -14,17 +14,23 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -import amulet -import ConfigParser -import distro_info import io +import json import logging import os import re -import six +import subprocess import sys import time -import urlparse + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse class AmuletUtils(object): @@ -142,19 +148,23 @@ class AmuletUtils(object): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name == "rabbitmq-server"): - # init is systemd + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output - output, code = sentry_unit.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) return None def _get_config(self, unit, filename): @@ -164,7 +174,7 @@ class AmuletUtils(object): # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 - config = ConfigParser.ConfigParser(allow_no_value=True) + config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -450,15 +460,20 @@ class AmuletUtils(object): cmd, code, output)) return None - def get_process_id_list(self, sentry_unit, process_name): + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): """Get a list of process ID(s) from a single sentry juju unit for a single process name. - :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param sentry_unit: Amulet sentry instance (juju unit) :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. :returns: List of process IDs """ - cmd = 'pidof {}'.format(process_name) + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' @@ -467,14 +482,23 @@ class AmuletUtils(object): amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() - def get_unit_process_ids(self, unit_processes): + def get_unit_process_ids(self, unit_processes, expect_success=True): """Construct a dict containing unit sentries, process names, and - process IDs.""" + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. 
+ :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ pid_dict = {} - for sentry_unit, process_list in unit_processes.iteritems(): + for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: - pids = self.get_process_id_list(sentry_unit, process) + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) pid_dict[sentry_unit].update({process: pids}) return pid_dict @@ -488,7 +512,7 @@ class AmuletUtils(object): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) - for (e_sentry, e_proc_names) in expected.iteritems(): + for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] @@ -507,11 +531,23 @@ class AmuletUtils(object): '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) - if e_pids_length != a_pids_length: - return ('PID count mismatch. {} ({}) expected, actual: ' + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids_length, a_pids_length, a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, @@ -531,3 +567,30 @@ class AmuletUtils(object): return 'Dicts within list are not identical' return None + + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output): + """Run the named action on a given unit sentry. + + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. + + _check_output parameter is used for dependency injection. 
+ """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb..07ee2ef 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + base_charms = ['mysql', 'mongodb', 'nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series @@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): 'ceph-osd', 'ceph-radosgw'] # Most OpenStack subordinate charms do not expose an origin option # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: From 0b0d832c33469b89f995a43fb05c157d7081797e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 19 Aug 2015 14:53:16 +0100 Subject: [PATCH 02/16] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- .../charmhelpers/contrib/openstack/context.py | 27 +++++-- .../charmhelpers/contrib/openstack/neutron.py | 43 +++++++---- hooks/charmhelpers/core/host.py | 77 ++++++++++++++++--- hooks/charmhelpers/core/hugepage.py | 62 +++++++++++++++ 4 files changed, 178 insertions(+), 31 deletions(-) create mode 100644 hooks/charmhelpers/core/hugepage.py diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index ab2ebac..9a33a03 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -50,6 +50,8 @@ from charmhelpers.core.sysctl import create as sysctl_create from charmhelpers.core.strutils import bool_from_string from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, list_nics, get_nic_hwaddr, mkdir, @@ -923,7 +925,6 @@ class NeutronContext(OSContextGenerator): class NeutronPortContext(OSContextGenerator): - NIC_PREFIXES = ['eth', 'bond'] def resolve_ports(self, ports): """Resolve NICs not yet bound to bridge(s) @@ -935,7 +936,18 @@ class NeutronPortContext(OSContextGenerator): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(self.NIC_PREFIXES): + for nic in list_nics(): + # Ignore virtual interfaces (bond masters will be identified from + # their slaves) + if not is_phy_iface(nic): + continue + + _nic = get_bond_master(nic) + if _nic: + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), + level=DEBUG) + nic = _nic + hwaddr = get_nic_hwaddr(nic) hwaddr_to_nic[hwaddr] = nic addresses = get_ipv4_addr(nic, fatal=False) @@ -961,7 +973,8 @@ class NeutronPortContext(OSContextGenerator): # trust it to be the real external network). 
resolved.append(entry) - return resolved + # Ensure no duplicates + return list(set(resolved)) class OSConfigFlagContext(OSContextGenerator): @@ -1280,15 +1293,19 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: + # Map of {port/mac:bridge} portmap = parse_data_port_mappings(ports) - ports = portmap.values() + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. resolved = self.resolve_ports(ports) + # FIXME: is this necessary? normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {bridge: normalized[port] for bridge, port in + return {bridge: normalized[port] for port, bridge in six.iteritems(portmap) if port in normalized.keys()} return None diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index f7b7235..c3d5c28 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -255,17 +255,30 @@ def network_manager(): return 'neutron' -def parse_mappings(mappings): +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ parsed = {} if mappings: mappings = mappings.split() for m in mappings: p = m.partition(':') - key = p[0].strip() - if p[1]: - parsed[key] = p[2].strip() + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue else: - parsed[key] = '' + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() return parsed @@ -283,25 +296,25 @@ def parse_bridge_mappings(mappings): def parse_data_port_mappings(mappings, default_bridge='br-data'): """Parse data port mappings. - Mappings must be a space-delimited list of bridge:port mappings. + Mappings must be a space-delimited list of port:bridge mappings. - Returns dict of the form {bridge:port}. + Returns dict of the form {port:bridge} where port may be an mac address or + interface name. """ - _mappings = parse_mappings(mappings) + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) if not _mappings or list(_mappings.values()) == ['']: if not mappings: return {} # For backwards-compatibility we need to support port-only provided in # config. 
- _mappings = {default_bridge: mappings.split()[0]} - - bridges = _mappings.keys() - ports = _mappings.values() - if len(set(bridges)) != len(bridges): - raise Exception("It is not allowed to have more than one port " - "configured on the same bridge") + _mappings = {mappings.split()[0]: default_bridge} + ports = _mappings.keys() if len(set(ports)) != len(ports): raise Exception("It is not allowed to have the same port configured " "on more than one bridge") diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index ec659ee..29e8fee 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -417,25 +417,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. + + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 0000000..ba4340f --- /dev/null +++ b/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True): + """Enable hugepages on system. + + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) From e908a0dccea0e2fd298d2f4bcf23b50b7d82cfd2 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 3 Sep 2015 10:08:40 +0100 Subject: [PATCH 03/16] Resync helpers, use heat-common for package version detection --- hooks/charmhelpers/contrib/network/ip.py | 6 +++++- .../charmhelpers/contrib/openstack/context.py | 14 ++++++++++++++ .../charmhelpers/contrib/openstack/neutron.py | 14 ++++++++++++++ hooks/charmhelpers/contrib/openstack/utils.py | 16 ++++++++++------ hooks/charmhelpers/core/hookenv.py | 18 ++++++++++-------- hooks/heat_relations.py | 2 +- hooks/heat_utils.py | 2 +- 7 files changed, 55 insertions(+), 17 deletions(-) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index fff6d5c..67b4dcc 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 9a33a03..9ae4582 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -895,6 +895,18 @@ class NeutronContext(OSContextGenerator): 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt + def pg_ctxt(self): + driver = 
neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + def __call__(self): if self.network_manager not in ['quantum', 'neutron']: return {} @@ -914,6 +926,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.calico_ctxt()) elif self.plugin == 'vsp': ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index c3d5c28..55b2037 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -195,6 +195,20 @@ def neutron_plugins(): 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['plumgrid-lxc'], + ['iovisor-dkms']], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index c9fd68f..c98c5c9 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. 
@@ -195,9 +193,9 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(OPENSTACK_CODENAMES): + for k, v in six.iteritems(version_map): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -229,7 +227,7 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) - match = re.match('^(\d)\.(\d)\.(\d)', vers) + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -250,6 +248,8 @@ def get_os_codename_package(package, fatal=True): vers = vers[:6] return OPENSTACK_CODENAMES[vers] except KeyError: + if not fatal: + return None e = 'Could not determine OpenStack codename for version %s' % vers error_out(e) @@ -429,7 +429,11 @@ def openstack_upgrade_available(package): import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) - available_vers = get_os_version_install_source(src) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index a35d006..ab53a78 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -767,21 +767,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): diff --git a/hooks/heat_relations.py b/hooks/heat_relations.py index 66828cd..d2ad10d 100755 --- a/hooks/heat_relations.py +++ b/hooks/heat_relations.py @@ -87,7 +87,7 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map()) def config_changed(): - if openstack_upgrade_available('heat-engine'): + if openstack_upgrade_available('heat-common'): do_openstack_upgrade(CONFIGS) CONFIGS.write_all() configure_https() diff --git a/hooks/heat_utils.py b/hooks/heat_utils.py index 651d936..804c996 100644 --- a/hooks/heat_utils.py +++ b/hooks/heat_utils.py @@ -91,7 +91,7 @@ CONFIG_FILES = OrderedDict([ def register_configs(): - release = os_release('heat-engine') + release = os_release('heat-common') configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) From 5c90772c74b0542fdb985ea3501ac3bacd8d94df Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 3 Sep 2015 10:45:18 +0100 Subject: [PATCH 04/16] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- hooks/charmhelpers/contrib/network/ip.py | 6 
+++++- .../charmhelpers/contrib/openstack/context.py | 14 ++++++++++++++ .../charmhelpers/contrib/openstack/neutron.py | 14 ++++++++++++++ hooks/charmhelpers/contrib/openstack/utils.py | 16 ++++++++++------ hooks/charmhelpers/core/hookenv.py | 18 ++++++++++-------- 5 files changed, 53 insertions(+), 15 deletions(-) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index fff6d5c..67b4dcc 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 9a33a03..9ae4582 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -895,6 +895,18 @@ class NeutronContext(OSContextGenerator): 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + def __call__(self): if self.network_manager not in ['quantum', 'neutron']: return {} @@ -914,6 +926,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.calico_ctxt()) elif self.plugin == 'vsp': ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index c3d5c28..55b2037 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -195,6 +195,20 @@ def neutron_plugins(): 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['plumgrid-lxc'], + ['iovisor-dkms']], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index c9fd68f..c98c5c9 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. 
@@ -195,9 +193,9 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(OPENSTACK_CODENAMES): + for k, v in six.iteritems(version_map): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -229,7 +227,7 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) - match = re.match('^(\d)\.(\d)\.(\d)', vers) + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -250,6 +248,8 @@ def get_os_codename_package(package, fatal=True): vers = vers[:6] return OPENSTACK_CODENAMES[vers] except KeyError: + if not fatal: + return None e = 'Could not determine OpenStack codename for version %s' % vers error_out(e) @@ -429,7 +429,11 @@ def openstack_upgrade_available(package): import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) - available_vers = get_os_version_install_source(src) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index a35d006..ab53a78 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -767,21 +767,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): From c10efd0a91c9fe0c9a5731f7021d82221e1f454e Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 21 Sep 2015 19:35:41 +0000 Subject: [PATCH 05/16] Action-managed openstack upgrade support --- actions.yaml | 4 ++ actions/openstack-upgrade | 1 + actions/openstack_upgrade.py | 38 ++++++++++++++ config.yaml | 10 ++++ hooks/heat_relations.py | 5 +- unit_tests/__init__.py | 1 + unit_tests/test_actions_openstack_upgrade.py | 54 ++++++++++++++++++++ unit_tests/test_heat_relations.py | 10 ++++ 8 files changed, 121 insertions(+), 2 deletions(-) create mode 100644 actions.yaml create mode 120000 actions/openstack-upgrade create mode 100755 actions/openstack_upgrade.py create mode 100644 unit_tests/test_actions_openstack_upgrade.py diff --git a/actions.yaml b/actions.yaml new file mode 100644 index 0000000..9ece090 --- /dev/null +++ b/actions.yaml @@ -0,0 +1,4 @@ +openstack-upgrade: + description: + Perform openstack upgrades. Config option action-managed-upgrade must be + set to True. 
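(Editorial aside, not part of the patch: once this action lands, an upgrade can be driven and checked per unit with the `juju action` CLI that the amulet `run_action`/`wait_on_action` helpers synced in PATCH 01 already wrap. A minimal Python sketch following that same pattern; the `heat/0` unit name is an illustrative assumption.)

```python
# Run the new openstack-upgrade action on a single unit and poll its
# result, mirroring run_action/wait_on_action from the amulet helpers.
import json
import subprocess


def upgrade_one_unit(unit='heat/0'):
    out = subprocess.check_output(
        ['juju', 'action', 'do', '--format=json', unit, 'openstack-upgrade'],
        universal_newlines=True)
    action_id = json.loads(out)['Action queued with id']
    out = subprocess.check_output(
        ['juju', 'action', 'fetch', '--format=json', '--wait=0', action_id],
        universal_newlines=True)
    return json.loads(out).get('status') == 'completed'
```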
diff --git a/actions/openstack-upgrade b/actions/openstack-upgrade new file mode 120000 index 0000000..6179301 --- /dev/null +++ b/actions/openstack-upgrade @@ -0,0 +1 @@ +openstack_upgrade.py \ No newline at end of file diff --git a/actions/openstack_upgrade.py b/actions/openstack_upgrade.py new file mode 100755 index 0000000..6c2fc9b --- /dev/null +++ b/actions/openstack_upgrade.py @@ -0,0 +1,38 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks/') + +from charmhelpers.contrib.openstack.utils import ( + do_action_openstack_upgrade, +) + +from heat_relations import config_changed + +from heat_utils import ( + do_openstack_upgrade, + register_configs +) + + +CONFIGS = register_configs() + + +def openstack_upgrade(): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. + + If the charm was installed from source we cannot upgrade it. + For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed.""" + + if (do_action_openstack_upgrade('heat-common', + do_openstack_upgrade, + CONFIGS)): + config_changed() + +if __name__ == '__main__': + openstack_upgrade() diff --git a/config.yaml b/config.yaml index d7dcece..c40f29a 100644 --- a/config.yaml +++ b/config.yaml @@ -85,3 +85,13 @@ options: create the following public endpoints for ceilometer: . https://ceilometer.example.com:8777/ + action-managed-upgrade: + type: boolean + default: False + description: | + If True enables openstack upgrades for this charm via juju actions. + You will still need to set openstack-origin to the new repository but + instead of an upgrade running automatically across all units, it will + wait for you to execute the openstack-upgrade action for this charm on + each unit. If False it will revert to existing behavior of upgrading + all units on config change. 
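(Editorial aside, not part of the patch: the config description above implies a two-step operator flow: opt in and set the new origin, then run the action unit by unit. A hedged sketch of that flow on Juju 1.x, reusing the `upgrade_one_unit` sketch above; the service name, unit list, `juju set` invocation and the `cloud:trusty-liberty` origin value are assumptions for illustration.)

```python
# Opt the heat service in to action-managed upgrades and point it at a
# newer origin, then upgrade units one at a time via the new action.
import subprocess


def rolling_upgrade(service='heat', units=('heat/0', 'heat/1'),
                    new_origin='cloud:trusty-liberty'):
    subprocess.check_call(['juju', 'set', service,
                           'action-managed-upgrade=true',
                           'openstack-origin=%s' % new_origin])
    for unit in units:
        if not upgrade_one_unit(unit):
            raise RuntimeError(
                'openstack-upgrade did not complete on %s' % unit)
```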
diff --git a/hooks/heat_relations.py b/hooks/heat_relations.py index d2ad10d..0fdbdf9 100755 --- a/hooks/heat_relations.py +++ b/hooks/heat_relations.py @@ -87,8 +87,9 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map()) def config_changed(): - if openstack_upgrade_available('heat-common'): - do_openstack_upgrade(CONFIGS) + if not config('action-managed-upgrade'): + if openstack_upgrade_available('heat-common'): + do_openstack_upgrade(CONFIGS) CONFIGS.write_all() configure_https() diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index 415b211..d6289f0 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -1,2 +1,3 @@ import sys +sys.path.append('actions/') sys.path.append('hooks/') diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py new file mode 100644 index 0000000..c516bc6 --- /dev/null +++ b/unit_tests/test_actions_openstack_upgrade.py @@ -0,0 +1,54 @@ +from mock import patch +import os + +os.environ['JUJU_UNIT_NAME'] = 'heat' + +with patch('heat_utils.register_configs') as register_configs: + import openstack_upgrade + +from test_utils import ( + CharmTestCase +) + +TO_PATCH = [ + 'config_changed', + 'do_openstack_upgrade', + 'register_configs', +] + + +class TestHeatUpgradeActions(CharmTestCase): + + def setUp(self): + super(TestHeatUpgradeActions, self).setUp(openstack_upgrade, + TO_PATCH) + + @patch('charmhelpers.contrib.openstack.utils.config') + @patch('charmhelpers.contrib.openstack.utils.action_set') + @patch('charmhelpers.contrib.openstack.utils.git_install_requested') + @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') + def test_openstack_upgrade_true(self, upgrade_avail, git_requested, + action_set, config): + git_requested.return_value = False + upgrade_avail.return_value = True + config.return_value = True + + openstack_upgrade.openstack_upgrade() + + self.assertTrue(self.do_openstack_upgrade.called) + self.assertTrue(self.config_changed.called) + + @patch('charmhelpers.contrib.openstack.utils.config') + @patch('charmhelpers.contrib.openstack.utils.action_set') + @patch('charmhelpers.contrib.openstack.utils.git_install_requested') + @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') + def test_openstack_upgrade_false(self, upgrade_avail, git_requested, + action_set, config): + git_requested.return_value = False + upgrade_avail.return_value = True + config.return_value = False + + openstack_upgrade.openstack_upgrade() + + self.assertFalse(self.do_openstack_upgrade.called) + self.assertFalse(self.config_changed.called) diff --git a/unit_tests/test_heat_relations.py b/unit_tests/test_heat_relations.py index 85d3ca3..ebdf7e5 100644 --- a/unit_tests/test_heat_relations.py +++ b/unit_tests/test_heat_relations.py @@ -76,6 +76,16 @@ class HeatRelationTests(CharmTestCase): relations.config_changed() self.assertTrue(self.do_openstack_upgrade.called) + @patch.object(relations, 'configure_https') + def test_config_changed_with_openstack_upgrade_action(self, + mock_configure_https): + self.openstack_upgrade_available.return_value = True + self.test_config.set('action-managed-upgrade', True) + + relations.config_changed() + + self.assertFalse(self.do_openstack_upgrade.called) + def test_db_joined(self): self.unit_get.return_value = 'heat.foohost.com' relations.db_joined() From c3c9f5583a867f110f01d370a4617a84c586db20 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 21 Sep 2015 19:36:07 +0000 Subject: [PATCH 06/16] Sync 
charm-helpers --- hooks/charmhelpers/contrib/network/ip.py | 8 +- .../contrib/openstack/amulet/deployment.py | 32 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 76 +++- .../contrib/openstack/templating.py | 32 +- hooks/charmhelpers/contrib/openstack/utils.py | 225 ++++++++++- .../contrib/storage/linux/ceph.py | 239 +++++++++++- hooks/charmhelpers/core/host.py | 48 ++- hooks/charmhelpers/core/hugepage.py | 9 +- hooks/charmhelpers/core/strutils.py | 30 ++ .../charmhelpers/contrib/amulet/deployment.py | 6 +- tests/charmhelpers/contrib/amulet/utils.py | 293 +++++++++++--- .../contrib/openstack/amulet/deployment.py | 32 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++++++ 14 files changed, 1625 insertions(+), 123 deletions(-) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 67b4dcc..7f3b66b 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ import socket from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import ( try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 07ee2ef..722bc64 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,20 +44,31 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. 
+ force_series_current = ['nrpe'] + if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,6 +77,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): @@ -77,21 +89,23 @@ class OpenStackAmuletDeployment(AmuletDeployment): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 03f7927..b139741 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not port and not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
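
# A minimal usage sketch for the rmq cluster/ssl validators above, assuming a
# deployed rabbitmq-server application whose sentry units are collected in
# rmq_sentries (a list of amulet sentries).
import amulet

from charmhelpers.contrib.openstack.amulet.utils import (
    OpenStackAmuletUtils,
    DEBUG,
)

u = OpenStackAmuletUtils(DEBUG)

ret = u.validate_rmq_cluster_running_nodes(rmq_sentries)
if ret:
    amulet.raise_status(amulet.FAIL, msg=ret)

ret = u.validate_rmq_ssl_disabled_units(rmq_sentries)
if ret:
    amulet.raise_status(amulet.FAIL, msg=ret)
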
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
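
# Sketch: toggling rabbitmq ssl via charm config and confirming an amqp
# connection on the non-default port; u, rmq_sentries and the test user from
# add_rmq_test_user() are assumed as in the earlier sketch, and deployment is
# the amulet deployment object driving the test.
u.configure_rmq_ssl_on(rmq_sentries, deployment, port=5671, max_wait=120)
conn = u.connect_amqp_by_unit(rmq_sentries[0], ssl=True, port=5671)
conn.close()
u.configure_rmq_ssl_off(rmq_sentries, deployment)
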
+ + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 9ae4582..1248d49 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -194,10 +194,50 @@ def config_flags_parser(config_flags): class OSContextGenerator(object): """Base class for all context generators.""" interfaces = [] + related = False + complete = False + missing_data = [] def __call__(self): raise NotImplementedError + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. + """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. 
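
# Sketch of an end-to-end amqp check built from the publish/get helpers above;
# u and rmq_sentries are assumed as in the earlier sketches.
u.add_rmq_test_user(rmq_sentries)
stamp = u.get_uuid_epoch_stamp()
message = 'amqp test message {}'.format(stamp)
u.publish_amqp_message_by_unit(rmq_sentries[0], message)
body = u.get_amqp_message_by_unit(rmq_sentries[-1], queue='test')
if body != message:
    amulet.raise_status(amulet.FAIL, 'Message mismatch: {}'.format(body))
u.delete_rmq_test_user(rmq_sentries)
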
+ """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] @@ -213,6 +253,7 @@ class SharedDBContext(OSContextGenerator): self.database = database self.user = user self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] def __call__(self): self.database = self.database or config('database') @@ -246,6 +287,7 @@ class SharedDBContext(OSContextGenerator): password_setting = self.relation_prefix + '_password' for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) host = rdata.get('db_host') @@ -257,7 +299,7 @@ class SharedDBContext(OSContextGenerator): 'database_password': rdata.get(password_setting), 'database_type': 'mysql' } - if context_complete(ctxt): + if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt return {} @@ -278,6 +320,7 @@ class PostgresqlDBContext(OSContextGenerator): ctxt = {} for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rel_host = relation_get('host', rid=rid, unit=unit) rel_user = relation_get('user', rid=rid, unit=unit) @@ -287,7 +330,7 @@ class PostgresqlDBContext(OSContextGenerator): 'database_user': rel_user, 'database_password': rel_passwd, 'database_type': 'postgresql'} - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} @@ -348,6 +391,7 @@ class IdentityServiceContext(OSContextGenerator): ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) serv_host = rdata.get('service_host') @@ -366,7 +410,7 @@ class IdentityServiceContext(OSContextGenerator): 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol}) - if context_complete(ctxt): + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading @@ -405,6 +449,7 @@ class AMQPContext(OSContextGenerator): ctxt = {} for rid in relation_ids(self.rel_name): ha_vip_only = False + self.related = True for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True @@ -437,7 +482,7 @@ class AMQPContext(OSContextGenerator): ha_vip_only = relation_get('ha-vip-only', rid=rid, unit=unit) is not None - if context_complete(ctxt): + if self.context_complete(ctxt): if 'rabbit_ssl_ca' in ctxt: if not self.ssl_dir: log("Charm not setup for ssl support but ssl ca " @@ -469,7 +514,7 @@ class AMQPContext(OSContextGenerator): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) - if not context_complete(ctxt): + if not self.complete: return {} return ctxt @@ -485,13 +530,15 @@ class CephContext(OSContextGenerator): log('Generating template context for ceph', level=DEBUG) mon_hosts = [] - auth = None - key = None - use_syslog = str(config('use-syslog')).lower() + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } for rid in relation_ids('ceph'): for unit in related_units(rid): - auth = relation_get('auth', rid=rid, unit=unit) - key = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = 
relation_get('key', rid=rid, unit=unit) ceph_pub_addr = relation_get('ceph-public-address', rid=rid, unit=unit) unit_priv_addr = relation_get('private-address', rid=rid, @@ -500,15 +547,12 @@ class CephContext(OSContextGenerator): ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog} + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') - if not context_complete(ctxt): + if not self.context_complete(ctxt): return {} ensure_packages(['ceph-common']) @@ -1367,6 +1411,6 @@ class NetworkServiceContext(OSContextGenerator): 'auth_protocol': rdata.get('auth_protocol') or 'http', } - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 021d8cf..e5e3cb1 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -18,7 +18,7 @@ import os import six -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, ERROR, @@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: + apt_update(fatal=True) apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -112,7 +113,7 @@ class OSConfigTemplate(object): def complete_contexts(self): ''' - Return a list of interfaces that have atisfied contexts. + Return a list of interfaces that have satisfied contexts. ''' if self._complete_contexts: return self._complete_contexts @@ -293,3 +294,30 @@ class OSConfigRenderer(object): [interfaces.extend(i.complete_contexts()) for i in six.itervalues(self.templates)] return interfaces + + def get_incomplete_context_data(self, interfaces): + ''' + Return dictionary of relation status of interfaces and any missing + required context data. 
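
# Sketch of a charm context written against the new OSContextGenerator
# helpers, mirroring the pattern used by the contexts above; the 'backend'
# interface and its settings are hypothetical.
from charmhelpers.contrib.openstack.context import OSContextGenerator
from charmhelpers.core.hookenv import related_units, relation_get, relation_ids


class BackendContext(OSContextGenerator):
    interfaces = ['backend']

    def __call__(self):
        ctxt = {}
        for rid in relation_ids('backend'):
            self.related = True
            for unit in related_units(rid):
                ctxt = {
                    'backend_host': relation_get('host', rid=rid, unit=unit),
                    'backend_port': relation_get('port', rid=rid, unit=unit),
                }
                # context_complete() now records self.missing_data and
                # self.complete, which feed the workload status helpers.
                if self.context_complete(ctxt):
                    return ctxt
        return {}
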
Example: + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}} + ''' + incomplete_context_data = {} + + for i in six.itervalues(self.templates): + for context in i.contexts: + for interface in interfaces: + related = False + if interface in context.interfaces: + related = context.get_related() + missing_data = context.missing_data + if missing_data: + incomplete_context_data[interface] = {'missing_data': missing_data} + if related: + if incomplete_context_data.get(interface): + incomplete_context_data[interface].update({'related': True}) + else: + incomplete_context_data[interface] = {'related': True} + else: + incomplete_context_data[interface] = {'related': False} + return incomplete_context_data diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index c98c5c9..4d395a7 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -25,6 +25,7 @@ import sys import re import six +import traceback import yaml from charmhelpers.contrib.network import ip @@ -34,12 +35,16 @@ from charmhelpers.core import ( ) from charmhelpers.core.hookenv import ( + action_fail, + action_set, config, log as juju_log, charm_dir, INFO, relation_ids, - relation_set + relation_set, + status_set, + hook_name ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -114,6 +119,7 @@ SWIFT_CODENAMES = OrderedDict([ ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), ('2.3.0', 'liberty'), + ('2.4.0', 'liberty'), ]) # >= Liberty version->codename mapping @@ -142,6 +148,9 @@ PACKAGE_CODENAMES = { 'glance-common': OrderedDict([ ('11.0.0', 'liberty'), ]), + 'openstack-dashboard': OrderedDict([ + ('8.0.0', 'liberty'), + ]), } DEFAULT_LOOPBACK_SIZE = '5G' @@ -745,3 +754,217 @@ def git_yaml_value(projects_yaml, key): return projects[key] return None + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None): + """ + Set workload status based on complete contexts. + status-set missing or incomplete contexts + and juju-log details of missing required data. + charm_func is a charm specific function to run checking + for charm specific requirements such as a VIP setting. + """ + incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) + state = 'active' + missing_relations = [] + incomplete_relations = [] + message = None + charm_state = None + charm_message = None + + for generic_interface in incomplete_rel_data.keys(): + related_interface = None + missing_data = {} + # Related or not? + for interface in incomplete_rel_data[generic_interface]: + if incomplete_rel_data[generic_interface][interface].get('related'): + related_interface = interface + missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') + # No relation ID for the generic_interface + if not related_interface: + juju_log("{} relation is missing and must be related for " + "functionality. 
".format(generic_interface), 'WARN') + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + else: + # Relation ID exists but no related unit + if not missing_data: + # Edge case relation ID exists but departing + if ('departed' in hook_name() or 'broken' in hook_name()) \ + and related_interface in hook_name(): + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + juju_log("{} relation's interface, {}, " + "relationship is departed or broken " + "and is required for functionality." + "".format(generic_interface, related_interface), "WARN") + # Normal case relation ID exists but no related unit + # (joining) + else: + juju_log("{} relations's interface, {}, is related but has " + "no units in the relation." + "".format(generic_interface, related_interface), "INFO") + # Related unit exists and data missing on the relation + else: + juju_log("{} relation's interface, {}, is related awaiting " + "the following data from the relationship: {}. " + "".format(generic_interface, related_interface, + ", ".join(missing_data)), "INFO") + if state != 'blocked': + state = 'waiting' + if generic_interface not in incomplete_relations \ + and generic_interface not in missing_relations: + incomplete_relations.append(generic_interface) + + if missing_relations: + message = "Missing relations: {}".format(", ".join(missing_relations)) + if incomplete_relations: + message += "; incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'blocked' + elif incomplete_relations: + message = "Incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'waiting' + + # Run charm specific checks + if charm_func: + charm_state, charm_message = charm_func(configs) + if charm_state != 'active' and charm_state != 'unknown': + state = workload_state_compare(state, charm_state) + if message: + message = "{} {}".format(message, charm_message) + else: + message = charm_message + + # Set to active if all requirements have been met + if state == 'active': + message = "Unit is ready" + juju_log(message, "INFO") + + status_set(state, message) + + +def workload_state_compare(current_workload_state, workload_state): + """ Return highest priority of two states""" + hierarchy = {'unknown': -1, + 'active': 0, + 'maintenance': 1, + 'waiting': 2, + 'blocked': 3, + } + + if hierarchy.get(workload_state) is None: + workload_state = 'unknown' + if hierarchy.get(current_workload_state) is None: + current_workload_state = 'unknown' + + # Set workload_state based on hierarchy of statuses + if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): + return current_workload_state + else: + return workload_state + + +def incomplete_relation_data(configs, required_interfaces): + """ + Check complete contexts against required_interfaces + Return dictionary of incomplete relation data. + + configs is an OSConfigRenderer object with configs registered + + required_interfaces is a dictionary of required general interfaces + with dictionary values of possible specific interfaces. + Example: + required_interfaces = {'database': ['shared-db', 'pgsql-db']} + + The interface is said to be satisfied if anyone of the interfaces in the + list has a complete context. + + Return dictionary of incomplete or missing required contexts with relation + status of interfaces and any missing data points. 
Example: + {'message': + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}}, + 'identity': + {'identity-service': {'related': False}}, + 'database': + {'pgsql-db': {'related': False}, + 'shared-db': {'related': True}}} + """ + complete_ctxts = configs.complete_contexts() + incomplete_relations = [] + for svc_type in required_interfaces.keys(): + # Avoid duplicates + found_ctxt = False + for interface in required_interfaces[svc_type]: + if interface in complete_ctxts: + found_ctxt = True + if not found_ctxt: + incomplete_relations.append(svc_type) + incomplete_context_data = {} + for i in incomplete_relations: + incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) + return incomplete_context_data + + +def do_action_openstack_upgrade(package, upgrade_callback, configs): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. + + If the charm was installed from source we cannot upgrade it. + For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed. + + @param package: package name for determining if upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if git_install_requested(): + action_set({'outcome': 'installed from source, skipped upgrade.'}) + else: + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) + + return ret diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 00dbffb..83f264d 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,6 +28,7 @@ import os import shutil import json import time +import uuid from subprocess import ( check_call, @@ -35,8 +36,10 @@ from subprocess import ( CalledProcessError, ) from charmhelpers.core.hookenv import ( + local_unit, relation_get, relation_ids, + relation_set, related_units, log, DEBUG, @@ -56,6 +59,8 @@ from charmhelpers.fetch import ( apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -288,17 +293,6 @@ def place_data_on_block_device(blk_device, data_src_dst): os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): 
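
# Sketch of how a charm might consume the helpers above; CONFIGS (an
# OSConfigRenderer), hooks, do_openstack_upgrade and check_optional_relations
# are charm-side names used only for illustration.
from charmhelpers.contrib.openstack.utils import (
    do_action_openstack_upgrade,
    os_workload_status,
)

REQUIRED_INTERFACES = {
    'database': ['shared-db', 'pgsql-db'],
    'messaging': ['amqp'],
    'identity': ['identity-service'],
}


@hooks.hook('config-changed')
@os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                    charm_func=check_optional_relations)
def config_changed():
    CONFIGS.write_all()


def openstack_upgrade():
    # Action entry point: upgrades only run when a newer openstack-origin is
    # available and action-managed-upgrade is set in the charm config.
    return do_action_openstack_upgrade('nova-common',
                                       do_openstack_upgrade,
                                       CONFIGS)
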
"""Copy files from src to dst.""" for item in os.listdir(src): @@ -411,17 +405,52 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1): + def __init__(self, api_version=1, request_id=None): self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) self.ops = [] def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count}) + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + @property def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op']: + if self.ops[req_no][key] != other.ops[req_no][key]: + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) class CephBrokerRsp(object): @@ -431,10 +460,15 @@ class CephBrokerRsp(object): The API is versioned and defaults to version 1. """ + def __init__(self, encoded_rsp): self.api_version = None self.rsp = json.loads(encoded_rsp) + @property + def request_id(self): + return self.rsp.get('request-id') + @property def exit_code(self): return self.rsp.get('exit-code') @@ -442,3 +476,182 @@ class CephBrokerRsp(object): @property def exit_msg(self): return self.rsp.get('stderr') + + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request): + """Return a dict of requests per relation id with their corresponding + completion state. + + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids('ceph'): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + +def is_request_sent(request): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. 
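
# A fuller sketch of the conversation outlined in the comment above, assuming
# a glance-style charm; hooks, CONFIGS and the pool name are charm-side
# details used only for illustration.
from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    is_request_complete,
    send_request_if_needed,
)


def get_ceph_request():
    rq = CephBrokerRq()
    rq.add_op_create_pool(name='glance', replica_count=3)
    return rq


@hooks.hook('ceph-relation-changed')
def ceph_changed():
    if is_request_complete(get_ceph_request()):
        # The pool exists; render the ceph-backed configuration and carry on.
        CONFIGS.write('/etc/glance/glance-api.conf')
    else:
        send_request_if_needed(get_ceph_request())
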
+ if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies', level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies', level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + +def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + +def send_request_if_needed(request): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids('ceph'): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 29e8fee..cb3c527 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -63,32 +63,48 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. 
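
# Sketch of the init-system-aware pause/resume helpers in this hunk, assuming
# an Upstart- or SysV-managed 'glance-api' service; both helpers raise
# ValueError when neither an upstart job nor an init.d script is found.
from charmhelpers.core.host import service_pause, service_resume

try:
    service_pause('glance-api')
    # ... perform maintenance while the service is stopped and disabled ...
    service_resume('glance-api')
except ValueError:
    # Neither /etc/init/glance-api.conf nor /etc/init.d/glance-api exists,
    # for example on a systemd-only host, which these helpers do not yet
    # cover.
    pass
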
Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_start(service_name) return started diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py index ba4340f..4aaca3f 100644 --- a/hooks/charmhelpers/core/hugepage.py +++ b/hooks/charmhelpers/core/hugepage.py @@ -25,11 +25,13 @@ from charmhelpers.core.host import ( fstab_mount, mkdir, ) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True): + pagesize='2MB', mount=True, set_shmmax=False): """Enable hugepages on system. Args: @@ -49,6 +51,11 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid, } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) lfstab = fstab.Fstab() diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py index a2a784a..7e3f969 100644 --- a/hooks/charmhelpers/core/strutils.py +++ b/hooks/charmhelpers/core/strutils.py @@ -18,6 +18,7 @@ # along with charm-helpers. If not, see . import six +import re def bool_from_string(value): @@ -40,3 +41,32 @@ def bool_from_string(value): msg = "Unable to interpret string value '%s' as boolean" % (value) raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
+ + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py index 367d6b4..d451698 100644 --- a/tests/charmhelpers/contrib/amulet/deployment.py +++ b/tests/charmhelpers/contrib/amulet/deployment.py @@ -51,7 +51,8 @@ class AmuletDeployment(object): if 'units' not in this_service: this_service['units'] = 1 - self.d.add(this_service['name'], units=this_service['units']) + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ class AmuletDeployment(object): if 'units' not in svc: svc['units'] = 1 - self.d.add(svc['name'], charm=branch_location, units=svc['units']) + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py index 7816c93..55c8634 100644 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -19,9 +19,11 @@ import json import logging import os import re +import socket import subprocess import sys import time +import uuid import amulet import distro_info @@ -114,7 +116,7 @@ class AmuletUtils(object): # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. - self.log.warn('/!\\ DEPRECATION WARNING: use ' + self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') @@ -269,33 +271,52 @@ class AmuletUtils(object): """Get last modification time of directory.""" return sentry_unit.directory_stat(directory)['mtime'] - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): - """Get process' start time. + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. - Determine start time of the process based on the last modification - time of the /proc/pid directory. If pgrep_full is True, the process - name is matched against the full command line. 
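
# Worked example of the conversion performed by bytes_from_string() above,
# which hugepage_support() uses for its kernel.shmmax calculation; the values
# are illustrative only.
from charmhelpers.core.strutils import bytes_from_string

assert bytes_from_string('2MB') == 2 * 1024 ** 2      # 2097152
assert bytes_from_string('1G') == 1 * 1024 ** 3       # 1073741824
assert bytes_from_string('2MB') * 256 == 536870912    # shmmax for 256 pages
# Unsuffixed strings (e.g. '512') and non-string values raise ValueError.
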
- """ - if pgrep_full: - cmd = 'pgrep -o -f {}'.format(service) - else: - cmd = 'pgrep -o {}'.format(service) - cmd = cmd + ' | grep -v pgrep || exit 0' - cmd_out = sentry_unit.run(cmd) - self.log.debug('CMDout: ' + str(cmd_out)) - if cmd_out[0]: - self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) - proc_dir = '/proc/{}'.format(cmd_out[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, - pgrep_full=False, sleep_time=20): + pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): @@ -304,15 +325,15 @@ class AmuletUtils(object): return False def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=False, sleep_time=20, - retry_count=2): + pgrep_full=None, sleep_time=20, + retry_count=2, retry_sleep_time=30): """Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table - pgrep_full (boolean): Use full command line search mode with pgrep + pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Seconds to sleep before looking for process retry_count (int): If service is not found, how many times to retry @@ -321,30 +342,44 @@ class AmuletUtils(object): False if service is older than mtime or if service was not found. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - while retry_count > 0 and not proc_start_time: - self.log.debug('No pid file found for service %s, will retry %i ' - 'more times' % (service, retry_count)) - time.sleep(30) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - retry_count = retry_count - 1 + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError: + # NOTE(beisner) - race avoidance, proc may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed'.format(tries, service, unit_name)) + time.sleep(retry_sleep_time) + tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: - self.log.debug('proc start time is newer than provided mtime' - '(%s >= %s)' % (proc_start_time, mtime)) + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) return True else: - self.log.warn('proc start time (%s) is older than provided mtime ' - '(%s), service did not restart' % (proc_start_time, - mtime)) + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) return False def config_updated_since(self, sentry_unit, filename, mtime, @@ -374,8 +409,9 @@ class AmuletUtils(object): return False def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=False, - sleep_time=20, retry_count=2): + filename, pgrep_full=None, + sleep_time=20, retry_count=2, + retry_sleep_time=30): """Check service and file were updated after mtime Args: @@ -383,9 +419,10 @@ class AmuletUtils(object): mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry + retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) @@ -402,15 +439,25 @@ class AmuletUtils(object): mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) - time.sleep(sleep_time) - service_restart = self.service_restarted_since(sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=0, - retry_count=retry_count) - config_update = self.config_updated_since(sentry_unit, filename, mtime, - sleep_time=0) + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=0) + return service_restart and config_update def get_sentry_time(self, sentry_unit): @@ -428,7 +475,6 @@ class AmuletUtils(object): """Return a list of all Ubuntu releases in order of release.""" _d = distro_info.UbuntuDistroInfo() _release_list = _d.all - self.log.debug('Ubuntu release list: {}'.format(_release_list)) return _release_list def file_to_url(self, file_rel_path): @@ -568,6 +614,142 @@ class AmuletUtils(object): return None + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' + amulet.raise_status(amulet.FAIL, msg) + + def port_knock_tcp(self, host="localhost", port=22, timeout=15): + """Open a TCP socket to check for a listening sevice on a host. 
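
# Sketch: checking a section-less config such as the rabbitmq confs mentioned
# above with the race-safe reader; u and sentry_unit are assumed, and the
# file path and RABBITMQ_NODENAME value are illustrative only.
host_names = u.get_unit_hostnames([sentry_unit])
expected = {'RABBITMQ_NODENAME':
            'rabbit@{}'.format(host_names[sentry_unit.info['unit_name']])}
contents = u.file_contents_safe(sentry_unit, '/etc/rabbitmq/rabbitmq-env.conf',
                                max_wait=16, fatal=True)
u.validate_sectionless_conf(contents, expected)
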
+ + :param host: host name or IP address, default to localhost + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :returns: True if successful, False if connect failed + """ + + # Resolve host name if possible + try: + connect_host = socket.gethostbyname(host) + host_human = "{} ({})".format(connect_host, host) + except socket.error as e: + self.log.warn('Unable to resolve address: ' + '{} ({}) Trying anyway!'.format(host, e)) + connect_host = host + host_human = connect_host + + # Attempt socket connection + try: + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + knock.settimeout(timeout) + knock.connect((connect_host, port)) + knock.close() + self.log.debug('Socket connect OK for host ' + '{} on port {}.'.format(host_human, port)) + return True + except socket.error as e: + self.log.debug('Socket connect FAIL for' + ' {} port {} ({})'.format(host_human, port, e)) + return False + + def port_knock_units(self, sentry_units, port=22, + timeout=15, expect_success=True): + """Open a TCP socket to check for a listening sevice on each + listed juju unit. + + :param sentry_units: list of sentry unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for unit in sentry_units: + host = unit.info['public-address'] + connected = self.port_knock_tcp(host, port, timeout) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + def get_uuid_epoch_stamp(self): + """Returns a stamp string based on uuid4 and epoch time. Useful in + generating test messages which need to be unique-ish.""" + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + +# amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output): """Run the named action on a given unit sentry. @@ -594,3 +776,12 @@ class AmuletUtils(object): output = _check_output(command, universal_newlines=True) data = json.loads(output) return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 07ee2ef..722bc64 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,20 +44,31 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. 
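
# Sketch: confirming ssh is reachable on every unit and that the lead unit
# reports an active workload status once the deployment has settled; u and
# sentry_units are assumed as in the earlier sketches.
ret = u.port_knock_units(sentry_units, port=22)
if ret:
    amulet.raise_status(amulet.FAIL, msg=ret)

state, message = u.status_get(sentry_units[0])
if state != 'active':
    amulet.raise_status(amulet.FAIL,
                        'Unexpected status: {} ({})'.format(state, message))
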
+ force_series_current = ['nrpe'] + if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,6 +77,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): @@ -77,21 +89,23 @@ class OpenStackAmuletDeployment(AmuletDeployment): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 03f7927..b139741 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not port and not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+ + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) From 6b6b1912f8ff138a486af6e06f12ea2c4ef2324b Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 22 Sep 2015 14:40:04 +0100 Subject: [PATCH 07/16] Ensure python2 is installed before hook execution --- hooks/heat_relations.py | 2 +- hooks/install | 21 ++++++++++++++++++++- hooks/install.real | 1 + 3 files changed, 22 insertions(+), 2 deletions(-) mode change 120000 => 100755 hooks/install create mode 120000 hooks/install.real diff --git a/hooks/heat_relations.py b/hooks/heat_relations.py index d2ad10d..75cfad3 100755 --- a/hooks/heat_relations.py +++ b/hooks/heat_relations.py @@ -66,7 +66,7 @@ hooks = Hooks() CONFIGS = register_configs() -@hooks.hook('install') +@hooks.hook('install.real') def install(): execd_preinstall() configure_installation_source(config('openstack-origin')) diff --git a/hooks/install b/hooks/install deleted file mode 120000 index ab98840..0000000 --- a/hooks/install +++ /dev/null @@ -1 +0,0 @@ -heat_relations.py \ No newline at end of file diff --git a/hooks/install b/hooks/install new file mode 100755 index 0000000..83a9d3c --- /dev/null +++ b/hooks/install @@ -0,0 +1,20 @@ +#!/bin/bash +# Wrapper to deal with newer Ubuntu versions that don't have py2 installed +# by default. + +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') + +check_and_install() { + pkg="${1}-${2}" + if ! 
dpkg -s ${pkg} 2>&1 > /dev/null; then
+        apt-get -y install ${pkg}
+    fi
+}
+
+PYTHON="python"
+
+for dep in ${DEPS[@]}; do
+    check_and_install ${PYTHON} ${dep}
+done
+
+exec ./hooks/install.real
diff --git a/hooks/install.real b/hooks/install.real
new file mode 120000
index 0000000..ab98840
--- /dev/null
+++ b/hooks/install.real
@@ -0,0 +1 @@
+heat_relations.py
\ No newline at end of file

From a1f2428e97318a87c5c7e41ecaa396419f2d47bf Mon Sep 17 00:00:00 2001
From: Corey Bryant
Date: Tue, 22 Sep 2015 19:41:24 +0000
Subject: [PATCH 08/16] Import CONFIGS instead of calling register_configs() again.

---
 actions/openstack_upgrade.py                 | 9 ++++-----
 unit_tests/test_actions_openstack_upgrade.py | 7 ++++---
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/actions/openstack_upgrade.py b/actions/openstack_upgrade.py
index 6c2fc9b..48b672e 100755
--- a/actions/openstack_upgrade.py
+++ b/actions/openstack_upgrade.py
@@ -7,17 +7,16 @@ from charmhelpers.contrib.openstack.utils import (
     do_action_openstack_upgrade,
 )
 
-from heat_relations import config_changed
+from heat_relations import (
+    config_changed,
+    CONFIGS,
+)
 
 from heat_utils import (
     do_openstack_upgrade,
-    register_configs
 )
 
 
-CONFIGS = register_configs()
-
-
 def openstack_upgrade():
     """Perform action-managed OpenStack upgrade.
 
diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py
index c516bc6..39e56b6 100644
--- a/unit_tests/test_actions_openstack_upgrade.py
+++ b/unit_tests/test_actions_openstack_upgrade.py
@@ -13,7 +13,6 @@ from test_utils import (
 TO_PATCH = [
     'config_changed',
     'do_openstack_upgrade',
-    'register_configs',
 ]
 
 
@@ -23,12 +22,13 @@ class TestHeatUpgradeActions(CharmTestCase):
         super(TestHeatUpgradeActions, self).setUp(openstack_upgrade,
                                                   TO_PATCH)
 
+    @patch('charmhelpers.contrib.openstack.utils.juju_log')
     @patch('charmhelpers.contrib.openstack.utils.config')
     @patch('charmhelpers.contrib.openstack.utils.action_set')
     @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
     @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
     def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
-                                    action_set, config):
+                                    action_set, config, log):
         git_requested.return_value = False
         upgrade_avail.return_value = True
         config.return_value = True
@@ -38,12 +38,13 @@ class TestHeatUpgradeActions(CharmTestCase):
         self.assertTrue(self.do_openstack_upgrade.called)
         self.assertTrue(self.config_changed.called)
 
+    @patch('charmhelpers.contrib.openstack.utils.juju_log')
     @patch('charmhelpers.contrib.openstack.utils.config')
     @patch('charmhelpers.contrib.openstack.utils.action_set')
     @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
     @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
     def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
-                                     action_set, config):
+                                     action_set, config, log):
         git_requested.return_value = False
         upgrade_avail.return_value = True
         config.return_value = False

From a77f97c13dc3b2e9e9dfae0209e266217d0ca907 Mon Sep 17 00:00:00 2001
From: Corey Bryant
Date: Tue, 22 Sep 2015 19:53:18 +0000
Subject: [PATCH 09/16] Migrate heat database on upgrade and work around juno python-swiftclient upgrade issue.
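Both the shared-db hook and the OpenStack upgrade path now funnel through a
single migrate_database() helper instead of shelling out to heat-manage inline,
and the ordering is deliberate: heat.conf is written first, so that
'heat-manage db_sync' runs against the database connection the charm has just
rendered. The following is an illustrative sketch only, not code from this
patch; the write_config/write_all_configs callables are placeholders for the
CONFIGS plumbing visible in the hunks below.

    # Editorial sketch of the call flow after this patch; charm plumbing stubbed out.
    from subprocess import check_call


    def migrate_database():
        """Initialise a new heat database or migrate an existing one."""
        check_call(['heat-manage', 'db_sync'])


    def db_changed(write_config):
        # shared-db hook: render heat.conf (including the DB connection) first,
        # then apply the schema, mirroring CONFIGS.write(HEAT_CONF).
        write_config()
        migrate_database()


    def do_openstack_upgrade(write_all_configs):
        # upgrade path: packages and templates are moved to the new release
        # before db_sync runs, so the new release's schema is the one applied.
        write_all_configs()
        migrate_database()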
---
 hooks/heat_relations.py           |  5 ++---
 hooks/heat_utils.py               | 10 ++++++++++
 unit_tests/test_heat_relations.py |  5 +++--
 unit_tests/test_heat_utils.py     |  8 +++++++-
 4 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/hooks/heat_relations.py b/hooks/heat_relations.py
index 0fdbdf9..3dc89ca 100755
--- a/hooks/heat_relations.py
+++ b/hooks/heat_relations.py
@@ -12,8 +12,6 @@ import shutil
 import subprocess
 import sys
 
-from subprocess import check_call
-
 from charmhelpers.core.hookenv import (
     Hooks,
     UnregisteredHookError,
@@ -52,6 +50,7 @@ from heat_utils import (
     do_openstack_upgrade,
     restart_map,
     determine_packages,
+    migrate_database,
     register_configs,
     HEAT_CONF,
 )
@@ -123,7 +122,7 @@ def db_changed():
         log('shared-db relation incomplete. Peer not ready?')
         return
     CONFIGS.write(HEAT_CONF)
-    check_call(['heat-manage', 'db_sync'])
+    migrate_database()
 
 
 def configure_https():
diff --git a/hooks/heat_utils.py b/hooks/heat_utils.py
index 804c996..cd0bab2 100644
--- a/hooks/heat_utils.py
+++ b/hooks/heat_utils.py
@@ -7,6 +7,7 @@
 import os
 
 from collections import OrderedDict
+from subprocess import check_call
 
 from charmhelpers.contrib.openstack import context, templating
 
@@ -37,6 +38,7 @@ TEMPLATES = 'templates/'
 
 BASE_PACKAGES = [
     'python-keystoneclient',
+    'python-swiftclient',  # work-around missing epoch in juno heat package
     'python-six',
     'uuid',
     'apache2',
@@ -147,6 +149,8 @@ def do_openstack_upgrade(configs):
     configs.set_release(openstack_release=new_os_rel)
     configs.write_all()
 
+    migrate_database()
+
 
 def restart_map():
     """Restarts on config change.
@@ -165,3 +169,9 @@ def restart_map():
         if svcs:
             _map.append((f, svcs))
     return OrderedDict(_map)
+
+
+def migrate_database():
+    """Runs heat-manage to initialize a new database or migrate existing"""
+    log('Migrating the heat database.')
+    check_call(['heat-manage', 'db_sync'])
diff --git a/unit_tests/test_heat_relations.py b/unit_tests/test_heat_relations.py
index ebdf7e5..52d29fc 100644
--- a/unit_tests/test_heat_relations.py
+++ b/unit_tests/test_heat_relations.py
@@ -37,9 +37,9 @@ TO_PATCH = [
     'register_configs',
     'do_openstack_upgrade',
     # other
-    'check_call',
     'execd_preinstall',
-    'log'
+    'log',
+    'migrate_database',
 ]
 
 
@@ -99,6 +99,7 @@ class HeatRelationTests(CharmTestCase):
         configs.complete_contexts.return_value = ['shared-db']
         configs.write = MagicMock()
         relations.db_changed()
+        self.assertTrue(self.migrate_database.called)
 
     @patch.object(relations, 'CONFIGS')
     def test_db_changed(self, configs):
diff --git a/unit_tests/test_heat_utils.py b/unit_tests/test_heat_utils.py
index 67ccfd8..a825c74 100644
--- a/unit_tests/test_heat_utils.py
+++ b/unit_tests/test_heat_utils.py
@@ -18,7 +18,8 @@ TO_PATCH = [
     'get_os_codename_install_source',
     'configure_installation_source',
     'apt_install',
-    'apt_update'
+    'apt_update',
+    'check_call',
 ]
 
 
@@ -64,3 +65,8 @@ class HeatUtilsTests(CharmTestCase):
         self.assertEquals(cfn, 8000)
         cfn = utils.api_port('heat-api')
         self.assertEquals(cfn, 8004)
+
+    def test_migrate_database(self):
+        utils.migrate_database()
+        self.assertTrue(self.log.called)
+        self.check_call.assert_called_with(['heat-manage', 'db_sync'])

From 0921e84ca702640947e66fd970ae140e0005386f Mon Sep 17 00:00:00 2001
From: Corey Bryant
Date: Tue, 22 Sep 2015 20:21:39 +0000
Subject: [PATCH 10/16] apt upgrade packages during openstack upgrade

---
 hooks/heat_utils.py           | 2 ++
 unit_tests/test_heat_utils.py | 6 +++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/hooks/heat_utils.py b/hooks/heat_utils.py
index cd0bab2..d2fc817 100644
--- 
a/hooks/heat_utils.py +++ b/hooks/heat_utils.py @@ -19,6 +19,7 @@ from charmhelpers.contrib.openstack.utils import ( from charmhelpers.fetch import ( apt_install, apt_update, + apt_upgrade, ) from charmhelpers.core.hookenv import ( @@ -142,6 +143,7 @@ def do_openstack_upgrade(configs): '--option', 'Dpkg::Options::=--force-confdef', ] apt_update() + apt_upgrade(options=dpkg_opts, fatal=True, dist=True) packages = BASE_PACKAGES + BASE_SERVICES apt_install(packages=packages, options=dpkg_opts, fatal=True) diff --git a/unit_tests/test_heat_utils.py b/unit_tests/test_heat_utils.py index a825c74..3a45454 100644 --- a/unit_tests/test_heat_utils.py +++ b/unit_tests/test_heat_utils.py @@ -19,6 +19,7 @@ TO_PATCH = [ 'configure_installation_source', 'apt_install', 'apt_update', + 'apt_upgrade', 'check_call', ] @@ -57,8 +58,11 @@ class HeatUtilsTests(CharmTestCase): self.get_os_codename_install_source.return_value = 'havana' configs = MagicMock() utils.do_openstack_upgrade(configs) - self.assertTrue(configs.write_all.called) + self.assertTrue(self.apt_update.called) + self.assertTrue(self.apt_upgrade.called) + self.assertTrue(self.apt_install.called) configs.set_release.assert_called_with(openstack_release='havana') + self.assertTrue(configs.write_all.called) def test_api_ports(self): cfn = utils.api_port('heat-api-cfn') From 537e5d0d1db263b8ab56585f2050c90b4da64db3 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 22 Sep 2015 21:53:17 +0000 Subject: [PATCH 11/16] Restart services after migrating database. --- hooks/heat_utils.py | 15 +++++++++++++++ unit_tests/test_heat_utils.py | 8 +++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/hooks/heat_utils.py b/hooks/heat_utils.py index d2fc817..d48dcb2 100644 --- a/hooks/heat_utils.py +++ b/hooks/heat_utils.py @@ -27,6 +27,11 @@ from charmhelpers.core.hookenv import ( config ) +from charmhelpers.core.host import ( + service_start, + service_stop, +) + from heat_context import ( API_PORTS, HeatIdentityServiceContext, @@ -173,7 +178,17 @@ def restart_map(): return OrderedDict(_map) +def services(): + """Returns a list of services associate with this charm""" + _services = [] + for v in restart_map().values(): + _services = _services + v + return list(set(_services)) + + def migrate_database(): """Runs heat-manage to initialize a new database or migrate existing""" log('Migrating the heat database.') + [service_stop(s) for s in services()] check_call(['heat-manage', 'db_sync']) + [service_start(s) for s in services()] diff --git a/unit_tests/test_heat_utils.py b/unit_tests/test_heat_utils.py index 3a45454..2821183 100644 --- a/unit_tests/test_heat_utils.py +++ b/unit_tests/test_heat_utils.py @@ -1,5 +1,5 @@ from collections import OrderedDict -from mock import patch, MagicMock +from mock import patch, MagicMock, call from test_utils import CharmTestCase from charmhelpers.core import hookenv @@ -21,6 +21,8 @@ TO_PATCH = [ 'apt_update', 'apt_upgrade', 'check_call', + 'service_start', + 'service_stop', ] @@ -74,3 +76,7 @@ class HeatUtilsTests(CharmTestCase): utils.migrate_database() self.assertTrue(self.log.called) self.check_call.assert_called_with(['heat-manage', 'db_sync']) + expected = [call('heat-api'), call('heat-api-cfn'), + call('heat-engine'), call('apache2')] + self.service_stop.assert_has_calls(expected, any_order=True) + self.service_start.assert_has_calls(expected, any_order=True) From 1f7a734231205ac849dde5b1afb0e0a22c2cfd92 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 22 Sep 2015 21:55:13 +0000 Subject: [PATCH 
12/16] fix lint errors --- unit_tests/test_actions_openstack_upgrade.py | 2 +- unit_tests/test_heat_relations.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py index 39e56b6..457b01d 100644 --- a/unit_tests/test_actions_openstack_upgrade.py +++ b/unit_tests/test_actions_openstack_upgrade.py @@ -20,7 +20,7 @@ class TestHeatUpgradeActions(CharmTestCase): def setUp(self): super(TestHeatUpgradeActions, self).setUp(openstack_upgrade, - TO_PATCH) + TO_PATCH) @patch('charmhelpers.contrib.openstack.utils.juju_log') @patch('charmhelpers.contrib.openstack.utils.config') diff --git a/unit_tests/test_heat_relations.py b/unit_tests/test_heat_relations.py index 52d29fc..dc63928 100644 --- a/unit_tests/test_heat_relations.py +++ b/unit_tests/test_heat_relations.py @@ -77,8 +77,9 @@ class HeatRelationTests(CharmTestCase): self.assertTrue(self.do_openstack_upgrade.called) @patch.object(relations, 'configure_https') - def test_config_changed_with_openstack_upgrade_action(self, - mock_configure_https): + def test_config_changed_with_openstack_upgrade_action( + self, + mock_configure_https): self.openstack_upgrade_available.return_value = True self.test_config.set('action-managed-upgrade', True) From f19c94f6f1a8c33e0b9ceb9ffa528633e4711fc6 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:45 +0000 Subject: [PATCH 13/16] update amulet test dependency setup file --- tests/00-setup | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/00-setup b/tests/00-setup index ee63c3d..94e5611 100755 --- a/tests/00-setup +++ b/tests/00-setup @@ -4,8 +4,14 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes -sudo apt-get install --yes python-amulet \ +sudo apt-get install --yes amulet \ + distro-info-data \ + python-cinderclient \ python-distro-info \ python-glanceclient \ + python-heatclient \ python-keystoneclient \ - python-novaclient + python-neutronclient \ + python-novaclient \ + python-pika \ + python-swiftclient From abee57f6894ff07d8c4f58f75be87b8e2d67843c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:45 +0000 Subject: [PATCH 14/16] update bundletester test plan yaml file --- tests/tests.yaml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index 9e4b07f..64e3e2d 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -3,13 +3,18 @@ reset: true virtualenv: true makefile: - lint - - unit_test + - test sources: - ppa:juju/stable packages: - amulet - - python-amulet + - distro-info-data + - python-cinderclient - python-distro-info - python-glanceclient + - python-heatclient - python-keystoneclient + - python-neutronclient - python-novaclient + - python-pika + - python-swiftclient From b6423b42c375109a7694c173bf7e2aba0f4ed1e7 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:55:32 +0000 Subject: [PATCH 15/16] update makefile unit test target --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index c19f016..713e37c 100644 --- a/Makefile +++ b/Makefile @@ -2,13 +2,13 @@ PYTHON := /usr/bin/env python lint: - @echo Lint inspections and charm proof... - @flake8 --exclude hooks/charmhelpers hooks tests unit_tests + @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ + actions hooks unit_tests tests @charm proof test: @# Bundletester expects unit tests here. 
- @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests functional_test: @echo Starting all functional, lint and unit tests... @@ -23,6 +23,6 @@ sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint unit_test +publish: lint test bzr push lp:charms/heat bzr push lp:charms/trusty/heat From 60e183edd805eb6303662f8f4f911dfaeee711f2 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 8 Oct 2015 13:13:09 +0000 Subject: [PATCH 16/16] Initial workload status support. --- hooks/heat_relations.py | 11 +++++++++-- hooks/heat_utils.py | 8 ++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/hooks/heat_relations.py b/hooks/heat_relations.py index a8914f3..e6d0e0b 100755 --- a/hooks/heat_relations.py +++ b/hooks/heat_relations.py @@ -21,7 +21,8 @@ from charmhelpers.core.hookenv import ( relation_ids, relation_set, open_port, - unit_get + unit_get, + status_set, ) from charmhelpers.core.host import ( @@ -36,7 +37,8 @@ from charmhelpers.fetch import ( from charmhelpers.contrib.openstack.utils import ( configure_installation_source, - openstack_upgrade_available + openstack_upgrade_available, + set_os_workload_status, ) from charmhelpers.contrib.openstack.ip import ( @@ -53,6 +55,7 @@ from heat_utils import ( migrate_database, register_configs, HEAT_CONF, + REQUIRED_INTERFACES, ) from heat_context import ( @@ -67,8 +70,10 @@ CONFIGS = register_configs() @hooks.hook('install.real') def install(): + status_set('maintenance', 'Executing pre-install') execd_preinstall() configure_installation_source(config('openstack-origin')) + status_set('maintenance', 'Installing apt packages') apt_update() apt_install(determine_packages(), fatal=True) @@ -88,6 +93,7 @@ def install(): def config_changed(): if not config('action-managed-upgrade'): if openstack_upgrade_available('heat-common'): + status_set('maintenance', 'Running openstack upgrade') do_openstack_upgrade(CONFIGS) CONFIGS.write_all() configure_https() @@ -200,6 +206,7 @@ def main(): hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) + set_os_workload_status(CONFIGS, REQUIRED_INTERFACES) if __name__ == '__main__': diff --git a/hooks/heat_utils.py b/hooks/heat_utils.py index d48dcb2..474ca5a 100644 --- a/hooks/heat_utils.py +++ b/hooks/heat_utils.py @@ -42,6 +42,14 @@ from heat_context import ( TEMPLATES = 'templates/' +# The interface is said to be satisfied if anyone of the interfaces in +# the list has a complete context. +REQUIRED_INTERFACES = { + 'database': ['shared-db'], + 'messaging': ['amqp'], + 'identity': ['identity-service'], +} + BASE_PACKAGES = [ 'python-keystoneclient', 'python-swiftclient', # work-around missing epoch in juno heat package