From 6dd05c38d7a6ef98745cc06e80f83d08064a50e7 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Wed, 11 Nov 2015 14:57:38 +0000
Subject: [PATCH 1/2] Add amulet tests, unit tests and charmhelper sync

---
 Makefile | 16 +
 charm-helpers-sync.yaml | 2 +
 charm-helpers-tests.yaml | 5 +
 config.yaml | 6 +-
 .../charmhelpers/contrib/network/__init__.py | 15 +
 hooks/charmhelpers/contrib/network/ip.py | 456 ++++++++
 .../contrib/openstack/amulet/deployment.py | 135 ++-
 .../contrib/openstack/amulet/utils.py | 381 +++++++
 .../charmhelpers/contrib/openstack/context.py | 216 +++-
 .../charmhelpers/contrib/openstack/neutron.py | 71 +-
 .../contrib/openstack/templates/ceph.conf | 6 +
 .../contrib/openstack/templating.py | 36 +-
 hooks/charmhelpers/contrib/openstack/utils.py | 334 +++++-
 hooks/charmhelpers/contrib/python/__init__.py | 15 +
 hooks/charmhelpers/contrib/python/packages.py | 121 +++
 .../contrib/storage/linux/ceph.py | 239 ++++-
 .../contrib/storage/linux/utils.py | 7 +-
 hooks/charmhelpers/core/files.py | 45 +
 hooks/charmhelpers/core/hookenv.py | 169 ++-
 hooks/charmhelpers/core/host.py | 175 +++-
 hooks/charmhelpers/core/hugepage.py | 71 ++
 hooks/charmhelpers/core/kernel.py | 68 ++
 hooks/charmhelpers/core/services/helpers.py | 25 +-
 hooks/charmhelpers/core/strutils.py | 30 +
 hooks/charmhelpers/core/templating.py | 19 +-
 hooks/charmhelpers/core/unitdata.py | 78 +-
 hooks/charmhelpers/fetch/__init__.py | 10 +-
 metadata.yaml | 2 +-
 tests/015-basic-trusty-icehouse | 9 +
 tests/016-basic-trusty-juno | 11 +
 tests/017-basic-trusty-kilo | 11 +
 tests/basic_deployment.py | 465 +++++++
 tests/charmhelpers/__init__.py | 38 +
 tests/charmhelpers/contrib/__init__.py | 15 +
 tests/charmhelpers/contrib/amulet/__init__.py | 15 +
 .../charmhelpers/contrib/amulet/deployment.py | 95 ++
 tests/charmhelpers/contrib/amulet/utils.py | 818 +++++++++++++++
 .../contrib/openstack/__init__.py | 15 +
 .../contrib/openstack/amulet/__init__.py | 15 +
 .../contrib/openstack/amulet/deployment.py | 296 ++++++
 .../contrib/openstack/amulet/utils.py | 985 ++++++++++++++++++
 tests/setup/00-setup | 17 +
 unit_tests/__init__.py | 3 +
 unit_tests/odl_outputs.py | 271 +++++
 unit_tests/test_odl_controller_hooks.py | 95 ++
 unit_tests/test_odl_controller_utils.py | 93 ++
 unit_tests/test_utils.py | 124 +++
 47 files changed, 5955 insertions(+), 189 deletions(-)
 create mode 100644 charm-helpers-tests.yaml
 create mode 100644 hooks/charmhelpers/contrib/network/__init__.py
 create mode 100644 hooks/charmhelpers/contrib/network/ip.py
 create mode 100644 hooks/charmhelpers/contrib/python/__init__.py
 create mode 100644 hooks/charmhelpers/contrib/python/packages.py
 create mode 100644 hooks/charmhelpers/core/files.py
 create mode 100644 hooks/charmhelpers/core/hugepage.py
 create mode 100644 hooks/charmhelpers/core/kernel.py
 create mode 100755 tests/015-basic-trusty-icehouse
 create mode 100755 tests/016-basic-trusty-juno
 create mode 100755 tests/017-basic-trusty-kilo
 create mode 100644 tests/basic_deployment.py
 create mode 100644 tests/charmhelpers/__init__.py
 create mode 100644 tests/charmhelpers/contrib/__init__.py
 create mode 100644 tests/charmhelpers/contrib/amulet/__init__.py
 create mode 100644 tests/charmhelpers/contrib/amulet/deployment.py
 create mode 100644 tests/charmhelpers/contrib/amulet/utils.py
 create mode 100644 tests/charmhelpers/contrib/openstack/__init__.py
 create mode 100644 tests/charmhelpers/contrib/openstack/amulet/__init__.py
 create mode 100644 tests/charmhelpers/contrib/openstack/amulet/deployment.py
 create mode 100644 tests/charmhelpers/contrib/openstack/amulet/utils.py
 create mode 100755 tests/setup/00-setup
 create mode 100644 unit_tests/__init__.py
 create mode 100644 unit_tests/odl_outputs.py
 create mode 100644 unit_tests/test_odl_controller_hooks.py
 create mode 100644 unit_tests/test_odl_controller_utils.py
 create mode 100644 unit_tests/test_utils.py

diff --git a/Makefile b/Makefile
index 378713f..5a3cd4b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,21 @@
 #!/usr/bin/make
 PYTHON := /usr/bin/env python
 
+lint:
+	@flake8 --exclude hooks/charmhelpers,tests/charmhelpers \
+        hooks unit_tests tests
+	@charm proof
+
+test:
+	@# Bundletester expects unit tests here.
+	@echo Starting unit tests...
+	@$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests
+
+functional_test:
+	@echo Starting amulet tests...
+	@tests/setup/00-setup
+	@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
+
 bin/charm_helpers_sync.py:
 	@mkdir -p bin
 	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
@@ -8,3 +23,4 @@ bin/charm_helpers_sync.py:
 
 sync: bin/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml
index dd8445a..ed33c4d 100644
--- a/charm-helpers-sync.yaml
+++ b/charm-helpers-sync.yaml
@@ -6,3 +6,5 @@ include:
     - payload
     - contrib.openstack|inc=*
     - contrib.storage
+    - contrib.network.ip
+    - contrib.python.packages
diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml
new file mode 100644
index 0000000..48b12f6
--- /dev/null
+++ b/charm-helpers-tests.yaml
@@ -0,0 +1,5 @@
+branch: lp:charm-helpers
+destination: tests/charmhelpers
+include:
+    - contrib.amulet
+    - contrib.openstack.amulet
diff --git a/config.yaml b/config.yaml
index c257b6b..5e1ba03 100644
--- a/config.yaml
+++ b/config.yaml
@@ -19,17 +19,19 @@ options:
       package.
   install-sources:
     type: string
+    default: ''
     description: |
       Package sources to install. Can be used to specify where to install
       the opendaylight-karaf package from.
   install-keys:
     type: string
+    default: ''
     description: Apt keys for package install sources
   http-proxy:
     type: string
-    default:
+    default: ''
     description: Proxy to use for http connections for OpenDayLight
   https-proxy:
     type: string
-    default:
+    default: ''
     description: Proxy to use for https connections for OpenDayLight
diff --git a/hooks/charmhelpers/contrib/network/__init__.py b/hooks/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/hooks/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py new file mode 100644 index 0000000..7f3b66b --- /dev/null +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -0,0 +1,456 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import glob +import re +import subprocess +import six +import socket + +from functools import partial + +from charmhelpers.core.hookenv import unit_get +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import ( + log, + WARNING, +) + +try: + import netifaces +except ImportError: + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) + import netifaces + +try: + import netaddr +except ImportError: + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + +def get_address_in_network(network, fallback=None, fatal=False): + """Get an IPv4 or IPv6 address within the network from the host. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). + """ + if network is None: + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + else: + return None + + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if network.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + + return None + + +def is_ipv6(address): + """Determine whether provided address is IPv6 or not.""" + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. 
+ :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. + """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] + + return None + + +get_iface_for_address = partial(_get_for_address, key='iface') + + +get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def format_ipv6_addr(address): + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. 
+ """ + if is_ipv6(address): + return "[%s]" % address + + return None + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """Return the assigned IP address for a given interface, if any.""" + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + + if not exc_list: + exc_list = [] + + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception("Unknown inet type '%s'" % str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("Interface '%s' not found " % (iface)) + else: + return [] + + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) + + return sorted(addresses) + + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. 
+ """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd).decode('UTF-8') + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' does not have a scope global " + "non-temporary ipv6 address." % iface) + + return [] + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] + + +def is_bridge_member(nic): + """Check if a given nic is a member of a bridge.""" + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + + return False + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, six.string_types): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname, fallback=None): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + ip_addr = ns_query(hostname) + if not ip_addr: + try: + ip_addr = socket.gethostbyname(hostname) + except: + log("Failed to resolve hostname '%s'" % (hostname), + level=WARNING) + return fallback + return ip_addr + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install("python-dnspython") + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + + if not result: + try: + result = socket.gethostbyaddr(address)[0] + except: + return None + else: + result = address + + if fqdn: + # strip trailing . 
+ if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb..e4ad62b 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,26 +47,55 @@ class OpenStackAmuletDeployment(AmuletDeployment): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the lp:~openstack-charmers namespace + base_charms = ['mysql', 'mongodb', 'nrpe'] + + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. 
+ force_series_current = ['nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,10 +104,13 @@ class OpenStackAmuletDeployment(AmuletDeployment): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, @@ -77,29 +118,101 @@ class OpenStackAmuletDeployment(AmuletDeployment): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) def _configure_services(self, configs): """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') for service, config in six.iteritems(configs): self.d.configure(service, config) + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. 
+ :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + def _get_openstack_release(self): """Get openstack release. diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 03f7927..388b60e 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -18,6 +18,7 @@ import amulet import json import logging import os +import re import six import time import urllib @@ -27,6 +28,7 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +604,382 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+ + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 8f3f1b1..4821633 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import glob import json import os import re @@ -50,6 +51,8 @@ from charmhelpers.core.sysctl import create as sysctl_create from charmhelpers.core.strutils import bool_from_string from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, list_nics, get_nic_hwaddr, mkdir, @@ -192,10 +195,50 @@ def config_flags_parser(config_flags): class OSContextGenerator(object): """Base class for all context generators.""" interfaces = [] + related = False + complete = False + missing_data = [] def __call__(self): raise NotImplementedError + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. 
+ """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. + """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] @@ -211,6 +254,7 @@ class SharedDBContext(OSContextGenerator): self.database = database self.user = user self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] def __call__(self): self.database = self.database or config('database') @@ -244,6 +288,7 @@ class SharedDBContext(OSContextGenerator): password_setting = self.relation_prefix + '_password' for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) host = rdata.get('db_host') @@ -255,7 +300,7 @@ class SharedDBContext(OSContextGenerator): 'database_password': rdata.get(password_setting), 'database_type': 'mysql' } - if context_complete(ctxt): + if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt return {} @@ -276,6 +321,7 @@ class PostgresqlDBContext(OSContextGenerator): ctxt = {} for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rel_host = relation_get('host', rid=rid, unit=unit) rel_user = relation_get('user', rid=rid, unit=unit) @@ -285,7 +331,7 @@ class PostgresqlDBContext(OSContextGenerator): 'database_user': rel_user, 'database_password': rel_passwd, 'database_type': 'postgresql'} - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} @@ -346,6 +392,7 @@ class IdentityServiceContext(OSContextGenerator): ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) serv_host = rdata.get('service_host') @@ -364,7 +411,7 @@ class IdentityServiceContext(OSContextGenerator): 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol}) - if context_complete(ctxt): + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading @@ -403,6 +450,7 @@ class AMQPContext(OSContextGenerator): ctxt = {} for rid in relation_ids(self.rel_name): ha_vip_only = False + self.related = True for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True @@ -435,7 +483,7 @@ class AMQPContext(OSContextGenerator): ha_vip_only = relation_get('ha-vip-only', rid=rid, unit=unit) is not None - if context_complete(ctxt): + if self.context_complete(ctxt): if 'rabbit_ssl_ca' in ctxt: if not self.ssl_dir: log("Charm not setup for ssl support but ssl ca " @@ -467,7 +515,7 @@ class AMQPContext(OSContextGenerator): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) - if not context_complete(ctxt): + if not self.complete: return {} return ctxt @@ -483,13 +531,15 
@@ class CephContext(OSContextGenerator): log('Generating template context for ceph', level=DEBUG) mon_hosts = [] - auth = None - key = None - use_syslog = str(config('use-syslog')).lower() + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } for rid in relation_ids('ceph'): for unit in related_units(rid): - auth = relation_get('auth', rid=rid, unit=unit) - key = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) ceph_pub_addr = relation_get('ceph-public-address', rid=rid, unit=unit) unit_priv_addr = relation_get('private-address', rid=rid, @@ -498,15 +548,12 @@ class CephContext(OSContextGenerator): ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog} + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') - if not context_complete(ctxt): + if not self.context_complete(ctxt): return {} ensure_packages(['ceph-common']) @@ -893,6 +940,31 @@ class NeutronContext(OSContextGenerator): 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + + def midonet_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + midonet_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + mido_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'midonet', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': midonet_config} + + return mido_ctxt + def __call__(self): if self.network_manager not in ['quantum', 'neutron']: return {} @@ -912,6 +984,10 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.calico_ctxt()) elif self.plugin == 'vsp': ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) + elif self.plugin == 'midonet': + ctxt.update(self.midonet_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -923,7 +999,6 @@ class NeutronContext(OSContextGenerator): class NeutronPortContext(OSContextGenerator): - NIC_PREFIXES = ['eth', 'bond'] def resolve_ports(self, ports): """Resolve NICs not yet bound to bridge(s) @@ -935,7 +1010,18 @@ class NeutronPortContext(OSContextGenerator): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(self.NIC_PREFIXES): + for nic in list_nics(): + # Ignore virtual interfaces (bond masters will be identified from + # their slaves) + if not is_phy_iface(nic): + continue + + _nic = get_bond_master(nic) + if _nic: + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), + level=DEBUG) + nic = _nic + hwaddr = get_nic_hwaddr(nic) hwaddr_to_nic[hwaddr] = nic addresses = get_ipv4_addr(nic, fatal=False) @@ -961,7 +1047,8 @@ class NeutronPortContext(OSContextGenerator): # trust it to be the real external network). 
resolved.append(entry) - return resolved + # Ensure no duplicates + return list(set(resolved)) class OSConfigFlagContext(OSContextGenerator): @@ -1033,7 +1120,7 @@ class SubordinateConfigContext(OSContextGenerator): ctxt = { ... other context ... - 'subordinate_config': { + 'subordinate_configuration': { 'DEFAULT': { 'key1': 'value1', }, @@ -1051,13 +1138,22 @@ class SubordinateConfigContext(OSContextGenerator): :param config_file : Service's config file to query sections :param interface : Subordinate interface to inspect """ - self.service = service self.config_file = config_file - self.interface = interface + if isinstance(service, list): + self.services = service + else: + self.services = [service] + if isinstance(interface, list): + self.interfaces = interface + else: + self.interfaces = [interface] def __call__(self): ctxt = {'sections': {}} - for rid in relation_ids(self.interface): + rids = [] + for interface in self.interfaces: + rids.extend(relation_ids(interface)) + for rid in rids: for unit in related_units(rid): sub_config = relation_get('subordinate_configuration', rid=rid, unit=unit) @@ -1065,33 +1161,37 @@ class SubordinateConfigContext(OSContextGenerator): try: sub_config = json.loads(sub_config) except: - log('Could not parse JSON from subordinate_config ' - 'setting from %s' % rid, level=ERROR) + log('Could not parse JSON from ' + 'subordinate_configuration setting from %s' + % rid, level=ERROR) continue - if self.service not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service), - level=INFO) - continue + for service in self.services: + if service not in sub_config: + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s service' + % (rid, service), level=INFO) + continue - sub_config = sub_config[self.service] - if self.config_file not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file), - level=INFO) - continue - - sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): - if k == 'sections': - for section, config_dict in six.iteritems(v): - log("adding section '%s'" % (section), - level=DEBUG) - ctxt[k][section] = config_dict - else: - ctxt[k] = v + sub_config = sub_config[service] + if self.config_file not in sub_config: + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s' + % (rid, self.config_file), level=INFO) + continue + sub_config = sub_config[self.config_file] + for k, v in six.iteritems(sub_config): + if k == 'sections': + for section, config_list in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) + if ctxt[k].get(section): + ctxt[k][section].extend(config_list) + else: + ctxt[k][section] = config_list + else: + ctxt[k] = v log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt @@ -1268,15 +1368,19 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: + # Map of {port/mac:bridge} portmap = parse_data_port_mappings(ports) - ports = portmap.values() + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. resolved = self.resolve_ports(ports) + # FIXME: is this necessary? 
normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {bridge: normalized[port] for bridge, port in + return {normalized[port]: bridge for port, bridge in six.iteritems(portmap) if port in normalized.keys()} return None @@ -1287,12 +1391,22 @@ class PhyNICMTUContext(DataPortContext): def __call__(self): ctxt = {} mappings = super(PhyNICMTUContext, self).__call__() - if mappings and mappings.values(): - ports = mappings.values() + if mappings and mappings.keys(): + ports = sorted(mappings.keys()) napi_settings = NeutronAPIContext()() mtu = napi_settings.get('network_device_mtu') + all_ports = set() + # If any of ports is a vlan device, its underlying device must have + # mtu applied first. + for port in ports: + for lport in glob.glob("/sys/class/net/%s/lower_*" % port): + lport = os.path.basename(lport) + all_ports.add(lport.split('_')[1]) + + all_ports = list(all_ports) + all_ports.extend(ports) if mtu: - ctxt["devs"] = '\\n'.join(ports) + ctxt["devs"] = '\\n'.join(all_ports) ctxt['mtu'] = mtu return ctxt @@ -1324,6 +1438,6 @@ class NetworkServiceContext(OSContextGenerator): 'auth_protocol': rdata.get('auth_protocol') or 'http', } - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index f7b7235..c54d63c 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -195,6 +195,34 @@ def neutron_plugins(): 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['plumgrid-lxc'], + ['iovisor-dkms']], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] + }, + 'midonet': { + 'config': '/etc/neutron/plugins/midonet/midonet.ini', + 'driver': 'midonet.neutron.plugin.MidonetPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [[headers_package()] + determine_dkms_package()], + 'server_packages': ['neutron-server', + 'python-neutron-plugin-midonet'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -255,17 +283,30 @@ def network_manager(): return 'neutron' -def parse_mappings(mappings): +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. 
+ """ parsed = {} if mappings: mappings = mappings.split() for m in mappings: p = m.partition(':') - key = p[0].strip() - if p[1]: - parsed[key] = p[2].strip() + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue else: - parsed[key] = '' + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() return parsed @@ -283,25 +324,25 @@ def parse_bridge_mappings(mappings): def parse_data_port_mappings(mappings, default_bridge='br-data'): """Parse data port mappings. - Mappings must be a space-delimited list of bridge:port mappings. + Mappings must be a space-delimited list of bridge:port. - Returns dict of the form {bridge:port}. + Returns dict of the form {port:bridge} where ports may be mac addresses or + interface names. """ - _mappings = parse_mappings(mappings) + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) if not _mappings or list(_mappings.values()) == ['']: if not mappings: return {} # For backwards-compatibility we need to support port-only provided in # config. - _mappings = {default_bridge: mappings.split()[0]} - - bridges = _mappings.keys() - ports = _mappings.values() - if len(set(bridges)) != len(bridges): - raise Exception("It is not allowed to have more than one port " - "configured on the same bridge") + _mappings = {mappings.split()[0]: default_bridge} + ports = _mappings.keys() if len(set(ports)) != len(ports): raise Exception("It is not allowed to have the same port configured " "on more than one bridge") diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index b99851c..33ceee2 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -13,3 +13,9 @@ log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} +[client] +{% if rbd_client_cache_settings -%} +{% for key, value in rbd_client_cache_settings.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{%- endif %} \ No newline at end of file diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 24cb272..e5e3cb1 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -18,7 +18,7 @@ import os import six -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, ERROR, @@ -29,8 +29,9 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: - # python-jinja2 may not be installed yet, or we're running unittests. - FileSystemLoader = ChoiceLoader = Environment = exceptions = None + apt_update(fatal=True) + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions class OSConfigException(Exception): @@ -112,7 +113,7 @@ class OSConfigTemplate(object): def complete_contexts(self): ''' - Return a list of interfaces that have atisfied contexts. + Return a list of interfaces that have satisfied contexts. 
''' if self._complete_contexts: return self._complete_contexts @@ -293,3 +294,30 @@ class OSConfigRenderer(object): [interfaces.extend(i.complete_contexts()) for i in six.itervalues(self.templates)] return interfaces + + def get_incomplete_context_data(self, interfaces): + ''' + Return dictionary of relation status of interfaces and any missing + required context data. Example: + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}} + ''' + incomplete_context_data = {} + + for i in six.itervalues(self.templates): + for context in i.contexts: + for interface in interfaces: + related = False + if interface in context.interfaces: + related = context.get_related() + missing_data = context.missing_data + if missing_data: + incomplete_context_data[interface] = {'missing_data': missing_data} + if related: + if incomplete_context_data.get(interface): + incomplete_context_data[interface].update({'related': True}) + else: + incomplete_context_data[interface] = {'related': True} + else: + incomplete_context_data[interface] = {'related': False} + return incomplete_context_data diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 4dd000c..fc479a3 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. @@ -24,8 +22,11 @@ import subprocess import json import os import sys +import re import six +import traceback +import uuid import yaml from charmhelpers.contrib.network import ip @@ -35,12 +36,17 @@ from charmhelpers.core import ( ) from charmhelpers.core.hookenv import ( + action_fail, + action_set, config, log as juju_log, charm_dir, INFO, + related_units, relation_ids, - relation_set + relation_set, + status_set, + hook_name ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -50,7 +56,8 @@ from charmhelpers.contrib.storage.linux.lvm import ( ) from charmhelpers.contrib.network.ip import ( - get_ipv6_addr + get_ipv6_addr, + is_ipv6, ) from charmhelpers.contrib.python.packages import ( @@ -69,7 +76,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') - UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -116,8 +122,41 @@ SWIFT_CODENAMES = OrderedDict([ ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), ('2.3.0', 'liberty'), + ('2.4.0', 'liberty'), + ('2.5.0', 'liberty'), ]) +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12.0.0', 'liberty'), + ]), + 'neutron-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'cinder-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'keystone': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'horizon-common': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'ceilometer-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'heat-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'glance-common': OrderedDict([ + ('11.0.0', 'liberty'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8.0.0', 'liberty'), + ]), +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -167,9 +206,9 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in 
six.iteritems(OPENSTACK_CODENAMES): + for k, v in six.iteritems(version_map): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -201,20 +240,31 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + if match: + vers = match.group(0) - try: - if 'swift' in pkg.name: - swift_vers = vers[:5] - if swift_vers not in SWIFT_CODENAMES: - # Deal with 1.10.0 upward - swift_vers = vers[:6] - return SWIFT_CODENAMES[swift_vers] - else: - vers = vers[:6] - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + if not fatal: + return None + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) def get_os_version_package(pkg, fatal=True): @@ -392,7 +442,11 @@ def openstack_upgrade_available(package): import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) - available_vers = get_os_version_install_source(src) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 @@ -469,6 +523,12 @@ def sync_db_with_multi_ipv6_addresses(database, database_user, relation_prefix=None): hosts = get_ipv6_addr(dynamic_only=False) + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + kwargs = {'database': database, 'username': database_user, 'hostname': json.dumps(hosts)} @@ -704,3 +764,235 @@ def git_yaml_value(projects_yaml, key): return projects[key] return None + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None): + """ + Set workload status based on complete contexts. + status-set missing or incomplete contexts + and juju-log details of missing required data. + charm_func is a charm specific function to run checking + for charm specific requirements such as a VIP setting. + """ + incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) + state = 'active' + missing_relations = [] + incomplete_relations = [] + message = None + charm_state = None + charm_message = None + + for generic_interface in incomplete_rel_data.keys(): + related_interface = None + missing_data = {} + # Related or not? 
+ for interface in incomplete_rel_data[generic_interface]: + if incomplete_rel_data[generic_interface][interface].get('related'): + related_interface = interface + missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') + # No relation ID for the generic_interface + if not related_interface: + juju_log("{} relation is missing and must be related for " + "functionality. ".format(generic_interface), 'WARN') + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + else: + # Relation ID exists but no related unit + if not missing_data: + # Edge case relation ID exists but departing + if ('departed' in hook_name() or 'broken' in hook_name()) \ + and related_interface in hook_name(): + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + juju_log("{} relation's interface, {}, " + "relationship is departed or broken " + "and is required for functionality." + "".format(generic_interface, related_interface), "WARN") + # Normal case relation ID exists but no related unit + # (joining) + else: + juju_log("{} relations's interface, {}, is related but has " + "no units in the relation." + "".format(generic_interface, related_interface), "INFO") + # Related unit exists and data missing on the relation + else: + juju_log("{} relation's interface, {}, is related awaiting " + "the following data from the relationship: {}. " + "".format(generic_interface, related_interface, + ", ".join(missing_data)), "INFO") + if state != 'blocked': + state = 'waiting' + if generic_interface not in incomplete_relations \ + and generic_interface not in missing_relations: + incomplete_relations.append(generic_interface) + + if missing_relations: + message = "Missing relations: {}".format(", ".join(missing_relations)) + if incomplete_relations: + message += "; incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'blocked' + elif incomplete_relations: + message = "Incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'waiting' + + # Run charm specific checks + if charm_func: + charm_state, charm_message = charm_func(configs) + if charm_state != 'active' and charm_state != 'unknown': + state = workload_state_compare(state, charm_state) + if message: + charm_message = charm_message.replace("Incomplete relations: ", + "") + message = "{}, {}".format(message, charm_message) + else: + message = charm_message + + # Set to active if all requirements have been met + if state == 'active': + message = "Unit is ready" + juju_log(message, "INFO") + + status_set(state, message) + + +def workload_state_compare(current_workload_state, workload_state): + """ Return highest priority of two states""" + hierarchy = {'unknown': -1, + 'active': 0, + 'maintenance': 1, + 'waiting': 2, + 'blocked': 3, + } + + if hierarchy.get(workload_state) is None: + workload_state = 'unknown' + if hierarchy.get(current_workload_state) is None: + current_workload_state = 'unknown' + + # Set workload_state based on hierarchy of statuses + if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): + return current_workload_state + else: + return workload_state + + +def incomplete_relation_data(configs, required_interfaces): + """ + Check complete contexts against required_interfaces + Return dictionary of incomplete relation data. 
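
set_os_workload_status() and its decorator form are how a charm turns incomplete contexts into Juju workload status. A rough sketch of how a hooks module might wire this up; CONFIGS (the charm's OSConfigRenderer instance) and the interface map below are assumptions for illustration, not taken from this charm:

from charmhelpers.contrib.openstack.utils import (
    os_workload_status,
    set_os_workload_status,
)

# Each generic requirement is satisfied if any one of its listed interfaces
# has a complete context.
REQUIRED_INTERFACES = {
    'database': ['shared-db', 'pgsql-db'],
    'messaging': ['amqp', 'zeromq-configuration'],
}

@os_workload_status(CONFIGS, REQUIRED_INTERFACES)
def config_changed():
    CONFIGS.write_all()

# Equivalent explicit call, e.g. at the end of an update-status hook:
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)
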
+ + configs is an OSConfigRenderer object with configs registered + + required_interfaces is a dictionary of required general interfaces + with dictionary values of possible specific interfaces. + Example: + required_interfaces = {'database': ['shared-db', 'pgsql-db']} + + The interface is said to be satisfied if anyone of the interfaces in the + list has a complete context. + + Return dictionary of incomplete or missing required contexts with relation + status of interfaces and any missing data points. Example: + {'message': + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}}, + 'identity': + {'identity-service': {'related': False}}, + 'database': + {'pgsql-db': {'related': False}, + 'shared-db': {'related': True}}} + """ + complete_ctxts = configs.complete_contexts() + incomplete_relations = [] + for svc_type in required_interfaces.keys(): + # Avoid duplicates + found_ctxt = False + for interface in required_interfaces[svc_type]: + if interface in complete_ctxts: + found_ctxt = True + if not found_ctxt: + incomplete_relations.append(svc_type) + incomplete_context_data = {} + for i in incomplete_relations: + incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) + return incomplete_context_data + + +def do_action_openstack_upgrade(package, upgrade_callback, configs): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. + + If the charm was installed from source we cannot upgrade it. + For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed. 
+ + @param package: package name for determining if upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if git_install_requested(): + action_set({'outcome': 'installed from source, skipped upgrade.'}) + else: + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) + + return ret + + +def remote_restart(rel_name, remote_service=None): + trigger = { + 'restart-trigger': str(uuid.uuid4()), + } + if remote_service: + trigger['remote-service'] = remote_service + for rid in relation_ids(rel_name): + # This subordinate can be related to two seperate services using + # different subordinate relations so only issue the restart if + # the principle is conencted down the relation we think it is + if related_units(relid=rid): + relation_set(relation_id=rid, + relation_settings=trigger, + ) diff --git a/hooks/charmhelpers/contrib/python/__init__.py b/hooks/charmhelpers/contrib/python/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers/contrib/python/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py new file mode 100644 index 0000000..10b32e3 --- /dev/null +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
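
do_action_openstack_upgrade() is intended to be driven from a charm's openstack-upgrade action. A hedged sketch of such an action script; register_configs() and do_openstack_upgrade() stand in for a charm's own helpers and 'neutron-common' is only an example package to version-check against:

from charmhelpers.contrib.openstack.utils import do_action_openstack_upgrade

def openstack_upgrade():
    # Hypothetical 'openstack-upgrade' action entry point. Returns True only
    # when action-managed-upgrade is set, an upgrade is available and the
    # callback completes without raising.
    if do_action_openstack_upgrade('neutron-common',
                                   do_openstack_upgrade,
                                   register_configs()):
        # Upgrade ran; re-render configs / restart services as needed.
        pass

remote_restart(), added alongside it, simply publishes a fresh restart-trigger UUID on every relation of the given name so the remote (principal) service knows to restart.
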
+ +import os +import subprocess + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import charm_dir, log + +try: + from pip import main as pip_execute +except ImportError: + apt_update() + apt_install('python-pip') + from pip import main as pip_execute + +__author__ = "Jorge Niedbalski " + + +def parse_options(given, available): + """Given a set of options, check if available""" + for key, value in sorted(given.items()): + if not value: + continue + if key in available: + yield "--{0}={1}".format(key, value) + + +def pip_install_requirements(requirements, **options): + """Install a requirements file """ + command = ["install"] + + available_options = ('proxy', 'src', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + command.append("-r {0}".format(requirements)) + log("Installing from file: {} with options: {}".format(requirements, + command)) + pip_execute(command) + + +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): + """Install a python package""" + if venv: + venv_python = os.path.join(venv, 'bin/pip') + command = [venv_python, "install"] + else: + command = ["install"] + + available_options = ('proxy', 'src', 'log', 'index-url', ) + for option in parse_options(options, available_options): + command.append(option) + + if upgrade: + command.append('--upgrade') + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Installing {} package with options: {}".format(package, + command)) + if venv: + subprocess.check_call(command) + else: + pip_execute(command) + + +def pip_uninstall(package, **options): + """Uninstall a python package""" + command = ["uninstall", "-q", "-y"] + + available_options = ('proxy', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Uninstalling {} package with options: {}".format(package, + command)) + pip_execute(command) + + +def pip_list(): + """Returns the list of current python installed packages + """ + return pip_execute(["list"]) + + +def pip_create_virtualenv(path=None): + """Create an isolated Python environment.""" + apt_install('python-virtualenv') + + if path: + venv_path = path + else: + venv_path = os.path.join(charm_dir(), 'venv') + + if not os.path.exists(venv_path): + subprocess.check_call(['virtualenv', venv_path]) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 00dbffb..83f264d 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,6 +28,7 @@ import os import shutil import json import time +import uuid from subprocess import ( check_call, @@ -35,8 +36,10 @@ from subprocess import ( CalledProcessError, ) from charmhelpers.core.hookenv import ( + local_unit, relation_get, relation_ids, + relation_set, related_units, log, DEBUG, @@ -56,6 +59,8 @@ from charmhelpers.fetch import ( apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -288,17 +293,6 @@ def place_data_on_block_device(blk_device, data_src_dst): os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] 
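
The new contrib.python.packages module wraps pip so charms can manage Python packages, optionally inside a virtualenv, without shelling out by hand. A small usage sketch; the package name, proxy URL and venv path are illustrative only:

from charmhelpers.contrib.python.packages import (
    pip_create_virtualenv,
    pip_install,
)

venv = '/var/lib/odl/venv'           # example path only
pip_create_virtualenv(venv)          # installs python-virtualenv if needed
pip_install('requests', venv=venv, upgrade=True,
            proxy='http://squid.internal:3128')
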
- check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): """Copy files from src to dst.""" for item in os.listdir(src): @@ -411,17 +405,52 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1): + def __init__(self, api_version=1, request_id=None): self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) self.ops = [] def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count}) + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + @property def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op']: + if self.ops[req_no][key] != other.ops[req_no][key]: + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) class CephBrokerRsp(object): @@ -431,10 +460,15 @@ class CephBrokerRsp(object): The API is versioned and defaults to version 1. """ + def __init__(self, encoded_rsp): self.api_version = None self.rsp = json.loads(encoded_rsp) + @property + def request_id(self): + return self.rsp.get('request-id') + @property def exit_code(self): return self.rsp.get('exit-code') @@ -442,3 +476,182 @@ class CephBrokerRsp(object): @property def exit_msg(self): return self.rsp.get('stderr') + + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request): + """Return a dict of requests per relation id with their corresponding + completion state. + + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids('ceph'): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + +def is_request_sent(request): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. 
+ if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies', level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies', level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + +def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + +def send_request_if_needed(request): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids('ceph'): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b7..1e57941 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 @@ -67,4 +68,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py new file mode 100644 index 0000000..0f12d32 --- /dev/null +++ b/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. 
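
Putting the broker helpers together, a ceph-relation-changed hook becomes an idempotent check-then-send loop. This fleshes out the skeleton in the comment block above; the pool name and the configure_ceph_client() step are illustrative assumptions:

from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    is_request_complete,
    send_request_if_needed,
)

def get_ceph_request():
    rq = CephBrokerRq()
    rq.add_op_create_pool(name='odl', replica_count=3)
    return rq

def ceph_changed():
    if is_request_complete(get_ceph_request()):
        # An equivalent request has been processed on every ceph relation;
        # safe to write ceph.conf, keyrings, etc. (charm-specific step).
        configure_ceph_client()
    else:
        # Only sends if no functionally equivalent request is outstanding.
        send_request_if_needed(get_ceph_request())
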
+ :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index dd8def9..454b52a 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ # Charm Helpers Developers from __future__ import print_function +import copy from distutils.version import LooseVersion from functools import wraps import glob @@ -73,6 +74,7 @@ def cached(func): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -172,9 +174,19 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') def local_unit(): @@ -192,9 +204,20 @@ def service_name(): return local_unit().split('/')[0] +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + def hook_name(): """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) class Config(dict): @@ -263,7 +286,7 @@ class Config(dict): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) - for k, v in self._prev_dict.items(): + for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -467,6 +490,76 @@ def relation_types(): return rel_types +@cached +def peer_relation_id(): + '''Get a peer relation id if a peer relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. 
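
relation_id() can now resolve a relation ID outside of relation hook context, and remote_service_name() returns the service on the other end. A hedged sketch; the relation and service names below are examples rather than a statement about this charm's metadata:

from charmhelpers.core.hookenv import relation_id, remote_service_name

relation_id()                       # old behaviour: JUJU_RELATION_ID or None
# New form: look the ID up by relation name plus remote service or unit.
relation_id('controller-api', 'neutron-api-odl/0')   # e.g. 'controller-api:3'
remote_service_name()               # e.g. 'neutron-api-odl'
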
+ """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peer'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peer``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peer'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + @cached def charm_name(): """Get the name of the current charm as is specified on metadata.yaml""" @@ -543,6 +636,38 @@ def unit_private_ip(): return unit_get('private-address') +@cached +def storage_get(attribute="", storage_id=""): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=""): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + class UnregisteredHookError(Exception): """Raised when an undefined hook is called""" pass @@ -643,6 +768,21 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + def status_set(workload_state, message): """Set the workload state with a message @@ -672,25 +812,28 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = 
subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): def inner_translate_exc1(f): + @wraps(f) def inner_translate_exc2(*args, **kwargs): try: return f(*args, **kwargs) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 8ae8ef8..0d9c64f 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -63,32 +63,48 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. 
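
Because status_get() now parses status-get --format=json, it returns a (state, message) tuple rather than a bare string, so callers unpack two values. For example:

from charmhelpers.core.hookenv import status_get, status_set

state, message = status_get()       # ('unknown', '') on juju < 1.23
if state != 'active':
    status_set('active', 'Unit is ready')
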
Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_start(service_name) return started @@ -148,6 +164,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +306,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. @@ -396,25 +433,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. 
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces @@ -474,7 +566,14 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group, follow_links=True): +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """ + Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param bool follow_links: Also Chown links if True + :param bool chowntopdir: Also chown path itself if True + """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: @@ -482,6 +581,10 @@ def chownr(path, owner, group, follow_links=True): else: chown = os.lchown + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) @@ -492,3 +595,19 @@ def chownr(path, owner, group, follow_links=True): def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) + + +def get_total_ram(): + '''The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + ''' + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
+ raise NotImplementedError() diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 0000000..a783ad9 --- /dev/null +++ b/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. + + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py new file mode 100644 index 0000000..5dc6495 --- /dev/null +++ b/hooks/charmhelpers/core/kernel.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
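
The new core.hugepage module bundles the group, sysctl, fstab and mount steps needed for hugepages behind a single call. A sketch under assumed values; the user name and page count are illustrative:

from charmhelpers.core.hugepage import hugepage_support

# Reserves 1024 x 2MB hugepages, adds 'libvirt-qemu' to the hugetlb group,
# writes /etc/sysctl.d/10-hugepage.conf and mounts /run/hugepages/kvm.
hugepage_support('libvirt-qemu', group='hugetlb', nr_hugepages=1024,
                 max_map_count=65536, mount=True)
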
See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = "Jorge Niedbalski " + +from charmhelpers.core.hookenv import ( + log, + INFO +) + +from subprocess import check_call, check_output +import re + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + check_call(cmd) + if persist: + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 + + +def update_initramfs(version='all'): + """Updates an initramfs image""" + return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 8005c41..12d768e 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -16,7 +16,9 @@ import os import yaml + from charmhelpers.core import hookenv +from charmhelpers.core import host from charmhelpers.core import templating from charmhelpers.core.services.base import ManagerCallback @@ -240,27 +242,44 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file - + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader """ def __init__(self, source, target, - owner='root', group='root', perms=0o444): + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) context = {} for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, - self.owner, self.group, self.perms) + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() # Convenience aliases for templates diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py index a2a784a..7e3f969 100644 --- a/hooks/charmhelpers/core/strutils.py +++ 
b/hooks/charmhelpers/core/strutils.py @@ -18,6 +18,7 @@ # along with charm-helpers. If not, see . import six +import re def bool_from_string(value): @@ -40,3 +41,32 @@ def bool_from_string(value): msg = "Unable to interpret string value '%s' as boolean" % (value) raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. + + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 4531999..239719d 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ from charmhelpers.core import hookenv def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8'): + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): """ Render a template. @@ -52,17 +52,24 @@ def render(source, target, context, owner='root', group='root', apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - loader = Environment(loader=FileSystemLoader(templates_dir)) + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) try: source = source - template = loader.get_template(source) + template = template_env.get_template(source) except exceptions.TemplateNotFound as e: hookenv.log('Could not load template %s from %s.' % (source, templates_dir), level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index 406a35c..338104e 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -152,6 +152,7 @@ associated to the hookname. import collections import contextlib import datetime +import itertools import json import os import pprint @@ -164,8 +165,7 @@ __author__ = 'Kapil Thangavelu ' class Storage(object): """Simple key value database for local unit state within charms. - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. + Modifications are not persisted unless :meth:`flush` is called. To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. 
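
Two small helpers from the files above are worth illustrating: modprobe() has moved out of contrib.storage.linux.ceph into the new core.kernel module (with an optional persist flag), and core.strutils gains bytes_from_string(). Short sketches, using an example module name:

from charmhelpers.core.kernel import is_module_loaded, modprobe
from charmhelpers.core.strutils import bytes_from_string

if not is_module_loaded('openvswitch'):
    modprobe('openvswitch', persist=True)   # also appends to /etc/modules

bytes_from_string('2MB')   # 2097152
bytes_from_string('1G')    # 1073741824
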
@@ -173,8 +173,11 @@ class Storage(object): def __init__(self, path=None): self.db_path = path if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -189,15 +192,8 @@ class Storage(object): self.conn.close() self._closed = True - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) + self.cursor.execute('select data from kv where key=?', [key]) result = self.cursor.fetchone() if not result: return default @@ -206,33 +202,81 @@ class Storage(object): return json.loads(result[0]) def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) result = self.cursor.fetchall() if not result: - return None + return {} if not strip: key_prefix = '' return dict([ (k[len(key_prefix):], json.loads(v)) for k, v in result]) def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ for k, v in mapping.items(): self.set("%s%s" % (prefix, k), v) def unset(self, key): + """ + Remove a key from the database entirely. + """ self.cursor.execute('delete from kv where key=?', [key]) if self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + def set(self, key, value): + """ + Set a value in the database. 
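
The Storage docstring change above is worth calling out: values are only persisted when flush() is called, and getrange() now returns an empty dict rather than None when nothing matches. A short sketch using an explicit, illustrative database path:

from charmhelpers.core import unitdata

db = unitdata.Storage('/tmp/example-unit-state.db')   # path for illustration
db.set('last-broker-req', '0bc7dc54')
db.update({'user': 'odl', 'port': 8080}, prefix='api.')
db.getrange('api.', strip=True)    # {'user': 'odl', 'port': 8080}
db.getrange('missing.')            # {} (previously None)
db.flush()                         # changes are only persisted on flush()
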
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ serialized = json.dumps(value) - self.cursor.execute( - 'select data from kv where key=?', [key]) + self.cursor.execute('select data from kv where key=?', [key]) exists = self.cursor.fetchone() # Skip mutations to the same value diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 0a3bb96..5f831c3 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -90,6 +90,14 @@ CLOUD_ARCHIVE_POCKETS = { 'kilo/proposed': 'trusty-proposed/kilo', 'trusty-kilo/proposed': 'trusty-proposed/kilo', 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', } # The order of this list is very important. Handlers should be listed in from @@ -217,12 +225,12 @@ def apt_purge(packages, fatal=False): def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark""" + log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) - log("Holding {}".format(packages)) if fatal: subprocess.check_call(cmd, universal_newlines=True) diff --git a/metadata.yaml b/metadata.yaml index c93c542..ffabdd3 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -6,7 +6,7 @@ description: | virtual-network to virtual-machines, containers or network namespaces. . This charm provides the controller component. 
-categories: +tags: - openstack provides: controller-api: diff --git a/tests/015-basic-trusty-icehouse b/tests/015-basic-trusty-icehouse new file mode 100755 index 0000000..1aa11fd --- /dev/null +++ b/tests/015-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic odl controller deployment on trusty-icehouse.""" + +from basic_deployment import ODLControllerBasicDeployment + +if __name__ == '__main__': + deployment = ODLControllerBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/tests/016-basic-trusty-juno b/tests/016-basic-trusty-juno new file mode 100755 index 0000000..a5c9271 --- /dev/null +++ b/tests/016-basic-trusty-juno @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic odl controller deployment on trusty-juno.""" + +from basic_deployment import ODLControllerBasicDeployment + +if __name__ == '__main__': + deployment = ODLControllerBasicDeployment(series='trusty', + openstack='cloud:trusty-juno', + source='cloud:trusty-updates/juno') + deployment.run_tests() diff --git a/tests/017-basic-trusty-kilo b/tests/017-basic-trusty-kilo new file mode 100755 index 0000000..942051e --- /dev/null +++ b/tests/017-basic-trusty-kilo @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic odl controller deployment on trusty-kilo.""" + +from basic_deployment import ODLControllerBasicDeployment + +if __name__ == '__main__': + deployment = ODLControllerBasicDeployment(series='trusty', + openstack='cloud:trusty-kilo', + source='cloud:trusty-updates/kilo') + deployment.run_tests() diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py new file mode 100644 index 0000000..0eb7304 --- /dev/null +++ b/tests/basic_deployment.py @@ -0,0 +1,465 @@ +#!/usr/bin/python + +import amulet +import os + +from neutronclient.v2_0 import client as neutronclient + +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) + +from charmhelpers.contrib.openstack.amulet.utils import ( + OpenStackAmuletUtils, + DEBUG, + # ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(DEBUG) + + +class ODLControllerBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic OVS ODL deployment.""" + + def __init__(self, series, openstack=None, source=None, git=False, + stable=False): + """Deploy the entire test environment.""" + super(ODLControllerBasicDeployment, self).__init__(series, openstack, + source, stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + exclude_services = ['mysql', 'odl-controller', 'neutron-api-odl'] + self._auto_wait_for_status(exclude_services=exclude_services) + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, where odl-controller is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). 
+ """ + this_service = { + 'name': 'odl-controller', + 'constraints': {'mem': '8G'}, + } + other_services = [ + {'name': 'mysql'}, + {'name': 'rabbitmq-server'}, + {'name': 'keystone'}, + {'name': 'nova-cloud-controller'}, + {'name': 'neutron-gateway'}, + { + 'name': 'neutron-api-odl', + 'location': 'lp:~openstack-charmers/charms/trusty/' + 'neutron-api-odl/vpp', + }, + { + 'name': 'openvswitch-odl', + 'location': 'lp:~openstack-charmers/charms/trusty/' + 'openvswitch-odl/trunk', + }, + {'name': 'neutron-api'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, + ] + + super(ODLControllerBasicDeployment, self)._add_services( + this_service, other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'keystone:shared-db': 'mysql:shared-db', + 'neutron-gateway:shared-db': 'mysql:shared-db', + 'neutron-gateway:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:quantum-network-service': + 'neutron-gateway:quantum-network-service', + 'nova-cloud-controller:shared-db': 'mysql:shared-db', + 'nova-cloud-controller:identity-service': 'keystone:' + 'identity-service', + 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', + 'neutron-api:shared-db': 'mysql:shared-db', + 'neutron-api:amqp': 'rabbitmq-server:amqp', + 'neutron-api:neutron-api': 'nova-cloud-controller:neutron-api', + 'neutron-api:identity-service': 'keystone:identity-service', + 'neutron-api:neutron-plugin-api-subordinate': + 'neutron-api-odl:neutron-plugin-api-subordinate', + 'neutron-gateway:juju-info': 'openvswitch-odl:container', + 'openvswitch-odl:ovsdb-manager': 'odl-controller:ovsdb-manager', + 'neutron-api-odl:odl-controller': 'odl-controller:controller-api', + 'glance:identity-service': 'keystone:identity-service', + 'glance:shared-db': 'mysql:shared-db', + 'glance:amqp': 'rabbitmq-server:amqp', + 'nova-compute:image-service': 'glance:image-service', + 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:cloud-compute': 'nova-compute:' + 'cloud-compute', + 'nova-cloud-controller:image-service': 'glance:image-service', + } + super(ODLControllerBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + neutron_gateway_config = {'plugin': 'ovs-odl', + 'instance-mtu': '1400'} + neutron_api_config = {'neutron-security-groups': 'False', + 'manage-neutron-plugin-legacy-mode': 'False'} + neutron_api_odl_config = {'overlay-network-type': 'vxlan gre'} + odl_controller_config = {} + if os.environ.get('AMULET_ODL_LOCATION'): + odl_controller_config['install-url'] = \ + os.environ['AMULET_ODL_LOCATION'] + if os.environ.get('AMULET_HTTP_PROXY'): + odl_controller_config['http-proxy'] = \ + os.environ['AMULET_HTTP_PROXY'] + if os.environ.get('AMULET_HTTP_PROXY'): + odl_controller_config['https-proxy'] = \ + os.environ['AMULET_HTTP_PROXY'] + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + nova_cc_config = {'network-manager': 'Quantum', + 'quantum-security-groups': 'yes'} + configs = {'neutron-gateway': neutron_gateway_config, + 'neutron-api': neutron_api_config, + 'neutron-api-odl': neutron_api_odl_config, + 'odl-controller': odl_controller_config, + 'keystone': keystone_config, + 'nova-cloud-controller': nova_cc_config} + super(ODLControllerBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting 
service units + self.mysql_sentry = self.d.sentry['mysql'][0] + self.keystone_sentry = self.d.sentry['keystone'][0] + self.rmq_sentry = self.d.sentry['rabbitmq-server'][0] + self.nova_cc_sentry = self.d.sentry['nova-cloud-controller'][0] + self.neutron_gateway_sentry = self.d.sentry['neutron-gateway'][0] + self.neutron_api_sentry = self.d.sentry['neutron-api'][0] + self.odl_controller_sentry = self.d.sentry['odl-controller'][0] + self.neutron_api_odl_sentry = self.d.sentry['neutron-api-odl'][0] + self.openvswitch_odl_sentry = self.d.sentry['openvswitch-odl'][0] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + # Authenticate admin with neutron + ep = self.keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + self.neutron = neutronclient.Client(auth_url=ep, + username='admin', + password='openstack', + tenant_name='admin', + region_name='RegionOne') + # Authenticate admin with glance endpoint + self.glance = u.authenticate_glance_admin(self.keystone) + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate demo user with keystone + self.keystone_demo = \ + u.authenticate_keystone_user(self.keystone, user=self.demo_user, + password='password', + tenant=self.demo_tenant) + + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + user=self.demo_user, + password='password', + tenant=self.demo_tenant) + + def test_100_services(self): + """Verify the expected services are running on the corresponding + service units.""" + neutron_services = ['neutron-dhcp-agent', + 'neutron-lbaas-agent', + 'neutron-metadata-agent', + 'neutron-metering-agent', + 'neutron-l3-agent'] + + nova_cc_services = ['nova-api-ec2', + 'nova-api-os-compute', + 'nova-objectstore', + 'nova-cert', + 'nova-scheduler', + 'nova-conductor'] + + odl_c_services = ['odl-controller'] + + commands = { + self.mysql_sentry: ['mysql'], + self.keystone_sentry: ['keystone'], + self.nova_cc_sentry: nova_cc_services, + self.neutron_gateway_sentry: neutron_services, + self.odl_controller_sentry: odl_c_services, + } + + ret = u.validate_services_by_name(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_102_service_catalog(self): + """Verify that the service catalog endpoint data is valid.""" + u.log.debug('Checking keystone service catalog...') + endpoint_check = { + 'adminURL': u.valid_url, + 'id': u.not_null, + 'region': 'RegionOne', + 'publicURL': u.valid_url, + 'internalURL': u.valid_url + } + expected = { + 'network': [endpoint_check], + 'compute': [endpoint_check], + 'identity': [endpoint_check] + } + actual = self.keystone.service_catalog.get_endpoints() + + ret = u.validate_svc_catalog_endpoint_data(expected, actual) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_104_network_endpoint(self): + """Verify the neutron network endpoint data.""" + u.log.debug('Checking neutron network api endpoint data...') + endpoints = self.keystone.endpoints.list() + admin_port = 
internal_port = public_port = '9696' + expected = { + 'id': u.not_null, + 'region': 'RegionOne', + 'adminurl': u.valid_url, + 'internalurl': u.valid_url, + 'publicurl': u.valid_url, + 'service_id': u.not_null + } + ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, + public_port, expected) + + if ret: + amulet.raise_status(amulet.FAIL, + msg='glance endpoint: {}'.format(ret)) + + def test_110_users(self): + """Verify expected users.""" + u.log.debug('Checking keystone users...') + expected = [ + {'name': 'admin', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost'}, + {'name': 'quantum', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost'} + ] + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + expected.append({ + 'name': 'nova', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost' + }) + else: + # Juno and earlier + expected.append({ + 'name': 's3_ec2_nova', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost' + }) + + actual = self.keystone.users.list() + ret = u.validate_user_data(expected, actual) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_200_odl_controller_controller_api_relation(self): + """Verify the odl-controller to neutron-api-odl relation data""" + u.log.debug('Checking odl-controller to neutron-api-odl relation data') + unit = self.odl_controller_sentry + relation = ['controller-api', 'neutron-api-odl:odl-controller'] + expected = { + 'private-address': u.valid_ip, + 'username': 'admin', + 'password': 'admin', + 'port': '8080', + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('odl-controller controller-api', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_201_neutron_api_odl_odl_controller_relation(self): + """Verify the odl-controller to neutron-api-odl relation data""" + u.log.debug('Checking odl-controller to neutron-api-odl relation data') + unit = self.neutron_api_odl_sentry + relation = ['odl-controller', 'odl-controller:controller-api'] + expected = { + 'private-address': u.valid_ip, + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('neutron-api-odl odl-controller', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_202_odl_controller_ovsdb_manager_relation(self): + """Verify the odl-controller to openvswitch-odl relation data""" + u.log.debug('Checking odl-controller to openvswitch-odl relation data') + unit = self.odl_controller_sentry + relation = ['ovsdb-manager', 'openvswitch-odl:ovsdb-manager'] + expected = { + 'private-address': u.valid_ip, + 'protocol': 'tcp', + 'port': '6640', + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('odl-controller openvswitch-odl', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_203_openvswitch_odl_ovsdb_manager_relation(self): + """Verify the openvswitch-odl to odl-controller relation data""" + u.log.debug('Checking openvswitch-odl to odl-controller relation data') + unit = self.openvswitch_odl_sentry + relation = ['ovsdb-manager', 'odl-controller:ovsdb-manager'] + expected = { + 'private-address': u.valid_ip, + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('openvswitch-odl to odl-controller', + ret) + amulet.raise_status(amulet.FAIL, 
msg=message) + + def test_400_create_network(self): + """Create a network, verify that it exists, and then delete it.""" + u.log.debug('Creating neutron network...') + self.neutron.format = 'json' + net_name = 'ext_net' + + # Verify that the network doesn't exist + networks = self.neutron.list_networks(name=net_name) + net_count = len(networks['networks']) + if net_count != 0: + msg = "Expected zero networks, found {}".format(net_count) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create a network and verify that it exists + network = {'name': net_name} + self.neutron.create_network({'network': network}) + + networks = self.neutron.list_networks(name=net_name) + u.log.debug('Networks: {}'.format(networks)) + net_len = len(networks['networks']) + if net_len != 1: + msg = "Expected 1 network, found {}".format(net_len) + amulet.raise_status(amulet.FAIL, msg=msg) + + u.log.debug('Confirming new neutron network...') + network = networks['networks'][0] + if network['name'] != net_name: + amulet.raise_status(amulet.FAIL, msg="network ext_net not found") + + # Cleanup + u.log.debug('Deleting neutron network...') + self.neutron.delete_network(network['id']) + + def test_400_gateway_bridges(self): + """Ensure that all bridges are present and configured with the + ODL controller as their NorthBound controller URL.""" + odl_ip = self.odl_controller_sentry.relation( + 'ovsdb-manager', + 'openvswitch-odl:ovsdb-manager' + )['private-address'] + controller_url = "tcp:{}:6633".format(odl_ip) + cmd = 'ovs-vsctl list-br' + output, _ = self.neutron_gateway_sentry.run(cmd) + bridges = output.split() + u.log.debug('Checking bridge configuration...') + for bridge in ['br-int', 'br-ex', 'br-data']: + if bridge not in bridges: + amulet.raise_status( + amulet.FAIL, + msg="Missing bridge {} from gateway unit".format(bridge) + ) + cmd = 'ovs-vsctl get-controller {}'.format(bridge) + br_controllers, _ = self.neutron_gateway_sentry.run(cmd) + br_controllers = list(set(br_controllers.split('\n'))) + if len(br_controllers) != 1 or br_controllers[0] != controller_url: + status, _ = self.neutron_gateway_sentry.run('ovs-vsctl show') + amulet.raise_status( + amulet.FAIL, + msg="Controller configuration on bridge" + " {} incorrect: !{}! != !{}!\n" + "{}".format(bridge, + br_controllers, + controller_url, + status) + ) + + def test_400_image_instance_create(self): + """Create an image/instance, verify they exist, and delete them.""" + # NOTE(coreycb): Skipping failing test on essex until resolved. essex + # nova API calls are getting "Malformed request url + # (HTTP 400)". 
+ if self._get_openstack_release() == self.precise_essex: + u.log.error("Skipping test (due to Essex)") + return + + u.log.debug('Checking nova instance creation...') + + image = u.create_cirros_image(self.glance, "cirros-image") + if not image: + amulet.raise_status(amulet.FAIL, msg="Image create failed") + + instance = u.create_instance(self.nova_demo, "cirros-image", "cirros", + "m1.tiny") + if not instance: + amulet.raise_status(amulet.FAIL, msg="Instance create failed") + + found = False + for instance in self.nova_demo.servers.list(): + if instance.name == 'cirros': + found = True + if instance.status != 'ACTIVE': + msg = "cirros instance is not active" + amulet.raise_status(amulet.FAIL, msg=msg) + + if not found: + message = "nova cirros instance does not exist" + amulet.raise_status(amulet.FAIL, msg=message) + + u.delete_resource(self.glance.images, image.id, + msg="glance image") + + u.delete_resource(self.nova_demo.servers, instance.id, + msg="nova instance") diff --git a/tests/charmhelpers/__init__.py b/tests/charmhelpers/__init__.py new file mode 100644 index 0000000..f72e7f8 --- /dev/null +++ b/tests/charmhelpers/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/tests/charmhelpers/contrib/__init__.py b/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
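The expected dictionaries used by the relation and endpoint checks in tests/basic_deployment.py above mix literal values with validator callables; a small, hedged sketch of how the synced AmuletUtils helper (added below) treats the two, using made-up relation data. The tests normally reach this logic through validate_relation_data() on a sentry unit rather than calling it directly.

from charmhelpers.contrib.openstack.amulet.utils import (
    OpenStackAmuletUtils,
    DEBUG,
)

u = OpenStackAmuletUtils(DEBUG)          # as instantiated at the top of basic_deployment.py

expected = {
    'private-address': u.valid_ip,       # callable: evaluated against the actual value
    'port': '8080',                      # literal: compared for equality
}
actual = {'private-address': '10.5.0.12', 'port': '8080'}

# None means every expected key matched; otherwise a string describing the first
# mismatch (or missing key) is returned, which the tests wrap with
# u.relation_error() before calling amulet.raise_status().
assert u._validate_dict_data(expected, actual) is None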
diff --git a/tests/charmhelpers/contrib/amulet/__init__.py b/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 0000000..d451698 --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,95 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import amulet +import os +import six + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're testing and other_services are the other services that + are being used in the local amulet tests. 
+ """ + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) + + for svc in other_services: + if 'location' in svc: + branch_location = svc['location'] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), + else: + branch_location = None + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in six.iteritems(relations): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup(timeout=900) + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 0000000..2591a9b --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,818 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import io +import json +import logging +import os +import re +import socket +import subprocess +import sys +import time +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. 
+ """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg + + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding + service units. + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. 
+ + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) + cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output + + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = configparser.ConfigParser(allow_no_value=True) + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. + """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. + + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=None, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. + """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=None, sleep_time=20, + retry_count=30, retry_sleep_time=10): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. + """ + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) + time.sleep(sleep_time) + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, proc may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed\n{}'.format(tries, service, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) + return True + else: + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20, retry_count=30, + retry_sleep_time=10): + """Check if file was modified after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, or if file not found. 
+ """ + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s updated since %s on ' + '%s' % (filename, mtime, unit_name)) + time.sleep(sleep_time) + file_mtime = None + tries = 0 + while tries <= retry_count and not file_mtime: + try: + file_mtime = self._get_file_mtime(sentry_unit, filename) + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, file may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not file_mtime: + self.log.warn('Could not determine file mtime, assuming ' + 'file does not exist') + return False + + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) + return True + else: + self.log.warn('File mtime is older than provided mtime' + '(%s < on %s) on %s' % (file_mtime, + mtime, unit_name)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=None, + sleep_time=20, retry_count=30, + retry_sleep_time=10): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers + retry_count (int): If service is not found, how many times to retry + retry_sleep_time (int): Time in seconds to wait between retries + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. + """ + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) + + def get_ubuntu_releases(self): + """Return a list of all Ubuntu releases in order of release.""" + _d = distro_info.UbuntuDistroInfo() + _release_list = _d.all + return _release_list + + def file_to_url(self, file_rel_path): + """Convert a relative file path to a file URL.""" + _abs_path = os.path.abspath(file_rel_path) + return urlparse.urlparse(_abs_path, scheme='file').geturl() + + def check_commands_on_units(self, commands, sentry_units): + """Check that all commands in a list exit zero on all + sentry units in a list. + + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + self.log.debug('Checking exit codes for {} commands on {} ' + 'sentry units...'.format(len(commands), + len(sentry_units))) + for sentry_unit in sentry_units: + for cmd in commands: + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + return ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + return None + + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): + """Get a list of process ID(s) from a single sentry juju unit + for a single process name. + + :param sentry_unit: Amulet sentry instance (juju unit) + :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. + :returns: List of process IDs + """ + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output).split() + + def get_unit_process_ids(self, unit_processes, expect_success=True): + """Construct a dict containing unit sentries, process names, and + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. 
+ """ + pid_dict = {} + for sentry_unit, process_list in six.iteritems(unit_processes): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + + for (e_sentry, e_proc_names) in six.iteritems(expected): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + + a_pids_length = len(a_pids) + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids_length, + a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg + else: + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids)) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + self.log.debug('Dicts within list are identical') + else: + return 'Dicts within list are not identical' + + return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. 
Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' + amulet.raise_status(amulet.FAIL, msg) + + def port_knock_tcp(self, host="localhost", port=22, timeout=15): + """Open a TCP socket to check for a listening sevice on a host. + + :param host: host name or IP address, default to localhost + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :returns: True if successful, False if connect failed + """ + + # Resolve host name if possible + try: + connect_host = socket.gethostbyname(host) + host_human = "{} ({})".format(connect_host, host) + except socket.error as e: + self.log.warn('Unable to resolve address: ' + '{} ({}) Trying anyway!'.format(host, e)) + connect_host = host + host_human = connect_host + + # Attempt socket connection + try: + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + knock.settimeout(timeout) + knock.connect((connect_host, port)) + knock.close() + self.log.debug('Socket connect OK for host ' + '{} on port {}.'.format(host_human, port)) + return True + except socket.error as e: + self.log.debug('Socket connect FAIL for' + ' {} port {} ({})'.format(host_human, port, e)) + return False + + def port_knock_units(self, sentry_units, port=22, + timeout=15, expect_success=True): + """Open a TCP socket to check for a listening sevice on each + listed juju unit. 
+ + :param sentry_units: list of sentry unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for unit in sentry_units: + host = unit.info['public-address'] + connected = self.port_knock_tcp(host, port, timeout) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + def get_uuid_epoch_stamp(self): + """Returns a stamp string based on uuid4 and epoch time. Useful in + generating test messages which need to be unique-ish.""" + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + +# amulet juju action helpers: + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output): + """Run the named action on a given unit sentry. + + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. + + _check_output parameter is used for dependency injection. + """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/tests/charmhelpers/contrib/openstack/__init__.py b/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 0000000..e4ad62b --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,296 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import logging +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') + self.openstack = openstack + self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come + # out. + self.current_next = "trusty" + + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the lp:~openstack-charmers namespace + base_charms = ['mysql', 'mongodb', 'nrpe'] + + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. 
+ force_series_current = ['nrpe'] + + if self.series in ['precise', 'trusty']: + base_series = self.series + else: + base_series = self.current_next + + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) + else: + if svc['name'] in base_charms: + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) + else: + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) + + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + # Charms which should use the source config option + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. 
+ """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) + + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
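[Editorial usage note: a charm's amulet test consumes the deployment class above roughly as sketched below. This is a hypothetical skeleton, not the charm's actual tests/basic_deployment.py; the peer service and config are placeholders, and _deploy() is assumed to be the helper inherited from the base AmuletDeployment class.]

    # Hypothetical skeleton of a test built on OpenStackAmuletDeployment.
    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )

    class ExampleDeployment(OpenStackAmuletDeployment):
        """Placeholder deployment skeleton, not the charm's real test class."""

        def __init__(self, series='trusty', openstack=None, source=None,
                     stable=False):
            super(ExampleDeployment, self).__init__(series, openstack, source,
                                                    stable)
            this_service = {'name': 'odl-controller'}
            other_services = [{'name': 'neutron-api'}]        # placeholder peer
            self._add_services(this_service, other_services)
            self._configure_services({'odl-controller': {}})   # placeholder config
            self._deploy()  # assumed base-class helper: juju deploy + sentry wait
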
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + pools = [ + 'rbd', + 'cinder', + 'glance' + ] + else: + # Juno or earlier + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 0000000..388b60e --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,985 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import amulet +import json +import logging +import os +import re +import six +import time +import urllib + +import cinderclient.v1.client as cinder_client +import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client +import pika +import swiftclient + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+ """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('Validating service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
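[Editorial usage note: the validate_* helpers above compare keystone client objects against plain dicts, so a test typically creates a resource and then asserts on it. Illustrative sketch only; `u` is an OpenStackAmuletUtils instance, `keystone` an authenticated admin client (see the authenticate helpers further below), and not_null is assumed to be the AmuletUtils sentinel for values that cannot be known up front.]

    import amulet

    # Illustrative: create a tenant, then assert keystone reports it as expected.
    keystone.tenants.create(tenant_name='demo-tenant',
                            description='demo tenant',
                            enabled=True)
    expected = [{'name': 'demo-tenant',
                 'description': 'demo tenant',
                 'enabled': True,
                 'id': u.not_null}]   # not_null: assumed AmuletUtils sentinel
    ret = u.validate_tenant_data(expected, keystone.tenants.list())
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
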
+ """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
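[Editorial usage note: the authenticate_* helpers above are normally chained from a single keystone admin session. A hedged sketch follows; the credentials are placeholders and the keystone sentry is supplied by the surrounding test.]

    # Illustrative: derive per-service clients from one keystone admin session.
    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        DEBUG,
    )

    u = OpenStackAmuletUtils(DEBUG)

    def build_clients(keystone_sentry):
        """Return keystone, glance and nova clients for the test run."""
        keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                                 password='openstack',
                                                 tenant='admin')
        glance = u.authenticate_glance_admin(keystone)
        nova = u.authenticate_nova_user(keystone, user='admin',
                                        password='openstack', tenant='admin')
        return keystone, glance, nova
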
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + # Create glance image + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_pub = glance.images.get(img_id).is_public + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == 'bare' \ + and val_img_dfmt == 'qcow2': + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) + return self.delete_resource(glance.images, image, msg='glance image') + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) + return 
self.delete_resource(nova.servers, instance, + msg='nova instance') + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. + + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. + + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. 
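[Editorial usage note: delete_resource above and resource_reaches_status described here give a generic wait/verify/clean-up pattern. Brief illustrative use only, assuming `u`, `glance` and an `image` created earlier via create_cirros_image.]

    import amulet

    # Illustrative: wait for the image to go active, then confirm its removal.
    if not u.resource_reaches_status(glance.images, image.id,
                                     expected_stat='active',
                                     msg='glance image'):
        amulet.raise_status(amulet.FAIL, msg='image never reached active')

    if not u.delete_resource(glance.images, image.id, msg='glance image'):
        amulet.raise_status(amulet.FAIL, msg='glance image delete timed out')
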
+ + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. 
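[Editorial usage note: the three-sample flow that validate_ceph_pool_samples expects (original, after create, after delete) looks roughly like the sketch below. Illustrative only; `u`, the ceph sentry and an authenticated cinder client are assumed from earlier setup, and pool id 3 is just an example value.]

    import amulet

    # Illustrative: sample pool object counts around a volume create/delete.
    samples = [u.get_ceph_pool_sample(ceph_sentry, pool_id=3)[1]]

    vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1)
    samples.append(u.get_ceph_pool_sample(ceph_sentry, pool_id=3)[1])

    u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
    samples.append(u.get_ceph_pool_sample(ceph_sentry, pool_id=3)[1])

    ret = u.validate_ceph_pool_samples(samples, 'cinder pool object count')
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
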
+ + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. 
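[Editorial usage note: a typical rabbitmq check built on the user helpers above creates the throwaway user, verifies an amqp login as that user, and always cleans up. Illustrative sketch; `u` and the list of rabbitmq-server sentries are supplied by the test.]

    # Illustrative: add the default test user, verify an amqp login, clean up.
    def check_rmq_test_user(rmq_sentries):
        u.add_rmq_test_user(rmq_sentries)        # defaults: testuser1 / changeme
        try:
            connection = u.connect_amqp_by_unit(rmq_sentries[0], ssl=False)
            connection.close()
        finally:
            u.delete_rmq_test_user(rmq_sentries)
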
+ + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. 
+ + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. 
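[Editorial usage note: configure_rmq_ssl_on/off above pair with the two validators. A hedged sketch of toggling ssl during a test run; `deployment` is the OpenStackAmuletDeployment instance, `rmq_sentries` its rabbitmq-server sentries, and port 5999 is an arbitrary example.]

    import amulet

    # Illustrative: enable ssl on a non-default port, then turn it back off.
    u.configure_rmq_ssl_on(rmq_sentries, deployment, port=5999)  # raises on failure
    u.configure_rmq_ssl_off(rmq_sentries, deployment)            # raises on failure
    ret = u.validate_rmq_ssl_disabled_units(rmq_sentries)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
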
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
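[Editorial usage note: the publish/get pair described above is usually exercised with a unique payload so stale queue contents cannot mask failures. Illustrative round trip using get_uuid_epoch_stamp from AmuletUtils; `u` and `rmq_sentries` as in the earlier sketches.]

    import amulet

    # Illustrative: round-trip a uniquely stamped message through each rmq unit.
    for sentry in rmq_sentries:
        payload = u.get_uuid_epoch_stamp()
        u.publish_amqp_message_by_unit(sentry, payload, queue='test')
        received = u.get_amqp_message_by_unit(sentry, queue='test')
        if received != payload:
            amulet.raise_status(amulet.FAIL,
                                msg='amqp mismatch on {}'.format(
                                    sentry.info['unit_name']))
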
+ """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) diff --git a/tests/setup/00-setup b/tests/setup/00-setup new file mode 100755 index 0000000..94e5611 --- /dev/null +++ b/tests/setup/00-setup @@ -0,0 +1,17 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes amulet \ + distro-info-data \ + python-cinderclient \ + python-distro-info \ + python-glanceclient \ + python-heatclient \ + python-keystoneclient \ + python-neutronclient \ + python-novaclient \ + python-pika \ + python-swiftclient diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py new file mode 100644 index 0000000..afaed60 --- /dev/null +++ b/unit_tests/__init__.py @@ -0,0 +1,3 @@ +import sys + +sys.path.append('hooks/') diff --git a/unit_tests/odl_outputs.py b/unit_tests/odl_outputs.py new file mode 100644 index 0000000..da41422 --- /dev/null +++ b/unit_tests/odl_outputs.py @@ -0,0 +1,271 @@ +# flake8: noqa +ODL_023_FEATURE_LIST = """ +client: JAVA_HOME not set; results may vary +Name | Version | Installed | Repository | Description +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- +odl-aaa-all | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authentication :: All Featu +odl-aaa-authn | 0.1.3-Helium-SR3 | x | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authentication +odl-aaa-authn-sssd | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: SSSD Federation +odl-aaa-authn-plugin | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: ODL NETCONF Plugin +framework-security | 3.0.1 | | standard-3.0.1 | OSGi Security for Karaf +standard | 3.0.1 | x | standard-3.0.1 | Karaf standard feature +aries-annotation | 3.0.1 | | standard-3.0.1 | Aries Annotations +wrapper | 3.0.1 | | standard-3.0.1 | Provide OS integration +service-wrapper | 3.0.1 | | standard-3.0.1 | Provide OS integration (alias to wrapper feature) +obr | 3.0.1 | | standard-3.0.1 | Provide OSGi Bundle Repository (OBR) support +config | 3.0.1 | x | standard-3.0.1 | Provide OSGi ConfigAdmin support +region | 3.0.1 | | standard-3.0.1 | Provide Region Support +package | 3.0.1 | x | standard-3.0.1 | Package commands and mbeans +http | 3.0.1 | x | standard-3.0.1 | Implementation of the OSGI HTTP Service +http-whiteboard | 3.0.1 | | standard-3.0.1 | Provide HTTP Whiteboard pattern support +war | 3.0.1 | x | standard-3.0.1 | Turn Karaf as a full WebContainer +jetty | 8.1.9.v20130131 | | standard-3.0.1 | +kar | 3.0.1 | x | standard-3.0.1 | Provide KAR (KARaf archive) support +webconsole | 3.0.1 | | standard-3.0.1 | Base support of the Karaf WebConsole +ssh | 3.0.1 | x | standard-3.0.1 | Provide a SSHd server on Karaf +management | 3.0.1 | x | standard-3.0.1 | Provide a JMX MBeanServer and a set of MBeans in K +scheduler | 3.0.1 | | standard-3.0.1 | Provide a scheduler service in Karaf to fire event +eventadmin | 3.0.1 | | standard-3.0.1 | OSGi Event Admin service specification for event-b 
+jasypt-encryption | 3.0.1 | | standard-3.0.1 | Advanced encryption support for Karaf security +scr | 3.0.1 | | standard-3.0.1 | Declarative Service support +blueprint-web | 3.0.1 | | standard-3.0.1 | Provides an OSGI-aware Servlet ContextListener for +odl-l2switch-all | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: All +odl-l2switch-switch | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: Switch +odl-l2switch-switch-rest | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: Switch +odl-l2switch-switch-ui | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: Switch +odl-l2switch-hosttracker | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: HostTracker +odl-l2switch-addresstracker | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: AddressTracker +odl-l2switch-arphandler | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: ArpHandler +odl-l2switch-loopremover | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: LoopRemover +odl-l2switch-packethandler | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: PacketHandler +odl-packetcable-all | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: All +odl-packetcable-consumer | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Consumer +odl-packetcable-model | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Model +odl-packetcable-provider | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Provider +odl-packetcable-driver | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Driver +odl-netconf-connector-all | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: All +odl-netconf-connector | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: Netconf Conne +odl-netconf-connector-ssh | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: Netconf Conne +odl-netconf-ssh | 0.2.8-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: SSH +odl-netconf-tcp | 0.2.8-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: TCP +odl-snmp4sdn-all | 0.1.6-Helium-SR3 | | odl-snmp4sdn-0.1.6-Helium-SR3 | OpenDaylight :: SNMP4SDN :: All +odl-snmp4sdn-snmp4sdn | 0.1.6-Helium-SR3 | | odl-snmp4sdn-0.1.6-Helium-SR3 | OpenDaylight :: SNMP4SDN :: Plugin +odl-plugin2oc | 0.1.3-Helium-SR3 | | odl-plugin2oc-0.1.3-Helium-SR3 | OpenDaylight :: plugin2oc :: Plugin +odl-config-all | 0.2.8-Helium-SR3 | | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: All +odl-mdsal-common | 1.1.3-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: All +odl-config-api | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: API +odl-config-netty-config-api | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: Netty Config API +odl-config-core | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: Core +odl-config-manager | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: Manager +odl-adsal-all | 0.8.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight AD-SAL All 
Features +odl-adsal-core | 0.8.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Core +odl-adsal-networkconfiguration | 0.0.6-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Network Configuration +odl-adsal-connection | 0.1.5-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Connection +odl-adsal-clustering | 0.5.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Clustering +odl-adsal-configuration | 0.4.6-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Configuration +odl-adsal-thirdparty | 0.8.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Third-Party Depenencies +odl-config-netty | 0.2.8-Helium-SR3 | x | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config-Netty +odl-base-all | 1.4.5-Helium-SR3 | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Controller +odl-base-dummy-console | 1.1.3-Helium-SR3 | x | odl-base-1.4.5-Helium-SR3 | Temporary Dummy Console +odl-base-felix-dm | 3.1.0 | x | odl-base-1.4.5-Helium-SR3 | Felix Dependency Manager +odl-base-aries-spi-fly | 1.0.0 | x | odl-base-1.4.5-Helium-SR3 | Aries SPI Fly +odl-base-netty | 4.0.23.Final | x | odl-base-1.4.5-Helium-SR3 | +odl-base-jersey | 1.17 | x | odl-base-1.4.5-Helium-SR3 | Jersey +odl-base-jersey2-osgi | 4.0 | | odl-base-1.4.5-Helium-SR3 | OSGi friendly Jersey +odl-base-jackson | 2.3.2 | x | odl-base-1.4.5-Helium-SR3 | Jackson JAX-RS +odl-base-slf4j | 1.7.2 | x | odl-base-1.4.5-Helium-SR3 | SLF4J Logging +odl-base-apache-commons | 1.4.5-Helium-SR3 | x | odl-base-1.4.5-Helium-SR3 | Apache Commons Libraries +odl-base-eclipselink-persistence | 2.0.4.v201112161009 | x | odl-base-1.4.5-Helium-SR3 | EclipseLink Persistence API +odl-base-gemini-web | 2.2.0.RELEASE | x | odl-base-1.4.5-Helium-SR3 | Gemini Web +odl-base-tomcat | 7.0.53 | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Tomcat +odl-base-spring | 3.1.3.RELEASE | x | odl-base-1.4.5-Helium-SR3 | Opendaylight Spring Support +odl-base-spring-web | 3.1.3.RELEASE | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Spring Web +odl-base-spring-security | 3.1.3.RELEASE | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Spring Security +odl-tcpmd5-all | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | +odl-tcpmd5-base | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | +odl-tcpmd5-netty | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | +odl-tcpmd5-nio | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | +odl-sfc-all | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: All +odl-sfc-provider | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: Provider +odl-sfc-model | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: Model +odl-sfc-test-consumer | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: Test :: Consumer +odl-sfc-ui | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: UI +odl-mdsal-all | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | OpenDaylight :: MDSAL :: All +odl-mdsal-broker | 1.1.3-Helium-SR3 | x | odl-mdsal-1.1.3-Helium-SR3 | OpenDaylight :: MDSAL :: Broker +odl-toaster | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | OpenDaylight :: Toaster +odl-mdsal-xsql | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | +odl-mdsal-clustering-commons | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | +odl-mdsal-distributed-datastore | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | +odl-mdsal-remoterpc-connector | 1.1.3-Helium-SR3 | | 
odl-mdsal-1.1.3-Helium-SR3 | +odl-mdsal-clustering | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | +odl-clustering-test-app | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | +odl-flow-model | 1.1.3-Helium-SR3 | x | odl-flow-1.1.3-Helium-SR3 | OpenDaylight :: Flow :: Model +odl-flow-services | 1.1.3-Helium-SR3 | x | odl-flow-1.1.3-Helium-SR3 | OpenDaylight :: Flow :: Services +odl-openflow-nxm-extensions | 0.0.6-Helium-SR3 | x | ovsdb-0.0.6-Helium-SR3 | OpenDaylight :: Openflow :: Nicira Extensions +odl-integration-compatible-with-all | 0.2.3-Helium-SR3 | | odl-integration-0.2.3-Helium-SR3 | +odl-integration-all | 0.2.3-Helium-SR3 | | odl-integration-0.2.3-Helium-SR3 | +odl-openflowjava-all | 0.0.0 | | odl-openflowjava-0.5.3-Helium-SR3 | OpenDaylight :: Openflow Java :: All +odl-openflowjava-protocol | 0.5.3-Helium-SR3 | x | odl-openflowjava-0.5.3-Helium-SR3 | OpenDaylight :: Openflow Java :: Protocol +odl-nsf-all | 0.4.5-Helium-SR3 | x | nsf-0.4.5-Helium-SR3 | OpenDaylight :: NSF :: All Network Service Functio +odl-nsf-managers | 0.4.5-Helium-SR3 | x | nsf-0.4.5-Helium-SR3 | OpenDaylight :: AD-SAL :: Network Service Function +odl-adsal-northbound | 0.4.5-Helium-SR3 | x | nsf-0.4.5-Helium-SR3 | OpenDaylight :: AD-SAL :: Northbound APIs +odl-bgpcep-all | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-dependencies | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-util | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-concepts | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-linkstate | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-pcep-impl | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-pcep-segment-routing | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-parser | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-rib | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-tunnel | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-programming | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-pcep-api | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-pcep-spi | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-bgpcep-topology | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | +odl-config-persister-all | 0.2.8-Helium-SR3 | | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config Persister:: All +odl-config-persister | 0.2.8-Helium-SR3 | x | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config Persister +odl-config-startup | 0.2.8-Helium-SR3 | x | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config Persister:: Config Startup +odl-protocol-framework | 0.5.3-Helium-SR3 | x | odl-protocol-framework-0.5.3-Helium-SR3 | OpenDaylight :: Protocol Framework +odl-restconf-all | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Restconf :: All +odl-restconf | 1.1.3-Helium-SR3 | x | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Restconf +odl-restconf-noauth | 1.1.3-Helium-SR3 | x | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Restconf +odl-mdsal-apidocs | 1.1.3-Helium-SR3 | x | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: MDSAL :: APIDOCS +odl-toaster-rest | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | +odl-toaster-ui | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | +odl-yangtools-all | 0.6.5-Helium-SR3 | | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight Yangtools All +odl-yangtools-models | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: 
Yangtools :: Models +odl-yangtools-data-binding | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Data Binding +odl-yangtools-binding | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Binding +odl-yangtools-common | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Common +odl-yangtools-binding-generator | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Binding Generator +odl-yangtools-restconf | 0.6.5-Helium-SR3 | | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Restconf +odl-adsal-compatibility-all | 1.4.5-Helium-SR3 | | odl-adsal-compatibility-0.8.4-Helium-SR3 | OpenDaylight :: controller :: All +odl-adsal-compatibility | 0.8.4-Helium-SR3 | x | odl-adsal-compatibility-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Compatibility +odl-ovsdb-all | 1.0.3-Helium-SR3 | | ovsdb-1.0.3-Helium-SR3 | OpenDaylight :: OVSDB :: all +odl-ovsdb-library | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OVSDB :: Library +odl-ovsdb-schema-openvswitch | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OVSDB :: Schema :: Open_vSwitch +odl-ovsdb-schema-hardwarevtep | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OVSDB :: Schema :: hardware_vtep +odl-ovsdb-plugin | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OpenDaylight :: OVSDB :: Plugin +odl-ovsdb-northbound | 0.6.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OpenDaylight :: OVSDB :: Northbound +odl-ovsdb-openstack | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OpenDaylight :: OVSDB :: OpenStack Network Virtual +odl-ovsdb-ovssfc | 0.0.4-Helium-SR3 | | ovsdb-0.0.4-Helium-SR3 | OpenDaylight :: OVSDB :: OVS Service Function Chai +odl-sfclisp | 0.0.4-Helium-SR3 | | odl-sfclisp-0.0.4-Helium-SR3 | OpenDaylight :: sfclisp :: all +odl-sfcofl2 | 0.0.4-Helium-SR3 | | odl-sfcofl2-0.0.4-Helium-SR3 | OpenDaylight :: sfcofl2 +odl-dlux-all | 0.1.3-Helium-SR3 | | odl-dlux-0.1.3-Helium-SR3 | +odl-dlux-core | 0.1.3-Helium-SR3 | x | odl-dlux-0.1.3-Helium-SR3 | +pax-cdi | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Provide CDI support +pax-cdi-1.1 | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Provide CDI 1.1 support +pax-cdi-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld CDI support +pax-cdi-1.1-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld CDI 1.1 support +pax-cdi-openwebbeans | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | OpenWebBeans CDI support +pax-cdi-web | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Web CDI support +pax-cdi-1.1-web | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Web CDI 1.1 support +pax-cdi-web-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld Web CDI support +pax-cdi-1.1-web-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld Web CDI 1.1 support +pax-cdi-web-openwebbeans | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | OpenWebBeans Web CDI support +pax-cdi-deltaspike-core | >0.5 | | org.ops4j.pax.cdi-0.7.0 | Apache Deltaspike core support +pax-cdi-deltaspike-jpa | 0.5 | | org.ops4j.pax.cdi-0.7.0 | Apche Deltaspike jpa support +odl-vtn-manager-all | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight VTN Manager All +odl-vtn-manager-java-api | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight :: VTN Manager :: Java API +odl-vtn-manager-northbound | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight :: VTN Manager :: Northbound +odl-vtn-manager-neutron | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight :: VTN Manager :: Neutron Interface +odl-aaa-authz-all | 0.1.3-Helium-SR3 | | 
odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authorization :: All Featur +odl-aaa-authz | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authorization +spring-dm | 1.2.1 | | spring-3.0.1 | Spring DM support +spring-dm-web | 1.2.1 | | spring-3.0.1 | Spring DM Web support +spring | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x support +spring-aspects | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x AOP support +spring-instrument | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Instrument support +spring-jdbc | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x JDBC support +spring-jms | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x JMS support +spring-struts | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Struts support +spring-test | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Test support +spring-orm | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x ORM support +spring-oxm | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x OXM support +spring-tx | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Transaction (TX) support +spring-web | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Web support +spring-web-portlet | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Web Portlet support +spring | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x support +spring-aspects | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x AOP support +spring-instrument | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Instrument support +spring-jdbc | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x JDBC support +spring-jms | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x JMS support +spring-struts | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Struts support +spring-test | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Test support +spring-orm | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x ORM support +spring-oxm | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x OXM support +spring-tx | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Transaction (TX) support +spring-web | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Web support +spring-web-portlet | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Web Portlet support +spring | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x support +spring-aspects | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x AOP support +spring-instrument | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Instrument support +spring-jdbc | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x JDBC support +spring-jms | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x JMS support +spring-test | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Test support +spring-orm | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x ORM support +spring-oxm | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x OXM support +spring-tx | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Transaction (TX) support +spring-web | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Web support +spring-web-portlet | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Web Portlet support +spring-websocket | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x WebSocket support +spring-security | 3.1.4.RELEASE | | spring-3.0.1 | Spring Security 3.1.x support +gemini-blueprint | 1.0.0.RELEASE | | spring-3.0.1 | Gemini Blueprint Extender +odl-sdninterfaceapp-all | 1.4.5-Helium-SR3 | | odl-sdninterfaceapp-1.4.5-Helium-SR3 | OpenDaylight :: sdninterfaceapp +odl-lispflowmapping-all | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: All +odl-lispflowmapping-mappingservice | 1.1.14-Helium-SR3 | | 
odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Mapping Servi +odl-lispflowmapping-southbound | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Southbound Pl +odl-lispflowmapping-northbound | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Northbound +odl-lispflowmapping-netconf | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: NETCONF +odl-lispflowmapping-neutron | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Neutron Integ +odl-akka-all | 1.4.5-Helium-SR3 | | odl-controller-1.4.5-Helium-SR3 | OpenDaylight :: Akka :: All +odl-akka-scala | 2.10 | | odl-controller-1.4.5-Helium-SR3 | Scala Runtime for OpenDaylight +odl-akka-system | 2.3.4 | | odl-controller-1.4.5-Helium-SR3 | Akka Actor Framework System Bundles +odl-akka-clustering | 2.3.4 | | odl-controller-1.4.5-Helium-SR3 | Akka Clustering +odl-akka-leveldb | 0.7 | | odl-controller-1.4.5-Helium-SR3 | LevelDB +odl-akka-persistence | 2.3.4 | | odl-controller-1.4.5-Helium-SR3 | Akka Persistence +transaction | 1.0.1 | x | enterprise-3.0.1 | OSGi Transaction Manager +jpa | 1.0.1 | | enterprise-3.0.1 | OSGi Persistence Container +openjpa | 2.2.2 | | enterprise-3.0.1 | Apache OpenJPA 2.2.x persistence engine support +openjpa | 2.3.0 | | enterprise-3.0.1 | Apache OpenJPA 2.3.x persistence engine support +hibernate | 3.3.2.GA | | enterprise-3.0.1 | Hibernate 3.x JPA persistence engine support +hibernate | 4.2.7.Final | | enterprise-3.0.1 | Hibernate 4.2.x JPA persistence engine support +hibernate-envers | 4.2.7.Final | | enterprise-3.0.1 | Hibernate Envers 4.2.x +hibernate | 4.3.1.Final | | enterprise-3.0.1 | Hibernate 4.3.x JPA persistence engine support +hibernate-envers | 4.3.1.Final | | enterprise-3.0.1 | Hibernate Envers 4.3.x +hibernate-validator | 5.0.3.Final | | enterprise-3.0.1 | Hibernate Validator support +jndi | 3.0.1 | | enterprise-3.0.1 | OSGi Service Registry JNDI access +jdbc | 3.0.1 | | enterprise-3.0.1 | JDBC service and commands +jms | 3.0.1 | | enterprise-3.0.1 | JMS service and commands +openwebbeans | 1.2.1 | | enterprise-3.0.1 | Apache OpenWebBeans CDI container support +weld | 2.1.1.Final | | enterprise-3.0.1 | JBoss Weld CDI container support +application-without-isolation | 1.0.0 | | enterprise-3.0.1 | Provide EBA archive support +odl-ttp-all | 0.0.4-Helium-SR3 | | odl-ttp-0.0.4-Helium-SR3 | OpenDaylight :: ttp :: All +odl-ttp-model | 0.0.4-Helium-SR3 | | odl-ttp-0.0.4-Helium-SR3 | OpenDaylight :: ttp :: Model +odl-netconf-all | 0.2.8-Helium-SR3 | | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: All +odl-netconf-api | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: API +odl-netconf-mapping-api | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Mapping API +odl-netconf-util | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | +odl-netconf-impl | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Impl +odl-config-netconf-connector | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Connector +odl-netconf-netty-util | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Netty Util +odl-netconf-client | 0.2.8-Helium-SR3 | | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Client +odl-netconf-monitoring | 0.2.8-Helium-SR3 
| x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Monitoring +odl-snbi-all | 1.0.3-Helium-SR3 | | odl-snbi-1.0.3-Helium-SR3 | OpenDaylight :: snbi :: All +odl-snbi-southplugin | 1.0.3-Helium-SR3 | | odl-snbi-1.0.3-Helium-SR3 | OpenDaylight :: SNBI :: SouthPlugin +odl-snbi-shellplugin | 1.0.3-Helium-SR3 | | odl-snbi-1.0.3-Helium-SR3 | OpenDaylight :: SNBI :: ShellPlugin +pax-jetty | 8.1.14.v20131031 | x | org.ops4j.pax.web-3.1.0 | Provide Jetty engine support +pax-tomcat | 7.0.27.1 | | org.ops4j.pax.web-3.1.0 | Provide Tomcat engine support +pax-http | 3.1.0 | x | org.ops4j.pax.web-3.1.0 | Implementation of the OSGI HTTP Service +pax-http-whiteboard | 3.1.0 | x | org.ops4j.pax.web-3.1.0 | Provide HTTP Whiteboard pattern support +pax-war | 3.1.0 | x | org.ops4j.pax.web-3.1.0 | Provide support of a full WebContainer +odl-openflowplugin-all | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: All +odl-openflowplugin-southbound | 0.0.6-Helium-SR3 | x | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: SouthBound +odl-openflowplugin-flow-services | 0.0.6-Helium-SR3 | x | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Flow Services +odl-openflowplugin-flow-services-rest | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Flow Services : +odl-openflowplugin-flow-services-ui | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Flow Services : +odl-openflowplugin-drop-test | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Drop Test +odl-openflowplugin-apps | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Applications +odl-groupbasedpolicy-ofoverlay | 0.1.3-Helium-SR3 | | odl-groupbasedpolicy-0.1.3-Helium-SR3 | OpenDaylight :: groupbasedpolicy :: OpenFlow Overl +""" diff --git a/unit_tests/test_odl_controller_hooks.py b/unit_tests/test_odl_controller_hooks.py new file mode 100644 index 0000000..28ed86e --- /dev/null +++ b/unit_tests/test_odl_controller_hooks.py @@ -0,0 +1,95 @@ +from mock import patch, call +from test_utils import CharmTestCase + +with patch('charmhelpers.core.hookenv.config') as config: + config.return_value = False + import odl_controller_hooks as hooks + + +TO_PATCH = [ + 'adduser', + 'apt_install', + 'check_call', + 'config', + 'install_remote', + 'log', + 'mkdir', + 'process_odl_cmds', + 'relation_set', + 'relation_ids', + 'restart_on_change', + 'service_start', + 'shutil', + 'write_mvn_config', +] + + +class ODLControllerHooksTests(CharmTestCase): + + def setUp(self): + super(ODLControllerHooksTests, self).setUp(hooks, TO_PATCH) + + self.config.__getitem__.side_effect = self.test_config.get + self.config.get.side_effect = self.test_config.get + self.install_url = 'http://10.10.10.10/distribution-karaf.tgz' + self.test_config.set('install-url', self.install_url) + self.test_config.set('profile', 'default') + + def _call_hook(self, hookname): + hooks.hooks.execute([ + 'hooks/{}'.format(hookname)]) + + @patch('os.symlink') + @patch('os.path.exists') + @patch('os.listdir') + def test_install_hook(self, mock_listdir, mock_path_exists, mock_symlink): + mock_listdir.return_value = ['random-file', 'distribution-karaf.tgz'] + mock_path_exists.return_value = False + self._call_hook('install') + self.apt_install.assert_called_with([ + "default-jre-headless", "python-jinja2"], + fatal=True + ) + 
mock_symlink.assert_called_with('distribution-karaf.tgz', + '/opt/opendaylight-karaf') + self.adduser.assert_called_with("opendaylight", system_user=True) + self.mkdir.assert_has_calls([ + call('/home/opendaylight', owner="opendaylight", + group="opendaylight", perms=0755), + call('/var/log/opendaylight', owner="opendaylight", + group="opendaylight", perms=0755) + ]) + self.check_call.assert_called_with([ + "chown", "-R", "opendaylight:opendaylight", + "/opt/distribution-karaf.tgz" + ]) + self.write_mvn_config.assert_called_with() + self.service_start.assert_called_with('odl-controller') + self.shutil.copy.assert_called_with('files/odl-controller.conf', + '/etc/init') + + def test_ovsdb_manager_joined_hook(self): + self._call_hook('ovsdb-manager-relation-joined') + self.relation_set.assert_called_with(port=6640, protocol="tcp") + + def test_controller_api_relation_joined_hook(self): + self._call_hook('controller-api-relation-joined') + self.relation_set.assert_called_with(relation_id=None, port=8080, + username="admin", + password="admin") + + @patch.object(hooks, 'controller_api_joined') + def test_config_changed_hook(self, mock_controller_api_joined): + self.relation_ids.return_value = ['controller-api:2'] + self._call_hook('config-changed') + self.write_mvn_config.assert_called_with() + mock_controller_api_joined.assert_called_with('controller-api:2') + self.process_odl_cmds.assert_called_with({ + 'feature:install': [ + 'odl-base-all', 'odl-aaa-authn', 'odl-restconf', 'odl-nsf-all', + 'odl-adsal-northbound', 'odl-mdsal-apidocs', + 'odl-ovsdb-openstack', 'odl-ovsdb-northbound', + 'odl-dlux-core' + ], + 'port': 8080 + }) diff --git a/unit_tests/test_odl_controller_utils.py b/unit_tests/test_odl_controller_utils.py new file mode 100644 index 0000000..4681ff2 --- /dev/null +++ b/unit_tests/test_odl_controller_utils.py @@ -0,0 +1,93 @@ +from mock import patch, call +from test_utils import CharmTestCase + +import odl_controller_utils as utils +import odl_outputs + +TO_PATCH = [ + 'subprocess', + 'render', + 'config', + 'retry_on_exception', +] + + +class ODLControllerUtilsTests(CharmTestCase): + + def setUp(self): + super(ODLControllerUtilsTests, self).setUp(utils, TO_PATCH) + self.config.side_effect = self.test_config.get + self.test_config.set('http-proxy', 'http://proxy.int:3128') + + def test_mvn_proxy_ctx(self): + expect = { + 'http_noproxy': [], + 'http_proxy': True, + 'http_proxy_host': 'proxy.int', + 'http_proxy_port': 3128 + } + self.assertEqual(utils.mvn_proxy_ctx('http'), expect) + + def test_mvn_ctx(self): + self.test_config.set('http-proxy', 'http://proxy.int:3128') + expect = { + 'http_noproxy': [], + 'http_proxy': True, + 'http_proxy_host': 'proxy.int', + 'http_proxy_port': 3128 + } + self.assertEqual(utils.mvn_ctx(), expect) + + def test_mvn_ctx_unset(self): + self.test_config.set('http-proxy', '') + self.assertEqual(utils.mvn_ctx(), {}) + + def test_write_mvn_config(self): + self.test_config.set('http-proxy', '') + self.test_config.set('https-proxy', '') + utils.write_mvn_config() + self.render.assert_called_with( + "settings.xml", "/home/opendaylight/.m2/settings.xml", {}, + "opendaylight", "opendaylight", 0400 + ) + + def test_run_odl(self): + utils.run_odl(["feature:list"]) + self.subprocess.check_output.assert_called_with( + ["/opt/opendaylight-karaf/bin/client", "-r", '20', "-h", + 'localhost', "-a", '8101', 'feature:list'] + ) + + def test_installed_features(self): + self.subprocess.check_output.return_value = \ + odl_outputs.ODL_023_FEATURE_LIST + installed = 
utils.installed_features() + for feature in utils.PROFILES["openvswitch-odl"]["feature:install"]: + self.assertTrue(feature in installed) + self.assertFalse('odl-l2switch-hosttracker' in installed) + + def test_filter_installed(self): + self.subprocess.check_output.return_value = \ + odl_outputs.ODL_023_FEATURE_LIST + self.assertEqual( + utils.filter_installed(['odl-l2switch-hosttracker']), + ['odl-l2switch-hosttracker'] + ) + self.assertEqual(utils.filter_installed(['odl-config-api']), []) + + @patch.object(utils, 'run_odl') + @patch.object(utils, 'filter_installed') + def test_process_odl_cmds(self, mock_filter_installed, mock_run_odl): + test_profile = { + "feature:install": ["odl-l2switch-all"], + "log:set": { + "TRACE": ["cosc-cvpn-ovs-rest"], + }, + "port": 1181 + } + mock_filter_installed.return_value = ["odl-l2switch-all"] + utils.process_odl_cmds(test_profile) + mock_run_odl.assert_has_calls([ + call(["feature:install", "odl-l2switch-all"]), + call(['log:set', 'TRACE', 'cosc-cvpn-ovs-rest']) + ]) diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py new file mode 100644 index 0000000..bbc71fb --- /dev/null +++ b/unit_tests/test_utils.py @@ -0,0 +1,124 @@ +import logging +import unittest +import os +import yaml + +from contextlib import contextmanager +from mock import patch, MagicMock + +patch('charmhelpers.contrib.openstack.utils.set_os_workload_status').start() +patch('charmhelpers.core.hookenv.status_set').start() + + +def load_config(): + ''' + Walk backwards from __file__ looking for config.yaml, load and return the + 'options' section. + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % __file__) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + ''' + Load default charm config from config.yaml and return as a dict. + If no default is set in config.yaml, its value is None. 
+ ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attribute=None, unit=None, rid=None): + if attribute is None: + return self.relation_data + elif attribute in self.relation_data: + return self.relation_data[attribute] + return None + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. + + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file From 4e62fc8cbc2197ea5610ceccf1fa285a72811090 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 11 Nov 2015 19:54:58 +0000 Subject: [PATCH 2/2] Fixes --- Makefile | 3 ++- tests/018-basic-trusty-liberty | 11 +++++++++++ .../contrib/openstack/amulet/deployment.py | 3 ++- 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100755 tests/018-basic-trusty-liberty diff --git a/Makefile b/Makefile index 5a3cd4b..4d5bc01 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,8 @@ test: functional_test: @echo Starting amulet tests... 
@tests/setup/00-setup - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 + @juju test -v -p AMULET_ODL_LOCATION,AMULET_HTTP_PROXY,AMULET_OS_VIP \ + --timeout 2700 bin/charm_helpers_sync.py: @mkdir -p bin diff --git a/tests/018-basic-trusty-liberty b/tests/018-basic-trusty-liberty new file mode 100755 index 0000000..5f8c475 --- /dev/null +++ b/tests/018-basic-trusty-liberty @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic odl controller deployment on trusty-liberty.""" + +from basic_deployment import ODLControllerBasicDeployment + +if __name__ == '__main__': + deployment = ODLControllerBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index e4ad62b..0506491 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -124,7 +124,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): 'ceph-osd', 'ceph-radosgw'] # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] if self.openstack: for svc in services: