diff --git a/.gitignore b/.gitignore
index 358fdb4..295e7c9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,17 @@
-*pyc
-.bzr
-bin
.coverage
-.venv
-.testrepository/
-.tox/
+bin
+tags
+.tox
+.testrepository
+*.sw[nop]
+*.pyc
+.unit-state.db
+tests/*.img
+.idea
+.stestr
+.local
+.pydevproject
+repo-info
+results*.txt
+func-results.json
+__pycache__
diff --git a/.pydevproject b/.pydevproject
deleted file mode 100644
index 8ee4310..0000000
--- a/.pydevproject
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-/glance-simplestreams-sync
-
-python 2.7
-Default
-
diff --git a/Makefile b/Makefile
index 672c26e..a2b8c5c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,23 +1,24 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python
-CHARM_DIR := $(PWD)
-HOOKS_DIR := $(PWD)/hooks
-TEST_PREFIX := PYTHONPATH=$(HOOKS_DIR)
-
-clean:
- find . -name '*.pyc' -delete
lint:
@tox -e pep8
-unit_tests:
+test:
+ @echo Starting unit tests...
@tox -e py27
+functional_test:
+ @echo Starting functional tests...
+ @tox -e func27
bin/charm_helpers_sync.py:
@mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
+ @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
+
sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
+
+all: test lint
diff --git a/README.md b/README.md
index b887feb..94c3b7f 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,8 @@
+# Notice
+
+This charm has been tested with Trusty and Xenial series. Bionic series
+is enabled, but is still under testing and in development.
+
# Overview
This charm provides a service that syncs your OpenStack cloud's
diff --git a/actions/.gitkeep b/actions/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/charm-helpers-sync.yaml b/charm-helpers-hooks.yaml
similarity index 100%
rename from charm-helpers-sync.yaml
rename to charm-helpers-hooks.yaml
diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml
new file mode 100644
index 0000000..3a8c294
--- /dev/null
+++ b/charm-helpers-tests.yaml
@@ -0,0 +1,7 @@
+repo: https://github.com/juju/charm-helpers
+destination: tests/charmhelpers
+include:
+ - core
+ - contrib.amulet
+ - contrib.openstack.amulet
+ - osplatform
diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py
index 80d574d..1c55b30 100644
--- a/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/charmhelpers/contrib/charmsupport/nrpe.py
@@ -30,6 +30,7 @@ import yaml
from charmhelpers.core.hookenv import (
config,
+ hook_name,
local_unit,
log,
relation_ids,
@@ -285,7 +286,7 @@ class NRPE(object):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
- except:
+ except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
@@ -302,7 +303,12 @@ class NRPE(object):
"command": nrpecheck.command,
}
- service('restart', 'nagios-nrpe-server')
+    # update-status hooks are configured to fire every 5 minutes by
+ # default. When nagios-nrpe-server is restarted, the nagios server
+    # reports checks failing causing unnecessary alerts. Let's not restart
+ # on update-status hooks.
+ if not hook_name() == 'update-status':
+ service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
diff --git a/charmhelpers/contrib/hahelpers/apache.py b/charmhelpers/contrib/hahelpers/apache.py
index d0c6994..a852704 100644
--- a/charmhelpers/contrib/hahelpers/apache.py
+++ b/charmhelpers/contrib/hahelpers/apache.py
@@ -65,7 +65,8 @@ def get_ca_cert():
if ca_cert is None:
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
- for r_id in relation_ids('identity-service'):
+ for r_id in (relation_ids('identity-service') +
+ relation_ids('identity-credentials')):
for unit in relation_list(r_id):
if ca_cert is None:
ca_cert = relation_get('ca_cert',
@@ -90,6 +91,6 @@ def install_ca_cert(ca_cert):
log("CA cert is the same as installed version", level=INFO)
else:
log("Installing new CA cert", level=INFO)
- with open(cert_file, 'w') as crt:
+ with open(cert_file, 'wb') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py
index e02350e..47facd9 100644
--- a/charmhelpers/contrib/hahelpers/cluster.py
+++ b/charmhelpers/contrib/hahelpers/cluster.py
@@ -27,6 +27,7 @@ clustering-related helpers.
import subprocess
import os
+import time
from socket import gethostname as get_unit_hostname
@@ -45,6 +46,9 @@ from charmhelpers.core.hookenv import (
is_leader as juju_is_leader,
status_set,
)
+from charmhelpers.core.host import (
+ modulo_distribution,
+)
from charmhelpers.core.decorators import (
retry_on_exception,
)
@@ -361,3 +365,37 @@ def canonical_url(configs, vip_setting='vip'):
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
+
+
+def distributed_wait(modulo=None, wait=None, operation_name='operation'):
+ ''' Distribute operations by waiting based on modulo_distribution
+
+ If modulo and or wait are not set, check config_get for those values.
+ If config values are not set, default to modulo=3 and wait=30.
+
+ :param modulo: int The modulo number creates the group distribution
+ :param wait: int The constant time wait value
+ :param operation_name: string Operation name for status message
+ i.e. 'restart'
+ :side effect: Calls config_get()
+ :side effect: Calls log()
+ :side effect: Calls status_set()
+ :side effect: Calls time.sleep()
+ '''
+ if modulo is None:
+ modulo = config_get('modulo-nodes') or 3
+ if wait is None:
+ wait = config_get('known-wait') or 30
+ if juju_is_leader():
+ # The leader should never wait
+ calculated_wait = 0
+ else:
+ # non_zero_wait=True guarantees the non-leader who gets modulo 0
+ # will still wait
+ calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
+ non_zero_wait=True)
+ msg = "Waiting {} seconds for {} ...".format(calculated_wait,
+ operation_name)
+ log(msg, DEBUG)
+ status_set('maintenance', msg)
+ time.sleep(calculated_wait)
diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py
index d7e6deb..b13277b 100644
--- a/charmhelpers/contrib/network/ip.py
+++ b/charmhelpers/contrib/network/ip.py
@@ -27,6 +27,7 @@ from charmhelpers.core.hookenv import (
network_get_primary_address,
unit_get,
WARNING,
+ NoNetworkBinding,
)
from charmhelpers.core.host import (
@@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False):
_validate_cidr(network)
network = netaddr.IPNetwork(network)
for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
+ try:
+ addresses = netifaces.ifaddresses(iface)
+ except ValueError:
+ # If an instance was deleted between
+ # netifaces.interfaces() run and now, its interfaces are gone
+ continue
if network.version == 4 and netifaces.AF_INET in addresses:
for addr in addresses[netifaces.AF_INET]:
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
@@ -490,7 +496,7 @@ def get_host_ip(hostname, fallback=None):
if not ip_addr:
try:
ip_addr = socket.gethostbyname(hostname)
- except:
+ except Exception:
log("Failed to resolve hostname '%s'" % (hostname),
level=WARNING)
return fallback
@@ -518,7 +524,7 @@ def get_hostname(address, fqdn=True):
if not result:
try:
result = socket.gethostbyaddr(address)[0]
- except:
+ except Exception:
return None
else:
result = address
@@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None):
except NotImplementedError:
# If network-get is not available
address = get_host_ip(unit_get('private-address'))
+ except NoNetworkBinding:
+ log("No network binding for {}".format(interface), WARNING)
+ address = get_host_ip(unit_get('private-address'))
if config('prefer-ipv6'):
# Currently IPv6 has priority, eventually we want IPv6 to just be
diff --git a/charmhelpers/contrib/openstack/alternatives.py b/charmhelpers/contrib/openstack/alternatives.py
index 1501641..547de09 100644
--- a/charmhelpers/contrib/openstack/alternatives.py
+++ b/charmhelpers/contrib/openstack/alternatives.py
@@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50):
target, name, source, str(priority)
]
subprocess.check_call(cmd)
+
+
+def remove_alternative(name, source):
+ """Remove an installed alternative configuration file
+
+ :param name: string name of the alternative to remove
+ :param source: string full path to alternative to remove
+ """
+ cmd = [
+ 'update-alternatives', '--remove',
+ name, source
+ ]
+ subprocess.check_call(cmd)
diff --git a/charmhelpers/contrib/openstack/amulet/deployment.py b/charmhelpers/contrib/openstack/amulet/deployment.py
index 5c041d2..66beeda 100644
--- a/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -13,6 +13,7 @@
# limitations under the License.
import logging
+import os
import re
import sys
import six
@@ -20,6 +21,9 @@ from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
+from charmhelpers.contrib.openstack.amulet.utils import (
+ OPENSTACK_RELEASES_PAIRS
+)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
@@ -185,7 +189,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
+ include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
@@ -215,7 +219,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
- self.log.info('Waiting for extended status on units...')
+ if not timeout:
+ timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
+ self.log.info('Waiting for extended status on units for {}s...'
+ ''.format(timeout))
all_services = self.d.services.keys()
@@ -250,7 +257,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
+
+ # Check for idleness
+ self.d.sentry.wait(timeout=timeout)
+ # Check for error states and bail early
+ self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
+ # Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+
self.log.info('OK')
def _get_openstack_release(self):
@@ -260,10 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
- (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
- self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
- self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
- self.xenial_pike, self.artful_pike) = range(11)
+ for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
+ setattr(self, os_pair, i)
releases = {
('trusty', None): self.trusty_icehouse,
@@ -274,9 +286,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
+ ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
('yakkety', None): self.yakkety_newton,
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
+ ('bionic', None): self.bionic_queens,
}
return releases[(self.series, self.openstack)]
@@ -291,6 +305,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
+ ('bionic', 'queens'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
@@ -303,20 +318,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
+ if self._get_openstack_release() == self.trusty_icehouse:
+ # Icehouse
pools = [
'data',
'metadata',
'rbd',
- 'cinder',
+ 'cinder-ceph',
+ 'glance'
+ ]
+ elif (self.trusty_kilo <= self._get_openstack_release() <=
+ self.zesty_ocata):
+ # Kilo through Ocata
+ pools = [
+ 'rbd',
+ 'cinder-ceph',
+ 'glance'
+ ]
+ else:
+ # Pike and later
+ pools = [
+ 'cinder-ceph',
'glance'
]
diff --git a/charmhelpers/contrib/openstack/amulet/utils.py b/charmhelpers/contrib/openstack/amulet/utils.py
index c8edbf6..84e87f5 100644
--- a/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/charmhelpers/contrib/openstack/amulet/utils.py
@@ -23,6 +23,7 @@ import urllib
import urlparse
import cinderclient.v1.client as cinder_client
+import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
@@ -42,7 +43,6 @@ import swiftclient
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
-from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
@@ -50,6 +50,13 @@ ERROR = logging.ERROR
NOVA_CLIENT_VERSION = "2"
+OPENSTACK_RELEASES_PAIRS = [
+ 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
+ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
+ 'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
+ 'xenial_pike', 'artful_pike', 'xenial_queens',
+ 'bionic_queens']
+
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
+ public_port, expected, openstack_release=None):
+ """Validate endpoint data. Pick the correct validator based on
+ OpenStack release. Expected data should be in the v2 format:
+ {
+ 'id': id,
+ 'region': region,
+ 'adminurl': adminurl,
+ 'internalurl': internalurl,
+ 'publicurl': publicurl,
+ 'service_id': service_id}
+
+ """
+ validation_function = self.validate_v2_endpoint_data
+ xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+ if openstack_release and openstack_release >= xenial_queens:
+ validation_function = self.validate_v3_endpoint_data
+ expected = {
+ 'id': expected['id'],
+ 'region': expected['region'],
+ 'region_id': 'RegionOne',
+ 'url': self.valid_url,
+ 'interface': self.not_null,
+ 'service_id': expected['service_id']}
+ return validation_function(endpoints, admin_port, internal_port,
+ public_port, expected)
+
+ def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
+ public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
@@ -92,7 +126,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return 'endpoint not found'
def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
+ public_port, expected, expected_num_eps=3):
"""Validate keystone v3 endpoint data.
Validate the v3 endpoint data which has changed from v2. The
@@ -138,10 +172,89 @@ class OpenStackAmuletUtils(AmuletUtils):
if ret:
return 'unexpected endpoint data - {}'.format(ret)
- if len(found) != 3:
+ if len(found) != expected_num_eps:
return 'Unexpected number of endpoints found'
- def validate_svc_catalog_endpoint_data(self, expected, actual):
+ def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+ """Convert v2 endpoint data into v3.
+
+ {
+ 'service_name1': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ 'service_name2': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ }
+ """
+ self.log.warn("Endpoint ID and Region ID validation is limited to not "
+ "null checks after v2 to v3 conversion")
+ for svc in ep_data.keys():
+ assert len(ep_data[svc]) == 1, "Unknown data format"
+ svc_ep_data = ep_data[svc][0]
+ ep_data[svc] = [
+ {
+ 'url': svc_ep_data['adminURL'],
+ 'interface': 'admin',
+ 'region': svc_ep_data['region'],
+ 'region_id': self.not_null,
+ 'id': self.not_null},
+ {
+ 'url': svc_ep_data['publicURL'],
+ 'interface': 'public',
+ 'region': svc_ep_data['region'],
+ 'region_id': self.not_null,
+ 'id': self.not_null},
+ {
+ 'url': svc_ep_data['internalURL'],
+ 'interface': 'internal',
+ 'region': svc_ep_data['region'],
+ 'region_id': self.not_null,
+ 'id': self.not_null}]
+ return ep_data
+
+ def validate_svc_catalog_endpoint_data(self, expected, actual,
+ openstack_release=None):
+ """Validate service catalog endpoint data. Pick the correct validator
+ for the OpenStack version. Expected data should be in the v2 format:
+ {
+ 'service_name1': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ 'service_name2': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ }
+
+ """
+ validation_function = self.validate_v2_svc_catalog_endpoint_data
+ xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+ if openstack_release and openstack_release >= xenial_queens:
+ validation_function = self.validate_v3_svc_catalog_endpoint_data
+ expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+ return validation_function(expected, actual)
+
+ def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
@@ -310,7 +423,6 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
- @retry_on_exception(5, base_delay=10)
def keystone_wait_for_propagation(self, sentry_relation_pairs,
api_version):
"""Iterate over list of sentry and relation tuples and verify that
@@ -326,10 +438,10 @@ class OpenStackAmuletUtils(AmuletUtils):
rel = sentry.relation('identity-service',
relation_name)
self.log.debug('keystone relation data: {}'.format(rel))
- if rel['api_version'] != str(api_version):
+ if rel.get('api_version') != str(api_version):
raise Exception("api_version not propagated through relation"
" data yet ('{}' != '{}')."
- "".format(rel['api_version'], api_version))
+ "".format(rel.get('api_version'), api_version))
def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
api_version):
@@ -348,15 +460,16 @@ class OpenStackAmuletUtils(AmuletUtils):
config = {'preferred-api-version': api_version}
deployment.d.configure('keystone', config)
+ deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
+ def authenticate_cinder_admin(self, keystone, api_version=2):
"""Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- keystone_ip = keystone_sentry.info['public-address']
- ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
+ self.log.debug('Authenticating cinder admin...')
+ _clients = {
+ 1: cinder_client.Client,
+ 2: cinder_clientv2.Client}
+ return _clients[api_version](session=keystone.session)
def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False,
@@ -364,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
project_domain_name=None, project_name=None):
"""Authenticate with Keystone"""
self.log.debug('Authenticating with keystone...')
- port = 5000
- if admin_port:
- port = 35357
- base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
- port)
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
+ if not api_version:
+ api_version = 2
+ sess, auth = self.get_keystone_session(
+ keystone_ip=keystone_ip,
+ username=username,
+ password=password,
+ api_version=api_version,
+ admin_port=admin_port,
+ user_domain_name=user_domain_name,
+ domain_name=domain_name,
+ project_domain_name=project_domain_name,
+ project_name=project_name
+ )
+ if api_version == 2:
+ client = keystone_client.Client(session=sess)
+ else:
+ client = keystone_client_v3.Client(session=sess)
+ # This populates the client.service_catalog
+ client.auth_ref = auth.get_access(sess)
+ return client
+
+ def get_keystone_session(self, keystone_ip, username, password,
+ api_version=False, admin_port=False,
+ user_domain_name=None, domain_name=None,
+ project_domain_name=None, project_name=None):
+ """Return a keystone session object"""
+ ep = self.get_keystone_endpoint(keystone_ip,
+ api_version=api_version,
+ admin_port=admin_port)
+ if api_version == 2:
auth = v2.Password(
username=username,
password=password,
@@ -378,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
- client = keystone_client.Client(session=sess)
- # This populates the client.service_catalog
- client.auth_ref = auth.get_access(sess)
- return client
else:
- ep = base_ep + "/v3"
auth = v3.Password(
user_domain_name=user_domain_name,
username=username,
@@ -394,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
- client = keystone_client_v3.Client(session=sess)
- # This populates the client.service_catalog
- client.auth_ref = auth.get_access(sess)
- return client
+ return (sess, auth)
+
+ def get_keystone_endpoint(self, keystone_ip, api_version=None,
+ admin_port=False):
+ """Return keystone endpoint"""
+ port = 5000
+ if admin_port:
+ port = 35357
+ base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+ port)
+ if api_version == 2:
+ ep = base_ep + "/v2.0"
+ else:
+ ep = base_ep + "/v3"
+ return ep
+
+ def get_default_keystone_session(self, keystone_sentry,
+ openstack_release=None):
+ """Return a keystone session object and client object assuming standard
+ default settings
+
+ Example call in amulet tests:
+ self.keystone_session, self.keystone = u.get_default_keystone_session(
+ self.keystone_sentry,
+ openstack_release=self._get_openstack_release())
+
+ The session can then be used to auth other clients:
+ neutronclient.Client(session=session)
+ aodh_client.Client(session=session)
+        etc.
+ """
+ self.log.debug('Authenticating keystone admin...')
+ api_version = 2
+ client_class = keystone_client.Client
+ # 11 => xenial_queens
+ if openstack_release and openstack_release >= 11:
+ api_version = 3
+ client_class = keystone_client_v3.Client
+ keystone_ip = keystone_sentry.info['public-address']
+ session, auth = self.get_keystone_session(
+ keystone_ip,
+ api_version=api_version,
+ username='admin',
+ password='openstack',
+ project_name='admin',
+ user_domain_name='admin_domain',
+ project_domain_name='admin_domain')
+ client = client_class(session=session)
+ # This populates the client.service_catalog
+ client.auth_ref = auth.get_access(session)
+ return session, client
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant=None, api_version=None,
@@ -617,13 +795,25 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
- except:
+ except Exception:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
+ def _get_cinder_obj_name(self, cinder_object):
+ """Retrieve name of cinder object.
+
+ :param cinder_object: cinder snapshot or volume object
+ :returns: str cinder object name
+ """
+ # v1 objects store name in 'display_name' attr but v2+ use 'name'
+ try:
+ return cinder_object.display_name
+ except AttributeError:
+ return cinder_object.name
+
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
img_id=None, src_vol_id=None, snap_id=None):
"""Create cinder volume, optionally from a glance image, OR
@@ -674,6 +864,13 @@ class OpenStackAmuletUtils(AmuletUtils):
source_volid=src_vol_id,
snapshot_id=snap_id)
vol_id = vol_new.id
+ except TypeError:
+ vol_new = cinder.volumes.create(name=vol_name,
+ imageRef=img_id,
+ size=vol_size,
+ source_volid=src_vol_id,
+ snapshot_id=snap_id)
+ vol_id = vol_new.id
except Exception as e:
msg = 'Failed to create volume: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
@@ -688,7 +885,7 @@ class OpenStackAmuletUtils(AmuletUtils):
# Re-validate new volume
self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
+ val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
val_vol_boot = cinder.volumes.get(vol_id).bootable
val_vol_stat = cinder.volumes.get(vol_id).status
val_vol_size = cinder.volumes.get(vol_id).size
@@ -836,9 +1033,12 @@ class OpenStackAmuletUtils(AmuletUtils):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
+ for pool in df['pools']:
+ if pool['id'] == pool_id:
+ pool_name = pool['name']
+ obj_count = pool['stats']['objects']
+ kb_used = pool['stats']['kb_used']
+
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))
diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py
index f67f326..6c4497b 100644
--- a/charmhelpers/contrib/openstack/context.py
+++ b/charmhelpers/contrib/openstack/context.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import collections
import glob
import json
import math
@@ -92,14 +93,14 @@ from charmhelpers.contrib.network.ip import (
format_ipv6_addr,
is_bridge_member,
is_ipv6_disabled,
+ get_relation_ip,
)
from charmhelpers.contrib.openstack.utils import (
config_flags_parser,
- get_host_ip,
- git_determine_usr_bin,
- git_determine_python_path,
enable_memcache,
snap_install_requested,
+ CompareOpenStackReleases,
+ os_release,
)
from charmhelpers.core.unitdata import kv
@@ -292,7 +293,7 @@ class PostgresqlDBContext(OSContextGenerator):
def db_ssl(rdata, ctxt, ssl_dir):
if 'ssl_ca' in rdata and ssl_dir:
ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
+ with open(ca_path, 'wb') as fh:
fh.write(b64decode(rdata['ssl_ca']))
ctxt['database_ssl_ca'] = ca_path
@@ -307,12 +308,12 @@ def db_ssl(rdata, ctxt, ssl_dir):
log("Waiting 1m for ssl client cert validity", level=INFO)
time.sleep(60)
- with open(cert_path, 'w') as fh:
+ with open(cert_path, 'wb') as fh:
fh.write(b64decode(rdata['ssl_cert']))
ctxt['database_ssl_cert'] = cert_path
key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
+ with open(key_path, 'wb') as fh:
fh.write(b64decode(rdata['ssl_key']))
ctxt['database_ssl_key'] = key_path
@@ -331,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
self.rel_name = rel_name
self.interfaces = [self.rel_name]
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
+ def _setup_pki_cache(self):
if self.service and self.service_user:
# This is required for pki token signing if we don't want /tmp to
# be used.
@@ -344,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
mkdir(path=cachedir, owner=self.service_user,
group=self.service_user, perms=0o700)
+ return cachedir
+ return None
+
+ def __call__(self):
+ log('Generating template context for ' + self.rel_name, level=DEBUG)
+ ctxt = {}
+
+ cachedir = self._setup_pki_cache()
+ if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
@@ -377,6 +384,63 @@ class IdentityServiceContext(OSContextGenerator):
# so a missing value just indicates keystone needs
# upgrading
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+ ctxt['admin_domain_id'] = rdata.get('service_domain_id')
+ return ctxt
+
+ return {}
+
+
+class IdentityCredentialsContext(IdentityServiceContext):
+ '''Context for identity-credentials interface type'''
+
+ def __init__(self,
+ service=None,
+ service_user=None,
+ rel_name='identity-credentials'):
+ super(IdentityCredentialsContext, self).__init__(service,
+ service_user,
+ rel_name)
+
+ def __call__(self):
+ log('Generating template context for ' + self.rel_name, level=DEBUG)
+ ctxt = {}
+
+ cachedir = self._setup_pki_cache()
+ if cachedir:
+ ctxt['signing_dir'] = cachedir
+
+ for rid in relation_ids(self.rel_name):
+ self.related = True
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ credentials_host = rdata.get('credentials_host')
+ credentials_host = (
+ format_ipv6_addr(credentials_host) or credentials_host
+ )
+ auth_host = rdata.get('auth_host')
+ auth_host = format_ipv6_addr(auth_host) or auth_host
+ svc_protocol = rdata.get('credentials_protocol') or 'http'
+ auth_protocol = rdata.get('auth_protocol') or 'http'
+ api_version = rdata.get('api_version') or '2.0'
+ ctxt.update({
+ 'service_port': rdata.get('credentials_port'),
+ 'service_host': credentials_host,
+ 'auth_host': auth_host,
+ 'auth_port': rdata.get('auth_port'),
+ 'admin_tenant_name': rdata.get('credentials_project'),
+ 'admin_tenant_id': rdata.get('credentials_project_id'),
+ 'admin_user': rdata.get('credentials_username'),
+ 'admin_password': rdata.get('credentials_password'),
+ 'service_protocol': svc_protocol,
+ 'auth_protocol': auth_protocol,
+ 'api_version': api_version
+ })
+
+ if float(api_version) > 2:
+ ctxt.update({'admin_domain_name':
+ rdata.get('domain')})
+
+ if self.context_complete(ctxt):
return ctxt
return {}
@@ -458,7 +522,7 @@ class AMQPContext(OSContextGenerator):
ca_path = os.path.join(
self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
+ with open(ca_path, 'wb') as fh:
fh.write(b64decode(ctxt['rabbit_ssl_ca']))
ctxt['rabbit_ssl_ca'] = ca_path
@@ -554,7 +618,9 @@ class HAProxyContext(OSContextGenerator):
"""
interfaces = ['cluster']
- def __init__(self, singlenode_mode=False):
+ def __init__(self, singlenode_mode=False,
+ address_types=ADDRESS_TYPES):
+ self.address_types = address_types
self.singlenode_mode = singlenode_mode
def __call__(self):
@@ -563,41 +629,56 @@ class HAProxyContext(OSContextGenerator):
if not relation_ids('cluster') and not self.singlenode_mode:
return {}
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
l_unit = local_unit().replace('/', '-')
cluster_hosts = {}
# NOTE(jamespage): build out map of configured network endpoints
# and associated backends
- for addr_type in ADDRESS_TYPES:
+ for addr_type in self.address_types:
cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
+ # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
+ # than 'internal'
+ if addr_type == 'internal':
+ _addr_map_type = INTERNAL
+ else:
+ _addr_map_type = addr_type
+ # Network spaces aware
+ laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
+ config(cfg_opt))
if laddr:
netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
+ cluster_hosts[laddr] = {
+ 'network': "{}/{}".format(laddr,
+ netmask),
+ 'backends': collections.OrderedDict([(l_unit,
+ laddr)])
+ }
for rid in relation_ids('cluster'):
- for unit in related_units(rid):
+ for unit in sorted(related_units(rid)):
+ # API Charms will need to set {addr_type}-address with
+ # get_relation_ip(addr_type)
_laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit)
if _laddr:
_unit = unit.replace('/', '-')
cluster_hosts[laddr]['backends'][_unit] = _laddr
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
+ # NOTE(jamespage) add backend based on get_relation_ip - this
+ # will either be the only backend or the fallback if no acls
# match in the frontend
+ # Network spaces aware
+ addr = get_relation_ip('cluster')
cluster_hosts[addr] = {}
netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
+ cluster_hosts[addr] = {
+ 'network': "{}/{}".format(addr, netmask),
+ 'backends': collections.OrderedDict([(l_unit,
+ addr)])
+ }
for rid in relation_ids('cluster'):
- for unit in related_units(rid):
+ for unit in sorted(related_units(rid)):
+ # API Charms will need to set their private-address with
+ # get_relation_ip('cluster')
_laddr = relation_get('private-address',
rid=rid, unit=unit)
if _laddr:
@@ -628,6 +709,8 @@ class HAProxyContext(OSContextGenerator):
ctxt['local_host'] = '127.0.0.1'
ctxt['haproxy_host'] = '0.0.0.0'
+ ctxt['ipv6_enabled'] = not is_ipv6_disabled()
+
ctxt['stat_port'] = '8888'
db = kv()
@@ -802,8 +885,9 @@ class ApacheSSLContext(OSContextGenerator):
else:
# Expect cert/key provided in config (currently assumed that ca
# uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
+ for net_type in (INTERNAL, ADMIN, PUBLIC):
+ cn = resolve_address(endpoint_type=net_type)
+ self.configure_cert(cn)
addresses = self.get_network_addresses()
for address, endpoint in addresses:
@@ -843,15 +927,6 @@ class NeutronContext(OSContextGenerator):
for pkgs in self.packages:
ensure_packages(pkgs)
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
@@ -996,7 +1071,6 @@ class NeutronContext(OSContextGenerator):
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags
- self._save_flag_file()
return ctxt
@@ -1176,7 +1250,7 @@ class SubordinateConfigContext(OSContextGenerator):
if sub_config and sub_config != '':
try:
sub_config = json.loads(sub_config)
- except:
+ except Exception:
log('Could not parse JSON from '
'subordinate_configuration setting from %s'
% rid, level=ERROR)
@@ -1321,8 +1395,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
"public_processes": int(math.ceil(self.public_process_weight *
total_processes)),
"threads": 1,
- "usr_bin": git_determine_usr_bin(),
- "python_path": git_determine_python_path(),
}
return ctxt
@@ -1570,6 +1642,82 @@ class InternalEndpointContext(OSContextGenerator):
return {'use_internal_endpoints': config('use-internal-endpoints')}
+class VolumeAPIContext(InternalEndpointContext):
+ """Volume API context.
+
+ This context provides information regarding the volume endpoint to use
+ when communicating between services. It determines which version of the
+ API is appropriate for use.
+
+ This value will be determined in the resulting context dictionary
+ returned from calling the VolumeAPIContext object. Information provided
+ by this context is as follows:
+
+ volume_api_version: the volume api version to use, currently
+ 'v2' or 'v3'
+ volume_catalog_info: the information to use for a cinder client
+ configuration that consumes API endpoints from the keystone
+ catalog. This is defined as the type:name:endpoint_type string.
+ """
+ # FIXME(wolsen) This implementation is based on the provider being able
+ # to specify the package version to check but does not guarantee that the
+ # volume service api version selected is available. In practice, it is
+ # quite likely the volume service *is* providing the v3 volume service.
+ # This should be resolved when the service-discovery spec is implemented.
+ def __init__(self, pkg):
+ """
+ Creates a new VolumeAPIContext for use in determining which version
+ of the Volume API should be used for communication. A package codename
+ should be supplied for determining the currently installed OpenStack
+ version.
+
+ :param pkg: the package codename to use in order to determine the
+ component version (e.g. nova-common). See
+ charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
+ """
+ super(VolumeAPIContext, self).__init__()
+ self._ctxt = None
+ if not pkg:
+ raise ValueError('package name must be provided in order to '
+ 'determine current OpenStack version.')
+ self.pkg = pkg
+
+ @property
+ def ctxt(self):
+ if self._ctxt is not None:
+ return self._ctxt
+ self._ctxt = self._determine_ctxt()
+ return self._ctxt
+
+ def _determine_ctxt(self):
+ """Determines the Volume API endpoint information.
+
+ Determines the appropriate version of the API that should be used
+ as well as the catalog_info string that would be supplied. Returns
+ a dict containing the volume_api_version and the volume_catalog_info.
+ """
+ rel = os_release(self.pkg, base='icehouse')
+ version = '2'
+ if CompareOpenStackReleases(rel) >= 'pike':
+ version = '3'
+
+ service_type = 'volumev{version}'.format(version=version)
+ service_name = 'cinderv{version}'.format(version=version)
+ endpoint_type = 'publicURL'
+ if config('use-internal-endpoints'):
+ endpoint_type = 'internalURL'
+ catalog_info = '{type}:{name}:{endpoint}'.format(
+ type=service_type, name=service_name, endpoint=endpoint_type)
+
+ return {
+ 'volume_api_version': version,
+ 'volume_catalog_info': catalog_info,
+ }
+
+ def __call__(self):
+ return self.ctxt
+
+
class AppArmorContext(OSContextGenerator):
"""Base class for apparmor contexts."""
@@ -1705,3 +1853,30 @@ class MemcacheContext(OSContextGenerator):
ctxt['memcache_server_formatted'],
ctxt['memcache_port'])
return ctxt
+
+
+class EnsureDirContext(OSContextGenerator):
+ '''
+ Serves as a generic context to create a directory as a side-effect.
+
+ Useful for software that supports drop-in files (.d) in conjunction
+ with config option-based templates. Examples include:
+ * OpenStack oslo.policy drop-in files;
+ * systemd drop-in config files;
+ * other software that supports overriding defaults with .d files
+
+ Another use-case is when a subordinate generates a configuration for
+ primary to render in a separate directory.
+
+ Some software requires a user to create a target directory to be
+ scanned for drop-in files with a specific format. This is why this
+ context is needed to do that before rendering a template.
+ '''
+
+ def __init__(self, dirname):
+ '''Used merely to ensure that a given directory exists.'''
+ self.dirname = dirname
+
+ def __call__(self):
+ mkdir(self.dirname)
+ return {}
diff --git a/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charmhelpers/contrib/openstack/files/check_haproxy.sh
index 0df0717..1df55db 100755
--- a/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ b/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -9,7 +9,7 @@
CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}')
typeset -i N_INSTANCES=0
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
diff --git a/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
index 3ebb532..91ce024 100755
--- a/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ b/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
@@ -10,7 +10,7 @@
CURRQthrsh=0
MAXQthrsh=100
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}')
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py
index 254a90e..6060ae5 100644
--- a/charmhelpers/contrib/openstack/ha/utils.py
+++ b/charmhelpers/contrib/openstack/ha/utils.py
@@ -23,6 +23,8 @@
Helpers for high availability.
"""
+import json
+
import re
from charmhelpers.core.hookenv import (
@@ -32,6 +34,7 @@ from charmhelpers.core.hookenv import (
config,
status_set,
DEBUG,
+ WARNING,
)
from charmhelpers.core.host import (
@@ -40,6 +43,23 @@ from charmhelpers.core.host import (
from charmhelpers.contrib.openstack.ip import (
resolve_address,
+ is_ipv6,
+)
+
+from charmhelpers.contrib.network.ip import (
+ get_iface_for_address,
+ get_netmask_for_address,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+ get_hacluster_config
+)
+
+JSON_ENCODE_OPTIONS = dict(
+ sort_keys=True,
+ allow_nan=False,
+ indent=None,
+ separators=(',', ':'),
)
@@ -53,8 +73,8 @@ class DNSHAException(Exception):
def update_dns_ha_resource_params(resources, resource_params,
relation_id=None,
crm_ocf='ocf:maas:dns'):
- """ Check for os-*-hostname settings and update resource dictionaries for
- the HA relation.
+ """ Configure DNS-HA resources based on provided configuration and
+ update resource dictionaries for the HA relation.
@param resources: Pointer to dictionary of resources.
Usually instantiated in ha_joined().
@@ -64,61 +84,20 @@ def update_dns_ha_resource_params(resources, resource_params,
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
-
- # Validate the charm environment for DNS HA
- assert_charm_supports_dns_ha()
-
- settings = ['os-admin-hostname', 'os-internal-hostname',
- 'os-public-hostname', 'os-access-hostname']
-
- # Check which DNS settings are set and update dictionaries
- hostname_group = []
- for setting in settings:
- hostname = config(setting)
- if hostname is None:
- log('DNS HA: Hostname setting {} is None. Ignoring.'
- ''.format(setting),
- DEBUG)
- continue
- m = re.search('os-(.+?)-hostname', setting)
- if m:
- networkspace = m.group(1)
- else:
- msg = ('Unexpected DNS hostname setting: {}. '
- 'Cannot determine network space name'
- ''.format(setting))
- status_set('blocked', msg)
- raise DNSHAException(msg)
-
- hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
- if hostname_key in hostname_group:
- log('DNS HA: Resource {}: {} already exists in '
- 'hostname group - skipping'.format(hostname_key, hostname),
- DEBUG)
- continue
-
- hostname_group.append(hostname_key)
- resources[hostname_key] = crm_ocf
- resource_params[hostname_key] = (
- 'params fqdn="{}" ip_address="{}" '
- ''.format(hostname, resolve_address(endpoint_type=networkspace,
- override=False)))
-
- if len(hostname_group) >= 1:
- log('DNS HA: Hostname group is set with {} as members. '
- 'Informing the ha relation'.format(' '.join(hostname_group)),
- DEBUG)
- relation_set(relation_id=relation_id, groups={
- 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
- else:
- msg = 'DNS HA: Hostname group has no members.'
- status_set('blocked', msg)
- raise DNSHAException(msg)
+ _relation_data = {'resources': {}, 'resource_params': {}}
+ update_hacluster_dns_ha(charm_name(),
+ _relation_data,
+ crm_ocf)
+ resources.update(_relation_data['resources'])
+ resource_params.update(_relation_data['resource_params'])
+ relation_set(relation_id=relation_id, groups=_relation_data['groups'])
def assert_charm_supports_dns_ha():
"""Validate prerequisites for DNS HA
The MAAS client is only available on Xenial or greater
+
+ :raises DNSHAException: if release is < 16.04
"""
if lsb_release().get('DISTRIB_RELEASE') < '16.04':
msg = ('DNS HA is only supported on 16.04 and greater '
@@ -137,3 +116,150 @@ def expect_ha():
@returns boolean
"""
return config('vip') or config('dns-ha')
+
+
+def generate_ha_relation_data(service):
+ """ Generate relation data for ha relation
+
+ Based on configuration options and unit interfaces, generate a json
+ encoded dict of relation data items for the hacluster relation,
+ providing configuration for DNS HA or VIP's + haproxy clone sets.
+
+ @returns dict: json encoded data for use with relation_set
+ """
+ _haproxy_res = 'res_{}_haproxy'.format(service)
+ _relation_data = {
+ 'resources': {
+ _haproxy_res: 'lsb:haproxy',
+ },
+ 'resource_params': {
+ _haproxy_res: 'op monitor interval="5s"'
+ },
+ 'init_services': {
+ _haproxy_res: 'haproxy'
+ },
+ 'clones': {
+ 'cl_{}_haproxy'.format(service): _haproxy_res
+ },
+ }
+
+ if config('dns-ha'):
+ update_hacluster_dns_ha(service, _relation_data)
+ else:
+ update_hacluster_vip(service, _relation_data)
+
+ return {
+ 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
+ for k, v in _relation_data.items() if v
+ }
+
+
+def update_hacluster_dns_ha(service, relation_data,
+ crm_ocf='ocf:maas:dns'):
+ """ Configure DNS-HA resources based on provided configuration
+
+ @param service: Name of the service being configured
+ @param relation_data: Pointer to dictionary of relation data.
+ @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+ DNS HA
+ """
+ # Validate the charm environment for DNS HA
+ assert_charm_supports_dns_ha()
+
+ settings = ['os-admin-hostname', 'os-internal-hostname',
+ 'os-public-hostname', 'os-access-hostname']
+
+ # Check which DNS settings are set and update dictionaries
+ hostname_group = []
+ for setting in settings:
+ hostname = config(setting)
+ if hostname is None:
+ log('DNS HA: Hostname setting {} is None. Ignoring.'
+ ''.format(setting),
+ DEBUG)
+ continue
+ m = re.search('os-(.+?)-hostname', setting)
+ if m:
+ endpoint_type = m.group(1)
+ # resolve_address's ADDRESS_MAP uses 'int' not 'internal'
+ if endpoint_type == 'internal':
+ endpoint_type = 'int'
+ else:
+ msg = ('Unexpected DNS hostname setting: {}. '
+ 'Cannot determine endpoint_type name'
+ ''.format(setting))
+ status_set('blocked', msg)
+ raise DNSHAException(msg)
+
+ hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
+ if hostname_key in hostname_group:
+ log('DNS HA: Resource {}: {} already exists in '
+ 'hostname group - skipping'.format(hostname_key, hostname),
+ DEBUG)
+ continue
+
+ hostname_group.append(hostname_key)
+ relation_data['resources'][hostname_key] = crm_ocf
+ relation_data['resource_params'][hostname_key] = (
+ 'params fqdn="{}" ip_address="{}"'
+ .format(hostname, resolve_address(endpoint_type=endpoint_type,
+ override=False)))
+
+ if len(hostname_group) >= 1:
+ log('DNS HA: Hostname group is set with {} as members. '
+ 'Informing the ha relation'.format(' '.join(hostname_group)),
+ DEBUG)
+ relation_data['groups'] = {
+ 'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
+ }
+ else:
+ msg = 'DNS HA: Hostname group has no members.'
+ status_set('blocked', msg)
+ raise DNSHAException(msg)
+
+
+def update_hacluster_vip(service, relation_data):
+ """ Configure VIP resources based on provided configuration
+
+ @param service: Name of the service being configured
+ @param relation_data: Pointer to dictionary of relation data.
+ """
+ cluster_config = get_hacluster_config()
+ vip_group = []
+ for vip in cluster_config['vip'].split():
+ if is_ipv6(vip):
+ res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+ vip_params = 'ipv6addr'
+ else:
+ res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+ vip_params = 'ip'
+
+ iface = (get_iface_for_address(vip) or
+ config('vip_iface'))
+ netmask = (get_netmask_for_address(vip) or
+ config('vip_cidr'))
+
+ if iface is not None:
+ vip_key = 'res_{}_{}_vip'.format(service, iface)
+ if vip_key in vip_group:
+ if vip not in relation_data['resource_params'][vip_key]:
+ vip_key = '{}_{}'.format(vip_key, vip_params)
+ else:
+ log("Resource '%s' (vip='%s') already exists in "
+ "vip group - skipping" % (vip_key, vip), WARNING)
+ continue
+
+ relation_data['resources'][vip_key] = res_neutron_vip
+ relation_data['resource_params'][vip_key] = (
+ 'params {ip}="{vip}" cidr_netmask="{netmask}" '
+ 'nic="{iface}"'.format(ip=vip_params,
+ vip=vip,
+ iface=iface,
+ netmask=netmask)
+ )
+ vip_group.append(vip_key)
+
+ if len(vip_group) >= 1:
+ relation_data['groups'] = {
+ 'grp_{}_vips'.format(service): ' '.join(vip_group)
+ }
diff --git a/charmhelpers/contrib/openstack/neutron.py b/charmhelpers/contrib/openstack/neutron.py
index 37fa0eb..0f847f5 100644
--- a/charmhelpers/contrib/openstack/neutron.py
+++ b/charmhelpers/contrib/openstack/neutron.py
@@ -59,18 +59,13 @@ def determine_dkms_package():
def quantum_plugins():
- from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
+ 'contexts': [],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
@@ -82,11 +77,7 @@ def quantum_plugins():
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
@@ -100,7 +91,6 @@ NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
- from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
@@ -108,11 +98,7 @@ def neutron_plugins():
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
@@ -124,11 +110,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
@@ -138,11 +120,7 @@ def neutron_plugins():
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
@@ -152,11 +130,7 @@ def neutron_plugins():
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [determine_dkms_package(),
['neutron-plugin-cisco']],
@@ -167,11 +141,7 @@ def neutron_plugins():
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': ['calico-felix',
'bird',
'neutron-dhcp-agent',
@@ -189,11 +159,7 @@ def neutron_plugins():
'vsp': {
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
@@ -203,10 +169,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
'.plumgrid_plugin.NeutronPluginPLUMgridV2'),
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': ['plumgrid-lxc',
'iovisor-dkms'],
@@ -217,11 +180,7 @@ def neutron_plugins():
'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [determine_dkms_package()],
'server_packages': ['neutron-server',
diff --git a/charmhelpers/contrib/openstack/templates/ceph.conf b/charmhelpers/contrib/openstack/templates/ceph.conf
index ed5c4f1..a11ce8a 100644
--- a/charmhelpers/contrib/openstack/templates/ceph.conf
+++ b/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -18,7 +18,7 @@ rbd default features = {{ rbd_features }}
[client]
{% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.iteritems() -%}
+{% for key, value in rbd_client_cache_settings.items() -%}
{{ key }} = {{ value }}
{% endfor -%}
-{%- endif %}
\ No newline at end of file
+{%- endif %}
diff --git a/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charmhelpers/contrib/openstack/templates/haproxy.cfg
index 2e66045..d36af2a 100644
--- a/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -17,22 +17,22 @@ defaults
{%- if haproxy_queue_timeout %}
timeout queue {{ haproxy_queue_timeout }}
{%- else %}
- timeout queue 5000
+ timeout queue 9000
{%- endif %}
{%- if haproxy_connect_timeout %}
timeout connect {{ haproxy_connect_timeout }}
{%- else %}
- timeout connect 5000
+ timeout connect 9000
{%- endif %}
{%- if haproxy_client_timeout %}
timeout client {{ haproxy_client_timeout }}
{%- else %}
- timeout client 30000
+ timeout client 90000
{%- endif %}
{%- if haproxy_server_timeout %}
timeout server {{ haproxy_server_timeout }}
{%- else %}
- timeout server 30000
+ timeout server 90000
{%- endif %}
listen stats
@@ -48,7 +48,9 @@ listen stats
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
bind *:{{ ports[0] }}
+ {% if ipv6_enabled -%}
bind :::{{ ports[0] }}
+ {% endif -%}
{% for frontend in frontends -%}
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-cache b/charmhelpers/contrib/openstack/templates/section-oslo-cache
new file mode 100644
index 0000000..e056a32
--- /dev/null
+++ b/charmhelpers/contrib/openstack/templates/section-oslo-cache
@@ -0,0 +1,6 @@
+[cache]
+{% if memcache_url %}
+enabled = true
+backend = oslo_cache.memcache_pool
+memcache_servers = {{ memcache_url }}
+{% endif %}
diff --git a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
index 315b2a3..e2e73b2 100644
--- a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
+++ b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -15,9 +15,6 @@ Listen {{ public_port }}
{% if port -%}
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
- python-path={{ python_path }} \
-{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
@@ -29,7 +26,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
+
= 2.4>
Require all granted
@@ -44,9 +41,6 @@ Listen {{ public_port }}
{% if admin_port -%}
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
- python-path={{ python_path }} \
-{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
@@ -58,7 +52,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
+
= 2.4>
Require all granted
@@ -73,9 +67,6 @@ Listen {{ public_port }}
{% if public_port -%}
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
- python-path={{ python_path }} \
-{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
@@ -87,7 +78,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
+
= 2.4>
Require all granted
diff --git a/charmhelpers/contrib/openstack/templating.py b/charmhelpers/contrib/openstack/templating.py
index d8c1fc7..a623315 100644
--- a/charmhelpers/contrib/openstack/templating.py
+++ b/charmhelpers/contrib/openstack/templating.py
@@ -93,7 +93,8 @@ class OSConfigTemplate(object):
Associates a config file template with a list of context generators.
Responsible for constructing a template context based on those generators.
"""
- def __init__(self, config_file, contexts):
+
+ def __init__(self, config_file, contexts, config_template=None):
self.config_file = config_file
if hasattr(contexts, '__call__'):
@@ -103,6 +104,8 @@ class OSConfigTemplate(object):
self._complete_contexts = []
+ self.config_template = config_template
+
def context(self):
ctxt = {}
for context in self.contexts:
@@ -124,6 +127,11 @@ class OSConfigTemplate(object):
self.context()
return self._complete_contexts
+ @property
+ def is_string_template(self):
+ """:returns: Boolean if this instance is a template initialised with a string"""
+ return self.config_template is not None
+
class OSConfigRenderer(object):
"""
@@ -148,6 +156,10 @@ class OSConfigRenderer(object):
contexts=[context.IdentityServiceContext()])
configs.register(config_file='/etc/haproxy/haproxy.conf',
contexts=[context.HAProxyContext()])
+ configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
+ contexts=[context.ExtraPolicyContext()
+ context.KeystoneContext()],
+ config_template=hookenv.config('extra-policy'))
# write out a single config
configs.write('/etc/nova/nova.conf')
# write out all registered configs
@@ -218,14 +230,23 @@ class OSConfigRenderer(object):
else:
apt_install('python3-jinja2')
- def register(self, config_file, contexts):
+ def register(self, config_file, contexts, config_template=None):
"""
Register a config file with a list of context generators to be called
during rendering.
+ config_template can be used to load a template from a string instead of
+ using template loaders and template files.
+ :param config_file (str): a path where a config file will be rendered
+ :param contexts (list): a list of context dictionaries with kv pairs
+ :param config_template (str): an optional template string to use
"""
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
+ self.templates[config_file] = OSConfigTemplate(
+ config_file=config_file,
+ contexts=contexts,
+ config_template=config_template
+ )
+ log('Registered config file: {}'.format(config_file),
+ level=INFO)
def _get_tmpl_env(self):
if not self._tmpl_env:
@@ -235,32 +256,58 @@ class OSConfigRenderer(object):
def _get_template(self, template):
self._get_tmpl_env()
template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
+ log('Loaded template from {}'.format(template.filename),
+ level=INFO)
+ return template
+
+ def _get_template_from_string(self, ostmpl):
+ '''
+ Get a jinja2 template object from a string.
+ :param ostmpl: OSConfigTemplate to use as a data source.
+ '''
+ self._get_tmpl_env()
+ template = self._tmpl_env.from_string(ostmpl.config_template)
+ log('Loaded a template from a string for {}'.format(
+ ostmpl.config_file),
+ level=INFO)
return template
def render(self, config_file):
if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
+ log('Config not registered: {}'.format(config_file), level=ERROR)
raise OSConfigException
- ctxt = self.templates[config_file].context()
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
+ ostmpl = self.templates[config_file]
+ ctxt = ostmpl.context()
+
+ if ostmpl.is_string_template:
+ template = self._get_template_from_string(ostmpl)
+ log('Rendering from a string template: '
+ '{}'.format(config_file),
+ level=INFO)
+ else:
+ _tmpl = os.path.basename(config_file)
try:
template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
+ except exceptions.TemplateNotFound:
+ # if no template is found with basename, try looking
+ # for it using a munged full path, eg:
+ # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
+ _tmpl = '_'.join(config_file.split('/')[1:])
+ try:
+ template = self._get_template(_tmpl)
+ except exceptions.TemplateNotFound as e:
+ log('Could not load template from {} by {} or {}.'
+ ''.format(
+ self.templates_dir,
+ os.path.basename(config_file),
+ _tmpl
+ ),
+ level=ERROR)
+ raise e
- log('Rendering from template: %s' % _tmpl, level=INFO)
+ log('Rendering from template: {}'.format(config_file),
+ level=INFO)
return template.render(ctxt)
def write(self, config_file):
@@ -272,6 +319,8 @@ class OSConfigRenderer(object):
raise OSConfigException
_out = self.render(config_file)
+ if six.PY3:
+ _out = _out.encode('UTF-8')
with open(config_file, 'wb') as out:
out.write(_out)
diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py
index 837a167..e719426 100644
--- a/charmhelpers/contrib/openstack/utils.py
+++ b/charmhelpers/contrib/openstack/utils.py
@@ -23,7 +23,6 @@ import sys
import re
import itertools
import functools
-import shutil
import six
import traceback
@@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
relation_set,
- service_name,
status_set,
hook_name,
application_version_set,
@@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
port_has_listener,
)
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
from charmhelpers.core.host import (
lsb_release,
mounts,
@@ -84,7 +77,6 @@ from charmhelpers.core.host import (
)
from charmhelpers.fetch import (
apt_cache,
- install_remote,
import_key as fetch_import_key,
add_source as fetch_add_source,
SourceConfigError,
@@ -95,7 +87,7 @@ from charmhelpers.fetch import (
from charmhelpers.fetch.snap import (
snap_install,
snap_refresh,
- SNAP_CHANNELS,
+ valid_snap_channel,
)
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@@ -140,6 +132,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
+ ('bionic', 'queens'),
])
@@ -157,6 +150,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2016.2', 'newton'),
('2017.1', 'ocata'),
('2017.2', 'pike'),
+ ('2018.1', 'queens'),
])
# The ugly duckling - must list releases oldest to newest
@@ -187,6 +181,8 @@ SWIFT_CODENAMES = OrderedDict([
['2.11.0', '2.12.0', '2.13.0']),
('pike',
['2.13.0', '2.15.0']),
+ ('queens',
+ ['2.16.0', '2.17.0']),
])
# >= Liberty version->codename mapping
@@ -274,27 +270,6 @@ PACKAGE_CODENAMES = {
]),
}
-GIT_DEFAULT_REPOS = {
- 'requirements': 'git://github.com/openstack/requirements',
- 'cinder': 'git://github.com/openstack/cinder',
- 'glance': 'git://github.com/openstack/glance',
- 'horizon': 'git://github.com/openstack/horizon',
- 'keystone': 'git://github.com/openstack/keystone',
- 'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
- 'neutron': 'git://github.com/openstack/neutron',
- 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
- 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
- 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
- 'nova': 'git://github.com/openstack/nova',
-}
-
-GIT_DEFAULT_BRANCHES = {
- 'liberty': 'stable/liberty',
- 'mitaka': 'stable/mitaka',
- 'newton': 'stable/newton',
- 'master': 'master',
-}
-
DEFAULT_LOOPBACK_SIZE = '5G'
@@ -388,6 +363,8 @@ def get_swift_codename(version):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+ if six.PY3:
+ ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
@@ -412,6 +389,8 @@ def get_os_codename_package(package, fatal=True):
cmd = ['snap', 'list', package]
try:
out = subprocess.check_output(cmd)
+ if six.PY3:
+ out = out.decode('UTF-8')
except subprocess.CalledProcessError as e:
return None
lines = out.split('\n')
@@ -426,7 +405,7 @@ def get_os_codename_package(package, fatal=True):
try:
pkg = cache[package]
- except:
+ except Exception:
if not fatal:
return None
# the package is unknown to the current apt cache.
@@ -522,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
if _os_rel:
return _os_rel
_os_rel = (
- git_os_codename_install_source(config('openstack-origin-git')) or
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
@@ -579,6 +557,9 @@ def configure_installation_source(source_plus_key):
Note that the behaviour on error is to log the error to the juju log and
then call sys.exit(1).
"""
+ if source_plus_key.startswith('snap'):
+ # Do nothing for snap installs
+ return
# extract the key if there is one, denoted by a '|' in the rel
source, key = get_source_and_pgp_key(source_plus_key)
@@ -615,7 +596,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
+ with open(juju_rc_path, 'wt') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
@@ -645,11 +626,6 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
- if "swift" in package:
- major_cur_vers = cur_vers.split('.', 1)[0]
- major_avail_vers = avail_vers.split('.', 1)[0]
- major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
- return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
return apt.version_compare(avail_vers, cur_vers) == 1
@@ -760,417 +736,6 @@ def os_requires_version(ostack_release, pkg):
return wrap
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-def git_os_codename_install_source(projects_yaml):
- """
- Returns OpenStack codename of release being installed from source.
- """
- if git_install_requested():
- projects = _git_yaml_load(projects_yaml)
-
- if projects in GIT_DEFAULT_BRANCHES.keys():
- if projects == 'master':
- return 'ocata'
- return projects
-
- if 'release' in projects:
- if projects['release'] == 'master':
- return 'ocata'
- return projects['release']
-
- return None
-
-
-def git_default_repos(projects_yaml):
- """
- Returns default repos if a default openstack-origin-git value is specified.
- """
- service = service_name()
- core_project = service
-
- for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
- if projects_yaml == default:
-
- # add the requirements repo first
- repo = {
- 'name': 'requirements',
- 'repository': GIT_DEFAULT_REPOS['requirements'],
- 'branch': branch,
- }
- repos = [repo]
-
- # neutron-* and nova-* charms require some additional repos
- if service in ['neutron-api', 'neutron-gateway',
- 'neutron-openvswitch']:
- core_project = 'neutron'
- if service == 'neutron-api':
- repo = {
- 'name': 'networking-hyperv',
- 'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
- 'branch': branch,
- }
- repos.append(repo)
- for project in ['neutron-fwaas', 'neutron-lbaas',
- 'neutron-vpnaas', 'nova']:
- repo = {
- 'name': project,
- 'repository': GIT_DEFAULT_REPOS[project],
- 'branch': branch,
- }
- repos.append(repo)
-
- elif service in ['nova-cloud-controller', 'nova-compute']:
- core_project = 'nova'
- repo = {
- 'name': 'neutron',
- 'repository': GIT_DEFAULT_REPOS['neutron'],
- 'branch': branch,
- }
- repos.append(repo)
- elif service == 'openstack-dashboard':
- core_project = 'horizon'
-
- # finally add the current service's core project repo
- repo = {
- 'name': core_project,
- 'repository': GIT_DEFAULT_REPOS[core_project],
- 'branch': branch,
- }
- repos.append(repo)
-
- return yaml.dump(dict(repositories=repos, release=default))
-
- return projects_yaml
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-requirements_dir = None
-
-
-def git_clone_and_install(projects_yaml, core_project):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- constraints = None
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- depth = '1'
- if 'depth' in p.keys():
- depth = p['depth']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- constraints = os.path.join(repo_dir, "upper-constraints.txt")
- # upper-constraints didn't exist until after icehouse
- if not os.path.isfile(constraints):
- constraints = None
- # use constraints unless project yaml sets use_constraints to false
- if 'use_constraints' in projects.keys():
- if not projects['use_constraints']:
- constraints = None
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True,
- constraints=constraints)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
- _git_ensure_key_exists('release', projects)
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements, constraints=None):
- """
- Clone and install a single git repository.
- """
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(
- repo, dest=parent_dir, branch=branch, depth=depth)
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv,
- constraints=constraints)
- else:
- pip_install(repo_dir, venv=venv, constraints=constraints)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def git_generate_systemd_init_files(templates_dir):
- """
- Generate systemd init files.
-
- Generates and installs systemd init units and script files based on the
- *.init.in files contained in the templates_dir directory.
-
- This code is based on the openstack-pkg-tools package and its init
- script generation, which is used by the OpenStack packages.
- """
- for f in os.listdir(templates_dir):
- # Create the init script and systemd unit file from the template
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- init_source = os.path.join(templates_dir, init_file)
- service_source = os.path.join(templates_dir, service_file)
-
- init_dest = os.path.join('/etc/init.d', init_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(init_in_source, init_source)
- with open(init_source, 'a') as outfile:
- template = ('/usr/share/openstack-pkg-tools/'
- 'init-script-template')
- with open(template) as infile:
- outfile.write('\n\n{}'.format(infile.read()))
-
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(init_dest):
- os.remove(init_dest)
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(init_source, init_dest)
- shutil.copyfile(service_source, service_dest)
- os.chmod(init_dest, 0o755)
-
- for f in os.listdir(templates_dir):
- # If there's a service.in file, use it instead of the generated one
- if f.endswith(".service.in"):
- service_in_file = f
- service_file = f[:-3]
-
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(service_in_source, service_source)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
- for f in os.listdir(templates_dir):
- # Generate the systemd unit if there's no existing .service.in
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_in_file = "{}.service.in".format(init_file)
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- if not os.path.exists(service_in_source):
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
-
-def git_determine_usr_bin():
- """Return the /usr/bin path for Apache2 config.
-
- The /usr/bin path will be located in the virtualenv if the charm
- is configured to deploy from source.
- """
- if git_install_requested():
- projects_yaml = config('openstack-origin-git')
- projects_yaml = git_default_repos(projects_yaml)
- return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
- else:
- return '/usr/bin'
-
-
-def git_determine_python_path():
- """Return the python-path for Apache2 config.
-
- Returns 'None' unless the charm is configured to deploy from source,
- in which case the path of the virtualenv's site-packages is returned.
- """
- if git_install_requested():
- projects_yaml = config('openstack-origin-git')
- projects_yaml = git_default_repos(projects_yaml)
- return os.path.join(git_pip_venv_dir(projects_yaml),
- 'lib/python2.7/site-packages')
- else:
- return None
-
-
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@@ -1604,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""
ret = False
- if git_install_requested():
- action_set({'outcome': 'installed from source, skipped upgrade.'})
- else:
- if openstack_upgrade_available(package):
- if config('action-managed-upgrade'):
- juju_log('Upgrading OpenStack release')
+ if openstack_upgrade_available(package):
+ if config('action-managed-upgrade'):
+ juju_log('Upgrading OpenStack release')
- try:
- upgrade_callback(configs=configs)
- action_set({'outcome': 'success, upgrade completed.'})
- ret = True
- except:
- action_set({'outcome': 'upgrade failed, see traceback.'})
- action_set({'traceback': traceback.format_exc()})
- action_fail('do_openstack_upgrade resulted in an '
- 'unexpected error')
- else:
- action_set({'outcome': 'action-managed-upgrade config is '
- 'False, skipped upgrade.'})
+ try:
+ upgrade_callback(configs=configs)
+ action_set({'outcome': 'success, upgrade completed.'})
+ ret = True
+ except Exception:
+ action_set({'outcome': 'upgrade failed, see traceback.'})
+ action_set({'traceback': traceback.format_exc()})
+ action_fail('do_openstack_upgrade resulted in an '
+ 'unexpected error')
else:
- action_set({'outcome': 'no upgrade available.'})
+ action_set({'outcome': 'action-managed-upgrade config is '
+ 'False, skipped upgrade.'})
+ else:
+ action_set({'outcome': 'no upgrade available.'})
return ret
@@ -1720,7 +1282,7 @@ def is_unit_paused_set():
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused')))
- except:
+ except Exception:
return False
@@ -2034,21 +1596,32 @@ def token_cache_pkgs(source=None, release=None):
def update_json_file(filename, items):
"""Updates the json `filename` with a given dict.
- :param filename: json filename (i.e.: /etc/glance/policy.json)
+ :param filename: path to json file (e.g. /etc/glance/policy.json)
:param items: dict of items to update
"""
+ if not items:
+ return
+
with open(filename) as fd:
policy = json.load(fd)
+
+ # Compare before and after and if nothing has changed don't write the file
+ # since that could cause unnecessary service restarts.
+ before = json.dumps(policy, indent=4, sort_keys=True)
policy.update(items)
+ after = json.dumps(policy, indent=4, sort_keys=True)
+ if before == after:
+ return
+
with open(filename, "w") as fd:
- fd.write(json.dumps(policy, indent=4))
+ fd.write(after)
@cached
def snap_install_requested():
""" Determine if installing from snaps
- If openstack-origin is of the form snap:channel-series-release
+ If openstack-origin is of the form snap:track/channel[/branch]
and channel is in SNAPS_CHANNELS return True.
"""
origin = config('openstack-origin') or ""
@@ -2056,10 +1629,12 @@ def snap_install_requested():
return False
_src = origin[5:]
- channel, series, release = _src.split('-')
- if channel.lower() in SNAP_CHANNELS:
- return True
- return False
+ if '/' in _src:
+ channel = _src.split('/')[1]
+ else:
+ # Handle snap:track with no channel
+ channel = 'stable'
+ return valid_snap_channel(channel)
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@@ -2067,7 +1642,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
- snap:channel-series-track
+ snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes
"""
@@ -2077,8 +1652,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
return {}
_src = src[5:]
- _channel, _series, _release = _src.split('-')
- channel = '--channel={}/{}'.format(_release, _channel)
+ channel = '--channel={}'.format(_src)
return {snap: {'channel': channel, 'mode': mode}
for snap in snaps}
@@ -2090,8 +1664,8 @@ def install_os_snaps(snaps, refresh=False):
@param snaps: Dictionary of snaps with channels and modes of the form:
{'snap_name': {'channel': 'snap_channel',
'mode': 'snap_mode'}}
- Where channel a snapstore channel and mode is --classic, --devmode or
- --jailmode.
+ Where channel is a snapstore channel and mode is --classic, --devmode
+ or --jailmode.
@param post_snap_install: Callback function to run after snaps have been
installed
"""
diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py
index e5a01b1..e13e60a 100644
--- a/charmhelpers/contrib/storage/linux/ceph.py
+++ b/charmhelpers/contrib/storage/linux/ceph.py
@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
- if valid_type is six.string_types:
+ if isinstance(value, six.string_types):
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
@@ -370,18 +370,19 @@ def get_mon_map(service):
Also raises CalledProcessError if our ceph command fails
"""
try:
- mon_status = check_output(
- ['ceph', '--id', service,
- 'mon_status', '--format=json'])
+ mon_status = check_output(['ceph', '--id', service,
+ 'mon_status', '--format=json'])
+ if six.PY3:
+ mon_status = mon_status.decode('UTF-8')
try:
return json.loads(mon_status)
except ValueError as v:
- log("Unable to parse mon_status json: {}. Error: {}".format(
- mon_status, v.message))
+ log("Unable to parse mon_status json: {}. Error: {}"
+ .format(mon_status, str(v)))
raise
except CalledProcessError as e:
- log("mon_status command failed with message: {}".format(
- e.message))
+ log("mon_status command failed with message: {}"
+ .format(str(e)))
raise
@@ -457,7 +458,7 @@ def monitor_key_get(service, key):
try:
output = check_output(
['ceph', '--id', service,
- 'config-key', 'get', str(key)])
+ 'config-key', 'get', str(key)]).decode('UTF-8')
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
@@ -500,6 +501,8 @@ def get_erasure_profile(service, name):
out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name, '--format=json'])
+ if six.PY3:
+ out = out.decode('UTF-8')
return json.loads(out)
except (CalledProcessError, OSError, ValueError):
return None
@@ -514,7 +517,8 @@ def pool_set(service, pool_name, key, value):
:param value:
:return: None. Can raise CalledProcessError
"""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+ str(value).lower()]
try:
check_call(cmd)
except CalledProcessError:
@@ -618,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
+ version = ceph_version()
+
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
- 'ruleset_failure_domain=' + failure_domain]
+ 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
+ ]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+ # failure_domain changed in luminous
+ if version and version >= '12.0.0':
+ cmd.append('crush-failure-domain=' + failure_domain)
+ else:
+ cmd.append('ruleset-failure-domain=' + failure_domain)
+
# Add plugin specific information
if locality is not None:
# For local erasure codes
@@ -686,7 +698,10 @@ def get_cache_mode(service, pool_name):
"""
validator(value=service, valid_type=six.string_types)
validator(value=pool_name, valid_type=six.string_types)
- out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
+ out = check_output(['ceph', '--id', service,
+ 'osd', 'dump', '--format=json'])
+ if six.PY3:
+ out = out.decode('UTF-8')
try:
osd_json = json.loads(out)
for pool in osd_json['pools']:
@@ -700,8 +715,9 @@ def get_cache_mode(service, pool_name):
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
+ out = check_output(['rados', '--id', service, 'lspools'])
+ if six.PY3:
+ out = out.decode('UTF-8')
except CalledProcessError:
return False
@@ -714,9 +730,12 @@ def get_osds(service):
"""
version = ceph_version()
if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
+ out = check_output(['ceph', '--id', service,
+ 'osd', 'ls',
+ '--format=json'])
+ if six.PY3:
+ out = out.decode('UTF-8')
+ return json.loads(out)
return None
@@ -734,7 +753,9 @@ def rbd_exists(service, pool, rbd_img):
"""Check to see if a RADOS block device exists."""
try:
out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
+ service, '--pool', pool])
+ if six.PY3:
+ out = out.decode('UTF-8')
except CalledProcessError:
return False
@@ -859,7 +880,9 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name):
"""Determine whether a RADOS block device is mapped locally."""
try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
+ out = check_output(['rbd', 'showmapped'])
+ if six.PY3:
+ out = out.decode('UTF-8')
except CalledProcessError:
return False
@@ -1018,7 +1041,9 @@ def ceph_version():
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
- output = check_output(cmd).decode('US-ASCII')
+ output = check_output(cmd)
+ if six.PY3:
+ output = output.decode('UTF-8')
output = output.split()
if len(output) > 3:
return output[2]
@@ -1048,14 +1073,24 @@ class CephBrokerRq(object):
self.ops = []
def add_op_request_access_to_group(self, name, namespace=None,
- permission=None, key_name=None):
+ permission=None, key_name=None,
+ object_prefix_permissions=None):
"""
Adds the requested permissions to the current service's Ceph key,
- allowing the key to access only the specified pools
+ allowing the key to access only the specified pools or
+ object prefixes. object_prefix_permissions should be a dictionary
+ keyed on the permission with the corresponding value being a list
+ of prefixes to apply that permission to.
+ {
+ 'rwx': ['prefix1', 'prefix2'],
+ 'class-read': ['prefix3']}
"""
- self.ops.append({'op': 'add-permissions-to-key', 'group': name,
- 'namespace': namespace, 'name': key_name or service_name(),
- 'group-permission': permission})
+ self.ops.append({
+ 'op': 'add-permissions-to-key', 'group': name,
+ 'namespace': namespace,
+ 'name': key_name or service_name(),
+ 'group-permission': permission,
+ 'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
@@ -1091,7 +1126,10 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
- for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
+ for key in [
+ 'replicas', 'name', 'op', 'pg_num', 'weight',
+ 'group', 'group-namespace', 'group-permission',
+ 'object-prefix-permissions']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:
diff --git a/charmhelpers/contrib/storage/linux/lvm.py b/charmhelpers/contrib/storage/linux/lvm.py
index 4719f53..c8bde69 100644
--- a/charmhelpers/contrib/storage/linux/lvm.py
+++ b/charmhelpers/contrib/storage/linux/lvm.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import functools
from subprocess import (
CalledProcessError,
check_call,
@@ -74,10 +75,10 @@ def list_lvm_volume_group(block_device):
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
- for l in pvd:
- l = l.decode('UTF-8')
- if l.strip().startswith('VG Name'):
- vg = ' '.join(l.strip().split()[2:])
+ for lvm in pvd:
+ lvm = lvm.decode('UTF-8')
+ if lvm.strip().startswith('VG Name'):
+ vg = ' '.join(lvm.strip().split()[2:])
return vg
@@ -101,3 +102,81 @@ def create_lvm_volume_group(volume_group, block_device):
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
+
+
+def list_logical_volumes(select_criteria=None, path_mode=False):
+ '''
+ List logical volumes
+
+ :param select_criteria: str: Limit list to those volumes matching this
+ criteria (see 'lvs -S help' for more details)
+ :param path_mode: bool: return logical volume name in 'vg/lv' format, this
+ format is required for some commands like lvextend
+ :returns: [str]: List of logical volumes
+ '''
+ lv_diplay_attr = 'lv_name'
+ if path_mode:
+ # Parsing output logic relies on the column order
+ lv_diplay_attr = 'vg_name,' + lv_diplay_attr
+ cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings']
+ if select_criteria:
+ cmd.extend(['--select', select_criteria])
+ lvs = []
+ for lv in check_output(cmd).decode('UTF-8').splitlines():
+ if not lv:
+ continue
+ if path_mode:
+ lvs.append('/'.join(lv.strip().split()))
+ else:
+ lvs.append(lv.strip())
+ return lvs
+
+
+list_thin_logical_volume_pools = functools.partial(
+ list_logical_volumes,
+ select_criteria='lv_attr =~ ^t')
+
+list_thin_logical_volumes = functools.partial(
+ list_logical_volumes,
+ select_criteria='lv_attr =~ ^V')
+
+
+def extend_logical_volume_by_device(lv_name, block_device):
+ '''
+ Extends the size of logical volume lv_name by the amount of free space on
+ physical volume block_device.
+
+ :param lv_name: str: name of logical volume to be extended (vg/lv format)
+ :param block_device: str: name of block_device to be allocated to lv_name
+ '''
+ cmd = ['lvextend', lv_name, block_device]
+ check_call(cmd)
+
+
+def create_logical_volume(lv_name, volume_group, size=None):
+ '''
+ Create a new logical volume in an existing volume group
+
+ :param lv_name: str: name of logical volume to be created.
+ :param volume_group: str: Name of volume group to use for the new volume.
+ :param size: str: Size of logical volume to create (100% if not supplied)
+ :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
+ '''
+ if size:
+ check_call([
+ 'lvcreate',
+ '--yes',
+ '-L',
+ '{}'.format(size),
+ '-n', lv_name, volume_group
+ ])
+ # create the lv with all the space available, this is needed because the
+ # system call is different for LVM
+ else:
+ check_call([
+ 'lvcreate',
+ '--yes',
+ '-l',
+ '100%FREE',
+ '-n', lv_name, volume_group
+ ])
diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py
index 3dc0df6..c942889 100644
--- a/charmhelpers/contrib/storage/linux/utils.py
+++ b/charmhelpers/contrib/storage/linux/utils.py
@@ -64,6 +64,6 @@ def is_device_mounted(device):
'''
try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
- except:
+ except Exception:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))
diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py
index 899722f..89f1024 100644
--- a/charmhelpers/core/hookenv.py
+++ b/charmhelpers/core/hookenv.py
@@ -22,10 +22,12 @@ from __future__ import print_function
import copy
from distutils.version import LooseVersion
from functools import wraps
+from collections import namedtuple
import glob
import os
import json
import yaml
+import re
import subprocess
import sys
import errno
@@ -38,6 +40,7 @@ if not six.PY3:
else:
from collections import UserDict
+
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@@ -65,7 +68,7 @@ def cached(func):
@wraps(func)
def wrapper(*args, **kwargs):
global cache
- key = str((func, args, kwargs))
+ key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try:
return cache[key]
except KeyError:
@@ -343,6 +346,7 @@ class Config(dict):
"""
with open(self.path, 'w') as f:
+ os.fchmod(f.fileno(), 0o600)
json.dump(self, f)
def _implicit_save(self):
@@ -644,18 +648,31 @@ def is_relation_made(relation, keys='private-address'):
return False
+def _port_op(op_name, port, protocol="TCP"):
+ """Open or close a service network port"""
+ _args = [op_name]
+ icmp = protocol.upper() == "ICMP"
+ if icmp:
+ _args.append(protocol)
+ else:
+ _args.append('{}/{}'.format(port, protocol))
+ try:
+ subprocess.check_call(_args)
+ except subprocess.CalledProcessError:
+ # Older Juju pre 2.3 doesn't support ICMP
+ # so treat it as a no-op if it fails.
+ if not icmp:
+ raise
+
+
def open_port(port, protocol="TCP"):
"""Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('open-port', port, protocol)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('close-port', port, protocol)
def open_ports(start, end, protocol="TCP"):
@@ -672,6 +689,17 @@ def close_ports(start, end, protocol="TCP"):
subprocess.check_call(_args)
+def opened_ports():
+ """Get the opened ports
+
+ *Note that this will only show ports opened in a previous hook*
+
+ :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
+ """
+ _args = ['opened-ports', '--format=json']
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
@@ -793,6 +821,10 @@ class Hooks(object):
return wrapper
+class NoNetworkBinding(Exception):
+ pass
+
+
def charm_dir():
"""Return the root directory of the current charm"""
d = os.environ.get('JUJU_CHARM_DIR')
@@ -1012,7 +1044,6 @@ def juju_version():
universal_newlines=True).strip()
-@cached
def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1072,6 +1103,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
+ Deprecated since Juju 2.3; use network_get()
+
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation of extra-binding
@@ -1079,7 +1112,41 @@ def network_get_primary_address(binding):
:raise: NotImplementedError if run on Juju < 2.0
'''
cmd = ['network-get', '--primary-address', binding]
- return subprocess.check_output(cmd).decode('UTF-8').strip()
+ try:
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ except CalledProcessError as e:
+ if 'no network config found for binding' in e.output.decode('UTF-8'):
+ raise NoNetworkBinding("No network binding for {}"
+ .format(binding))
+ else:
+ raise
+ return response
+
+
+def network_get(endpoint, relation_id=None):
+ """
+ Retrieve the network details for a relation endpoint
+
+ :param endpoint: string. The name of a relation endpoint
+ :param relation_id: int. The ID of the relation for the current context.
+ :return: dict. The loaded YAML output of the network-get query.
+ :raise: NotImplementedError if request not supported by the Juju version.
+ """
+ if not has_juju_version('2.2'):
+ raise NotImplementedError(juju_version()) # earlier versions require --primary-address
+ if relation_id and not has_juju_version('2.3'):
+ raise NotImplementedError # 2.3 added the -r option
+
+ cmd = ['network-get', endpoint, '--format', 'yaml']
+ if relation_id:
+ cmd.append('-r')
+ cmd.append(relation_id)
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ return yaml.safe_load(response)
def add_metric(*args, **kwargs):
@@ -1111,3 +1178,93 @@ def meter_info():
"""Get the meter status information, if running in the meter-status-changed
hook."""
return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+ """Iterate through all units in a relation
+
+ Generator that iterates through all the units in a relation and yields
+ a named tuple with rid and unit field names.
+
+ Usage:
+ data = [(u.rid, u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param relation_name: string relation name
+ :yield: Named Tuple with rid and unit field names
+ """
+ RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+ for rid in relation_ids(relation_name):
+ for unit in related_units(rid):
+ yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+ """
+ Retrieve the ingress-address from a relation when available.
+ Otherwise, return the private-address.
+
+ When used on the consuming side of the relation (unit is a remote
+ unit), the ingress-address is the IP address that this unit needs
+ to use to reach the provided service on the remote unit.
+
+ When used on the providing side of the relation (unit == local_unit()),
+ the ingress-address is the IP address that is advertised to remote
+ units on this relation. Remote units need to use this address to
+ reach the local provided service on this unit.
+
+ Note that charms may document some other method to use in
+ preference to the ingress_address(), such as an address provided
+ on a different relation attribute or a service discovery mechanism.
+ This allows charms to redirect inbound connections to their peers
+ or different applications such as load balancers.
+
+ Usage:
+ addresses = [ingress_address(rid=u.rid, unit=u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: string IP address
+ """
+ settings = relation_get(rid=rid, unit=unit)
+ return (settings.get('ingress-address') or
+ settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+ """
+ Retrieve the egress-subnets from a relation.
+
+ This function is to be used on the providing side of the
+ relation, and provides the ranges of addresses that client
+ connections may come from. The result is uninteresting on
+ the consuming side of a relation (unit == local_unit()).
+
+ Returns a stable list of subnets in CIDR format.
+ eg. ['192.168.1.0/24', '2001::F00F/128']
+
+ If egress-subnets is not available, falls back to using the published
+ ingress-address, or finally private-address.
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+ """
+ def _to_range(addr):
+ if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+ addr += '/32'
+ elif ':' in addr and '/' not in addr: # IPv6
+ addr += '/128'
+ return addr
+
+ settings = relation_get(rid=rid, unit=unit)
+ if 'egress-subnets' in settings:
+ return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+ if 'ingress-address' in settings:
+ return [_to_range(settings['ingress-address'])]
+ if 'private-address' in settings:
+ return [_to_range(settings['private-address'])]
+ return [] # Should never happen
diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py
index 5656e2f..322ab2a 100644
--- a/charmhelpers/core/host.py
+++ b/charmhelpers/core/host.py
@@ -34,7 +34,7 @@ import six
from contextlib import contextmanager
from collections import OrderedDict
-from .hookenv import log, DEBUG
+from .hookenv import log, DEBUG, local_unit
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
@@ -441,6 +441,49 @@ def add_user_to_group(username, group):
subprocess.check_call(cmd)
+def chage(username, lastday=None, expiredate=None, inactive=None,
+ mindays=None, maxdays=None, root=None, warndays=None):
+ """Change user password expiry information
+
+ :param str username: User to update
+ :param str lastday: Set when password was changed in YYYY-MM-DD format
+ :param str expiredate: Set when user's account will no longer be
+ accessible in YYYY-MM-DD format.
+ -1 will remove an account expiration date.
+ :param str inactive: Set the number of days of inactivity after a password
+ has expired before the account is locked.
+ -1 will remove an account's inactivity.
+ :param str mindays: Set the minimum number of days between password
+ changes to MIN_DAYS.
+ 0 indicates the password can be changed anytime.
+ :param str maxdays: Set the maximum number of days during which a
+ password is valid.
+ -1 as MAX_DAYS will remove checking maxdays
+ :param str root: Apply changes in the CHROOT_DIR directory
+ :param str warndays: Set the number of days of warning before a password
+ change is required
+ :raises subprocess.CalledProcessError: if call to chage fails
+ """
+ cmd = ['chage']
+ if root:
+ cmd.extend(['--root', root])
+ if lastday:
+ cmd.extend(['--lastday', lastday])
+ if expiredate:
+ cmd.extend(['--expiredate', expiredate])
+ if inactive:
+ cmd.extend(['--inactive', inactive])
+ if mindays:
+ cmd.extend(['--mindays', mindays])
+ if maxdays:
+ cmd.extend(['--maxdays', maxdays])
+ if warndays:
+ cmd.extend(['--warndays', warndays])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+
+remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
@@ -506,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
+ if six.PY3 and isinstance(content, six.string_types):
+ content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
@@ -946,3 +991,38 @@ def updatedb(updatedb_text, new_path):
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines)
return output
+
+
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+ """ Modulo distribution
+
+ This helper uses the unit number, a modulo value and a constant wait time
+ to produce a calculated wait time distribution. This is useful in large
+ scale deployments to distribute load during an expensive operation such as
+ service restarts.
+
+ If you have 1000 nodes that need to restart 100 at a time 1 minute at a
+ time:
+
+ time.wait(modulo_distribution(modulo=100, wait=60))
+ restart()
+
+ If you need restarts to happen serially set modulo to the exact number of
+ nodes and set a high constant wait time:
+
+ time.wait(modulo_distribution(modulo=10, wait=120))
+ restart()
+
+ @param modulo: int The modulo number creates the group distribution
+ @param wait: int The constant time wait value
+ @param non_zero_wait: boolean Override unit % modulo == 0,
+ return modulo * wait. Used to avoid collisions with
+ leader nodes which are often given priority.
+ @return: int Calculated time to wait for unit operation
+ """
+ unit_number = int(local_unit().split('/')[1])
+ calculated_wait_time = (unit_number % modulo) * wait
+ if non_zero_wait and calculated_wait_time == 0:
+ return modulo * wait
+ else:
+ return calculated_wait_time
diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..99451b5 100644
--- a/charmhelpers/core/host_factory/ubuntu.py
+++ b/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
+ 'bionic',
)
diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py
index ca9dc99..345b60d 100644
--- a/charmhelpers/core/services/base.py
+++ b/charmhelpers/core/services/base.py
@@ -313,17 +313,26 @@ class PortManagerCallback(ManagerCallback):
with open(port_file) as fp:
old_ports = fp.read().split(',')
for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
+ if bool(old_port) and not self.ports_contains(old_port, new_ports):
+ hookenv.close_port(old_port)
with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports))
for port in new_ports:
+ # A port is either a number or 'ICMP'
+ protocol = 'TCP'
+ if str(port).upper() == 'ICMP':
+ protocol = 'ICMP'
if event_name == 'start':
- hookenv.open_port(port)
+ hookenv.open_port(port, protocol)
elif event_name == 'stop':
- hookenv.close_port(port)
+ hookenv.close_port(port, protocol)
+
+ def ports_contains(self, port, ports):
+ if not bool(port):
+ return False
+ if str(port).upper() != 'ICMP':
+ port = int(port)
+ return port in ports
def service_stop(service_name):
diff --git a/charmhelpers/core/strutils.py b/charmhelpers/core/strutils.py
index 685dabd..e8df045 100644
--- a/charmhelpers/core/strutils.py
+++ b/charmhelpers/core/strutils.py
@@ -61,13 +61,19 @@ def bytes_from_string(value):
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ if matches:
+ size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ else:
+ # Assume that value passed in is bytes
+ try:
+ size = int(value)
+ except ValueError:
+ msg = "Unable to interpret string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ return size
class BasicStringComparator(object):
diff --git a/charmhelpers/core/templating.py b/charmhelpers/core/templating.py
index 7b801a3..9014015 100644
--- a/charmhelpers/core/templating.py
+++ b/charmhelpers/core/templating.py
@@ -20,7 +20,8 @@ from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+ perms=0o444, templates_dir=None, encoding='UTF-8',
+ template_loader=None, config_template=None):
"""
Render a template.
@@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root',
The context should be a dict containing the values to be replaced in the
template.
+ config_template may be provided to render from a provided template instead
+ of loading from a file.
+
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root',
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
+
+ # load from a string if provided explicitly
+ if config_template is not None:
+ template = template_env.from_string(config_template)
+ else:
+ try:
+ source = source
+ template = template_env.get_template(source)
+ except exceptions.TemplateNotFound as e:
+ hookenv.log('Could not load template %s from %s.' %
+ (source, templates_dir),
+ level=hookenv.ERROR)
+ raise e
content = template.render(context)
if target is not None:
target_dir = os.path.dirname(target)
diff --git a/charmhelpers/core/unitdata.py b/charmhelpers/core/unitdata.py
index 54ec969..6d7b494 100644
--- a/charmhelpers/core/unitdata.py
+++ b/charmhelpers/core/unitdata.py
@@ -175,6 +175,8 @@ class Storage(object):
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+ with open(self.db_path, 'a') as f:
+ os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
@@ -358,7 +360,7 @@ class Storage(object):
try:
yield self.revision
self.revision = None
- except:
+ except Exception:
self.flush(False)
self.revision = None
raise
diff --git a/charmhelpers/fetch/snap.py b/charmhelpers/fetch/snap.py
index 112a54c..395836c 100644
--- a/charmhelpers/fetch/snap.py
+++ b/charmhelpers/fetch/snap.py
@@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception):
pass
+class InvalidSnapChannel(Exception):
+ pass
+
+
def _snap_exec(commands):
"""
Execute snap commands.
@@ -132,3 +136,15 @@ def snap_refresh(packages, *flags):
log(message, level='INFO')
return _snap_exec(['refresh'] + flags + packages)
+
+
+def valid_snap_channel(channel):
+ """ Validate snap channel exists
+
+ :raises InvalidSnapChannel: When channel does not exist
+ :return: Boolean
+ """
+ if channel.lower() in SNAP_CHANNELS:
+ return True
+ else:
+ raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py
index 40e1cb5..910e96a 100644
--- a/charmhelpers/fetch/ubuntu.py
+++ b/charmhelpers/fetch/ubuntu.py
@@ -572,7 +572,7 @@ def get_upstream_version(package):
cache = apt_cache()
try:
pkg = cache[package]
- except:
+ except Exception:
# the package is unknown to the current apt cache.
return None
diff --git a/hooks/hooks.py b/hooks/hooks.py
index a231c66..54a58eb 100755
--- a/hooks/hooks.py
+++ b/hooks/hooks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
#
# Copyright 2014 Canonical Ltd. released under AGPL
#
@@ -113,6 +113,15 @@ class MirrorsConfigServiceContext(OSContextGenerator):
hypervisor_mapping=config['hypervisor_mapping'])
+def ensure_perms():
+ """Ensure gss file permissions."""
+ if os.path.isfile(ID_CONF_FILE_NAME):
+ os.chmod(ID_CONF_FILE_NAME, 0o640)
+
+ if os.path.isfile(MIRRORS_CONF_FILE_NAME,):
+ os.chmod(MIRRORS_CONF_FILE_NAME, 0o640)
+
+
def get_release():
return get_os_codename_package('glance-common', fatal=False) or 'icehouse'
@@ -198,6 +207,7 @@ def identity_service_joined(relation_id=None):
def identity_service_changed():
configs = get_configs()
configs.write(ID_CONF_FILE_NAME)
+ ensure_perms()
@hooks.hook('install.real')
@@ -230,6 +240,7 @@ def config_changed():
hookenv.log('begin config-changed hook.')
configs = get_configs()
configs.write(MIRRORS_CONF_FILE_NAME)
+ ensure_perms()
update_nrpe_config()
@@ -259,6 +270,7 @@ def upgrade_charm():
update_nrpe_config()
configs = get_configs()
configs.write_all()
+ ensure_perms()
@hooks.hook('amqp-relation-joined')
diff --git a/lib/.gitkeep b/lib/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/metadata.yaml b/metadata.yaml
index 1939715..cc6f242 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -10,7 +10,7 @@ tags:
series:
- trusty
- xenial
- - yakkety
+ - bionic
subordinate: false
provides:
simplestreams-image-service:
diff --git a/requirements.txt b/requirements.txt
index 730cb69..6a3271b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,4 +10,3 @@ Jinja2>=2.6 # BSD License (3 clause)
six>=1.9.0
dnspython>=1.12.0
psutil>=1.1.1,<2.0.0
-python-neutronclient>=2.6.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 0697294..9edd4bb 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,13 +11,17 @@ requests==2.6.0
# Liberty client lower constraints
amulet>=1.14.3,<2.0
bundletester>=0.6.1,<1.0
-python-ceilometerclient>=1.5.0,<2.0
-python-cinderclient>=1.4.0,<2.0
-python-glanceclient>=1.1.0,<2.0
-python-heatclient>=0.8.0,<1.0
-python-novaclient>=2.30.1,<3.0
-python-openstackclient>=1.7.0,<2.0
-python-swiftclient>=2.6.0,<3.0
+python-ceilometerclient>=1.5.0
+python-cinderclient>=1.4.0
+python-glanceclient>=1.1.0
+python-heatclient>=0.8.0
+python-keystoneclient>=1.7.1
+python-neutronclient>=3.1.0
+python-novaclient>=2.30.1
+python-openstackclient>=1.7.0
+python-swiftclient>=2.6.0
pika>=0.10.0,<1.0
distro-info
# END: Amulet OpenStack Charm Helper Requirements
+# NOTE: workaround for 14.04 pip/tox
+pytz
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..046be7f
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,9 @@
+# Overview
+
+This directory provides Amulet tests to verify basic deployment functionality
+from the perspective of this charm, its requirements and its features, as
+exercised in a subset of the full OpenStack deployment test bundle topology.
+
+For full details on functional testing of OpenStack charms please refer to
+the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
+section of the OpenStack Charm Guide.
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
new file mode 100644
index 0000000..e0103ba
--- /dev/null
+++ b/tests/basic_deployment.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Basic glance-simplestreams-sync functional tests.
+"""
+
+import time
+import amulet
+
+from charmhelpers.contrib.openstack.amulet.deployment import (
+ OpenStackAmuletDeployment
+)
+
+from charmhelpers.contrib.openstack.amulet.utils import (
+ OpenStackAmuletUtils,
+ DEBUG,
+ # ERROR
+)
+
+# Use DEBUG to turn on debug logging
+u = OpenStackAmuletUtils(DEBUG)
+
+
+class GlanceBasicDeployment(OpenStackAmuletDeployment):
+ """Amulet tests on a basic file-backed glance deployment. Verify
+ relations, service status, endpoint service catalog, create and
+ delete new image."""
+
+ SERVICES = ('apache2', 'haproxy', 'glance-api', 'glance-registry')
+
+ def __init__(self, series=None, openstack=None, source=None,
+ stable=False):
+ """Deploy the entire test environment."""
+ super(GlanceBasicDeployment, self).__init__(series, openstack,
+ source, stable)
+ self._add_services()
+ self._add_relations()
+ self._configure_services()
+ self._deploy()
+
+ u.log.info('Waiting on extended status checks...')
+
+ # NOTE(beisner): This charm lacks support for juju workload status.
+ # That means that this charm will not affirm a ready state for those
+ # waiting for a ready state through tools like juju-wait, leading to
+ # anticipated race conditions and automation blocking conditions.
+ exclude_services = ['glance-simplestreams-sync']
+
+ self._auto_wait_for_status(exclude_services=exclude_services)
+
+ self.d.sentry.wait()
+ self._initialize_tests()
+
+ def _assert_services(self, should_run):
+ u.get_unit_process_ids(
+ {self.glance_sentry: self.SERVICES},
+ expect_success=should_run)
+
+ def _add_services(self):
+ """Add services
+
+ Add the services that we're testing, where glance is local,
+ and the rest of the services are from lp branches that are
+ compatible with the local charm (e.g. stable or next).
+ """
+ this_service = {'name': 'glance-simplestreams-sync'}
+ other_services = [
+ {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
+ {'name': 'glance'},
+ {'name': 'rabbitmq-server'},
+ {'name': 'keystone'},
+ ]
+ super(GlanceBasicDeployment, self)._add_services(
+ this_service,
+ other_services,
+ use_source=['glance-simplestreams-sync'],
+ )
+
+ def _add_relations(self):
+ """Add relations for the services."""
+ relations = {
+ 'glance:identity-service': 'keystone:identity-service',
+ 'glance:shared-db': 'percona-cluster:shared-db',
+ 'keystone:shared-db': 'percona-cluster:shared-db',
+ 'glance:amqp': 'rabbitmq-server:amqp',
+ 'glance-simplestreams-sync:identity-service':
+ 'keystone:identity-service',
+ 'glance-simplestreams-sync:amqp':
+ 'rabbitmq-server:amqp',
+ }
+
+ super(GlanceBasicDeployment, self)._add_relations(relations)
+
+ def _configure_services(self):
+ """Configure all of the services."""
+ gss_config = {
+ # https://bugs.launchpad.net/bugs/1686437
+ 'source': 'ppa:simplestreams-dev/trunk',
+ 'use_swift': 'False',
+ }
+ glance_config = {}
+ keystone_config = {
+ 'admin-password': 'openstack',
+ 'admin-token': 'ubuntutesting',
+ }
+ pxc_config = {
+ 'dataset-size': '25%',
+ 'max-connections': 1000,
+ 'root-password': 'ChangeMe123',
+ 'sst-password': 'ChangeMe123',
+ }
+ configs = {
+ 'glance-simplestreams-sync': gss_config,
+ 'glance': glance_config,
+ 'keystone': keystone_config,
+ 'percona-cluster': pxc_config,
+ }
+ super(GlanceBasicDeployment, self)._configure_services(configs)
+
+ def _initialize_tests(self):
+ """Perform final initialization before tests get run."""
+ # Access the sentries for inspecting service units
+ self.gss_sentry = self.d.sentry['glance-simplestreams-sync'][0]
+ self.pxc_sentry = self.d.sentry['percona-cluster'][0]
+ self.glance_sentry = self.d.sentry['glance'][0]
+ self.keystone_sentry = self.d.sentry['keystone'][0]
+ self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
+ u.log.debug('openstack release val: {}'.format(
+ self._get_openstack_release()))
+ u.log.debug('openstack release str: {}'.format(
+ self._get_openstack_release_string()))
+
+ # Authenticate admin with keystone
+ self.keystone_session, self.keystone = u.get_default_keystone_session(
+ self.keystone_sentry,
+ openstack_release=self._get_openstack_release())
+
+ # Authenticate admin with glance endpoint
+ self.glance = u.authenticate_glance_admin(self.keystone)
+
+ def test_001_wait_for_image_sync(self):
+ """Wait for images to be synced. Expect at least one."""
+
+ max_image_wait = 600
+ retry_sleep = 2
+ images = []
+
+ time_start = time.time()
+ while not images:
+ images = [image.name for image in self.glance.images.list()]
+ u.log.debug('Images: {}'.format(images))
+ if images:
+ break
+
+ time_now = time.time()
+ if time_now - time_start >= max_image_wait:
+ raise Exception('Images not synced within '
+ '{}s'.format(time_now - time_start))
+ else:
+ u.log.debug('Waiting {}s'.format(retry_sleep))
+ time.sleep(retry_sleep)
+ retry_sleep = retry_sleep + 4 if retry_sleep < 30 else 30
+
+ def test_050_gss_permissions_regression_check_lp1611987(self):
+ """Assert the intended file permissions on gss config files
+ https://bugs.launchpad.net/bugs/1611987"""
+
+ perm_check = [
+ {
+ 'file_path': '/etc/glance-simplestreams-sync/identity.yaml',
+ 'expected_perms': '640',
+ 'unit_sentry': self.gss_sentry
+ },
+ {
+ 'file_path': '/etc/glance-simplestreams-sync/mirrors.yaml',
+ 'expected_perms': '640',
+ 'unit_sentry': self.gss_sentry
+ },
+ ]
+
+ for _check in perm_check:
+ cmd = 'stat -c %a {}'.format(_check['file_path'])
+ output, _ = u.run_cmd_unit(_check['unit_sentry'], cmd)
+
+ assert output == _check['expected_perms'], \
+ '{} perms not as expected'.format(_check['file_path'])
+
+ u.log.debug('Permissions on {}: {}'.format(
+ _check['file_path'], output))
+
+ def test_102_service_catalog(self):
+ """Verify that the service catalog endpoint data is valid."""
+ u.log.debug('Checking keystone service catalog...')
+ endpoint_check = {
+ 'adminURL': u.valid_url,
+ 'id': u.not_null,
+ 'region': 'RegionOne',
+ 'publicURL': u.valid_url,
+ 'internalURL': u.valid_url
+ }
+ expected = {
+ 'product-streams': [endpoint_check],
+ 'image': [endpoint_check],
+ 'identity': [endpoint_check]
+ }
+ actual = self.keystone.service_catalog.get_endpoints()
+
+ ret = u.validate_svc_catalog_endpoint_data(
+ expected,
+ actual,
+ openstack_release=self._get_openstack_release())
+ if ret:
+ amulet.raise_status(amulet.FAIL, msg=ret)
+
+ def test_115_memcache(self):
+ u.validate_memcache(self.glance_sentry,
+ '/etc/glance/glance-api.conf',
+ self._get_openstack_release(),
+ earliest_release=self.trusty_mitaka)
+ u.validate_memcache(self.glance_sentry,
+ '/etc/glance/glance-registry.conf',
+ self._get_openstack_release(),
+ earliest_release=self.trusty_mitaka)
diff --git a/tests/charmhelpers/__init__.py b/tests/charmhelpers/__init__.py
new file mode 100644
index 0000000..e7aa471
--- /dev/null
+++ b/tests/charmhelpers/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
+import subprocess
+import sys
+
+try:
+ import six # flake8: noqa
+except ImportError:
+ if sys.version_info.major == 2:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+ else:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+ import six # flake8: noqa
+
+try:
+ import yaml # flake8: noqa
+except ImportError:
+ if sys.version_info.major == 2:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+ else:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+ import yaml # flake8: noqa
+
+
+# Holds a list of mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below. This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+ """Add a deprecation warning the first time the function is used.
+ The date, which is a string in semi-ISO8601 format, indicates the year-month
+ that the function is officially going to be removed.
+
+ usage:
+
+ @deprecate('use core/fetch/add_source() instead', '2017-04')
+ def contributed_add_source_thing(...):
+ ...
+
+ And it then prints to the log ONCE that the function is deprecated.
+ The reason for passing the logging function (log) is so that hookenv.log
+ can be used for a charm if needed.
+
+ :param warning: String to indicate where it has moved to.
+ :param date: optional string, in YYYY-MM format to indicate when the
+ function will definitely (probably) be removed.
+ :param log: The log function to call to log. If not, logs to stdout
+ """
+ def wrap(f):
+
+ @functools.wraps(f)
+ def wrapped_f(*args, **kwargs):
+ try:
+ module = inspect.getmodule(f)
+ file = inspect.getsourcefile(f)
+ lines = inspect.getsourcelines(f)
+ f_name = "{}-{}-{}..{}-{}".format(
+ module.__name__, file, lines[0], lines[-1], f.__name__)
+ except (IOError, TypeError):
+ # assume it was local, so just use the name of the function
+ f_name = f.__name__
+ if f_name not in __deprecated_functions:
+ __deprecated_functions[f_name] = True
+ s = "DEPRECATION WARNING: Function {} is being removed".format(
+ f.__name__)
+ if date:
+ s = "{} on/around {}".format(s, date)
+ if warning:
+ s = "{} : {}".format(s, warning)
+ if log:
+ log(s)
+ else:
+ print(s)
+ return f(*args, **kwargs)
+ return wrapped_f
+ return wrap
diff --git a/tests/charmhelpers/contrib/__init__.py b/tests/charmhelpers/contrib/__init__.py
new file mode 100644
index 0000000..d7567b8
--- /dev/null
+++ b/tests/charmhelpers/contrib/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/amulet/__init__.py b/tests/charmhelpers/contrib/amulet/__init__.py
new file mode 100644
index 0000000..d7567b8
--- /dev/null
+++ b/tests/charmhelpers/contrib/amulet/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py
new file mode 100644
index 0000000..9c65518
--- /dev/null
+++ b/tests/charmhelpers/contrib/amulet/deployment.py
@@ -0,0 +1,97 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import amulet
+import os
+import six
+
+
+class AmuletDeployment(object):
+ """Amulet deployment.
+
+ This class provides generic Amulet deployment and test runner
+ methods.
+ """
+
+ def __init__(self, series=None):
+ """Initialize the deployment environment."""
+ self.series = None
+
+ if series:
+ self.series = series
+ self.d = amulet.Deployment(series=self.series)
+ else:
+ self.d = amulet.Deployment()
+
+ def _add_services(self, this_service, other_services):
+ """Add services.
+
+ Add services to the deployment where this_service is the local charm
+ that we're testing and other_services are the other services that
+ are being used in the local amulet tests.
+ """
+ if this_service['name'] != os.path.basename(os.getcwd()):
+ s = this_service['name']
+ msg = "The charm's root directory name needs to be {}".format(s)
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ if 'units' not in this_service:
+ this_service['units'] = 1
+
+ self.d.add(this_service['name'], units=this_service['units'],
+ constraints=this_service.get('constraints'))
+
+ for svc in other_services:
+ if 'location' in svc:
+ branch_location = svc['location']
+ elif self.series:
+ branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
+ else:
+ branch_location = None
+
+ if 'units' not in svc:
+ svc['units'] = 1
+
+ self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+ constraints=svc.get('constraints'))
+
+ def _add_relations(self, relations):
+ """Add all of the relations for the services."""
+ for k, v in six.iteritems(relations):
+ self.d.relate(k, v)
+
+ def _configure_services(self, configs):
+ """Configure all of the services."""
+ for service, config in six.iteritems(configs):
+ self.d.configure(service, config)
+
+ def _deploy(self):
+ """Deploy environment and wait for all hooks to finish executing."""
+ timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
+ try:
+ self.d.setup(timeout=timeout)
+ self.d.sentry.wait(timeout=timeout)
+ except amulet.helpers.TimeoutError:
+ amulet.raise_status(
+ amulet.FAIL,
+ msg="Deployment timed out ({}s)".format(timeout)
+ )
+ except Exception:
+ raise
+
+ def run_tests(self):
+ """Run all of the methods that are prefixed with 'test_'."""
+ for test in dir(self):
+ if test.startswith('test_'):
+ getattr(self, test)()
diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py
new file mode 100644
index 0000000..8a6b764
--- /dev/null
+++ b/tests/charmhelpers/contrib/amulet/utils.py
@@ -0,0 +1,821 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import json
+import logging
+import os
+import re
+import socket
+import subprocess
+import sys
+import time
+import uuid
+
+import amulet
+import distro_info
+import six
+from six.moves import configparser
+if six.PY3:
+ from urllib import parse as urlparse
+else:
+ import urlparse
+
+
+class AmuletUtils(object):
+ """Amulet utilities.
+
+ This class provides common utility functions that are used by Amulet
+ tests.
+ """
+
+ def __init__(self, log_level=logging.ERROR):
+ self.log = self.get_logger(level=log_level)
+ self.ubuntu_releases = self.get_ubuntu_releases()
+
+ def get_logger(self, name="amulet-logger", level=logging.DEBUG):
+ """Get a logger object that will log to stdout."""
+ log = logging
+ logger = log.getLogger(name)
+ fmt = log.Formatter("%(asctime)s %(funcName)s "
+ "%(levelname)s: %(message)s")
+
+ handler = log.StreamHandler(stream=sys.stdout)
+ handler.setLevel(level)
+ handler.setFormatter(fmt)
+
+ logger.addHandler(handler)
+ logger.setLevel(level)
+
+ return logger
+
+ def valid_ip(self, ip):
+ if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
+ return True
+ else:
+ return False
+
+ def valid_url(self, url):
+ p = re.compile(
+ r'^(?:http|ftp)s?://'
+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
+ r'localhost|'
+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
+ r'(?::\d+)?'
+ r'(?:/?|[/?]\S+)$',
+ re.IGNORECASE)
+ if p.match(url):
+ return True
+ else:
+ return False
+
+ def get_ubuntu_release_from_sentry(self, sentry_unit):
+ """Get Ubuntu release codename from sentry unit.
+
+ :param sentry_unit: amulet sentry/service unit pointer
+ :returns: list of strings - release codename, failure message
+ """
+ msg = None
+ cmd = 'lsb_release -cs'
+ release, code = sentry_unit.run(cmd)
+ if code == 0:
+ self.log.debug('{} lsb_release: {}'.format(
+ sentry_unit.info['unit_name'], release))
+ else:
+ msg = ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, release, code))
+ if release not in self.ubuntu_releases:
+ msg = ("Release ({}) not found in Ubuntu releases "
+ "({})".format(release, self.ubuntu_releases))
+ return release, msg
+
+ def validate_services(self, commands):
+ """Validate that lists of commands succeed on service units. Can be
+ used to verify system services are running on the corresponding
+ service units.
+
+ :param commands: dict with sentry keys and arbitrary command list vals
+ :returns: None if successful, Failure string message otherwise
+ """
+ self.log.debug('Checking status of system services...')
+
+ # /!\ DEPRECATION WARNING (beisner):
+ # New and existing tests should be rewritten to use
+ # validate_services_by_name() as it is aware of init systems.
+ self.log.warn('DEPRECATION WARNING: use '
+ 'validate_services_by_name instead of validate_services '
+ 'due to init system differences.')
+
+ for k, v in six.iteritems(commands):
+ for cmd in v:
+ output, code = k.run(cmd)
+ self.log.debug('{} `{}` returned '
+ '{}'.format(k.info['unit_name'],
+ cmd, code))
+ if code != 0:
+ return "command `{}` returned {}".format(cmd, str(code))
+ return None
+
+ def validate_services_by_name(self, sentry_services):
+ """Validate system service status by service name, automatically
+ detecting init system based on Ubuntu release codename.
+
+ :param sentry_services: dict with sentry keys and svc list values
+ :returns: None if successful, Failure string message otherwise
+ """
+ self.log.debug('Checking status of system services...')
+
+ # Point at which systemd became a thing
+ systemd_switch = self.ubuntu_releases.index('vivid')
+
+ for sentry_unit, services_list in six.iteritems(sentry_services):
+ # Get lsb_release codename from unit
+ release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
+ if ret:
+ return ret
+
+ for service_name in services_list:
+ if (self.ubuntu_releases.index(release) >= systemd_switch or
+ service_name in ['rabbitmq-server', 'apache2',
+ 'memcached']):
+ # init is systemd (or regular sysv)
+ cmd = 'sudo service {} status'.format(service_name)
+ output, code = sentry_unit.run(cmd)
+ service_running = code == 0
+ elif self.ubuntu_releases.index(release) < systemd_switch:
+ # init is upstart
+ cmd = 'sudo status {}'.format(service_name)
+ output, code = sentry_unit.run(cmd)
+ service_running = code == 0 and "start/running" in output
+
+ self.log.debug('{} `{}` returned '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code))
+ if not service_running:
+ return u"command `{}` returned {} {}".format(
+ cmd, output, str(code))
+ return None
+
+ def _get_config(self, unit, filename):
+ """Get a ConfigParser object for parsing a unit's config file."""
+ file_contents = unit.file_contents(filename)
+
+ # NOTE(beisner): by default, ConfigParser does not handle options
+ # with no value, such as the flags used in the mysql my.cnf file.
+ # https://bugs.python.org/issue7005
+ config = configparser.ConfigParser(allow_no_value=True)
+ config.readfp(io.StringIO(file_contents))
+ return config
+
+ def validate_config_data(self, sentry_unit, config_file, section,
+ expected):
+ """Validate config file data.
+
+ Verify that the specified section of the config file contains
+ the expected option key:value pairs.
+
+ Compare expected dictionary data vs actual dictionary data.
+ The values in the 'expected' dictionary can be strings, bools, ints,
+ longs, or can be a function that evaluates a variable and returns a
+ bool.
+ """
+ self.log.debug('Validating config file data ({} in {} on {})'
+ '...'.format(section, config_file,
+ sentry_unit.info['unit_name']))
+ config = self._get_config(sentry_unit, config_file)
+
+ if section != 'DEFAULT' and not config.has_section(section):
+ return "section [{}] does not exist".format(section)
+
+ for k in expected.keys():
+ if not config.has_option(section, k):
+ return "section [{}] is missing option {}".format(section, k)
+
+ actual = config.get(section, k)
+ v = expected[k]
+ if (isinstance(v, six.string_types) or
+ isinstance(v, bool) or
+ isinstance(v, six.integer_types)):
+ # handle explicit values
+ if actual != v:
+ return "section [{}] {}:{} != expected {}:{}".format(
+ section, k, actual, k, expected[k])
+ # handle function pointers, such as not_null or valid_ip
+ elif not v(actual):
+ return "section [{}] {}:{} != expected {}:{}".format(
+ section, k, actual, k, expected[k])
+ return None
+
+ def _validate_dict_data(self, expected, actual):
+ """Validate dictionary data.
+
+ Compare expected dictionary data vs actual dictionary data.
+ The values in the 'expected' dictionary can be strings, bools, ints,
+ longs, or can be a function that evaluates a variable and returns a
+ bool.
+ """
+ self.log.debug('actual: {}'.format(repr(actual)))
+ self.log.debug('expected: {}'.format(repr(expected)))
+
+ for k, v in six.iteritems(expected):
+ if k in actual:
+ if (isinstance(v, six.string_types) or
+ isinstance(v, bool) or
+ isinstance(v, six.integer_types)):
+ # handle explicit values
+ if v != actual[k]:
+ return "{}:{}".format(k, actual[k])
+ # handle function pointers, such as not_null or valid_ip
+ elif not v(actual[k]):
+ return "{}:{}".format(k, actual[k])
+ else:
+ return "key '{}' does not exist".format(k)
+ return None
+
+ def validate_relation_data(self, sentry_unit, relation, expected):
+ """Validate actual relation data based on expected relation data."""
+ actual = sentry_unit.relation(relation[0], relation[1])
+ return self._validate_dict_data(expected, actual)
+
+ def _validate_list_data(self, expected, actual):
+ """Compare expected list vs actual list data."""
+ for e in expected:
+ if e not in actual:
+ return "expected item {} not found in actual list".format(e)
+ return None
+
+ def not_null(self, string):
+ if string is not None:
+ return True
+ else:
+ return False
+
+ def _get_file_mtime(self, sentry_unit, filename):
+ """Get last modification time of file."""
+ return sentry_unit.file_stat(filename)['mtime']
+
+ def _get_dir_mtime(self, sentry_unit, directory):
+ """Get last modification time of directory."""
+ return sentry_unit.directory_stat(directory)['mtime']
+
+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
+ """Get start time of a process based on the last modification time
+ of the /proc/pid directory.
+
+ :sentry_unit: The sentry unit to check for the service on
+ :service: service name to look for in process table
+ :pgrep_full: [Deprecated] Use full command line search mode with pgrep
+ :returns: epoch time of service process start
+ :param commands: list of bash commands
+ :param sentry_units: list of sentry unit pointers
+ :returns: None if successful; Failure message otherwise
+ """
+ if pgrep_full is not None:
+ # /!\ DEPRECATION WARNING (beisner):
+ # No longer implemented, as pidof is now used instead of pgrep.
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+ self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
+ 'longer implemented re: lp 1474030.')
+
+ pid_list = self.get_process_id_list(sentry_unit, service)
+ pid = pid_list[0]
+ proc_dir = '/proc/{}'.format(pid)
+ self.log.debug('Pid for {} on {}: {}'.format(
+ service, sentry_unit.info['unit_name'], pid))
+
+ return self._get_dir_mtime(sentry_unit, proc_dir)
+
+ def service_restarted(self, sentry_unit, service, filename,
+ pgrep_full=None, sleep_time=20):
+ """Check if service was restarted.
+
+ Compare a service's start time vs a file's last modification time
+ (such as a config file for that service) to determine if the service
+ has been restarted.
+ """
+ # /!\ DEPRECATION WARNING (beisner):
+ # This method is prone to races in that no before-time is known.
+ # Use validate_service_config_changed instead.
+
+ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+ # used instead of pgrep. pgrep_full is still passed through to ensure
+ # deprecation WARNS. lp1474030
+ self.log.warn('DEPRECATION WARNING: use '
+ 'validate_service_config_changed instead of '
+ 'service_restarted due to known races.')
+
+ time.sleep(sleep_time)
+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
+ self._get_file_mtime(sentry_unit, filename)):
+ return True
+ else:
+ return False
+
+ def service_restarted_since(self, sentry_unit, mtime, service,
+ pgrep_full=None, sleep_time=20,
+ retry_count=30, retry_sleep_time=10):
+        """Check if service has been started after a given time.
+
+ Args:
+ sentry_unit (sentry): The sentry unit to check for the service on
+ mtime (float): The epoch time to check against
+ service (string): service name to look for in process table
+ pgrep_full: [Deprecated] Use full command line search mode with pgrep
+ sleep_time (int): Initial sleep time (s) before looking for file
+ retry_sleep_time (int): Time (s) to sleep between retries
+ retry_count (int): If file is not found, how many times to retry
+
+ Returns:
+            bool: True if service found and its start time is newer than mtime,
+ False if service is older than mtime or if service was
+ not found.
+ """
+ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+ # used instead of pgrep. pgrep_full is still passed through to ensure
+ # deprecation WARNS. lp1474030
+
+ unit_name = sentry_unit.info['unit_name']
+ self.log.debug('Checking that %s service restarted since %s on '
+ '%s' % (service, mtime, unit_name))
+ time.sleep(sleep_time)
+ proc_start_time = None
+ tries = 0
+ while tries <= retry_count and not proc_start_time:
+ try:
+ proc_start_time = self._get_proc_start_time(sentry_unit,
+ service,
+ pgrep_full)
+ self.log.debug('Attempt {} to get {} proc start time on {} '
+ 'OK'.format(tries, service, unit_name))
+ except IOError as e:
+ # NOTE(beisner) - race avoidance, proc may not exist yet.
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+ self.log.debug('Attempt {} to get {} proc start time on {} '
+ 'failed\n{}'.format(tries, service,
+ unit_name, e))
+ time.sleep(retry_sleep_time)
+ tries += 1
+
+ if not proc_start_time:
+ self.log.warn('No proc start time found, assuming service did '
+ 'not start')
+ return False
+ if proc_start_time >= mtime:
+ self.log.debug('Proc start time is newer than provided mtime'
+ '(%s >= %s) on %s (OK)' % (proc_start_time,
+ mtime, unit_name))
+ return True
+ else:
+ self.log.warn('Proc start time (%s) is older than provided mtime '
+ '(%s) on %s, service did not '
+ 'restart' % (proc_start_time, mtime, unit_name))
+ return False
+
+ def config_updated_since(self, sentry_unit, filename, mtime,
+ sleep_time=20, retry_count=30,
+ retry_sleep_time=10):
+ """Check if file was modified after a given time.
+
+ Args:
+ sentry_unit (sentry): The sentry unit to check the file mtime on
+ filename (string): The file to check mtime of
+ mtime (float): The epoch time to check against
+ sleep_time (int): Initial sleep time (s) before looking for file
+ retry_sleep_time (int): Time (s) to sleep between retries
+ retry_count (int): If file is not found, how many times to retry
+
+ Returns:
+ bool: True if file was modified more recently than mtime, False if
+ file was modified before mtime, or if file not found.
+ """
+ unit_name = sentry_unit.info['unit_name']
+ self.log.debug('Checking that %s updated since %s on '
+ '%s' % (filename, mtime, unit_name))
+ time.sleep(sleep_time)
+ file_mtime = None
+ tries = 0
+ while tries <= retry_count and not file_mtime:
+ try:
+ file_mtime = self._get_file_mtime(sentry_unit, filename)
+ self.log.debug('Attempt {} to get {} file mtime on {} '
+ 'OK'.format(tries, filename, unit_name))
+ except IOError as e:
+ # NOTE(beisner) - race avoidance, file may not exist yet.
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+ self.log.debug('Attempt {} to get {} file mtime on {} '
+ 'failed\n{}'.format(tries, filename,
+ unit_name, e))
+ time.sleep(retry_sleep_time)
+ tries += 1
+
+ if not file_mtime:
+ self.log.warn('Could not determine file mtime, assuming '
+ 'file does not exist')
+ return False
+
+ if file_mtime >= mtime:
+ self.log.debug('File mtime is newer than provided mtime '
+ '(%s >= %s) on %s (OK)' % (file_mtime,
+ mtime, unit_name))
+ return True
+ else:
+ self.log.warn('File mtime is older than provided mtime'
+ '(%s < on %s) on %s' % (file_mtime,
+ mtime, unit_name))
+ return False
+
+ def validate_service_config_changed(self, sentry_unit, mtime, service,
+ filename, pgrep_full=None,
+ sleep_time=20, retry_count=30,
+ retry_sleep_time=10):
+ """Check service and file were updated after mtime
+
+ Args:
+ sentry_unit (sentry): The sentry unit to check for the service on
+ mtime (float): The epoch time to check against
+ service (string): service name to look for in process table
+ filename (string): The file to check mtime of
+ pgrep_full: [Deprecated] Use full command line search mode with pgrep
+ sleep_time (int): Initial sleep in seconds to pass to test helpers
+ retry_count (int): If service is not found, how many times to retry
+ retry_sleep_time (int): Time in seconds to wait between retries
+
+ Typical Usage:
+ u = OpenStackAmuletUtils(ERROR)
+ ...
+ mtime = u.get_sentry_time(self.cinder_sentry)
+ self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
+ if not u.validate_service_config_changed(self.cinder_sentry,
+ mtime,
+ 'cinder-api',
+ '/etc/cinder/cinder.conf')
+ amulet.raise_status(amulet.FAIL, msg='update failed')
+ Returns:
+ bool: True if both service and file where updated/restarted after
+ mtime, False if service is older than mtime or if service was
+ not found or if filename was modified before mtime.
+ """
+
+ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+ # used instead of pgrep. pgrep_full is still passed through to ensure
+ # deprecation WARNS. lp1474030
+
+ service_restart = self.service_restarted_since(
+ sentry_unit, mtime,
+ service,
+ pgrep_full=pgrep_full,
+ sleep_time=sleep_time,
+ retry_count=retry_count,
+ retry_sleep_time=retry_sleep_time)
+
+ config_update = self.config_updated_since(
+ sentry_unit,
+ filename,
+ mtime,
+ sleep_time=sleep_time,
+ retry_count=retry_count,
+ retry_sleep_time=retry_sleep_time)
+
+ return service_restart and config_update
+
+ def get_sentry_time(self, sentry_unit):
+ """Return current epoch time on a sentry"""
+ cmd = "date +'%s'"
+ return float(sentry_unit.run(cmd)[0])
+
+ def relation_error(self, name, data):
+ return 'unexpected relation data in {} - {}'.format(name, data)
+
+ def endpoint_error(self, name, data):
+ return 'unexpected endpoint data in {} - {}'.format(name, data)
+
+ def get_ubuntu_releases(self):
+ """Return a list of all Ubuntu releases in order of release."""
+ _d = distro_info.UbuntuDistroInfo()
+ _release_list = _d.all
+ return _release_list
+
+ def file_to_url(self, file_rel_path):
+ """Convert a relative file path to a file URL."""
+ _abs_path = os.path.abspath(file_rel_path)
+ return urlparse.urlparse(_abs_path, scheme='file').geturl()
+
+ def check_commands_on_units(self, commands, sentry_units):
+ """Check that all commands in a list exit zero on all
+ sentry units in a list.
+
+ :param commands: list of bash commands
+ :param sentry_units: list of sentry unit pointers
+ :returns: None if successful; Failure message otherwise
+ """
+ self.log.debug('Checking exit codes for {} commands on {} '
+ 'sentry units...'.format(len(commands),
+ len(sentry_units)))
+ for sentry_unit in sentry_units:
+ for cmd in commands:
+ output, code = sentry_unit.run(cmd)
+ if code == 0:
+ self.log.debug('{} `{}` returned {} '
+ '(OK)'.format(sentry_unit.info['unit_name'],
+ cmd, code))
+ else:
+ return ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ return None
+
+ def get_process_id_list(self, sentry_unit, process_name,
+ expect_success=True):
+ """Get a list of process ID(s) from a single sentry juju unit
+ for a single process name.
+
+ :param sentry_unit: Amulet sentry instance (juju unit)
+ :param process_name: Process name
+ :param expect_success: If False, expect the PID to be missing,
+ raise if it is present.
+ :returns: List of process IDs
+ """
+ cmd = 'pidof -x "{}"'.format(process_name)
+ if not expect_success:
+ cmd += " || exit 0 && exit 1"
+ output, code = sentry_unit.run(cmd)
+ if code != 0:
+ msg = ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+ return str(output).split()
+
+ def get_unit_process_ids(self, unit_processes, expect_success=True):
+ """Construct a dict containing unit sentries, process names, and
+ process IDs.
+
+ :param unit_processes: A dictionary of Amulet sentry instance
+ to list of process names.
+ :param expect_success: if False expect the processes to not be
+ running, raise if they are.
+ :returns: Dictionary of Amulet sentry instance to dictionary
+ of process names to PIDs.
+ """
+ pid_dict = {}
+ for sentry_unit, process_list in six.iteritems(unit_processes):
+ pid_dict[sentry_unit] = {}
+ for process in process_list:
+ pids = self.get_process_id_list(
+ sentry_unit, process, expect_success=expect_success)
+ pid_dict[sentry_unit].update({process: pids})
+ return pid_dict
+
+ def validate_unit_process_ids(self, expected, actual):
+ """Validate process id quantities for services on units."""
+ self.log.debug('Checking units for running processes...')
+ self.log.debug('Expected PIDs: {}'.format(expected))
+ self.log.debug('Actual PIDs: {}'.format(actual))
+
+ if len(actual) != len(expected):
+ return ('Unit count mismatch. expected, actual: {}, '
+ '{} '.format(len(expected), len(actual)))
+
+ for (e_sentry, e_proc_names) in six.iteritems(expected):
+ e_sentry_name = e_sentry.info['unit_name']
+ if e_sentry in actual.keys():
+ a_proc_names = actual[e_sentry]
+ else:
+ return ('Expected sentry ({}) not found in actual dict data.'
+ '{}'.format(e_sentry_name, e_sentry))
+
+ if len(e_proc_names.keys()) != len(a_proc_names.keys()):
+ return ('Process name count mismatch. expected, actual: {}, '
+ '{}'.format(len(expected), len(actual)))
+
+ for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
+ zip(e_proc_names.items(), a_proc_names.items()):
+ if e_proc_name != a_proc_name:
+ return ('Process name mismatch. expected, actual: {}, '
+ '{}'.format(e_proc_name, a_proc_name))
+
+ a_pids_length = len(a_pids)
+ fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
+ '{}, {} ({})'.format(e_sentry_name, e_proc_name,
+ e_pids, a_pids_length,
+ a_pids))
+
+ # If expected is a list, ensure at least one PID quantity match
+ if isinstance(e_pids, list) and \
+ a_pids_length not in e_pids:
+ return fail_msg
+ # If expected is not bool and not list,
+ # ensure PID quantities match
+ elif not isinstance(e_pids, bool) and \
+ not isinstance(e_pids, list) and \
+ a_pids_length != e_pids:
+ return fail_msg
+ # If expected is bool True, ensure 1 or more PIDs exist
+ elif isinstance(e_pids, bool) and \
+ e_pids is True and a_pids_length < 1:
+ return fail_msg
+ # If expected is bool False, ensure 0 PIDs exist
+ elif isinstance(e_pids, bool) and \
+ e_pids is False and a_pids_length != 0:
+ return fail_msg
+ else:
+ self.log.debug('PID check OK: {} {} {}: '
+ '{}'.format(e_sentry_name, e_proc_name,
+ e_pids, a_pids))
+ return None
+
+ def validate_list_of_identical_dicts(self, list_of_dicts):
+ """Check that all dicts within a list are identical."""
+ hashes = []
+ for _dict in list_of_dicts:
+ hashes.append(hash(frozenset(_dict.items())))
+
+ self.log.debug('Hashes: {}'.format(hashes))
+ if len(set(hashes)) == 1:
+ self.log.debug('Dicts within list are identical')
+ else:
+ return 'Dicts within list are not identical'
+
+ return None
+
+ def validate_sectionless_conf(self, file_contents, expected):
+ """A crude conf parser. Useful to inspect configuration files which
+ do not have section headers (as would be necessary in order to use
+ the configparser). Such as openstack-dashboard or rabbitmq confs."""
+ for line in file_contents.split('\n'):
+ if '=' in line:
+ args = line.split('=')
+ if len(args) <= 1:
+ continue
+ key = args[0].strip()
+ value = args[1].strip()
+ if key in expected.keys():
+ if expected[key] != value:
+ msg = ('Config mismatch. Expected, actual: {}, '
+ '{}'.format(expected[key], value))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ def get_unit_hostnames(self, units):
+ """Return a dict of juju unit names to hostnames."""
+ host_names = {}
+ for unit in units:
+ host_names[unit.info['unit_name']] = \
+ str(unit.file_contents('/etc/hostname').strip())
+ self.log.debug('Unit host names: {}'.format(host_names))
+ return host_names
+
+ def run_cmd_unit(self, sentry_unit, cmd):
+ """Run a command on a unit, return the output and exit code."""
+ output, code = sentry_unit.run(cmd)
+ if code == 0:
+ self.log.debug('{} `{}` command returned {} '
+ '(OK)'.format(sentry_unit.info['unit_name'],
+ cmd, code))
+ else:
+ msg = ('{} `{}` command returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+ return str(output), code
+
+ def file_exists_on_unit(self, sentry_unit, file_name):
+ """Check if a file exists on a unit."""
+ try:
+ sentry_unit.file_stat(file_name)
+ return True
+ except IOError:
+ return False
+ except Exception as e:
+ msg = 'Error checking file {}: {}'.format(file_name, e)
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ def file_contents_safe(self, sentry_unit, file_name,
+ max_wait=60, fatal=False):
+ """Get file contents from a sentry unit. Wrap amulet file_contents
+ with retry logic to address races where a file checks as existing,
+ but no longer exists by the time file_contents is called.
+ Return None if file not found. Optionally raise if fatal is True."""
+ unit_name = sentry_unit.info['unit_name']
+ file_contents = False
+ tries = 0
+ while not file_contents and tries < (max_wait / 4):
+ try:
+ file_contents = sentry_unit.file_contents(file_name)
+ except IOError:
+ self.log.debug('Attempt {} to open file {} from {} '
+ 'failed'.format(tries, file_name,
+ unit_name))
+ time.sleep(4)
+ tries += 1
+
+ if file_contents:
+ return file_contents
+ elif not fatal:
+ return None
+ elif fatal:
+ msg = 'Failed to get file contents from unit.'
+ amulet.raise_status(amulet.FAIL, msg)
+
+ def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening service on a host.
+
+ :param host: host name or IP address, default to localhost
+ :param port: TCP port number, default to 22
+ :param timeout: Connect timeout, default to 15 seconds
+ :returns: True if successful, False if connect failed
+ """
+
+ # Resolve host name if possible
+ try:
+ connect_host = socket.gethostbyname(host)
+ host_human = "{} ({})".format(connect_host, host)
+ except socket.error as e:
+ self.log.warn('Unable to resolve address: '
+ '{} ({}) Trying anyway!'.format(host, e))
+ connect_host = host
+ host_human = connect_host
+
+ # Attempt socket connection
+ try:
+ knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ knock.settimeout(timeout)
+ knock.connect((connect_host, port))
+ knock.close()
+ self.log.debug('Socket connect OK for host '
+ '{} on port {}.'.format(host_human, port))
+ return True
+ except socket.error as e:
+ self.log.debug('Socket connect FAIL for'
+ ' {} port {} ({})'.format(host_human, port, e))
+ return False
+
+ def port_knock_units(self, sentry_units, port=22,
+ timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening service on each
+ listed juju unit.
+
+ :param sentry_units: list of sentry unit pointers
+ :param port: TCP port number, default to 22
+ :param timeout: Connect timeout, default to 15 seconds
+ :expect_success: True by default, set False to invert logic
+ :returns: None if successful, Failure message otherwise
+ """
+ for unit in sentry_units:
+ host = unit.info['public-address']
+ connected = self.port_knock_tcp(host, port, timeout)
+ if not connected and expect_success:
+ return 'Socket connect failed.'
+ elif connected and not expect_success:
+ return 'Socket connected unexpectedly.'
+
+ def get_uuid_epoch_stamp(self):
+ """Returns a stamp string based on uuid4 and epoch time. Useful in
+ generating test messages which need to be unique-ish."""
+ return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+ # amulet juju action helpers:
+ def run_action(self, unit_sentry, action,
+ _check_output=subprocess.check_output,
+ params=None):
+ """Translate to amulet's built in run_action(). Deprecated.
+
+ Run the named action on a given unit sentry.
+
+ params a dict of parameters to use
+ _check_output parameter is no longer used
+
+ @return action_id.
+ """
+ self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
+ 'deprecated for amulet.run_action')
+ return unit_sentry.run_action(action, action_args=params)
+
+ def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+ """Wait for a given action, returning if it completed or not.
+
+ action_id a string action uuid
+ _check_output parameter is no longer used
+ """
+ data = amulet.actions.get_action_output(action_id, full_output=True)
+ return data.get(u"status") == "completed"
+
+ def status_get(self, unit):
+ """Return the current service status of this unit."""
+ raw_status, return_code = unit.run(
+ "status-get --format=json --include-data")
+ if return_code != 0:
+ return ("unknown", "")
+ status = json.loads(raw_status)
+ return (status["status"], status["message"])
diff --git a/tests/charmhelpers/contrib/openstack/__init__.py b/tests/charmhelpers/contrib/openstack/__init__.py
new file mode 100644
index 0000000..d7567b8
--- /dev/null
+++ b/tests/charmhelpers/contrib/openstack/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/tests/charmhelpers/contrib/openstack/amulet/__init__.py
new file mode 100644
index 0000000..d7567b8
--- /dev/null
+++ b/tests/charmhelpers/contrib/openstack/amulet/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
new file mode 100644
index 0000000..66beeda
--- /dev/null
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -0,0 +1,354 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import re
+import sys
+import six
+from collections import OrderedDict
+from charmhelpers.contrib.amulet.deployment import (
+ AmuletDeployment
+)
+from charmhelpers.contrib.openstack.amulet.utils import (
+ OPENSTACK_RELEASES_PAIRS
+)
+
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
+
+class OpenStackAmuletDeployment(AmuletDeployment):
+ """OpenStack amulet deployment.
+
+ This class inherits from AmuletDeployment and has additional support
+ that is specifically for use by OpenStack charms.
+ """
+
+ def __init__(self, series=None, openstack=None, source=None,
+ stable=True, log_level=DEBUG):
+ """Initialize the deployment environment."""
+ super(OpenStackAmuletDeployment, self).__init__(series)
+ self.log = self.get_logger(level=log_level)
+ self.log.info('OpenStackAmuletDeployment: init')
+ self.openstack = openstack
+ self.source = source
+ self.stable = stable
+
+ def get_logger(self, name="deployment-logger", level=logging.DEBUG):
+ """Get a logger object that will log to stdout."""
+ log = logging
+ logger = log.getLogger(name)
+ fmt = log.Formatter("%(asctime)s %(funcName)s "
+ "%(levelname)s: %(message)s")
+
+ handler = log.StreamHandler(stream=sys.stdout)
+ handler.setLevel(level)
+ handler.setFormatter(fmt)
+
+ logger.addHandler(handler)
+ logger.setLevel(level)
+
+ return logger
+
+ def _determine_branch_locations(self, other_services):
+ """Determine the branch locations for the other services.
+
+ Determine if the local branch being tested is derived from its
+ stable or next (dev) branch, and based on this, use the corresponding
+ stable or next branches for the other_services."""
+
+ self.log.info('OpenStackAmuletDeployment: determine branch locations')
+
+ # Charms outside the ~openstack-charmers
+ base_charms = {
+ 'mysql': ['trusty'],
+ 'mongodb': ['trusty'],
+ 'nrpe': ['trusty', 'xenial'],
+ }
+
+ for svc in other_services:
+ # If a location has been explicitly set, use it
+ if svc.get('location'):
+ continue
+ if svc['name'] in base_charms:
+ # NOTE: not all charms have support for all series we
+ # want/need to test against, so fix to most recent
+ # that each base charm supports
+ target_series = self.series
+ if self.series not in base_charms[svc['name']]:
+ target_series = base_charms[svc['name']][-1]
+ svc['location'] = 'cs:{}/{}'.format(target_series,
+ svc['name'])
+ elif self.stable:
+ svc['location'] = 'cs:{}/{}'.format(self.series,
+ svc['name'])
+ else:
+ svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
+ self.series,
+ svc['name']
+ )
+
+ return other_services
+
+ def _add_services(self, this_service, other_services, use_source=None,
+ no_origin=None):
+ """Add services to the deployment and optionally set
+ openstack-origin/source.
+
+ :param this_service dict: Service dictionary describing the service
+ whose amulet tests are being run
+ :param other_services dict: List of service dictionaries describing
+ the services needed to support the target
+ service
+ :param use_source list: List of services which use the 'source' config
+ option rather than 'openstack-origin'
+ :param no_origin list: List of services which do not support setting
+ the Cloud Archive.
+ Service Dict:
+ {
+ 'name': str charm-name,
+ 'units': int number of units,
+ 'constraints': dict of juju constraints,
+ 'location': str location of charm,
+ }
+ eg
+ this_service = {
+ 'name': 'openvswitch-odl',
+ 'constraints': {'mem': '8G'},
+ }
+ other_services = [
+ {
+ 'name': 'nova-compute',
+ 'units': 2,
+ 'constraints': {'mem': '4G'},
+ 'location': cs:~bob/xenial/nova-compute
+ },
+ {
+ 'name': 'mysql',
+ 'constraints': {'mem': '2G'},
+ },
+ {'neutron-api-odl'}]
+ use_source = ['mysql']
+ no_origin = ['neutron-api-odl']
+ """
+ self.log.info('OpenStackAmuletDeployment: adding services')
+
+ other_services = self._determine_branch_locations(other_services)
+
+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
+ other_services)
+
+ services = other_services
+ services.append(this_service)
+
+ use_source = use_source or []
+ no_origin = no_origin or []
+
+ # Charms which should use the source config option
+ use_source = list(set(
+ use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon',
+ 'ceph-proxy', 'percona-cluster', 'lxd']))
+
+ # Charms which can not use openstack-origin, ie. many subordinates
+ no_origin = list(set(
+ no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
+ 'nrpe', 'openvswitch-odl', 'neutron-api-odl',
+ 'odl-controller', 'cinder-backup', 'nexentaedge-data',
+ 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+ 'cinder-nexentaedge', 'nexentaedge-mgmt']))
+
+ if self.openstack:
+ for svc in services:
+ if svc['name'] not in use_source + no_origin:
+ config = {'openstack-origin': self.openstack}
+ self.d.configure(svc['name'], config)
+
+ if self.source:
+ for svc in services:
+ if svc['name'] in use_source and svc['name'] not in no_origin:
+ config = {'source': self.source}
+ self.d.configure(svc['name'], config)
+
+ def _configure_services(self, configs):
+ """Configure all of the services."""
+ self.log.info('OpenStackAmuletDeployment: configure services')
+ for service, config in six.iteritems(configs):
+ self.d.configure(service, config)
+
+ def _auto_wait_for_status(self, message=None, exclude_services=None,
+ include_only=None, timeout=None):
+ """Wait for all units to have a specific extended status, except
+ for any defined as excluded. Unless specified via message, any
+ status containing any case of 'ready' will be considered a match.
+
+ Examples of message usage:
+
+ Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
+ message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
+
+ Wait for all units to reach this status (exact match):
+ message = re.compile('^Unit is ready and clustered$')
+
+ Wait for all units to reach any one of these (exact match):
+ message = re.compile('Unit is ready|OK|Ready')
+
+ Wait for at least one unit to reach this status (exact match):
+ message = {'ready'}
+
+ See Amulet's sentry.wait_for_messages() for message usage detail.
+ https://github.com/juju/amulet/blob/master/amulet/sentry.py
+
+ :param message: Expected status match
+ :param exclude_services: List of juju service names to ignore,
+ not to be used in conjunction with include_only.
+ :param include_only: List of juju service names to exclusively check,
+ not to be used in conjunction with exclude_services.
+ :param timeout: Maximum time in seconds to wait for status match
+ :returns: None. Raises if timeout is hit.
+ """
+ if not timeout:
+ timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
+ self.log.info('Waiting for extended status on units for {}s...'
+ ''.format(timeout))
+
+ all_services = self.d.services.keys()
+
+ if exclude_services and include_only:
+ raise ValueError('exclude_services can not be used '
+ 'with include_only')
+
+ if message:
+ if isinstance(message, re._pattern_type):
+ match = message.pattern
+ else:
+ match = message
+
+ self.log.debug('Custom extended status wait match: '
+ '{}'.format(match))
+ else:
+ self.log.debug('Default extended status wait match: contains '
+ 'READY (case-insensitive)')
+ message = re.compile('.*ready.*', re.IGNORECASE)
+
+ if exclude_services:
+ self.log.debug('Excluding services from extended status match: '
+ '{}'.format(exclude_services))
+ else:
+ exclude_services = []
+
+ if include_only:
+ services = include_only
+ else:
+ services = list(set(all_services) - set(exclude_services))
+
+ self.log.debug('Waiting up to {}s for extended status on services: '
+ '{}'.format(timeout, services))
+ service_messages = {service: message for service in services}
+
+ # Check for idleness
+ self.d.sentry.wait(timeout=timeout)
+ # Check for error states and bail early
+ self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
+ # Check for ready messages
+ self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+
+ self.log.info('OK')
+
+ def _get_openstack_release(self):
+ """Get openstack release.
+
+ Return an integer representing the enum value of the openstack
+ release.
+ """
+ # Must be ordered by OpenStack release (not by Ubuntu release):
+ for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
+ setattr(self, os_pair, i)
+
+ releases = {
+ ('trusty', None): self.trusty_icehouse,
+ ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
+ ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
+ ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
+ ('xenial', None): self.xenial_mitaka,
+ ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
+ ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
+ ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
+ ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
+ ('yakkety', None): self.yakkety_newton,
+ ('zesty', None): self.zesty_ocata,
+ ('artful', None): self.artful_pike,
+ ('bionic', None): self.bionic_queens,
+ }
+ return releases[(self.series, self.openstack)]
+
+ def _get_openstack_release_string(self):
+ """Get openstack release string.
+
+ Return a string representing the openstack release.
+ """
+ releases = OrderedDict([
+ ('trusty', 'icehouse'),
+ ('xenial', 'mitaka'),
+ ('yakkety', 'newton'),
+ ('zesty', 'ocata'),
+ ('artful', 'pike'),
+ ('bionic', 'queens'),
+ ])
+ if self.openstack:
+ os_origin = self.openstack.split(':')[1]
+ return os_origin.split('%s-' % self.series)[1].split('/')[0]
+ else:
+ return releases[self.series]
+
+ def get_ceph_expected_pools(self, radosgw=False):
+ """Return a list of expected ceph pools in a ceph + cinder + glance
+ test scenario, based on OpenStack release and whether ceph radosgw
+ is flagged as present or not."""
+
+ if self._get_openstack_release() == self.trusty_icehouse:
+ # Icehouse
+ pools = [
+ 'data',
+ 'metadata',
+ 'rbd',
+ 'cinder-ceph',
+ 'glance'
+ ]
+ elif (self.trusty_kilo <= self._get_openstack_release() <=
+ self.zesty_ocata):
+ # Kilo through Ocata
+ pools = [
+ 'rbd',
+ 'cinder-ceph',
+ 'glance'
+ ]
+ else:
+ # Pike and later
+ pools = [
+ 'cinder-ceph',
+ 'glance'
+ ]
+
+ if radosgw:
+ pools.extend([
+ '.rgw.root',
+ '.rgw.control',
+ '.rgw',
+ '.rgw.gc',
+ '.users.uid'
+ ])
+
+ return pools
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
new file mode 100644
index 0000000..84e87f5
--- /dev/null
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -0,0 +1,1513 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import amulet
+import json
+import logging
+import os
+import re
+import six
+import time
+import urllib
+import urlparse
+
+import cinderclient.v1.client as cinder_client
+import cinderclient.v2.client as cinder_clientv2
+import glanceclient.v1.client as glance_client
+import heatclient.v1.client as heat_client
+from keystoneclient.v2_0 import client as keystone_client
+from keystoneauth1.identity import (
+ v3,
+ v2,
+)
+from keystoneauth1 import session as keystone_session
+from keystoneclient.v3 import client as keystone_client_v3
+from novaclient import exceptions
+
+import novaclient.client as nova_client
+import novaclient
+import pika
+import swiftclient
+
+from charmhelpers.contrib.amulet.utils import (
+ AmuletUtils
+)
+from charmhelpers.core.host import CompareHostReleases
+
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
+NOVA_CLIENT_VERSION = "2"
+
+OPENSTACK_RELEASES_PAIRS = [
+ 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
+ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
+ 'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
+ 'xenial_pike', 'artful_pike', 'xenial_queens',
+ 'bionic_queens']
+
+
+class OpenStackAmuletUtils(AmuletUtils):
+ """OpenStack amulet utilities.
+
+ This class inherits from AmuletUtils and has additional support
+ that is specifically for use by OpenStack charm tests.
+ """
+
+ def __init__(self, log_level=ERROR):
+ """Initialize the deployment environment."""
+ super(OpenStackAmuletUtils, self).__init__(log_level)
+
+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
+ public_port, expected, openstack_release=None):
+ """Validate endpoint data. Pick the correct validator based on
+ OpenStack release. Expected data should be in the v2 format:
+ {
+ 'id': id,
+ 'region': region,
+ 'adminurl': adminurl,
+ 'internalurl': internalurl,
+ 'publicurl': publicurl,
+ 'service_id': service_id}
+
+ """
+ validation_function = self.validate_v2_endpoint_data
+ xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+ if openstack_release and openstack_release >= xenial_queens:
+ validation_function = self.validate_v3_endpoint_data
+ expected = {
+ 'id': expected['id'],
+ 'region': expected['region'],
+ 'region_id': 'RegionOne',
+ 'url': self.valid_url,
+ 'interface': self.not_null,
+ 'service_id': expected['service_id']}
+ return validation_function(endpoints, admin_port, internal_port,
+ public_port, expected)
+
+ def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
+ public_port, expected):
+ """Validate endpoint data.
+
+ Validate actual endpoint data vs expected endpoint data. The ports
+ are used to find the matching endpoint.
+ """
+ self.log.debug('Validating endpoint data...')
+ self.log.debug('actual: {}'.format(repr(endpoints)))
+ found = False
+ for ep in endpoints:
+ self.log.debug('endpoint: {}'.format(repr(ep)))
+ if (admin_port in ep.adminurl and
+ internal_port in ep.internalurl and
+ public_port in ep.publicurl):
+ found = True
+ actual = {'id': ep.id,
+ 'region': ep.region,
+ 'adminurl': ep.adminurl,
+ 'internalurl': ep.internalurl,
+ 'publicurl': ep.publicurl,
+ 'service_id': ep.service_id}
+ ret = self._validate_dict_data(expected, actual)
+ if ret:
+ return 'unexpected endpoint data - {}'.format(ret)
+
+ if not found:
+ return 'endpoint not found'
+
+ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
+ public_port, expected, expected_num_eps=3):
+ """Validate keystone v3 endpoint data.
+
+ Validate the v3 endpoint data which has changed from v2. The
+ ports are used to find the matching endpoint.
+
+ The new v3 endpoint data looks like:
+
+ ['},
+ region=RegionOne,
+ region_id=RegionOne,
+ service_id=17f842a0dc084b928e476fafe67e4095,
+ url=http://10.5.6.5:9312>,
+ '},
+ region=RegionOne,
+ region_id=RegionOne,
+ service_id=72fc8736fb41435e8b3584205bb2cfa3,
+ url=http://10.5.6.6:35357/v3>,
+ ... ]
+ """
+ self.log.debug('Validating v3 endpoint data...')
+ self.log.debug('actual: {}'.format(repr(endpoints)))
+ found = []
+ for ep in endpoints:
+ self.log.debug('endpoint: {}'.format(repr(ep)))
+ if ((admin_port in ep.url and ep.interface == 'admin') or
+ (internal_port in ep.url and ep.interface == 'internal') or
+ (public_port in ep.url and ep.interface == 'public')):
+ found.append(ep.interface)
+ # note we ignore the links member.
+ actual = {'id': ep.id,
+ 'region': ep.region,
+ 'region_id': ep.region_id,
+ 'interface': self.not_null,
+ 'url': ep.url,
+ 'service_id': ep.service_id, }
+ ret = self._validate_dict_data(expected, actual)
+ if ret:
+ return 'unexpected endpoint data - {}'.format(ret)
+
+ if len(found) != expected_num_eps:
+ return 'Unexpected number of endpoints found'
+
+ def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+ """Convert v2 endpoint data into v3.
+
+ {
+ 'service_name1': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ 'service_name2': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ }
+ """
+ self.log.warn("Endpoint ID and Region ID validation is limited to not "
+ "null checks after v2 to v3 conversion")
+ for svc in ep_data.keys():
+ assert len(ep_data[svc]) == 1, "Unknown data format"
+ svc_ep_data = ep_data[svc][0]
+ ep_data[svc] = [
+ {
+ 'url': svc_ep_data['adminURL'],
+ 'interface': 'admin',
+ 'region': svc_ep_data['region'],
+ 'region_id': self.not_null,
+ 'id': self.not_null},
+ {
+ 'url': svc_ep_data['publicURL'],
+ 'interface': 'public',
+ 'region': svc_ep_data['region'],
+ 'region_id': self.not_null,
+ 'id': self.not_null},
+ {
+ 'url': svc_ep_data['internalURL'],
+ 'interface': 'internal',
+ 'region': svc_ep_data['region'],
+ 'region_id': self.not_null,
+ 'id': self.not_null}]
+ return ep_data
+
+ def validate_svc_catalog_endpoint_data(self, expected, actual,
+ openstack_release=None):
+ """Validate service catalog endpoint data. Pick the correct validator
+ for the OpenStack version. Expected data should be in the v2 format:
+ {
+ 'service_name1': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ 'service_name2': [
+ {
+ 'adminURL': adminURL,
+ 'id': id,
+ 'region': region.
+ 'publicURL': publicURL,
+ 'internalURL': internalURL
+ }],
+ }
+
+ """
+ validation_function = self.validate_v2_svc_catalog_endpoint_data
+ xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+ if openstack_release and openstack_release >= xenial_queens:
+ validation_function = self.validate_v3_svc_catalog_endpoint_data
+ expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+ return validation_function(expected, actual)
+
+ def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
+ """Validate service catalog endpoint data.
+
+ Validate a list of actual service catalog endpoints vs a list of
+ expected service catalog endpoints.
+ """
+ self.log.debug('Validating service catalog endpoint data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for k, v in six.iteritems(expected):
+ if k in actual:
+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
+ if ret:
+ return self.endpoint_error(k, ret)
+ else:
+ return "endpoint {} does not exist".format(k)
+ return ret
+
+ def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
+ """Validate the keystone v3 catalog endpoint data.
+
+ Validate a list of dictionaries that make up the keystone v3 service
+ catalogue.
+
+ It is in the form of:
+
+
+ {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
+ u'interface': u'admin',
+ u'region': u'RegionOne',
+ u'region_id': u'RegionOne',
+ u'url': u'http://10.5.5.224:35357/v3'},
+ {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
+ u'interface': u'public',
+ u'region': u'RegionOne',
+ u'region_id': u'RegionOne',
+ u'url': u'http://10.5.5.224:5000/v3'},
+ {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
+ u'interface': u'internal',
+ u'region': u'RegionOne',
+ u'region_id': u'RegionOne',
+ u'url': u'http://10.5.5.224:5000/v3'}],
+ u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
+ u'interface': u'public',
+ u'region': u'RegionOne',
+ u'region_id': u'RegionOne',
+ u'url': u'http://10.5.5.223:9311'},
+ {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
+ u'interface': u'internal',
+ u'region': u'RegionOne',
+ u'region_id': u'RegionOne',
+ u'url': u'http://10.5.5.223:9311'},
+ {u'id': u'f629388955bc407f8b11d8b7ca168086',
+ u'interface': u'admin',
+ u'region': u'RegionOne',
+ u'region_id': u'RegionOne',
+ u'url': u'http://10.5.5.223:9312'}]}
+
+ Note, that an added complication is that the order of admin, public,
+ internal against 'interface' in each region is not guaranteed.
+
+ Thus, the function sorts the expected and actual lists using the
+ interface key as a sort key, prior to the comparison.
+ """
+ self.log.debug('Validating v3 service catalog endpoint data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for k, v in six.iteritems(expected):
+ if k in actual:
+ l_expected = sorted(v, key=lambda x: x['interface'])
+ l_actual = sorted(actual[k], key=lambda x: x['interface'])
+ if len(l_actual) != len(l_expected):
+ return ("endpoint {} has differing number of interfaces "
+ " - expected({}), actual({})"
+ .format(k, len(l_expected), len(l_actual)))
+ for i_expected, i_actual in zip(l_expected, l_actual):
+ self.log.debug("checking interface {}"
+ .format(i_expected['interface']))
+ ret = self._validate_dict_data(i_expected, i_actual)
+ if ret:
+ return self.endpoint_error(k, ret)
+ else:
+ return "endpoint {} does not exist".format(k)
+ return ret
+
+ def validate_tenant_data(self, expected, actual):
+ """Validate tenant data.
+
+ Validate a list of actual tenant data vs list of expected tenant
+ data.
+ """
+ self.log.debug('Validating tenant data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ a = {'enabled': act.enabled, 'description': act.description,
+ 'name': act.name, 'id': act.id}
+ if e['name'] == a['name']:
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected tenant data - {}".format(ret)
+ if not found:
+ return "tenant {} does not exist".format(e['name'])
+ return ret
+
+ def validate_role_data(self, expected, actual):
+ """Validate role data.
+
+ Validate a list of actual role data vs a list of expected role
+ data.
+ """
+ self.log.debug('Validating role data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ a = {'name': act.name, 'id': act.id}
+ if e['name'] == a['name']:
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected role data - {}".format(ret)
+ if not found:
+ return "role {} does not exist".format(e['name'])
+ return ret
+
+ def validate_user_data(self, expected, actual, api_version=None):
+ """Validate user data.
+
+ Validate a list of actual user data vs a list of expected user
+ data.
+ """
+ self.log.debug('Validating user data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ if e['name'] == act.name:
+ a = {'enabled': act.enabled, 'name': act.name,
+ 'email': act.email, 'id': act.id}
+ if api_version == 3:
+ a['default_project_id'] = getattr(act,
+ 'default_project_id',
+ 'none')
+ else:
+ a['tenantId'] = act.tenantId
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected user data - {}".format(ret)
+ if not found:
+ return "user {} does not exist".format(e['name'])
+ return ret
+
+ def validate_flavor_data(self, expected, actual):
+ """Validate flavor data.
+
+ Validate a list of actual flavors vs a list of expected flavors.
+ """
+ self.log.debug('Validating flavor data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ act = [a.name for a in actual]
+ return self._validate_list_data(expected, act)
+
+ def tenant_exists(self, keystone, tenant):
+ """Return True if tenant exists."""
+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
+ return tenant in [t.name for t in keystone.tenants.list()]
+
+ def keystone_wait_for_propagation(self, sentry_relation_pairs,
+ api_version):
+ """Iterate over list of sentry and relation tuples and verify that
+ api_version has the expected value.
+
+ :param sentry_relation_pairs: list of sentry, relation name tuples used
+ for monitoring propagation of relation
+ data
+ :param api_version: api_version to expect in relation data
+ :returns: None if successful. Raise on error.
+ """
+ for (sentry, relation_name) in sentry_relation_pairs:
+ rel = sentry.relation('identity-service',
+ relation_name)
+ self.log.debug('keystone relation data: {}'.format(rel))
+ if rel.get('api_version') != str(api_version):
+ raise Exception("api_version not propagated through relation"
+ " data yet ('{}' != '{}')."
+ "".format(rel.get('api_version'), api_version))
+
+ def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
+ api_version):
+ """Configure preferred-api-version of keystone in deployment and
+ monitor provided list of relation objects for propagation
+ before returning to caller.
+
+ :param sentry_relation_pairs: list of sentry, relation tuples used for
+ monitoring propagation of relation data
+ :param deployment: deployment to configure
+ :param api_version: value preferred-api-version will be set to
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug("Setting keystone preferred-api-version: '{}'"
+ "".format(api_version))
+
+ config = {'preferred-api-version': api_version}
+ deployment.d.configure('keystone', config)
+ deployment._auto_wait_for_status()
+ self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
+
+ def authenticate_cinder_admin(self, keystone, api_version=2):
+ """Authenticates admin user with cinder."""
+ self.log.debug('Authenticating cinder admin...')
+ _clients = {
+ 1: cinder_client.Client,
+ 2: cinder_clientv2.Client}
+ return _clients[api_version](session=keystone.session)
+
+ def authenticate_keystone(self, keystone_ip, username, password,
+ api_version=False, admin_port=False,
+ user_domain_name=None, domain_name=None,
+ project_domain_name=None, project_name=None):
+ """Authenticate with Keystone"""
+ self.log.debug('Authenticating with keystone...')
+ if not api_version:
+ api_version = 2
+ sess, auth = self.get_keystone_session(
+ keystone_ip=keystone_ip,
+ username=username,
+ password=password,
+ api_version=api_version,
+ admin_port=admin_port,
+ user_domain_name=user_domain_name,
+ domain_name=domain_name,
+ project_domain_name=project_domain_name,
+ project_name=project_name
+ )
+ if api_version == 2:
+ client = keystone_client.Client(session=sess)
+ else:
+ client = keystone_client_v3.Client(session=sess)
+ # This populates the client.service_catalog
+ client.auth_ref = auth.get_access(sess)
+ return client
+
+ def get_keystone_session(self, keystone_ip, username, password,
+ api_version=False, admin_port=False,
+ user_domain_name=None, domain_name=None,
+ project_domain_name=None, project_name=None):
+ """Return a keystone session object"""
+ ep = self.get_keystone_endpoint(keystone_ip,
+ api_version=api_version,
+ admin_port=admin_port)
+ if api_version == 2:
+ auth = v2.Password(
+ username=username,
+ password=password,
+ tenant_name=project_name,
+ auth_url=ep
+ )
+ sess = keystone_session.Session(auth=auth)
+ else:
+ auth = v3.Password(
+ user_domain_name=user_domain_name,
+ username=username,
+ password=password,
+ domain_name=domain_name,
+ project_domain_name=project_domain_name,
+ project_name=project_name,
+ auth_url=ep
+ )
+ sess = keystone_session.Session(auth=auth)
+ return (sess, auth)
+
+ def get_keystone_endpoint(self, keystone_ip, api_version=None,
+ admin_port=False):
+ """Return keystone endpoint"""
+ port = 5000
+ if admin_port:
+ port = 35357
+ base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+ port)
+ if api_version == 2:
+ ep = base_ep + "/v2.0"
+ else:
+ ep = base_ep + "/v3"
+ return ep
+
+ def get_default_keystone_session(self, keystone_sentry,
+ openstack_release=None):
+ """Return a keystone session object and client object assuming standard
+ default settings
+
+ Example call in amulet tests:
+ self.keystone_session, self.keystone = u.get_default_keystone_session(
+ self.keystone_sentry,
+ openstack_release=self._get_openstack_release())
+
+ The session can then be used to auth other clients:
+ neutronclient.Client(session=session)
+ aodh_client.Client(session=session)
+ etc.
+ """
+ self.log.debug('Authenticating keystone admin...')
+ api_version = 2
+ client_class = keystone_client.Client
+ # 11 => xenial_queens
+ if openstack_release and openstack_release >= 11:
+ api_version = 3
+ client_class = keystone_client_v3.Client
+ keystone_ip = keystone_sentry.info['public-address']
+ session, auth = self.get_keystone_session(
+ keystone_ip,
+ api_version=api_version,
+ username='admin',
+ password='openstack',
+ project_name='admin',
+ user_domain_name='admin_domain',
+ project_domain_name='admin_domain')
+ client = client_class(session=session)
+ # This populates the client.service_catalog
+ client.auth_ref = auth.get_access(session)
+ return session, client
+
+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
+ tenant=None, api_version=None,
+ keystone_ip=None, user_domain_name=None,
+ project_domain_name=None,
+ project_name=None):
+ """Authenticates admin user with the keystone admin endpoint."""
+ self.log.debug('Authenticating keystone admin...')
+ if not keystone_ip:
+ keystone_ip = keystone_sentry.info['public-address']
+
+ # To support backward compatibility usage of this function
+ if not project_name:
+ project_name = tenant
+ if api_version == 3 and not user_domain_name:
+ user_domain_name = 'admin_domain'
+ if api_version == 3 and not project_domain_name:
+ project_domain_name = 'admin_domain'
+ if api_version == 3 and not project_name:
+ project_name = 'admin'
+
+ return self.authenticate_keystone(
+ keystone_ip, user, password,
+ api_version=api_version,
+ user_domain_name=user_domain_name,
+ project_domain_name=project_domain_name,
+ project_name=project_name,
+ admin_port=True)
+
+ def authenticate_keystone_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with the keystone public endpoint."""
+ self.log.debug('Authenticating keystone user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ interface='publicURL')
+ keystone_ip = urlparse.urlparse(ep).hostname
+
+ return self.authenticate_keystone(keystone_ip, user, password,
+ project_name=tenant)
+
+ def authenticate_glance_admin(self, keystone):
+ """Authenticates admin user with glance."""
+ self.log.debug('Authenticating glance admin...')
+ ep = keystone.service_catalog.url_for(service_type='image',
+ interface='adminURL')
+ if keystone.session:
+ return glance_client.Client(ep, session=keystone.session)
+ else:
+ return glance_client.Client(ep, token=keystone.auth_token)
+
+ def authenticate_heat_admin(self, keystone):
+ """Authenticates the admin user with heat."""
+ self.log.debug('Authenticating heat admin...')
+ ep = keystone.service_catalog.url_for(service_type='orchestration',
+ interface='publicURL')
+ if keystone.session:
+ return heat_client.Client(endpoint=ep, session=keystone.session)
+ else:
+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
+
+ def authenticate_nova_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with nova-api."""
+ self.log.debug('Authenticating nova user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ interface='publicURL')
+ if keystone.session:
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ session=keystone.session,
+ auth_url=ep)
+ elif novaclient.__version__[0] >= "7":
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ username=user, password=password,
+ project_name=tenant, auth_url=ep)
+ else:
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ username=user, api_key=password,
+ project_id=tenant, auth_url=ep)
+
+ def authenticate_swift_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with swift api."""
+ self.log.debug('Authenticating swift user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ interface='publicURL')
+ if keystone.session:
+ return swiftclient.Connection(session=keystone.session)
+ else:
+ return swiftclient.Connection(authurl=ep,
+ user=user,
+ key=password,
+ tenant_name=tenant,
+ auth_version='2.0')
+
+ def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
+ ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
+ """Create the specified flavor."""
+ try:
+ nova.flavors.find(name=name)
+ except (exceptions.NotFound, exceptions.NoUniqueMatch):
+ self.log.debug('Creating flavor ({})'.format(name))
+ nova.flavors.create(name, ram, vcpus, disk, flavorid,
+ ephemeral, swap, rxtx_factor, is_public)
+
+ def create_cirros_image(self, glance, image_name):
+ """Download the latest cirros image and upload it to glance,
+ validate and return a resource pointer.
+
+ :param glance: pointer to authenticated glance connection
+ :param image_name: display name for new image
+ :returns: glance image pointer
+ """
+ self.log.debug('Creating glance cirros image '
+ '({})...'.format(image_name))
+
+ # Download cirros image
+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+ if http_proxy:
+ proxies = {'http': http_proxy}
+ opener = urllib.FancyURLopener(proxies)
+ else:
+ opener = urllib.FancyURLopener()
+
+ f = opener.open('http://download.cirros-cloud.net/version/released')
+ version = f.read().strip()
+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
+ local_path = os.path.join('tests', cirros_img)
+
+ if not os.path.exists(local_path):
+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
+ version, cirros_img)
+ opener.retrieve(cirros_url, local_path)
+ f.close()
+
+ # Create glance image
+ with open(local_path) as f:
+ image = glance.images.create(name=image_name, is_public=True,
+ disk_format='qcow2',
+ container_format='bare', data=f)
+
+ # Wait for image to reach active status
+ img_id = image.id
+ ret = self.resource_reaches_status(glance.images, img_id,
+ expected_stat='active',
+ msg='Image status wait')
+ if not ret:
+ msg = 'Glance image failed to reach expected state.'
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Re-validate new image
+ self.log.debug('Validating image attributes...')
+ val_img_name = glance.images.get(img_id).name
+ val_img_stat = glance.images.get(img_id).status
+ val_img_pub = glance.images.get(img_id).is_public
+ val_img_cfmt = glance.images.get(img_id).container_format
+ val_img_dfmt = glance.images.get(img_id).disk_format
+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+ 'container fmt:{} disk fmt:{}'.format(
+ val_img_name, val_img_pub, img_id,
+ val_img_stat, val_img_cfmt, val_img_dfmt))
+
+ if val_img_name == image_name and val_img_stat == 'active' \
+ and val_img_pub is True and val_img_cfmt == 'bare' \
+ and val_img_dfmt == 'qcow2':
+ self.log.debug(msg_attr)
+ else:
+ msg = ('Volume validation failed, {}'.format(msg_attr))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ return image
+
+ def delete_image(self, glance, image):
+ """Delete the specified image."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_image.')
+ self.log.debug('Deleting glance image ({})...'.format(image))
+ return self.delete_resource(glance.images, image, msg='glance image')
+
+ def create_instance(self, nova, image_name, instance_name, flavor):
+ """Create the specified instance."""
+ self.log.debug('Creating instance '
+ '({}|{}|{})'.format(instance_name, image_name, flavor))
+ image = nova.glance.find_image(image_name)
+ flavor = nova.flavors.find(name=flavor)
+ instance = nova.servers.create(name=instance_name, image=image,
+ flavor=flavor)
+
+ count = 1
+ status = instance.status
+ while status != 'ACTIVE' and count < 60:
+ time.sleep(3)
+ instance = nova.servers.get(instance.id)
+ status = instance.status
+ self.log.debug('instance status: {}'.format(status))
+ count += 1
+
+ if status != 'ACTIVE':
+ self.log.error('instance creation timed out')
+ return None
+
+ return instance
+
+ def delete_instance(self, nova, instance):
+ """Delete the specified instance."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_instance.')
+ self.log.debug('Deleting instance ({})...'.format(instance))
+ return self.delete_resource(nova.servers, instance,
+ msg='nova instance')
+
+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
+ """Create a new keypair, or return pointer if it already exists."""
+ try:
+ _keypair = nova.keypairs.get(keypair_name)
+ self.log.debug('Keypair ({}) already exists, '
+ 'using it.'.format(keypair_name))
+ return _keypair
+ except Exception:
+ self.log.debug('Keypair ({}) does not exist, '
+ 'creating it.'.format(keypair_name))
+
+ _keypair = nova.keypairs.create(name=keypair_name)
+ return _keypair
+
+ def _get_cinder_obj_name(self, cinder_object):
+ """Retrieve name of cinder object.
+
+ :param cinder_object: cinder snapshot or volume object
+ :returns: str cinder object name
+ """
+ # v1 objects store name in 'display_name' attr but v2+ use 'name'
+ try:
+ return cinder_object.display_name
+ except AttributeError:
+ return cinder_object.name
+
+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+ img_id=None, src_vol_id=None, snap_id=None):
+ """Create cinder volume, optionally from a glance image, OR
+ optionally as a clone of an existing volume, OR optionally
+ from a snapshot. Wait for the new volume status to reach
+ the expected status, validate and return a resource pointer.
+
+ :param vol_name: cinder volume display name
+ :param vol_size: size in gigabytes
+ :param img_id: optional glance image id
+ :param src_vol_id: optional source volume id to clone
+ :param snap_id: optional snapshot id to use
+ :returns: cinder volume pointer
+ """
+ # Handle parameter input and avoid impossible combinations
+ if img_id and not src_vol_id and not snap_id:
+ # Create volume from image
+ self.log.debug('Creating cinder volume from glance image...')
+ bootable = 'true'
+ elif src_vol_id and not img_id and not snap_id:
+ # Clone an existing volume
+ self.log.debug('Cloning cinder volume...')
+ bootable = cinder.volumes.get(src_vol_id).bootable
+ elif snap_id and not src_vol_id and not img_id:
+ # Create volume from snapshot
+ self.log.debug('Creating cinder volume from snapshot...')
+ snap = cinder.volume_snapshots.find(id=snap_id)
+ vol_size = snap.size
+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
+ bootable = cinder.volumes.get(snap_vol_id).bootable
+ elif not img_id and not src_vol_id and not snap_id:
+ # Create volume
+ self.log.debug('Creating cinder volume...')
+ bootable = 'false'
+ else:
+ # Impossible combination of parameters
+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
+ img_id, src_vol_id,
+ snap_id))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Create new volume
+ try:
+ vol_new = cinder.volumes.create(display_name=vol_name,
+ imageRef=img_id,
+ size=vol_size,
+ source_volid=src_vol_id,
+ snapshot_id=snap_id)
+ vol_id = vol_new.id
+ except TypeError:
+ vol_new = cinder.volumes.create(name=vol_name,
+ imageRef=img_id,
+ size=vol_size,
+ source_volid=src_vol_id,
+ snapshot_id=snap_id)
+ vol_id = vol_new.id
+ except Exception as e:
+ msg = 'Failed to create volume: {}'.format(e)
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Wait for volume to reach available status
+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
+ expected_stat="available",
+ msg="Volume status wait")
+ if not ret:
+ msg = 'Cinder volume failed to reach expected state.'
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Re-validate new volume
+ self.log.debug('Validating volume attributes...')
+ val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
+ val_vol_boot = cinder.volumes.get(vol_id).bootable
+ val_vol_stat = cinder.volumes.get(vol_id).status
+ val_vol_size = cinder.volumes.get(vol_id).size
+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
+ '{} size:{}'.format(val_vol_name, vol_id,
+ val_vol_stat, val_vol_boot,
+ val_vol_size))
+
+ if val_vol_boot == bootable and val_vol_stat == 'available' \
+ and val_vol_name == vol_name and val_vol_size == vol_size:
+ self.log.debug(msg_attr)
+ else:
+ msg = ('Volume validation failed, {}'.format(msg_attr))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ return vol_new
+
+ def delete_resource(self, resource, resource_id,
+ msg="resource", max_wait=120):
+ """Delete one openstack resource, such as one instance, keypair,
+ image, volume, stack, etc., and confirm deletion within max wait time.
+
+ :param resource: pointer to os resource type, ex:glance_client.images
+ :param resource_id: unique name or id for the openstack resource
+ :param msg: text to identify purpose in logging
+ :param max_wait: maximum wait time in seconds
+ :returns: True if successful, otherwise False
+ """
+ self.log.debug('Deleting OpenStack resource '
+ '{} ({})'.format(resource_id, msg))
+ num_before = len(list(resource.list()))
+ resource.delete(resource_id)
+
+ tries = 0
+ num_after = len(list(resource.list()))
+ while num_after != (num_before - 1) and tries < (max_wait / 4):
+ self.log.debug('{} delete check: '
+ '{} [{}:{}] {}'.format(msg, tries,
+ num_before,
+ num_after,
+ resource_id))
+ time.sleep(4)
+ num_after = len(list(resource.list()))
+ tries += 1
+
+ self.log.debug('{}: expected, actual count = {}, '
+ '{}'.format(msg, num_before - 1, num_after))
+
+ if num_after == (num_before - 1):
+ return True
+ else:
+ self.log.error('{} delete timed out'.format(msg))
+ return False
+
+ def resource_reaches_status(self, resource, resource_id,
+ expected_stat='available',
+ msg='resource', max_wait=120):
+ """Wait for an openstack resources status to reach an
+ expected status within a specified time. Useful to confirm that
+ nova instances, cinder vols, snapshots, glance images, heat stacks
+ and other resources eventually reach the expected status.
+
+ :param resource: pointer to os resource type, ex: heat_client.stacks
+ :param resource_id: unique id for the openstack resource
+ :param expected_stat: status to expect resource to reach
+ :param msg: text to identify purpose in logging
+ :param max_wait: maximum wait time in seconds
+ :returns: True if successful, False if status is not reached
+ """
+
+ tries = 0
+ resource_stat = resource.get(resource_id).status
+ while resource_stat != expected_stat and tries < (max_wait / 4):
+ self.log.debug('{} status check: '
+ '{} [{}:{}] {}'.format(msg, tries,
+ resource_stat,
+ expected_stat,
+ resource_id))
+ time.sleep(4)
+ resource_stat = resource.get(resource_id).status
+ tries += 1
+
+ self.log.debug('{}: expected, actual status = {}, '
+ '{}'.format(msg, resource_stat, expected_stat))
+
+ if resource_stat == expected_stat:
+ return True
+ else:
+ self.log.debug('{} never reached expected status: '
+ '{}'.format(resource_id, expected_stat))
+ return False
+
+ def get_ceph_osd_id_cmd(self, index):
+ """Produce a shell command that will return a ceph-osd id."""
+ return ("`initctl list | grep 'ceph-osd ' | "
+ "awk 'NR=={} {{ print $2 }}' | "
+ "grep -o '[0-9]*'`".format(index + 1))
+
+ def get_ceph_pools(self, sentry_unit):
+ """Return a dict of ceph pools from a single ceph unit, with
+ pool name as keys, pool id as vals."""
+ pools = {}
+ cmd = 'sudo ceph osd lspools'
+ output, code = sentry_unit.run(cmd)
+ if code != 0:
+ msg = ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+ for pool in str(output).split(','):
+ pool_id_name = pool.split(' ')
+ if len(pool_id_name) == 2:
+ pool_id = pool_id_name[0]
+ pool_name = pool_id_name[1]
+ pools[pool_name] = int(pool_id)
+
+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+ pools))
+ return pools
+
+ def get_ceph_df(self, sentry_unit):
+ """Return dict of ceph df json output, including ceph pool state.
+
+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+ :returns: Dict of ceph df output
+ """
+ cmd = 'sudo ceph df --format=json'
+ output, code = sentry_unit.run(cmd)
+ if code != 0:
+ msg = ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+ return json.loads(output)
+
+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
+ """Take a sample of attributes of a ceph pool, returning ceph
+ pool name, object count and disk space used for the specified
+ pool ID number.
+
+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+ :param pool_id: Ceph pool ID
+ :returns: List of pool name, object count, kb disk space used
+ """
+ df = self.get_ceph_df(sentry_unit)
+ for pool in df['pools']:
+ if pool['id'] == pool_id:
+ pool_name = pool['name']
+ obj_count = pool['stats']['objects']
+ kb_used = pool['stats']['kb_used']
+
+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
+ '{} kb used'.format(pool_name, pool_id,
+ obj_count, kb_used))
+ return pool_name, obj_count, kb_used
+
+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
+ """Validate ceph pool samples taken over time, such as pool
+ object counts or pool kb used, before adding, after adding, and
+ after deleting items which affect those pool attributes. The
+ 2nd element is expected to be greater than the 1st; 3rd is expected
+ to be less than the 2nd.
+
+ :param samples: List containing 3 data samples
+ :param sample_type: String for logging and usage context
+ :returns: None if successful, Failure message otherwise
+ """
+ original, created, deleted = range(3)
+ if samples[created] <= samples[original] or \
+ samples[deleted] >= samples[created]:
+ return ('Ceph {} samples ({}) '
+ 'unexpected.'.format(sample_type, samples))
+ else:
+ self.log.debug('Ceph {} samples (OK): '
+ '{}'.format(sample_type, samples))
+ return None
+
+ # rabbitmq/amqp specific helpers:
+
+ def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
+ """Wait for rmq units extended status to show cluster readiness,
+ after an optional initial sleep period. Initial sleep is likely
+ necessary to be effective following a config change, as status
+ message may not instantly update to non-ready."""
+
+ if init_sleep:
+ time.sleep(init_sleep)
+
+ message = re.compile('^Unit is ready and clustered$')
+ deployment._auto_wait_for_status(message=message,
+ timeout=timeout,
+ include_only=['rabbitmq-server'])
+
+ def add_rmq_test_user(self, sentry_units,
+ username="testuser1", password="changeme"):
+ """Add a test user via the first rmq juju unit, check connection as
+ the new user against all sentry units.
+
+ :param sentry_units: list of sentry unit pointers
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Adding rmq user ({})...'.format(username))
+
+ # Check that user does not already exist
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+ if username in output:
+ self.log.warning('User ({}) already exists, returning '
+ 'gracefully.'.format(username))
+ return
+
+ perms = '".*" ".*" ".*"'
+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
+
+ # Add user via first unit
+ for cmd in cmds:
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
+
+ # Check connection against the other sentry_units
+ self.log.debug('Checking user connect against units...')
+ for sentry_unit in sentry_units:
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
+ username=username,
+ password=password)
+ connection.close()
+
+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
+ """Delete a rabbitmq user via the first rmq juju unit.
+
+ :param sentry_units: list of sentry unit pointers
+ :param username: amqp user name, default to testuser1
+
+ :returns: None if successful or no such user.
+ """
+ self.log.debug('Deleting rmq user ({})...'.format(username))
+
+ # Check that the user exists
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+
+ if username not in output:
+ self.log.warning('User ({}) does not exist, returning '
+ 'gracefully.'.format(username))
+ return
+
+ # Delete the user
+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
+
+ def get_rmq_cluster_status(self, sentry_unit):
+ """Execute rabbitmq cluster status command on a unit and return
+ the full output.
+
+ :param sentry_unit: sentry unit
+ :returns: String containing console output of cluster status command
+ """
+ cmd = 'rabbitmqctl cluster_status'
+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
+ self.log.debug('{} cluster_status:\n{}'.format(
+ sentry_unit.info['unit_name'], output))
+ return str(output)
+
+ def get_rmq_cluster_running_nodes(self, sentry_unit):
+ """Parse rabbitmqctl cluster_status output string, return list of
+ running rabbitmq cluster nodes.
+
+ :param sentry_unit: sentry unit
+ :returns: List containing node names of running nodes
+ """
+ # NOTE(beisner): rabbitmqctl cluster_status output is not
+ # json-parsable, do string chop foo, then json.loads that.
+ str_stat = self.get_rmq_cluster_status(sentry_unit)
+ if 'running_nodes' in str_stat:
+ pos_start = str_stat.find("{running_nodes,") + 15
+ pos_end = str_stat.find("]},", pos_start) + 1
+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
+ run_nodes = json.loads(str_run_nodes)
+ return run_nodes
+ else:
+ return []
+
+ def validate_rmq_cluster_running_nodes(self, sentry_units):
+ """Check that all rmq unit hostnames are represented in the
+ cluster_status output of all units.
+
+ :param sentry_units: list of sentry unit pointers (all rmq units);
+ unit host names are resolved internally via get_unit_hostnames
+ :returns: None if successful, otherwise return error message
+ """
+ host_names = self.get_unit_hostnames(sentry_units)
+ errors = []
+
+ # Query every unit for cluster_status running nodes
+ for query_unit in sentry_units:
+ query_unit_name = query_unit.info['unit_name']
+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+ # Confirm that every unit is represented in the queried unit's
+ # cluster_status running nodes output.
+ for validate_unit in sentry_units:
+ val_host_name = host_names[validate_unit.info['unit_name']]
+ val_node_name = 'rabbit@{}'.format(val_host_name)
+
+ if val_node_name not in running_nodes:
+ errors.append('Cluster member check failed on {}: {} not '
+ 'in {}\n'.format(query_unit_name,
+ val_node_name,
+ running_nodes))
+ if errors:
+ return ''.join(errors)
+
+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+ """Check a single juju rmq unit for ssl and port in the config file."""
+ host = sentry_unit.info['public-address']
+ unit_name = sentry_unit.info['unit_name']
+
+ conf_file = '/etc/rabbitmq/rabbitmq.config'
+ conf_contents = str(self.file_contents_safe(sentry_unit,
+ conf_file, max_wait=16))
+ # Checks
+ conf_ssl = 'ssl' in conf_contents
+ conf_port = str(port) in conf_contents
+
+ # Port explicitly checked in config
+ if port and conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif port and not conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{} but not on port {} '
+ '({})'.format(host, port, unit_name))
+ return False
+ # Port not checked (useful when checking that ssl is disabled)
+ elif not port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif not conf_ssl:
+ self.log.debug('SSL not enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return False
+ else:
+ msg = ('Unknown condition when checking SSL status @{}:{} '
+ '({})'.format(host, port, unit_name))
+ amulet.raise_status(amulet.FAIL, msg)
+
+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+ """Check that ssl is enabled on rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :param port: optional ssl port override to validate
+ :returns: None if successful, otherwise return error message
+ """
+ for sentry_unit in sentry_units:
+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+ return ('Unexpected condition: ssl is disabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def validate_rmq_ssl_disabled_units(self, sentry_units):
+ """Check that ssl is disabled on listed rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :returns: True if successful. Raise on error.
+ """
+ for sentry_unit in sentry_units:
+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+ return ('Unexpected condition: ssl is enabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def configure_rmq_ssl_on(self, sentry_units, deployment,
+ port=None, max_wait=60):
+ """Turn ssl charm config option on, with optional non-default
+ ssl port specification. Confirm that it is enabled on every
+ unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param port: amqp port, use defaults if None
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: on')
+
+ # Enable RMQ SSL
+ config = {'ssl': 'on'}
+ if port:
+ config['ssl_port'] = port
+
+ deployment.d.configure('rabbitmq-server', config)
+
+ # Wait for unit status
+ self.rmq_wait_for_cluster(deployment)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+ """Turn ssl charm config option off, confirm that it is disabled
+ on every unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: off')
+
+ # Disable RMQ SSL
+ config = {'ssl': 'off'}
+ deployment.d.configure('rabbitmq-server', config)
+
+ # Wait for unit status
+ self.rmq_wait_for_cluster(deployment)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+ port=None, fatal=True,
+ username="testuser1", password="changeme"):
+ """Establish and return a pika amqp connection to the rabbitmq service
+ running on a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :param fatal: boolean, default to True (raises on connect error)
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :returns: pika amqp connection pointer or None if failed and non-fatal
+ """
+ host = sentry_unit.info['public-address']
+ unit_name = sentry_unit.info['unit_name']
+
+ # Default port logic if port is not specified
+ if ssl and not port:
+ port = 5671
+ elif not ssl and not port:
+ port = 5672
+
+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
+ '{}...'.format(host, port, unit_name, username))
+
+ try:
+ credentials = pika.PlainCredentials(username, password)
+ parameters = pika.ConnectionParameters(host=host, port=port,
+ credentials=credentials,
+ ssl=ssl,
+ connection_attempts=3,
+ retry_delay=5,
+ socket_timeout=1)
+ connection = pika.BlockingConnection(parameters)
+ assert connection.is_open is True
+ assert connection.is_closing is False
+ self.log.debug('Connect OK')
+ return connection
+ except Exception as e:
+ msg = ('amqp connection failed to {}:{} as '
+ '{} ({})'.format(host, port, username, str(e)))
+ if fatal:
+ amulet.raise_status(amulet.FAIL, msg)
+ else:
+ self.log.warn(msg)
+ return None
+
+ def publish_amqp_message_by_unit(self, sentry_unit, message,
+ queue="test", ssl=False,
+ username="testuser1",
+ password="changeme",
+ port=None):
+ """Publish an amqp message to a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param message: amqp message string
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: None. Raises exception if publish failed.
+ """
+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+ message))
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+
+ # NOTE(beisner): extra debug here re: pika hang potential:
+ # https://github.com/pika/pika/issues/297
+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+ self.log.debug('Defining channel...')
+ channel = connection.channel()
+ self.log.debug('Declaring queue...')
+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+ self.log.debug('Publishing message...')
+ channel.basic_publish(exchange='', routing_key=queue, body=message)
+ self.log.debug('Closing channel...')
+ channel.close()
+ self.log.debug('Closing connection...')
+ connection.close()
+
+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+ username="testuser1",
+ password="changeme",
+ ssl=False, port=None):
+ """Get an amqp message from a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: amqp message body as string. Raise if get fails.
+ """
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+ channel = connection.channel()
+ method_frame, _, body = channel.basic_get(queue)
+
+ if method_frame:
+ self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+ body))
+ channel.basic_ack(method_frame.delivery_tag)
+ channel.close()
+ connection.close()
+ return body
+ else:
+ msg = 'No message retrieved.'
+ amulet.raise_status(amulet.FAIL, msg)
+
+ def validate_memcache(self, sentry_unit, conf, os_release,
+ earliest_release=5, section='keystone_authtoken',
+ check_kvs=None):
+ """Check Memcache is running and is configured to be used
+
+ Example call from Amulet test:
+
+ def test_110_memcache(self):
+ u.validate_memcache(self.neutron_api_sentry,
+ '/etc/neutron/neutron.conf',
+ self._get_openstack_release())
+
+ :param sentry_unit: sentry unit
+ :param conf: OpenStack config file to check memcache settings
+ :param os_release: Current OpenStack release int code
+ :param earliest_release: Earliest Openstack release to check int code
+ :param section: OpenStack config file section to check
+ :param check_kvs: Dict of settings to check in config file
+ :returns: None
+ """
+ if os_release < earliest_release:
+ self.log.debug('Skipping memcache checks for deployment. {} <'
+ 'mitaka'.format(os_release))
+ return
+ _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
+ self.log.debug('Checking memcached is running')
+ ret = self.validate_services_by_name({sentry_unit: ['memcached']})
+ if ret:
+ amulet.raise_status(amulet.FAIL, msg='Memcache running check'
+ 'failed {}'.format(ret))
+ else:
+ self.log.debug('OK')
+ self.log.debug('Checking memcache url is configured in {}'.format(
+ conf))
+ if self.validate_config_data(sentry_unit, conf, section, _kvs):
+ message = "Memcache config error in: {}".format(conf)
+ amulet.raise_status(amulet.FAIL, msg=message)
+ else:
+ self.log.debug('OK')
+ self.log.debug('Checking memcache configuration in '
+ '/etc/memcached.conf')
+ contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
+ fatal=True)
+ ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
+ if CompareHostReleases(ubuntu_release) <= 'trusty':
+ memcache_listen_addr = 'ip6-localhost'
+ else:
+ memcache_listen_addr = '::1'
+ expected = {
+ '-p': '11211',
+ '-l': memcache_listen_addr}
+ found = []
+ for key, value in expected.items():
+ for line in contents.split('\n'):
+ if line.startswith(key):
+ self.log.debug('Checking {} is set to {}'.format(
+ key,
+ value))
+ assert value == line.split()[-1]
+ self.log.debug(line.split()[-1])
+ found.append(key)
+ if sorted(found) == sorted(expected.keys()):
+ self.log.debug('OK')
+ else:
+ message = "Memcache config error in: /etc/memcached.conf"
+ amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/tests/charmhelpers/core/__init__.py b/tests/charmhelpers/core/__init__.py
new file mode 100644
index 0000000..d7567b8
--- /dev/null
+++ b/tests/charmhelpers/core/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/core/decorators.py b/tests/charmhelpers/core/decorators.py
new file mode 100644
index 0000000..6ad41ee
--- /dev/null
+++ b/tests/charmhelpers/core/decorators.py
@@ -0,0 +1,55 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+#  Edward Hope-Morley <opentastic@gmail.com>
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+)
+
+
+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+ """If the decorated function raises exception exc_type, allow num_retries
+ retry attempts before raise the exception.
+ """
+ def _retry_on_exception_inner_1(f):
+ def _retry_on_exception_inner_2(*args, **kwargs):
+ retries = num_retries
+ multiplier = 1
+ while True:
+ try:
+ return f(*args, **kwargs)
+ except exc_type:
+ if not retries:
+ raise
+
+ delay = base_delay * multiplier
+ multiplier += 1
+ log("Retrying '%s' %d more times (delay=%s)" %
+ (f.__name__, retries, delay), level=INFO)
+ retries -= 1
+ if delay:
+ time.sleep(delay)
+
+ return _retry_on_exception_inner_2
+
+ return _retry_on_exception_inner_1
diff --git a/tests/charmhelpers/core/files.py b/tests/charmhelpers/core/files.py
new file mode 100644
index 0000000..fdd82b7
--- /dev/null
+++ b/tests/charmhelpers/core/files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
+
+import os
+import subprocess
+
+
+def sed(filename, before, after, flags='g'):
+ """
+ Search and replaces the given pattern on filename.
+
+ :param filename: relative or absolute file path.
+ :param before: expression to be replaced (see 'man sed')
+ :param after: expression to replace with (see 'man sed')
+ :param flags: sed-compatible regex flags in example, to make
+ the search and replace case insensitive, specify ``flags="i"``.
+ The ``g`` flag is always specified regardless, so you do not
+ need to remember to include it when overriding this parameter.
+ :returns: If the sed command exit code was zero then return,
+ otherwise raise CalledProcessError.
+ """
+ expression = r's/{0}/{1}/{2}'.format(before,
+ after, flags)
+
+ return subprocess.check_call(["sed", "-i", "-r", "-e",
+ expression,
+ os.path.expanduser(filename)])
diff --git a/tests/charmhelpers/core/fstab.py b/tests/charmhelpers/core/fstab.py
new file mode 100644
index 0000000..d9fa915
--- /dev/null
+++ b/tests/charmhelpers/core/fstab.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
+class Fstab(io.FileIO):
+ """This class extends file in order to implement a file reader/writer
+ for file `/etc/fstab`
+ """
+
+ class Entry(object):
+ """Entry class represents a non-comment line on the `/etc/fstab` file
+ """
+ def __init__(self, device, mountpoint, filesystem,
+ options, d=0, p=0):
+ self.device = device
+ self.mountpoint = mountpoint
+ self.filesystem = filesystem
+
+ if not options:
+ options = "defaults"
+
+ self.options = options
+ self.d = int(d)
+ self.p = int(p)
+
+ def __eq__(self, o):
+ return str(self) == str(o)
+
+ def __str__(self):
+ return "{} {} {} {} {} {}".format(self.device,
+ self.mountpoint,
+ self.filesystem,
+ self.options,
+ self.d,
+ self.p)
+
+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
+
+ def __init__(self, path=None):
+ if path:
+ self._path = path
+ else:
+ self._path = self.DEFAULT_PATH
+ super(Fstab, self).__init__(self._path, 'rb+')
+
+ def _hydrate_entry(self, line):
+ # NOTE: use split with no arguments to split on any
+ # whitespace including tabs
+ return Fstab.Entry(*filter(
+ lambda x: x not in ('', None),
+ line.strip("\n").split()))
+
+ @property
+ def entries(self):
+ self.seek(0)
+ for line in self.readlines():
+ line = line.decode('us-ascii')
+ try:
+ if line.strip() and not line.strip().startswith("#"):
+ yield self._hydrate_entry(line)
+ except ValueError:
+ pass
+
+ def get_entry_by_attr(self, attr, value):
+ for entry in self.entries:
+ e_attr = getattr(entry, attr)
+ if e_attr == value:
+ return entry
+ return None
+
+ def add_entry(self, entry):
+ if self.get_entry_by_attr('device', entry.device):
+ return False
+
+ self.write((str(entry) + '\n').encode('us-ascii'))
+ self.truncate()
+ return entry
+
+ def remove_entry(self, entry):
+ self.seek(0)
+
+ lines = [l.decode('us-ascii') for l in self.readlines()]
+
+ found = False
+ for index, line in enumerate(lines):
+ if line.strip() and not line.strip().startswith("#"):
+ if self._hydrate_entry(line) == entry:
+ found = True
+ break
+
+ if not found:
+ return False
+
+ lines.remove(line)
+
+ self.seek(0)
+ self.write(''.join(lines).encode('us-ascii'))
+ self.truncate()
+ return True
+
+ @classmethod
+ def remove_by_mountpoint(cls, mountpoint, path=None):
+ fstab = cls(path=path)
+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
+ if entry:
+ return fstab.remove_entry(entry)
+ return False
+
+ @classmethod
+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
+ return cls(path=path).add_entry(Fstab.Entry(device,
+ mountpoint, filesystem,
+ options=options))
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
new file mode 100644
index 0000000..89f1024
--- /dev/null
+++ b/tests/charmhelpers/core/hookenv.py
@@ -0,0 +1,1270 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"Interactions with the Juju environment"
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers <juju@lists.ubuntu.com>
+
+from __future__ import print_function
+import copy
+from distutils.version import LooseVersion
+from functools import wraps
+from collections import namedtuple
+import glob
+import os
+import json
+import yaml
+import re
+import subprocess
+import sys
+import errno
+import tempfile
+from subprocess import CalledProcessError
+
+import six
+if not six.PY3:
+ from UserDict import UserDict
+else:
+ from collections import UserDict
+
+
+CRITICAL = "CRITICAL"
+ERROR = "ERROR"
+WARNING = "WARNING"
+INFO = "INFO"
+DEBUG = "DEBUG"
+TRACE = "TRACE"
+MARKER = object()
+
+cache = {}
+
+
+def cached(func):
+ """Cache return values for multiple executions of func + args
+
+ For example::
+
+ @cached
+ def unit_get(attribute):
+ pass
+
+ unit_get('test')
+
+ will cache the result of unit_get + 'test' for future calls.
+ """
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ global cache
+ key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
+ try:
+ return cache[key]
+ except KeyError:
+ pass # Drop out of the exception handler scope.
+ res = func(*args, **kwargs)
+ cache[key] = res
+ return res
+ wrapper._wrapped = func
+ return wrapper
+
+
+def flush(key):
+ """Flushes any entries from function cache where the
+ key is found in the function+args """
+ flush_list = []
+ for item in cache:
+ if key in item:
+ flush_list.append(item)
+ for item in flush_list:
+ del cache[item]
+
+
+def log(message, level=None):
+ """Write a message to the juju log"""
+ command = ['juju-log']
+ if level:
+ command += ['-l', level]
+ if not isinstance(message, six.string_types):
+ message = repr(message)
+ command += [message]
+ # Missing juju-log should not cause failures in unit tests
+ # Send log output to stderr
+ try:
+ subprocess.call(command)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ if level:
+ message = "{}: {}".format(level, message)
+ message = "juju-log: {}".format(message)
+ print(message, file=sys.stderr)
+ else:
+ raise
+
+
+class Serializable(UserDict):
+ """Wrapper, an object that can be serialized to yaml or json"""
+
+ def __init__(self, obj):
+ # wrap the object
+ UserDict.__init__(self)
+ self.data = obj
+
+ def __getattr__(self, attr):
+ # See if this object has attribute.
+ if attr in ("json", "yaml", "data"):
+ return self.__dict__[attr]
+ # Check for attribute in wrapped object.
+ got = getattr(self.data, attr, MARKER)
+ if got is not MARKER:
+ return got
+ # Proxy to the wrapped object via dict interface.
+ try:
+ return self.data[attr]
+ except KeyError:
+ raise AttributeError(attr)
+
+ def __getstate__(self):
+ # Pickle as a standard dictionary.
+ return self.data
+
+ def __setstate__(self, state):
+ # Unpickle into our wrapper.
+ self.data = state
+
+ def json(self):
+ """Serialize the object to json"""
+ return json.dumps(self.data)
+
+ def yaml(self):
+ """Serialize the object to yaml"""
+ return yaml.dump(self.data)
+
+
+def execution_environment():
+ """A convenient bundling of the current execution context"""
+ context = {}
+ context['conf'] = config()
+ if relation_id():
+ context['reltype'] = relation_type()
+ context['relid'] = relation_id()
+ context['rel'] = relation_get()
+ context['unit'] = local_unit()
+ context['rels'] = relations()
+ context['env'] = os.environ
+ return context
+
+
+def in_relation_hook():
+ """Determine whether we're running in a relation hook"""
+ return 'JUJU_RELATION' in os.environ
+
+
+def relation_type():
+ """The scope for the current relation hook"""
+ return os.environ.get('JUJU_RELATION', None)
+
+
+@cached
+def relation_id(relation_name=None, service_or_unit=None):
+ """The relation ID for the current or a specified relation"""
+ if not relation_name and not service_or_unit:
+ return os.environ.get('JUJU_RELATION_ID', None)
+ elif relation_name and service_or_unit:
+ service_name = service_or_unit.split('/')[0]
+ for relid in relation_ids(relation_name):
+ remote_service = remote_service_name(relid)
+ if remote_service == service_name:
+ return relid
+ else:
+ raise ValueError('Must specify neither or both of relation_name and service_or_unit')
+
+
+def local_unit():
+ """Local unit ID"""
+ return os.environ['JUJU_UNIT_NAME']
+
+
+def remote_unit():
+ """The remote unit for the current relation hook"""
+ return os.environ.get('JUJU_REMOTE_UNIT', None)
+
+
+def service_name():
+ """The name service group this unit belongs to"""
+ return local_unit().split('/')[0]
+
+
+def principal_unit():
+ """Returns the principal unit of this unit, otherwise None"""
+ # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
+ principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
+ # If it's empty, then this unit is the principal
+ if principal_unit == '':
+ return os.environ['JUJU_UNIT_NAME']
+ elif principal_unit is not None:
+ return principal_unit
+ # For Juju 2.1 and below, let's try work out the principle unit by
+ # the various charms' metadata.yaml.
+ for reltype in relation_types():
+ for rid in relation_ids(reltype):
+ for unit in related_units(rid):
+ md = _metadata_unit(unit)
+ if not md:
+ continue
+ subordinate = md.pop('subordinate', None)
+ if not subordinate:
+ return unit
+ return None
+
+
+@cached
+def remote_service_name(relid=None):
+ """The remote service name for a given relation-id (or the current relation)"""
+ if relid is None:
+ unit = remote_unit()
+ else:
+ units = related_units(relid)
+ unit = units[0] if units else None
+ return unit.split('/')[0] if unit else None
+
+
+def hook_name():
+ """The name of the currently executing hook"""
+ return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
+
+
+class Config(dict):
+ """A dictionary representation of the charm's config.yaml, with some
+ extra features:
+
+ - See which values in the dictionary have changed since the previous hook.
+ - For values that have changed, see what the previous value was.
+ - Store arbitrary data for use in a later hook.
+
+ NOTE: Do not instantiate this object directly - instead call
+ ``hookenv.config()``, which will return an instance of :class:`Config`.
+
+ Example usage::
+
+ >>> # inside a hook
+ >>> from charmhelpers.core import hookenv
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'bar'
+ >>> # store a new key/value for later use
+ >>> config['mykey'] = 'myval'
+
+
+ >>> # user runs `juju set mycharm foo=baz`
+ >>> # now we're inside subsequent config-changed hook
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'baz'
+ >>> # test to see if this val has changed since last hook
+ >>> config.changed('foo')
+ True
+ >>> # what was the previous value?
+ >>> config.previous('foo')
+ 'bar'
+ >>> # keys/values that we add are preserved across hooks
+ >>> config['mykey']
+ 'myval'
+
+ """
+ CONFIG_FILE_NAME = '.juju-persistent-config'
+
+ def __init__(self, *args, **kw):
+ super(Config, self).__init__(*args, **kw)
+ self.implicit_save = True
+ self._prev_dict = None
+ self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
+ if os.path.exists(self.path):
+ self.load_previous()
+ atexit(self._implicit_save)
+
+ def load_previous(self, path=None):
+ """Load previous copy of config from disk.
+
+ In normal usage you don't need to call this method directly - it
+ is called automatically at object initialization.
+
+ :param path:
+
+ File path from which to load the previous config. If `None`,
+ config is loaded from the default location. If `path` is
+ specified, subsequent `save()` calls will write to the same
+ path.
+
+ """
+ self.path = path or self.path
+ with open(self.path) as f:
+ self._prev_dict = json.load(f)
+ for k, v in copy.deepcopy(self._prev_dict).items():
+ if k not in self:
+ self[k] = v
+
+ def changed(self, key):
+ """Return True if the current value for this key is different from
+ the previous value.
+
+ """
+ if self._prev_dict is None:
+ return True
+ return self.previous(key) != self.get(key)
+
+ def previous(self, key):
+ """Return previous value for this key, or None if there
+ is no previous value.
+
+ """
+ if self._prev_dict:
+ return self._prev_dict.get(key)
+ return None
+
+ def save(self):
+ """Save this config to disk.
+
+        If the charm is using the :mod:`Services Framework <charmhelpers.core.services.base>`
+        or :meth:`@hook <Hooks.hook>` decorator, this
+ is called automatically at the end of successful hook execution.
+ Otherwise, it should be called directly by user code.
+
+ To disable automatic saves, set ``implicit_save=False`` on this
+ instance.
+
+ """
+ with open(self.path, 'w') as f:
+ os.fchmod(f.fileno(), 0o600)
+ json.dump(self, f)
+
+ def _implicit_save(self):
+ if self.implicit_save:
+ self.save()
+
+
+@cached
+def config(scope=None):
+ """Juju charm configuration"""
+ config_cmd_line = ['config-get']
+ if scope is not None:
+ config_cmd_line.append(scope)
+ else:
+ config_cmd_line.append('--all')
+ config_cmd_line.append('--format=json')
+ try:
+ config_data = json.loads(
+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
+ if scope is not None:
+ return config_data
+ return Config(config_data)
+ except ValueError:
+ return None
+
+
+@cached
+def relation_get(attribute=None, unit=None, rid=None):
+ """Get relation information"""
+ _args = ['relation-get', '--format=json']
+ if rid:
+ _args.append('-r')
+ _args.append(rid)
+ _args.append(attribute or '-')
+ if unit:
+ _args.append(unit)
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+ except CalledProcessError as e:
+ if e.returncode == 2:
+ return None
+ raise
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+ """Set relation information for the current unit"""
+ relation_settings = relation_settings if relation_settings else {}
+ relation_cmd_line = ['relation-set']
+ accepts_file = "--file" in subprocess.check_output(
+ relation_cmd_line + ["--help"], universal_newlines=True)
+ if relation_id is not None:
+ relation_cmd_line.extend(('-r', relation_id))
+ settings = relation_settings.copy()
+ settings.update(kwargs)
+ for key, value in settings.items():
+ # Force value to be a string: it always should, but some call
+ # sites pass in things like dicts or numbers.
+ if value is not None:
+ settings[key] = "{}".format(value)
+ if accepts_file:
+ # --file was introduced in Juju 1.23.2. Use it by default if
+ # available, since otherwise we'll break if the relation data is
+ # too big. Ideally we should tell relation-set to read the data from
+ # stdin, but that feature is broken in 1.23.2: Bug #1454678.
+ with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+ settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+ subprocess.check_call(
+ relation_cmd_line + ["--file", settings_file.name])
+ os.remove(settings_file.name)
+ else:
+ for key, value in settings.items():
+ if value is None:
+ relation_cmd_line.append('{}='.format(key))
+ else:
+ relation_cmd_line.append('{}={}'.format(key, value))
+ subprocess.check_call(relation_cmd_line)
+ # Flush cache of any relation-gets for local unit
+ flush(local_unit())
+
+
+def relation_clear(r_id=None):
+ ''' Clears any relation data already set on relation r_id '''
+ settings = relation_get(rid=r_id,
+ unit=local_unit())
+ for setting in settings:
+ if setting not in ['public-address', 'private-address']:
+ settings[setting] = None
+ relation_set(relation_id=r_id,
+ **settings)
+
+
+@cached
+def relation_ids(reltype=None):
+ """A list of relation_ids"""
+ reltype = reltype or relation_type()
+ relid_cmd_line = ['relation-ids', '--format=json']
+    if reltype is not None:
+        relid_cmd_line.append(reltype)
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+    return []
+
+
+@cached
+def related_units(relid=None):
+ """A list of related units"""
+ relid = relid or relation_id()
+ units_cmd_line = ['relation-list', '--format=json']
+ if relid is not None:
+ units_cmd_line.extend(('-r', relid))
+ return json.loads(
+ subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+
+
+@cached
+def relation_for_unit(unit=None, rid=None):
+ """Get the json represenation of a unit's relation"""
+ unit = unit or remote_unit()
+ relation = relation_get(unit=unit, rid=rid)
+ for key in relation:
+ if key.endswith('-list'):
+ relation[key] = relation[key].split()
+ relation['__unit__'] = unit
+ return relation
+
+
+@cached
+def relations_for_id(relid=None):
+ """Get relations of a specific relation ID"""
+ relation_data = []
+ relid = relid or relation_ids()
+ for unit in related_units(relid):
+ unit_data = relation_for_unit(unit, relid)
+ unit_data['__relid__'] = relid
+ relation_data.append(unit_data)
+ return relation_data
+
+
+@cached
+def relations_of_type(reltype=None):
+ """Get relations of a specific type"""
+ relation_data = []
+ reltype = reltype or relation_type()
+ for relid in relation_ids(reltype):
+ for relation in relations_for_id(relid):
+ relation['__relid__'] = relid
+ relation_data.append(relation)
+ return relation_data
+
+
+@cached
+def metadata():
+ """Get the current charm metadata.yaml contents as a python object"""
+ with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+ return yaml.safe_load(md)
+
+
+def _metadata_unit(unit):
+ """Given the name of a unit (e.g. apache2/0), get the unit charm's
+ metadata.yaml. Very similar to metadata() but allows us to inspect
+ other units. Unit needs to be co-located, such as a subordinate or
+ principal/primary.
+
+ :returns: metadata.yaml as a python object.
+
+ """
+ basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
+ unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
+ joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
+ if not os.path.exists(joineddir):
+ return None
+ with open(joineddir) as md:
+ return yaml.safe_load(md)
+
+
+@cached
+def relation_types():
+ """Get a list of relation types supported by this charm"""
+ rel_types = []
+ md = metadata()
+ for key in ('provides', 'requires', 'peers'):
+ section = md.get(key)
+ if section:
+ rel_types.extend(section.keys())
+ return rel_types
+
+
+@cached
+def peer_relation_id():
+ '''Get the peers relation id if a peers relation has been joined, else None.'''
+ md = metadata()
+ section = md.get('peers')
+ if section:
+ for key in section:
+ relids = relation_ids(key)
+ if relids:
+ return relids[0]
+ return None
+
+
+@cached
+def relation_to_interface(relation_name):
+ """
+ Given the name of a relation, return the interface that relation uses.
+
+ :returns: The interface name, or ``None``.
+ """
+ return relation_to_role_and_interface(relation_name)[1]
+
+
+@cached
+def relation_to_role_and_interface(relation_name):
+ """
+ Given the name of a relation, return the role and the name of the interface
+ that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
+
+ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
+ """
+ _metadata = metadata()
+ for role in ('provides', 'requires', 'peers'):
+ interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
+ if interface:
+ return role, interface
+ return None, None
+
+
+@cached
+def role_and_interface_to_relations(role, interface_name):
+ """
+ Given a role and interface name, return a list of relation names for the
+ current charm that use that interface under that role (where role is one
+ of ``provides``, ``requires``, or ``peers``).
+
+ :returns: A list of relation names.
+ """
+ _metadata = metadata()
+ results = []
+ for relation_name, relation in _metadata.get(role, {}).items():
+ if relation['interface'] == interface_name:
+ results.append(relation_name)
+ return results
+
+
+@cached
+def interface_to_relations(interface_name):
+ """
+ Given an interface, return a list of relation names for the current
+ charm that use that interface.
+
+ :returns: A list of relation names.
+ """
+ results = []
+ for role in ('provides', 'requires', 'peers'):
+ results.extend(role_and_interface_to_relations(role, interface_name))
+ return results
+
+
+@cached
+def charm_name():
+ """Get the name of the current charm as is specified on metadata.yaml"""
+ return metadata().get('name')
+
+
+@cached
+def relations():
+ """Get a nested dictionary of relation data for all related units"""
+ rels = {}
+ for reltype in relation_types():
+ relids = {}
+ for relid in relation_ids(reltype):
+ units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
+ for unit in related_units(relid):
+ reldata = relation_get(unit=unit, rid=relid)
+ units[unit] = reldata
+ relids[relid] = units
+ rels[reltype] = relids
+ return rels
+
+
+@cached
+def is_relation_made(relation, keys='private-address'):
+ '''
+ Determine whether a relation is established by checking for
+ presence of key(s). If a list of keys is provided, they
+ must all be present for the relation to be identified as made
+ '''
+ if isinstance(keys, str):
+ keys = [keys]
+ for r_id in relation_ids(relation):
+ for unit in related_units(r_id):
+ context = {}
+ for k in keys:
+ context[k] = relation_get(k, rid=r_id,
+ unit=unit)
+ if None not in context.values():
+ return True
+ return False
+
+
+def _port_op(op_name, port, protocol="TCP"):
+ """Open or close a service network port"""
+ _args = [op_name]
+ icmp = protocol.upper() == "ICMP"
+ if icmp:
+ _args.append(protocol)
+ else:
+ _args.append('{}/{}'.format(port, protocol))
+ try:
+ subprocess.check_call(_args)
+ except subprocess.CalledProcessError:
+ # Older Juju pre 2.3 doesn't support ICMP
+ # so treat it as a no-op if it fails.
+ if not icmp:
+ raise
+
+
+def open_port(port, protocol="TCP"):
+ """Open a service network port"""
+ _port_op('open-port', port, protocol)
+
+
+def close_port(port, protocol="TCP"):
+ """Close a service network port"""
+ _port_op('close-port', port, protocol)
+
+
+def open_ports(start, end, protocol="TCP"):
+ """Opens a range of service network ports"""
+ _args = ['open-port']
+ _args.append('{}-{}/{}'.format(start, end, protocol))
+ subprocess.check_call(_args)
+
+
+def close_ports(start, end, protocol="TCP"):
+ """Close a range of service network ports"""
+ _args = ['close-port']
+ _args.append('{}-{}/{}'.format(start, end, protocol))
+ subprocess.check_call(_args)
+
+
+def opened_ports():
+ """Get the opened ports
+
+ *Note that this will only show ports opened in a previous hook*
+
+ :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
+ """
+ _args = ['opened-ports', '--format=json']
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
+@cached
+def unit_get(attribute):
+ """Get the unit ID for the remote unit"""
+ _args = ['unit-get', '--format=json', attribute]
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+
+
+def unit_public_ip():
+ """Get this unit's public IP address"""
+ return unit_get('public-address')
+
+
+def unit_private_ip():
+ """Get this unit's private IP address"""
+ return unit_get('private-address')
+
+
+@cached
+def storage_get(attribute=None, storage_id=None):
+ """Get storage attributes"""
+ _args = ['storage-get', '--format=json']
+ if storage_id:
+ _args.extend(('-s', storage_id))
+ if attribute:
+ _args.append(attribute)
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+
+
+@cached
+def storage_list(storage_name=None):
+ """List the storage IDs for the unit"""
+ _args = ['storage-list', '--format=json']
+ if storage_name:
+ _args.append(storage_name)
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+ except OSError as e:
+ import errno
+ if e.errno == errno.ENOENT:
+ # storage-list does not exist
+ return []
+ raise
+
+
+class UnregisteredHookError(Exception):
+ """Raised when an undefined hook is called"""
+ pass
+
+
+class Hooks(object):
+ """A convenient handler for hook functions.
+
+ Example::
+
+ hooks = Hooks()
+
+ # register a hook, taking its name from the function name
+ @hooks.hook()
+ def install():
+ pass # your code here
+
+ # register a hook, providing a custom hook name
+ @hooks.hook("config-changed")
+ def config_changed():
+ pass # your code here
+
+ if __name__ == "__main__":
+ # execute a hook based on the name the program is called by
+ hooks.execute(sys.argv)
+ """
+
+ def __init__(self, config_save=None):
+ super(Hooks, self).__init__()
+ self._hooks = {}
+
+ # For unknown reasons, we allow the Hooks constructor to override
+ # config().implicit_save.
+ if config_save is not None:
+ config().implicit_save = config_save
+
+ def register(self, name, function):
+ """Register a hook"""
+ self._hooks[name] = function
+
+ def execute(self, args):
+ """Execute a registered hook based on args[0]"""
+ _run_atstart()
+ hook_name = os.path.basename(args[0])
+ if hook_name in self._hooks:
+ try:
+ self._hooks[hook_name]()
+ except SystemExit as x:
+ if x.code is None or x.code == 0:
+ _run_atexit()
+ raise
+ _run_atexit()
+ else:
+ raise UnregisteredHookError(hook_name)
+
+ def hook(self, *hook_names):
+ """Decorator, registering them as hooks"""
+ def wrapper(decorated):
+ for hook_name in hook_names:
+ self.register(hook_name, decorated)
+ else:
+ self.register(decorated.__name__, decorated)
+ if '_' in decorated.__name__:
+ self.register(
+ decorated.__name__.replace('_', '-'), decorated)
+ return decorated
+ return wrapper
+
+
+class NoNetworkBinding(Exception):
+ pass
+
+
+def charm_dir():
+ """Return the root directory of the current charm"""
+ d = os.environ.get('JUJU_CHARM_DIR')
+ if d is not None:
+ return d
+ return os.environ.get('CHARM_DIR')
+
+
+@cached
+def action_get(key=None):
+ """Gets the value of an action parameter, or all key/value param pairs"""
+ cmd = ['action-get']
+ if key is not None:
+ cmd.append(key)
+ cmd.append('--format=json')
+ action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ return action_data
+
+
+def action_set(values):
+ """Sets the values to be returned after the action finishes"""
+ cmd = ['action-set']
+ for k, v in list(values.items()):
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+def action_fail(message):
+ """Sets the action status to failed and sets the error message.
+
+ The results set by action_set are preserved."""
+ subprocess.check_call(['action-fail', message])
+
+
+def action_name():
+ """Get the name of the currently executing action."""
+ return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+ """Get the UUID of the currently executing action."""
+ return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+ """Get the tag for the currently executing action."""
+ return os.environ.get('JUJU_ACTION_TAG')
+
+
+def status_set(workload_state, message):
+ """Set the workload state with a message
+
+ Use status-set to set the workload state with a message which is visible
+ to the user via juju status. If the status-set command is not found then
+ assume this is juju < 1.23 and juju-log the message unstead.
+
+ workload_state -- valid juju workload state.
+ message -- status update message
+ """
+ valid_states = ['maintenance', 'blocked', 'waiting', 'active']
+ if workload_state not in valid_states:
+ raise ValueError(
+ '{!r} is not a valid workload state'.format(workload_state)
+ )
+ cmd = ['status-set', workload_state, message]
+ try:
+ ret = subprocess.call(cmd)
+ if ret == 0:
+ return
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ log_message = 'status-set failed: {} {}'.format(workload_state,
+ message)
+ log(log_message, level='INFO')
+
+
+def status_get():
+ """Retrieve the previously set juju workload state and message
+
+ If the status-get command is not found then assume this is juju < 1.23 and
+ return 'unknown', ""
+
+ """
+ cmd = ['status-get', "--format=json", "--include-data"]
+ try:
+ raw_status = subprocess.check_output(cmd)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return ('unknown', "")
+ else:
+ raise
+ else:
+ status = json.loads(raw_status.decode("UTF-8"))
+ return (status["status"], status["message"])
+
+
+def translate_exc(from_exc, to_exc):
+ # Decorator factory: wraps a function so that any `from_exc` it raises
+ # is re-raised as `to_exc`. Used below to map a missing hook tool
+ # (OSError from exec) onto NotImplementedError for older juju versions.
+ def inner_translate_exc1(f):
+ @wraps(f)
+ def inner_translate_exc2(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except from_exc:
+ raise to_exc
+
+ return inner_translate_exc2
+
+ return inner_translate_exc1
+
+
+def application_version_set(version):
+ """Charm authors may trigger this command from any hook to output what
+ version of the application is running. This could be a package version,
+ for instance postgres version 9.5. It could also be a build number or
+ version control revision identifier, for instance git sha 6fb7ba68. """
+
+ cmd = ['application-version-set']
+ cmd.append(version)
+ try:
+ subprocess.check_call(cmd)
+ except OSError:
+ # Hook tool missing (older juju): just record the version in the log.
+ log("Application Version: {}".format(version))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+ """Does the current unit hold the juju leadership
+
+ Uses juju to determine whether the current unit is the leader of its peers
+ """
+ cmd = ['is-leader', '--format=json']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+ """Juju leader get value(s)"""
+ # '-' asks leader-get for all settings when no attribute is named.
+ cmd = ['leader-get', '--format=json'] + [attribute or '-']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+ """Juju leader set value(s)"""
+ # Don't log secrets.
+ # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+ cmd = ['leader-set']
+ settings = settings or {}
+ # NOTE(review): update() mutates the caller's dict when `settings` is
+ # passed in — side effect visible to the caller; confirm intent.
+ settings.update(kwargs)
+ for k, v in settings.items():
+ if v is None:
+ # key= with empty value unsets the leader setting.
+ cmd.append('{}='.format(k))
+ else:
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+ """Register a payload (type `ptype`, class `klass`, id `pid`) while a
+ hook is running, to let Juju know that the payload has been started."""
+ cmd = ['payload-register']
+ for x in [ptype, klass, pid]:
+ cmd.append(x)
+ subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+ """Unregister a payload while a hook is running, to let Juju know
+ that it has been manually stopped. The `klass` and `pid` provided
+ must match a payload that has been previously registered with juju using
+ payload-register."""
+ cmd = ['payload-unregister']
+ for x in [klass, pid]:
+ cmd.append(x)
+ subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+ """Update the current status of a registered payload.
+ The `klass` and `pid` provided must match a payload that has been
+ previously registered with juju using payload-register. `status` must
+ be one of the following: starting, started, stopping, stopped"""
+ cmd = ['payload-status-set']
+ for x in [klass, pid, status]:
+ cmd.append(x)
+ subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+ """used to fetch the resource path of the given name.
+
+ must match a name of defined resource in metadata.yaml
+
+ returns either a path or False if resource not available
+ """
+ if not name:
+ return False
+
+ cmd = ['resource-get', name]
+ try:
+ # NOTE(review): output is not stripped — the returned path may carry
+ # a trailing newline; confirm callers handle that.
+ return subprocess.check_output(cmd).decode('UTF-8')
+ except subprocess.CalledProcessError:
+ return False
+
+
+@cached
+def juju_version():
+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+ # NOTE(review): raises IndexError if no jujud tool directory exists
+ # (e.g. outside a juju machine) — assumed to run on a deployed unit.
+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+ return subprocess.check_output([jujud, 'version'],
+ universal_newlines=True).strip()
+
+
+def has_juju_version(minimum_version):
+ """Return True if the Juju version is at least the provided version"""
+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+# Module-level registries of (callback, args, kwargs) tuples, consumed by
+# the hook framework via _run_atstart()/_run_atexit() below.
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+ '''Schedule a callback to run before the main hook.
+
+ Callbacks are run in the order they were added.
+
+ This is useful for modules and classes to perform initialization
+ and inject behavior. In particular:
+
+ - Run common code before all of your hooks, such as logging
+ the hook name or interesting relation data.
+ - Defer object or module initialization that requires a hook
+ context until we know there actually is a hook context,
+ making testing easier.
+ - Rather than requiring charm authors to include boilerplate to
+ invoke your helper's behavior, have it run automatically if
+ your object is instantiated or module imported.
+
+ This is not at all useful after your hook framework has been launched.
+ '''
+ global _atstart
+ _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+ '''Schedule a callback to run on successful hook completion.
+
+ Callbacks are run in the reverse order that they were added.'''
+ _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+ '''Hook frameworks must invoke this before running the main hook body.'''
+ global _atstart
+ for callback, args, kwargs in _atstart:
+ callback(*args, **kwargs)
+ # Clear in place so other references to the list stay valid.
+ del _atstart[:]
+
+
+def _run_atexit():
+ '''Hook frameworks must invoke this after the main hook body has
+ successfully completed. Do not invoke it if the hook fails.'''
+ global _atexit
+ # LIFO order: last-registered cleanup runs first.
+ for callback, args, kwargs in reversed(_atexit):
+ callback(*args, **kwargs)
+ del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+ '''
+ Deprecated since Juju 2.3; use network_get()
+
+ Retrieve the primary network address for a named binding
+
+ :param binding: string. The name of a relation of extra-binding
+ :return: string. The primary IP address for the named binding
+ :raise: NotImplementedError if run on Juju < 2.0
+ '''
+ cmd = ['network-get', '--primary-address', binding]
+ try:
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ except CalledProcessError as e:
+ # Distinguish "binding has no network config" from other failures.
+ if 'no network config found for binding' in e.output.decode('UTF-8'):
+ raise NoNetworkBinding("No network binding for {}"
+ .format(binding))
+ else:
+ raise
+ return response
+
+
+def network_get(endpoint, relation_id=None):
+ """
+ Retrieve the network details for a relation endpoint
+
+ :param endpoint: string. The name of a relation endpoint
+ :param relation_id: int. The ID of the relation for the current context.
+ :return: dict. The loaded YAML output of the network-get query.
+ :raise: NotImplementedError if request not supported by the Juju version.
+ """
+ if not has_juju_version('2.2'):
+ raise NotImplementedError(juju_version()) # earlier versions require --primary-address
+ if relation_id and not has_juju_version('2.3'):
+ raise NotImplementedError # 2.3 added the -r option
+
+ cmd = ['network-get', endpoint, '--format', 'yaml']
+ if relation_id:
+ cmd.append('-r')
+ # NOTE(review): subprocess requires string args — confirm callers pass
+ # relation_id as a string despite the ':param relation_id: int' above.
+ cmd.append(relation_id)
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ return yaml.safe_load(response)
+
+
+def add_metric(*args, **kwargs):
+ """Add metric values. Values may be expressed with keyword arguments. For
+ metric names containing dashes, these may be expressed as one or more
+ 'key=value' positional arguments. May only be called from the collect-metrics
+ hook."""
+ _args = ['add-metric']
+ _kvpairs = []
+ _kvpairs.extend(args)
+ _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
+ # Sort for a deterministic command line regardless of dict ordering.
+ _args.extend(sorted(_kvpairs))
+ try:
+ subprocess.check_call(_args)
+ return
+ except EnvironmentError as e:
+ # ENOENT: add-metric hook tool not available on this juju version.
+ if e.errno != errno.ENOENT:
+ raise
+ log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
+ log(log_message, level='INFO')
+
+
+def meter_status():
+ """Get the meter status, if running in the meter-status-changed hook."""
+ return os.environ.get('JUJU_METER_STATUS')
+
+
+def meter_info():
+ """Get the meter status information, if running in the meter-status-changed
+ hook."""
+ return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+ """Iterate through all units in a relation
+
+ Generator that iterates through all the units in a relation and yields
+ a named tuple with rid and unit field names.
+
+ Usage:
+ data = [(u.rid, u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param relation_name: string relation name
+ :yield: Named Tuple with rid and unit field names
+ """
+ RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+ for rid in relation_ids(relation_name):
+ for unit in related_units(rid):
+ yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+ """
+ Retrieve the ingress-address from a relation when available.
+ Otherwise, return the private-address.
+
+ When used on the consuming side of the relation (unit is a remote
+ unit), the ingress-address is the IP address that this unit needs
+ to use to reach the provided service on the remote unit.
+
+ When used on the providing side of the relation (unit == local_unit()),
+ the ingress-address is the IP address that is advertised to remote
+ units on this relation. Remote units need to use this address to
+ reach the local provided service on this unit.
+
+ Note that charms may document some other method to use in
+ preference to the ingress_address(), such as an address provided
+ on a different relation attribute or a service discovery mechanism.
+ This allows charms to redirect inbound connections to their peers
+ or different applications such as load balancers.
+
+ Usage:
+ addresses = [ingress_address(rid=u.rid, unit=u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: string IP address
+ """
+ settings = relation_get(rid=rid, unit=unit)
+ return (settings.get('ingress-address') or
+ settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+ """
+ Retrieve the egress-subnets from a relation.
+
+ This function is to be used on the providing side of the
+ relation, and provides the ranges of addresses that client
+ connections may come from. The result is uninteresting on
+ the consuming side of a relation (unit == local_unit()).
+
+ Returns a stable list of subnets in CIDR format.
+ eg. ['192.168.1.0/24', '2001::F00F/128']
+
+ If egress-subnets is not available, falls back to using the published
+ ingress-address, or finally private-address.
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+ """
+ def _to_range(addr):
+ # Normalise a bare address to CIDR: /32 for IPv4, /128 for IPv6.
+ if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+ addr += '/32'
+ elif ':' in addr and '/' not in addr: # IPv6
+ addr += '/128'
+ return addr
+
+ settings = relation_get(rid=rid, unit=unit)
+ if 'egress-subnets' in settings:
+ return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+ if 'ingress-address' in settings:
+ return [_to_range(settings['ingress-address'])]
+ if 'private-address' in settings:
+ return [_to_range(settings['private-address'])]
+ return [] # Should never happen
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
new file mode 100644
index 0000000..322ab2a
--- /dev/null
+++ b/tests/charmhelpers/core/host.py
@@ -0,0 +1,1028 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Nick Moffitt
+# Matthew Wedgwood
+
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+import functools
+import itertools
+import six
+
+from contextlib import contextmanager
+from collections import OrderedDict
+from .hookenv import log, DEBUG, local_unit
+from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+ from charmhelpers.core.host_factory.ubuntu import (
+ service_available,
+ add_new_group,
+ lsb_release,
+ cmp_pkgrevno,
+ CompareHostReleases,
+ ) # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+ from charmhelpers.core.host_factory.centos import (
+ service_available,
+ add_new_group,
+ lsb_release,
+ cmp_pkgrevno,
+ CompareHostReleases,
+ ) # flake8: noqa -- ignore F401 for this import
+
+# Path of the updatedb (mlocate) configuration file.
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+def service_start(service_name, **kwargs):
+ """Start a system service.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g. upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be reloaded. The follow-
+ ing example stops the ceph-osd service for instance id=4:
+
+ service_stop('ceph-osd', id=4)
+
+ :param service_name: the name of the service to stop
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for systemd enabled systems.
+ """
+ return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+ """Stop a system service.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g. upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be reloaded. The follow-
+ ing example stops the ceph-osd service for instance id=4:
+
+ service_stop('ceph-osd', id=4)
+
+ :param service_name: the name of the service to stop
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for systemd enabled systems.
+ """
+ return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+ """Restart a system service.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g. upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be restarted. The follow-
+ ing example restarts the ceph-osd service for instance id=4:
+
+ service_restart('ceph-osd', id=4)
+
+ :param service_name: the name of the service to restart
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for init systems not allowing additional
+ parameters via the commandline (systemd).
+ """
+ # NOTE(review): unlike service_start/stop/reload, **kwargs are accepted
+ # but NOT forwarded to service() here, contradicting the docstring —
+ # confirm whether `**kwargs` should be passed through.
+ return service('restart', service_name)
+
+
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+ """Reload a system service, optionally falling back to restart if
+ reload fails.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g. upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be reloaded. The follow-
+ ing example restarts the ceph-osd service for instance id=4:
+
+ service_reload('ceph-osd', id=4)
+
+ :param service_name: the name of the service to reload
+ :param restart_on_failure: boolean indicating whether to fallback to a
+ restart if the reload fails.
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for init systems not allowing additional
+ parameters via the commandline (systemd).
+ """
+ service_result = service('reload', service_name, **kwargs)
+ if not service_result and restart_on_failure:
+ service_result = service('restart', service_name, **kwargs)
+ return service_result
+
+
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+ **kwargs):
+ """Pause a system service.
+
+ Stop it, and prevent it from starting again at boot.
+
+ :param service_name: the name of the service to pause
+ :param init_dir: path to the upstart init directory
+ :param initd_dir: path to the sysv init directory
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for init systems which do not support
+ key=value arguments via the commandline.
+ :return: True if the service was stopped (or already stopped).
+ :raises ValueError: if no systemd/upstart/sysv config can be found.
+ """
+ stopped = True
+ if service_running(service_name, **kwargs):
+ stopped = service_stop(service_name, **kwargs)
+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+ sysv_file = os.path.join(initd_dir, service_name)
+ if init_is_systemd():
+ # 'mask' prevents even manual starts until the unit is unmasked.
+ service('disable', service_name)
+ service('mask', service_name)
+ elif os.path.exists(upstart_file):
+ # Upstart: a '<name>.override' file containing 'manual' disables
+ # automatic starts.
+ override_path = os.path.join(
+ init_dir, '{}.override'.format(service_name))
+ with open(override_path, 'w') as fh:
+ fh.write("manual\n")
+ elif os.path.exists(sysv_file):
+ subprocess.check_call(["update-rc.d", service_name, "disable"])
+ else:
+ raise ValueError(
+ "Unable to detect {0} as SystemD, Upstart {1} or"
+ " SysV {2}".format(
+ service_name, upstart_file, sysv_file))
+ return stopped
+
+
+def service_resume(service_name, init_dir="/etc/init",
+ initd_dir="/etc/init.d", **kwargs):
+ """Resume a system service.
+
+ Reenable starting again at boot. Start the service.
+
+ :param service_name: the name of the service to resume
+ :param init_dir: the path to the init dir
+ :param initd dir: the path to the initd dir
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for systemd enabled systems.
+ """
+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+ sysv_file = os.path.join(initd_dir, service_name)
+ if init_is_systemd():
+ # Mirror of service_pause(): unmask before enabling.
+ service('unmask', service_name)
+ service('enable', service_name)
+ elif os.path.exists(upstart_file):
+ # Removing the override file re-enables automatic starts.
+ override_path = os.path.join(
+ init_dir, '{}.override'.format(service_name))
+ if os.path.exists(override_path):
+ os.unlink(override_path)
+ elif os.path.exists(sysv_file):
+ subprocess.check_call(["update-rc.d", service_name, "enable"])
+ else:
+ raise ValueError(
+ "Unable to detect {0} as SystemD, Upstart {1} or"
+ " SysV {2}".format(
+ service_name, upstart_file, sysv_file))
+ started = service_running(service_name, **kwargs)
+
+ if not started:
+ started = service_start(service_name, **kwargs)
+ return started
+
+
+def service(action, service_name, **kwargs):
+ """Control a system service.
+
+ :param action: the action to take on the service
+ :param service_name: the name of the service to perform the action on
+ :param **kwargs: additional params to be passed to the service command in
+ the form of key=value.
+ :return: True if the underlying command exited 0, False otherwise.
+ """
+ if init_is_systemd():
+ # kwargs are intentionally ignored for systemd (no key=value args).
+ cmd = ['systemctl', action, service_name]
+ else:
+ cmd = ['service', service_name, action]
+ for key, value in six.iteritems(kwargs):
+ parameter = '%s=%s' % (key, value)
+ cmd.append(parameter)
+ return subprocess.call(cmd) == 0
+
+
+# Probe paths for detecting which init system manages a given service.
+_UPSTART_CONF = "/etc/init/{}.conf"
+_INIT_D_CONF = "/etc/init.d/{}"
+
+
+def service_running(service_name, **kwargs):
+ """Determine whether a system service is running.
+
+ :param service_name: the name of the service
+ :param **kwargs: additional args to pass to the service command. This is
+ used to pass additional key=value arguments to the
+ service command line for managing specific instance
+ units (e.g. service ceph-osd status id=2). The kwargs
+ are ignored in systemd services.
+ :return: True if the service is running, False otherwise.
+ """
+ if init_is_systemd():
+ # 'systemctl is-active' exits 0 only for an active unit.
+ return service('is-active', service_name)
+ else:
+ if os.path.exists(_UPSTART_CONF.format(service_name)):
+ try:
+ cmd = ['status', service_name]
+ for key, value in six.iteritems(kwargs):
+ parameter = '%s=%s' % (key, value)
+ cmd.append(parameter)
+ output = subprocess.check_output(cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8')
+ except subprocess.CalledProcessError:
+ return False
+ else:
+ # This works for upstart scripts where the 'service' command
+ # returns a consistent string to represent running
+ # 'start/running'
+ if ("start/running" in output or
+ "is running" in output or
+ "up and running" in output):
+ return True
+ elif os.path.exists(_INIT_D_CONF.format(service_name)):
+ # Check System V scripts init script return codes
+ return service('status', service_name)
+ return False
+
+
+# Presence of this directory indicates systemd is the running init system.
+SYSTEMD_SYSTEM = '/run/systemd/system'
+
+
+def init_is_systemd():
+ """Return True if the host system uses systemd, False otherwise."""
+ # Trusty ships upstart; short-circuit to avoid false positives there.
+ if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+ return False
+ return os.path.isdir(SYSTEMD_SYSTEM)
+
+
+def adduser(username, password=None, shell='/bin/bash',
+ system_user=False, primary_group=None,
+ secondary_groups=None, uid=None, home_dir=None):
+ """Add a user to the system.
+
+ Will log but otherwise succeed if the user already exists.
+
+ :param str username: Username to create
+ :param str password: Password for user; if ``None``, create a system user
+ :param str shell: The default shell for the user
+ :param bool system_user: Whether to create a login or system user
+ :param str primary_group: Primary group for user; defaults to username
+ :param list secondary_groups: Optional list of additional groups
+ :param int uid: UID for user being created
+ :param str home_dir: Home directory for user
+
+ :returns: The password database entry struct, as returned by `pwd.getpwnam`
+ """
+ try:
+ user_info = pwd.getpwnam(username)
+ log('user {0} already exists!'.format(username))
+ if uid:
+ user_info = pwd.getpwuid(int(uid))
+ log('user with uid {0} already exists!'.format(uid))
+ except KeyError:
+ log('creating user {0}'.format(username))
+ cmd = ['useradd']
+ if uid:
+ cmd.extend(['--uid', str(uid)])
+ if home_dir:
+ cmd.extend(['--home', str(home_dir)])
+ if system_user or password is None:
+ cmd.append('--system')
+ else:
+ # NOTE(review): useradd --password expects an already-encrypted
+ # (crypt) string, not a plaintext password — confirm callers pass
+ # a hash here.
+ cmd.extend([
+ '--create-home',
+ '--shell', shell,
+ '--password', password,
+ ])
+ if not primary_group:
+ try:
+ grp.getgrnam(username)
+ primary_group = username # avoid "group exists" error
+ except KeyError:
+ pass
+ if primary_group:
+ cmd.extend(['-g', primary_group])
+ if secondary_groups:
+ cmd.extend(['-G', ','.join(secondary_groups)])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+ user_info = pwd.getpwnam(username)
+ return user_info
+
+
+def user_exists(username):
+ """Check if a user exists"""
+ try:
+ pwd.getpwnam(username)
+ user_exists = True
+ except KeyError:
+ user_exists = False
+ return user_exists
+
+
+def uid_exists(uid):
+ """Check if a uid exists"""
+ try:
+ pwd.getpwuid(uid)
+ uid_exists = True
+ except KeyError:
+ uid_exists = False
+ return uid_exists
+
+
+def group_exists(groupname):
+ """Check if a group exists"""
+ try:
+ grp.getgrnam(groupname)
+ group_exists = True
+ except KeyError:
+ group_exists = False
+ return group_exists
+
+
+def gid_exists(gid):
+ """Check if a gid exists"""
+ try:
+ grp.getgrgid(gid)
+ gid_exists = True
+ except KeyError:
+ gid_exists = False
+ return gid_exists
+
+
+def add_group(group_name, system_group=False, gid=None):
+ """Add a group to the system
+
+ Will log but otherwise succeed if the group already exists.
+
+ :param str group_name: group to create
+ :param bool system_group: Create system group
+ :param int gid: GID for user being created
+
+ :returns: The password database entry struct, as returned by `grp.getgrnam`
+ """
+ try:
+ group_info = grp.getgrnam(group_name)
+ log('group {0} already exists!'.format(group_name))
+ if gid:
+ group_info = grp.getgrgid(gid)
+ log('group with gid {0} already exists!'.format(gid))
+ except KeyError:
+ log('creating group {0}'.format(group_name))
+ # Platform-specific helper imported from host_factory above.
+ add_new_group(group_name, system_group, gid)
+ group_info = grp.getgrnam(group_name)
+ return group_info
+
+
+def add_user_to_group(username, group):
+ """Add a user to a group"""
+ cmd = ['gpasswd', '-a', username, group]
+ log("Adding user {} to group {}".format(username, group))
+ subprocess.check_call(cmd)
+
+
+def chage(username, lastday=None, expiredate=None, inactive=None,
+ mindays=None, maxdays=None, root=None, warndays=None):
+ """Change user password expiry information
+
+ :param str username: User to update
+ :param str lastday: Set when password was changed in YYYY-MM-DD format
+ :param str expiredate: Set when user's account will no longer be
+ accessible in YYYY-MM-DD format.
+ -1 will remove an account expiration date.
+ :param str inactive: Set the number of days of inactivity after a password
+ has expired before the account is locked.
+ -1 will remove an account's inactivity.
+ :param str mindays: Set the minimum number of days between password
+ changes to MIN_DAYS.
+ 0 indicates the password can be changed anytime.
+ :param str maxdays: Set the maximum number of days during which a
+ password is valid.
+ -1 as MAX_DAYS will remove checking maxdays
+ :param str root: Apply changes in the CHROOT_DIR directory
+ :param str warndays: Set the number of days of warning before a password
+ change is required
+ :raises subprocess.CalledProcessError: if call to chage fails
+ """
+ # Build the chage(1) command line from whichever options were given.
+ cmd = ['chage']
+ if root:
+ cmd.extend(['--root', root])
+ if lastday:
+ cmd.extend(['--lastday', lastday])
+ if expiredate:
+ cmd.extend(['--expiredate', expiredate])
+ if inactive:
+ cmd.extend(['--inactive', inactive])
+ if mindays:
+ cmd.extend(['--mindays', mindays])
+ if maxdays:
+ cmd.extend(['--maxdays', maxdays])
+ if warndays:
+ cmd.extend(['--warndays', warndays])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+
+# Convenience wrapper: disable all password-expiry checks for a user.
+remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+
+def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
+ """Replicate the contents of a path"""
+ # Default behaviour: delete extraneous files and preserve executability.
+ options = options or ['--delete', '--executability']
+ cmd = ['/usr/bin/rsync', flags]
+ if timeout:
+ # Bound the transfer time via the timeout(1) wrapper.
+ cmd = ['timeout', str(timeout)] + cmd
+ cmd.extend(options)
+ cmd.append(from_path)
+ cmd.append(to_path)
+ log(" ".join(cmd))
+ return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
+
+
+def symlink(source, destination):
+ """Create a symbolic link"""
+ log("Symlinking {} as {}".format(source, destination))
+ # -sf: symbolic, force — silently replaces an existing destination link.
+ cmd = [
+ 'ln',
+ '-sf',
+ source,
+ destination,
+ ]
+ subprocess.check_call(cmd)
+
+
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
+ """Create a directory, setting ownership and permissions.
+
+ :param path: directory to create (made absolute internally)
+ :param owner: owning user name
+ :param group: owning group name
+ :param perms: mode bits for the directory
+ :param force: if True and a non-directory exists at path, replace it
+ """
+ log("Making dir {} {}:{} {:o}".format(path, owner, group,
+ perms))
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ realpath = os.path.abspath(path)
+ path_exists = os.path.exists(realpath)
+ if path_exists and force:
+ if not os.path.isdir(realpath):
+ log("Removing non-directory file {} prior to mkdir()".format(path))
+ os.unlink(realpath)
+ # NOTE(review): makedirs' mode is masked by the umask; the explicit
+ # chmod below is what guarantees `perms` — but it only runs when the
+ # path did not already exist. Confirm intent for the force branch.
+ os.makedirs(realpath, perms)
+ elif not path_exists:
+ os.makedirs(realpath, perms)
+ os.chown(realpath, uid, gid)
+ os.chmod(realpath, perms)
+
+
+def write_file(path, content, owner='root', group='root', perms=0o444):
+ """Create or overwrite a file with the contents of a byte string."""
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ # lets see if we can grab the file and compare the context, to avoid doing
+ # a write.
+ existing_content = None
+ existing_uid, existing_gid = None, None
+ try:
+ with open(path, 'rb') as target:
+ existing_content = target.read()
+ stat = os.stat(path)
+ existing_uid, existing_gid = stat.st_uid, stat.st_gid
+ # NOTE(review): bare except silently swallows all errors (including
+ # KeyboardInterrupt); at minimum this should be `except Exception`.
+ except:
+ pass
+ if content != existing_content:
+ log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+ level=DEBUG)
+ with open(path, 'wb') as target:
+ # Set ownership/mode on the open fd before writing any content.
+ os.fchown(target.fileno(), uid, gid)
+ os.fchmod(target.fileno(), perms)
+ if six.PY3 and isinstance(content, six.string_types):
+ content = content.encode('UTF-8')
+ target.write(content)
+ return
+ # the contents were the same, but we might still need to change the
+ # ownership.
+ if existing_uid != uid:
+ log("Changing uid on already existing content: {} -> {}"
+ .format(existing_uid, uid), level=DEBUG)
+ os.chown(path, uid, -1)
+ if existing_gid != gid:
+ log("Changing gid on already existing content: {} -> {}"
+ .format(existing_gid, gid), level=DEBUG)
+ os.chown(path, -1, gid)
+
+
+def fstab_remove(mp):
+ """Remove the given mountpoint entry from /etc/fstab"""
+ return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+ """Adds the given device entry to the /etc/fstab file"""
+ return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
+ """Mount a filesystem at a particular mountpoint
+
+ :return: True on success; False if the mount command failed. When
+ persist is True, returns the result of the fstab update instead.
+ """
+ cmd_args = ['mount']
+ if options is not None:
+ cmd_args.extend(['-o', options])
+ cmd_args.extend([device, mountpoint])
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+ return False
+
+ if persist:
+ return fstab_add(device, mountpoint, filesystem, options=options)
+ return True
+
+
+def umount(mountpoint, persist=False):
+ """Unmount a filesystem"""
+ cmd_args = ['umount', mountpoint]
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+ return False
+
+ if persist:
+ return fstab_remove(mountpoint)
+ return True
+
+
+def mounts():
+ """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+ with open('/proc/mounts') as f:
+ # [['/mount/point','/dev/path'],[...]]
+ # m[1::-1] takes the first two fields (device, mountpoint) reversed.
+ system_mounts = [m[1::-1] for m in [l.strip().split()
+ for l in f.readlines()]]
+ return system_mounts
+
+
+def fstab_mount(mountpoint):
+ """Mount filesystem using fstab"""
+ cmd_args = ['mount', mountpoint]
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ # NOTE(review): message says 'unmounting' but this is a mount
+ # failure — log text appears copy-pasted from umount(); runtime
+ # string left unchanged here.
+ log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+ return False
+ return True
+
+
def file_hash(path, hash_type='md5'):
    """Generate a hash checksum of the contents of 'path' or None if not found.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    """
    if not os.path.exists(path):
        return None
    hasher = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        hasher.update(source.read())
    return hasher.hexdigest()
+
+
def path_hash(path):
    """Generate a hash checksum of all files matching 'path'. Standard
    wildcards like '*' and '?' are supported, see documentation for the 'glob'
    module for more information.

    :return: dict: A { filename: hash } dictionary for all matched files.
        Empty if none found.
    """
    return dict((fname, file_hash(fname)) for fname in glob.iglob(path))
+
+
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`;
        any algorithm supported by :mod:`hashlib` (md5, sha1, sha256, ...).
    :raises ChecksumError: If the file fails the checksum
    """
    actual = file_hash(path, hash_type)
    if actual != checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual))
+
+
class ChecksumError(ValueError):
    """Raised by check_hash() when a file fails checksum validation."""
    pass
+
+
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
    """Restart services when configuration files they depend on change.

    Used as a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
            '/etc/apache/sites-enabled/*': [ 'apache2' ]
        })
        def config_changed():
            pass  # your code here

    The mapped services are restarted when the named file (or any file
    matching the glob pattern) is changed, created or removed by the
    decorated function.

    @param restart_map: {path_file_name: [service_name, ...]
    @param stopstart: DEFAULT false; whether to stop, start OR restart
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result from decorated function
    """
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            def invoke():
                return f(*args, **kwargs)
            return restart_on_change_helper(invoke, restart_map, stopstart,
                                            restart_functions)
        return wrapped_f
    return wrap
+
+
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
                             restart_functions=None):
    """Helper that restarts services if files in restart_map changed after
    an invocation of lambda_f().

    @param lambda_f: function to call.
    @param restart_map: {file: [service, ...]}
    @param stopstart: whether to stop, start or restart a service
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result of lambda_f()
    """
    if restart_functions is None:
        restart_functions = {}
    # snapshot checksums before running the wrapped function
    pre = {path: path_hash(path) for path in restart_map}
    result = lambda_f()
    # collect the service lists for every path whose contents changed
    changed = [restart_map[path]
               for path in restart_map
               if path_hash(path) != pre[path]]
    # flatten to an ordered, de-duplicated service list
    ordered_services = list(OrderedDict.fromkeys(itertools.chain(*changed)))
    if ordered_services:
        actions = ('stop', 'start') if stopstart else ('restart',)
        for svc in ordered_services:
            custom_restart = restart_functions.get(svc)
            if custom_restart is not None:
                custom_restart(svc)
            else:
                for action in actions:
                    service(action, svc)
    return result
+
+
def pwgen(length=None):
    """Generate a random password of ``length`` characters (random 35-44
    characters when no length is given)."""
    if length is None:
        # choosing a length does not need a strong PRNG
        length = random.choice(range(35, 45))
    # exclude easily-confused characters and vowels
    pool = [c for c in string.ascii_letters + string.digits
            if c not in 'l0QD1vAEIOUaeiou']
    # use a crypto-friendly PRNG (e.g. /dev/urandom) for the password itself
    rng = random.SystemRandom()
    return ''.join(rng.choice(pool) for _ in range(length))
+
+
def is_phy_iface(interface):
    """Returns True if interface is not virtual, otherwise False."""
    if not interface:
        return False
    sys_net = '/sys/class/net'
    if os.path.isdir(sys_net):
        for entry in glob.glob(os.path.join(sys_net, '*')):
            # virtual devices resolve under a '/virtual/' sysfs path
            if '/virtual/' in os.path.realpath(entry):
                continue
            if os.path.basename(entry) == interface:
                return True
    return False
+
+
def get_bond_master(interface):
    """Returns bond master if interface is bond slave otherwise None.

    NOTE: the provided interface is expected to be physical
    """
    if not interface:
        return None
    iface_path = '/sys/class/net/%s' % (interface)
    if not os.path.exists(iface_path):
        return None
    if '/virtual/' in os.path.realpath(iface_path):
        return None
    master_link = os.path.join(iface_path, 'master')
    if os.path.exists(master_link):
        master = os.path.realpath(master_link)
        # a bond master exposes a 'bonding' sysfs directory
        if os.path.exists(os.path.join(master, 'bonding')):
            return os.path.basename(master)
    return None
+
+
def list_nics(nic_type=None):
    """Return a list of nics of given type(s)"""
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type

    interfaces = []
    if nic_type:
        for int_type in int_types:
            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
            output = subprocess.check_output(cmd).decode('UTF-8')
            for line in (l for l in output.split('\n') if l):
                name = line.split()[1]
                if not name.startswith(int_type):
                    continue
                # VLAN devices appear as e.g. 'eth0.100@eth0'
                match = re.search('.*: (' + int_type +
                                  r'[0-9]+\.[0-9]+)@.*', line)
                iface = match.groups()[0] if match else name.replace(":", "")
                if iface not in interfaces:
                    interfaces.append(iface)
    else:
        output = subprocess.check_output(['ip', 'a']).decode('UTF-8')
        key = re.compile(r'^[0-9]+:\s+(.+):')
        for line in (l.strip() for l in output.split('\n') if l):
            match = re.search(key, line)
            if match:
                # strip any '@parent' suffix from the device name
                iface = match.group(1).partition("@")[0]
                if iface not in interfaces:
                    interfaces.append(iface)

    return interfaces
+
+
def set_nic_mtu(nic, mtu):
    """Set the Maximum Transmission Unit (MTU) on a network interface."""
    subprocess.check_call(['ip', 'link', 'set', nic, 'mtu', mtu])
+
+
def get_nic_mtu(nic):
    """Return the Maximum Transmission Unit (MTU) for a network interface."""
    output = subprocess.check_output(
        ['ip', 'addr', 'show', nic]).decode('UTF-8')
    mtu = ""
    # keep the value from the last line carrying an 'mtu' token
    for line in output.split('\n'):
        words = line.split()
        if 'mtu' in words:
            mtu = words[words.index("mtu") + 1]
    return mtu
+
+
def get_nic_hwaddr(nic):
    """Return the Media Access Control (MAC) for a network interface."""
    output = subprocess.check_output(
        ['ip', '-o', '-0', 'addr', 'show', nic]).decode('UTF-8')
    words = output.split()
    # the MAC follows the 'link/ether' token in `ip` output
    if 'link/ether' in words:
        return words[words.index('link/ether') + 1]
    return ""
+
+
@contextmanager
def chdir(directory):
    """Context manager: run the enclosed block with ``directory`` as the
    current working directory, restoring the previous directory on exit
    (even if the block raises). Useful to run commands from a specific
    directory.

    :param str directory: The directory path to change to for this context.
    """
    previous = os.getcwd()
    try:
        yield os.chdir(directory)
    finally:
        os.chdir(previous)
+
+
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
    """Recursively change user and group ownership of files and directories
    in given path. Doesn't chown path itself by default, only its children.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    :param bool follow_links: Also follow and chown links if True
    :param bool chowntopdir: Also chown path itself if True
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    chown = os.chown if follow_links else os.lchown

    def _chown_unless_broken(target):
        # skip broken symlinks: lexists() but not exists()
        broken_symlink = os.path.lexists(target) and not os.path.exists(target)
        if not broken_symlink:
            chown(target, uid, gid)

    if chowntopdir:
        _chown_unless_broken(path)
    for root, dirs, files in os.walk(path, followlinks=follow_links):
        for name in dirs + files:
            _chown_unless_broken(os.path.join(root, name))
+
+
def lchownr(path, owner, group):
    """Recursive chown that does NOT follow symbolic links; see
    the documentation for 'os.lchown' for more information.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    """
    chownr(path, owner, group, follow_links=False)
+
+
def owner(path):
    """Returns a tuple containing the username & groupname owning the path.

    :param str path: the string path to retrieve the ownership
    :return tuple(str, str): A (username, groupname) tuple containing the
                             name of the user and group owning the path.
    :raises OSError: if the specified path does not exist
    """
    st = os.stat(path)
    return (pwd.getpwuid(st.st_uid)[0], grp.getgrgid(st.st_gid)[0])
+
+
def get_total_ram():
    """The total amount of system RAM in bytes.

    This is what is reported by the OS, and may be overcommitted when
    there are multiple containers hosted on the same machine.
    """
    with open('/proc/meminfo', 'r') as meminfo:
        for line in meminfo:
            if not line:
                continue
            key, value, unit = line.split()
            if key == 'MemTotal:':
                assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024  # Classic, not KiB.
    raise NotImplementedError()
+
+
UPSTART_CONTAINER_TYPE = '/run/container_type'


def is_container():
    """Determine whether unit is running in a container

    @return: boolean indicating if unit is in a container
    """
    if init_is_systemd():
        # systemd hosts: ask systemd-detect-virt directly
        return subprocess.call(['systemd-detect-virt', '--container']) == 0
    # upstart hosts: containers carry a marker file
    return os.path.exists(UPSTART_CONTAINER_TYPE)
+
+
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
    """Add ``path`` to PRUNEPATHS in the updatedb configuration file."""
    with open(updatedb_path, 'r+') as conf:
        current = conf.read()
        rewritten = updatedb(current, path)
        # rewrite the file in place
        conf.seek(0)
        conf.write(rewritten)
        conf.truncate()
+
+
def updatedb(updatedb_text, new_path):
    """Return updatedb config text with ``new_path`` appended to every
    PRUNEPATHS line; lines already listing the path are left untouched."""
    lines = updatedb_text.split("\n")
    for idx, line in enumerate(lines):
        if not line.startswith("PRUNEPATHS="):
            continue
        paths = line.split("=")[1].replace('"', '').split(" ")
        if new_path not in paths:
            paths.append(new_path)
            lines[idx] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
    return "\n".join(lines)
+
+
def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
    """Modulo distribution

    Derive a per-unit wait time from the unit number, a modulo value and a
    constant wait. Useful in large scale deployments to distribute load
    during an expensive operation such as service restarts.

    If you have 1000 nodes that need to restart 100 at a time 1 minute at a
    time:

      time.wait(modulo_distribution(modulo=100, wait=60))
      restart()

    If you need restarts to happen serially set modulo to the exact number of
    nodes and set a high constant wait time:

      time.wait(modulo_distribution(modulo=10, wait=120))
      restart()

    @param modulo: int The modulo number creates the group distribution
    @param wait: int The constant time wait value
    @param non_zero_wait: boolean Override unit % modulo == 0,
                          return modulo * wait. Used to avoid collisions with
                          leader nodes which are often given priority.
    @return: int Calculated time to wait for unit operation
    """
    unit_number = int(local_unit().split('/')[1])
    computed = (unit_number % modulo) * wait
    if computed == 0 and non_zero_wait:
        # group zero would collide with leader-priority work; push it out
        return modulo * wait
    return computed
diff --git a/tests/charmhelpers/core/host_factory/__init__.py b/tests/charmhelpers/core/host_factory/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/charmhelpers/core/host_factory/centos.py b/tests/charmhelpers/core/host_factory/centos.py
new file mode 100644
index 0000000..7781a39
--- /dev/null
+++ b/tests/charmhelpers/core/host_factory/centos.py
@@ -0,0 +1,72 @@
+import subprocess
+import yum
+import os
+
+from charmhelpers.core.strutils import BasicStringComparator
+
+
class CompareHostReleases(BasicStringComparator):
    """Provide comparisons of Host releases.

    Not implemented for CentOS: constructing an instance always raises
    NotImplementedError, because there is no ordered release-codename list
    to compare against (unlike the Ubuntu implementation).
    """

    def __init__(self, item):
        raise NotImplementedError(
            "CompareHostReleases() is not implemented for CentOS")
+
+
def service_available(service_name):
    """Determine whether a system service is available (exit status 0 from
    the init system's 'is-enabled' query)."""
    # /run/systemd/system exists only when systemd is the running init
    if os.path.isdir('/run/systemd/system'):
        cmd = ['systemctl', 'is-enabled', service_name]
    else:
        cmd = ['service', service_name, 'is-enabled']
    return subprocess.call(cmd) == 0
+
+
def add_new_group(group_name, system_group=False, gid=None):
    """Create a group via groupadd, optionally as a system group and/or
    with a fixed gid."""
    cmd = ['groupadd']
    if gid:
        cmd += ['--gid', str(gid)]
    if system_group:
        cmd += ['-r']
    subprocess.check_call(cmd + [group_name])
+
+
def lsb_release():
    """Return /etc/os-release in a dict."""
    release = {}
    with open('/etc/os-release', 'r') as fh:
        for line in fh:
            parts = line.split('=')
            # skip blank lines / anything not exactly KEY=value
            if len(parts) != 2:
                continue
            release[parts[0].strip()] = parts[1].strip()
    return release
+
+
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports YumBase function if the pkgcache argument
    is None.
    """
    if not pkgcache:
        y = yum.YumBase()
        packages = y.doPackageLists()
        # NOTE(review): yum package objects conventionally expose lowercase
        # attributes ('name', 'version') -- confirm that 'Name' (capitalized)
        # is valid here; it looks like a probable AttributeError.
        pkgcache = {i.Name: i.version for i in packages['installed']}
    pkg = pkgcache[package]
    # NOTE(review): these are plain string comparisons, not RPM version
    # comparisons -- lexically '10' < '9'. Verify callers only pass revnos
    # for which string ordering matches version ordering.
    if pkg > revno:
        return 1
    if pkg < revno:
        return -1
    return 0
diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py
new file mode 100644
index 0000000..99451b5
--- /dev/null
+++ b/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -0,0 +1,90 @@
+import subprocess
+
+from charmhelpers.core.strutils import BasicStringComparator
+
+
# Ubuntu release codenames, ordered oldest-first; CompareHostReleases uses
# the tuple index to order releases.
UBUNTU_RELEASES = (
    'lucid',
    'maverick',
    'natty',
    'oneiric',
    'precise',
    'quantal',
    'raring',
    'saucy',
    'trusty',
    'utopic',
    'vivid',
    'wily',
    'xenial',
    'yakkety',
    'zesty',
    'artful',
    'bionic',
)
+
+
class CompareHostReleases(BasicStringComparator):
    """Provide comparisons of Ubuntu releases.

    Use in the form of

        if CompareHostReleases(release) > 'trusty':
            # do something only supported after trusty

    Ordering follows UBUNTU_RELEASES (older releases compare lower).
    """
    _list = UBUNTU_RELEASES
+
+
def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        # An unknown service prints 'unrecognized service'; any other
        # non-zero exit still means the service exists.
        return b'unrecognized service' not in e.output
    return True
+
+
def add_new_group(group_name, system_group=False, gid=None):
    """Create a group via addgroup, optionally as a system group and/or
    with a fixed gid."""
    cmd = ['addgroup']
    if gid:
        cmd += ['--gid', str(gid)]
    # '--system' creates a system group; '--group' forces plain group mode
    cmd += ['--system'] if system_group else ['--group']
    cmd.append(group_name)
    subprocess.check_call(cmd)
+
+
def lsb_release():
    """Return /etc/lsb-release in a dict

    Each KEY=value line becomes one entry. Lines with no '=' are skipped,
    and values that themselves contain '=' are kept intact.
    """
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            # Bug fix: the previous two-value unpack of line.split('=')
            # raised ValueError on blank lines and on values containing
            # an '='; partition on the first '=' only.
            k, sep, v = line.partition('=')
            if not sep:
                continue
            d[k.strip()] = v.strip()
    return d
+
+
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    """
    # apt_pkg is only available on systems with python-apt installed;
    # imported lazily so this module can load elsewhere.
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    # version_compare returns <0 / 0 / >0 per Debian version ordering
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
diff --git a/tests/charmhelpers/core/hugepage.py b/tests/charmhelpers/core/hugepage.py
new file mode 100644
index 0000000..54b5b5e
--- /dev/null
+++ b/tests/charmhelpers/core/hugepage.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+from charmhelpers.core import fstab
+from charmhelpers.core import sysctl
+from charmhelpers.core.host import (
+ add_group,
+ add_user_to_group,
+ fstab_mount,
+ mkdir,
+)
+from charmhelpers.core.strutils import bytes_from_string
+from subprocess import check_output
+
+
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Args:
        user (str) -- Username to allow access to hugepages to
        group (str) -- Group name to own hugepages
        nr_hugepages (int) -- Number of pages to reserve
        max_map_count (int) -- Number of Virtual Memory Areas a process can own
        mnt_point (str) -- Directory to mount hugepages on
        pagesize (str) -- Size of hugepages
        mount (bool) -- Whether to Mount hugepages
        set_shmmax (bool) -- Whether to raise kernel.shmmax when it is
            smaller than the total hugepage reservation
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    # ensure the VMA limit can accommodate the hugepage reservation
    # (floor of 2 VMAs per hugepage)
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        # minimum shmmax needed to back the full reservation
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    # replace any stale fstab entry for the mountpoint before adding ours
    lfstab = fstab.Fstab()
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
diff --git a/tests/charmhelpers/core/kernel.py b/tests/charmhelpers/core/kernel.py
new file mode 100644
index 0000000..2d40452
--- /dev/null
+++ b/tests/charmhelpers/core/kernel.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+ log,
+ INFO
+)
+
__platform__ = get_platform()
# Import the platform-specific implementations; both factory modules export
# the same two names, so the rest of this module is platform-agnostic.
if __platform__ == "ubuntu":
    from charmhelpers.core.kernel_factory.ubuntu import (
        persistent_modprobe,
        update_initramfs,
    ) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
    from charmhelpers.core.kernel_factory.centos import (
        persistent_modprobe,
        update_initramfs,
    ) # flake8: noqa -- ignore F401 for this import

__author__ = "Jorge Niedbalski "
+
+
def modprobe(module, persist=True):
    """Load a kernel module and configure for auto-load on reboot."""
    log('Loading kernel module %s' % module, level=INFO)
    subprocess.check_call(['modprobe', module])
    if persist:
        # record the module so it is loaded again after reboot
        persistent_modprobe(module)
+
+
def rmmod(module, force=False):
    """Remove a module from the linux kernel"""
    # -f can remove modules marked in-use; caller opts in explicitly
    cmd = ['rmmod', '-f', module] if force else ['rmmod', module]
    log('Removing kernel module %s' % module, level=INFO)
    return subprocess.check_call(cmd)
+
+
def lsmod():
    """Shows what kernel modules are currently loaded"""
    return subprocess.check_output(['lsmod'], universal_newlines=True)
+
+
def is_module_loaded(module):
    """Checks if a kernel module is already loaded"""
    # module name anchored at line start, followed by at least one space
    return bool(re.findall('^%s[ ]+' % module, lsmod(), re.M))
diff --git a/tests/charmhelpers/core/kernel_factory/__init__.py b/tests/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/charmhelpers/core/kernel_factory/centos.py b/tests/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 0000000..1c402c1
--- /dev/null
+++ b/tests/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
def persistent_modprobe(module):
    """Configure a kernel module to load on boot via /etc/rc.modules.

    Creates the file (readable and executable) if needed, then appends a
    modprobe line unless one is already present.
    """
    if not os.path.exists('/etc/rc.modules'):
        open('/etc/rc.modules', 'a')
        # Bug fix: the mode was previously the decimal literal 111
        # (== 0o157), which yields nonsensical permissions. rc.modules is
        # executed by init, so it must be readable and executable: 0o755.
        os.chmod('/etc/rc.modules', 0o755)
    with open('/etc/rc.modules', 'r+') as modules:
        if module not in modules.read():
            modules.write('modprobe %s\n' % module)
+
+
def update_initramfs(version='all'):
    """Regenerate the initramfs image(s) for ``version`` using dracut."""
    cmd = ["dracut", "-f", version]
    return subprocess.check_call(cmd)
diff --git a/tests/charmhelpers/core/kernel_factory/ubuntu.py b/tests/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 0000000..3de372f
--- /dev/null
+++ b/tests/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
def persistent_modprobe(module):
    """Load a kernel module and configure for auto-load on reboot."""
    # append to /etc/modules only if the name is not already present
    with open('/etc/modules', 'r+') as modules:
        current = modules.read()
        if module not in current:
            modules.write(module + "\n")
+
+
def update_initramfs(version='all'):
    """Regenerate the initramfs image(s) for ``version``."""
    cmd = ["update-initramfs", "-k", version, "-u"]
    return subprocess.check_call(cmd)
diff --git a/tests/charmhelpers/core/services/__init__.py b/tests/charmhelpers/core/services/__init__.py
new file mode 100644
index 0000000..61fd074
--- /dev/null
+++ b/tests/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import * # NOQA
+from .helpers import * # NOQA
diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py
new file mode 100644
index 0000000..345b60d
--- /dev/null
+++ b/tests/charmhelpers/core/services/base.py
@@ -0,0 +1,360 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import os
import json
try:
    # getargspec was deprecated and removed in Python 3.11;
    # getfullargspec exposes the same .args attribute used below.
    from inspect import getfullargspec as getargspec
except ImportError:  # Python 2
    from inspect import getargspec
try:
    # collections.abc is the correct home for Iterable; importing it from
    # 'collections' is deprecated and removed in Python 3.10+.
    from collections.abc import Iterable
except ImportError:  # Python 2
    from collections import Iterable
from collections import OrderedDict

from charmhelpers.core import host
from charmhelpers.core import hookenv


__all__ = ['ServiceManager', 'ManagerCallback',
           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+ def __init__(self, services=None):
+ """
+ Register a list of services, given their definitions.
+
+ Service definitions are dicts in the following formats (all keys except
+ 'service' are optional)::
+
+ {
+ "service": ,
+ "required_data": ,
+ "provided_data": ,
+ "data_ready": ,
+ "data_lost": ,
+ "start": ,
+ "stop": ,
+ "ports": ,
+ }
+
+ The 'required_data' list should contain dicts of required data (or
+ dependency managers that act like dicts and know how to collect the data).
+ Only when all items in the 'required_data' list are populated are the list
+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+ information.
+
+ The 'provided_data' list should contain relation data providers, most likely
+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+ that will indicate a set of data to set on a given relation.
+
+ The 'data_ready' value should be either a single callback, or a list of
+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+ Each callback will be called with the service name as the only parameter.
+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
+ are fired.
+
+ The 'data_lost' value should be either a single callback, or a list of
+ callbacks, to be called when a 'required_data' item no longer passes
+ `is_ready()`. Each callback will be called with the service name as the
+ only parameter. After all of the 'data_lost' callbacks are called,
+ the 'stop' callbacks are fired.
+
+ The 'start' value should be either a single callback, or a list of
+ callbacks, to be called when starting the service, after the 'data_ready'
+ callbacks are complete. Each callback will be called with the service
+ name as the only parameter. This defaults to
+ `[host.service_start, services.open_ports]`.
+
+ The 'stop' value should be either a single callback, or a list of
+ callbacks, to be called when stopping the service. If the service is
+ being stopped because it no longer has all of its 'required_data', this
+ will be called after all of the 'data_lost' callbacks are complete.
+ Each callback will be called with the service name as the only parameter.
+ This defaults to `[services.close_ports, host.service_stop]`.
+
+ The 'ports' value should be a list of ports to manage. The default
+ 'start' handler will open the ports after the service is started,
+ and the default 'stop' handler will close the ports prior to stopping
+ the service.
+
+
+ Examples:
+
+ The following registers an Upstart service called bingod that depends on
+ a mongodb relation and which runs a custom `db_migrate` function prior to
+ restarting the service, and a Runit service called spadesd::
+
+ manager = services.ServiceManager([
+ {
+ 'service': 'bingod',
+ 'ports': [80, 443],
+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
+ 'data_ready': [
+ services.template(source='bingod.conf'),
+ services.template(source='bingod.ini',
+ target='/etc/bingod.ini',
+ owner='bingo', perms=0400),
+ ],
+ },
+ {
+ 'service': 'spadesd',
+ 'data_ready': services.template(source='spadesd_run.j2',
+ target='/etc/sv/spadesd/run',
+ perms=0555),
+ 'start': runit_start,
+ 'stop': runit_stop,
+ },
+ ])
+ manager.manage()
+ """
+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+ self._ready = None
+ self.services = OrderedDict()
+ for service in services or []:
+ service_name = service['service']
+ self.services[service_name] = service
+
    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hookenv._run_atstart()
        try:
            hook_name = hookenv.hook_name()
            if hook_name == 'stop':
                self.stop_services()
            else:
                self.reconfigure_services()
                self.provide_data()
        except SystemExit as x:
            # a clean sys.exit() (code None or 0) still runs atexit hooks
            if x.code is None or x.code == 0:
                hookenv._run_atexit()
        # NOTE(review): on a clean SystemExit this is a second call to
        # _run_atexit -- presumably idempotent; confirm in hookenv.
        hookenv._run_atexit()
+
    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.

        The `provide_data()` method can optionally accept two parameters:

        * ``remote_service`` The name of the remote service that the data will
          be provided to.  The `provide_data()` method will be called once
          for each connected service (not unit).  This allows the method to
          tailor its data to the given service.
        * ``service_ready`` Whether or not the service definition had all of
          its requirements met, and thus the ``data_ready`` callbacks run.

        Note that the ``provided_data`` methods are now called **after** the
        ``data_ready`` callbacks are run.  This gives the ``data_ready`` callbacks
        a chance to generate any data necessary for the providing to the remote
        services.
        """
        for service_name, service in self.services.items():
            service_ready = self.is_ready(service_name)
            for provider in service.get('provided_data', []):
                for relid in hookenv.relation_ids(provider.name):
                    units = hookenv.related_units(relid)
                    if not units:
                        continue
                    # unit names are '<service>/<number>'
                    remote_service = units[0].split('/')[0]
                    # providers may accept (remote_service, service_ready)
                    # or no arguments at all; inspect the signature to decide
                    argspec = getargspec(provider.provide_data)
                    if len(argspec.args) > 1:
                        data = provider.provide_data(remote_service, service_ready)
                    else:
                        data = provider.provide_data()
                    if data:
                        hookenv.relation_set(relid, data)
+
+ def reconfigure_services(self, *service_names):
+ """
+ Update all files for one or more registered services, and,
+ if ready, optionally restart them.
+
+ If no service names are given, reconfigures all registered services.
+ """
+ for service_name in service_names or self.services.keys():
+ if self.is_ready(service_name):
+ self.fire_event('data_ready', service_name)
+ self.fire_event('start', service_name, default=[
+ service_restart,
+ manage_ports])
+ self.save_ready(service_name)
+ else:
+ if self.was_ready(service_name):
+ self.fire_event('data_lost', service_name)
+ self.fire_event('stop', service_name, default=[
+ manage_ports,
+ service_stop])
+ self.save_lost(service_name)
+
+ def stop_services(self, *service_names):
+ """
+ Stop one or more registered services, by name.
+
+ If no service names are given, stops all registered services.
+ """
+ for service_name in service_names or self.services.keys():
+ self.fire_event('stop', service_name, default=[
+ manage_ports,
+ service_stop])
+
+ def get_service(self, service_name):
+ """
+ Given the name of a registered service, return its service definition.
+ """
+ service = self.services.get(service_name)
+ if not service:
+ raise KeyError('Service not registered: %s' % service_name)
+ return service
+
    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.

        :param str event_name: Key in the service definition holding the
            callback(s) for this event.
        :param str service_name: Name of a registered service.
        :param default: Callback(s) to use when the service defines none for
            this event; None means do nothing.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        # A single bare callable is allowed as a convenience; normalize it
        # to a one-element list.
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                # ManagerCallbacks also receive the manager and event name so
                # they can inspect the full service definition.
                callback(self, service_name, event_name)
            else:
                callback(service_name)
+
+ def is_ready(self, service_name):
+ """
+ Determine if a registered service is ready, by checking its 'required_data'.
+
+ A 'required_data' item can be any mapping type, and is considered ready
+ if `bool(item)` evaluates as True.
+ """
+ service = self.get_service(service_name)
+ reqs = service.get('required_data', [])
+ return all(bool(req) for req in reqs)
+
+ def _load_ready_file(self):
+ if self._ready is not None:
+ return
+ if os.path.exists(self._ready_file):
+ with open(self._ready_file) as fp:
+ self._ready = set(json.load(fp))
+ else:
+ self._ready = set()
+
    def _save_ready_file(self):
        # Persist the in-memory ready-state set as a JSON list.  A None
        # cache means it was never loaded, so there is nothing to write.
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)
+
    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        # Load-modify-write so the flag survives across hook invocations.
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()
+
    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        # discard (not remove) so clearing an unknown name is a no-op.
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()
+
    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.

        Reads the state persisted by save_ready/save_lost.
        """
        self._load_ready_file()
        return service_name in self._ready
+
+
class ManagerCallback(object):
    """
    Base class for callbacks that need access to the `ServiceManager`.

    Unlike plain callbacks, which receive only the service name, subclasses
    are invoked with three parameters:

    * `manager` The `ServiceManager` instance
    * `service_name` The name of the service it's being triggered for
    * `event_name` The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract: concrete behavior is supplied by subclasses.
        raise NotImplementedError()
+
+
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.

    The ports most recently opened for a service are cached in
    ``$CHARM_DIR/.<service_name>.ports`` so that ports later removed from
    the service definition can be closed on the next invocation.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        # Each entry is either a port number or the string 'ICMP'.
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                # Close any previously-opened port no longer declared.  The
                # bool() guard skips the empty string produced by splitting
                # an empty cache file.
                if bool(old_port) and not self.ports_contains(old_port, new_ports):
                    hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            # A port is either a number or 'ICMP'
            protocol = 'TCP'
            if str(port).upper() == 'ICMP':
                protocol = 'ICMP'
            if event_name == 'start':
                hookenv.open_port(port, protocol)
            elif event_name == 'stop':
                hookenv.close_port(port, protocol)

    def ports_contains(self, port, ports):
        # Membership test that copes with ports read back from the cache
        # file as strings: numeric strings compare as ints, 'ICMP' compares
        # as-is, and the empty string is never a member.
        if not bool(port):
            return False
        if str(port).upper() != 'ICMP':
            port = int(port)
        return port in ports
+
+
def service_stop(service_name):
    """
    Stop ``service_name`` via host.service_stop, skipping the call entirely
    when the service is not running so the logs stay free of spurious
    "unknown service" messages.
    """
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
+
+
def service_restart(service_name):
    """
    Restart ``service_name`` when it is available, starting it instead if
    it is not yet running.  Skipping unavailable services avoids spurious
    "unknown service" messages in the logs.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
+
+
# Convenience aliases: a single shared PortManagerCallback instance serves
# both opening (on 'start') and closing (on 'stop') of service ports.
open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/tests/charmhelpers/core/services/helpers.py b/tests/charmhelpers/core/services/helpers.py
new file mode 100644
index 0000000..3e6e30d
--- /dev/null
+++ b/tests/charmhelpers/core/services/helpers.py
@@ -0,0 +1,290 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import yaml
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+ 'render_template', 'template']
+
+
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will be used to populate the
    dictionary values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = None
    interface = None

    def __init__(self, name=None, additional_required_keys=None):
        if not hasattr(self, 'required_keys'):
            self.required_keys = []

        if name is not None:
            self.name = name
        if additional_required_keys:
            # Rebind rather than extend in place: when a subclass declares
            # required_keys as a *class* attribute, extending it would
            # mutate the list shared by every instance of that subclass.
            self.required_keys = list(self.required_keys) + list(additional_required_keys)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    __nonzero__ = __bool__  # Python 2 truthiness hook

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        # get_data only stores complete unit data, so a non-empty list under
        # our name means at least one unit satisfied required_keys.
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`. This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexicographically first by the service ID, then by
        the unit ID. Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`. However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                # Only units presenting every required key contribute data.
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
+
+
class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'

    def __init__(self, *args, **kwargs):
        # The base __init__ reads required_keys, so define it first.
        self.required_keys = ['host', 'user', 'password', 'database']
        super(MysqlRelation, self).__init__(*args, **kwargs)
+
+
class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'

    def __init__(self, *args, **kwargs):
        # The base __init__ reads required_keys, so define it first.
        self.required_keys = ['host', 'port']
        super(HttpRelation, self).__init__(*args, **kwargs)

    def provide_data(self):
        # Advertise this unit's private address on the conventional HTTP port.
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }
+
+
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # safe_load: config.yaml is plain data, and yaml.load without an
            # explicit Loader can construct arbitrary Python objects.
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        """
        True only when every required option differs from its declared default.
        """
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            # Treat None and '' as interchangeable "unset" values.
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truthiness hook.
        return self.__bool__()
+
+
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        # Persist with owner-only permissions since the data may hold
        # generated secrets such as passwords.
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        # safe_load: the stored file is plain data, and yaml.load without an
        # explicit Loader can construct arbitrary Python objects.
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            data = yaml.safe_load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data
+
+
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`

    :param str target: The target to write the rendered template to (or None)
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    :param partial on_change_action: functools partial to be executed when
        rendered file changes
    :param template_loader: A jinja2 template loader used instead of the
        default filesystem loader

    :return str: The rendered template
    """
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444,
                 on_change_action=None, template_loader=None):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms
        self.on_change_action = on_change_action
        self.template_loader = template_loader

    def __call__(self, manager, service_name, event_name):
        # Hash the current target (if any) so we can detect whether the
        # render below actually changed the file.
        pre_checksum = ''
        if self.on_change_action and os.path.isfile(self.target):
            pre_checksum = host.file_hash(self.target)
        service = manager.get_service(service_name)
        # Merge every required_data context into a single template context;
        # the same data is also exposed under the 'ctx' key.
        context = {'ctx': {}}
        for ctx in service.get('required_data', []):
            context.update(ctx)
            context['ctx'].update(ctx)

        result = templating.render(self.source, self.target, context,
                                   self.owner, self.group, self.perms,
                                   template_loader=self.template_loader)
        if self.on_change_action:
            if pre_checksum == host.file_hash(self.target):
                hookenv.log(
                    'No change detected: {}'.format(self.target),
                    hookenv.DEBUG)
            else:
                self.on_change_action()

        return result


# Convenience aliases for templates
render_template = template = TemplateCallback
diff --git a/tests/charmhelpers/core/strutils.py b/tests/charmhelpers/core/strutils.py
new file mode 100644
index 0000000..e8df045
--- /dev/null
+++ b/tests/charmhelpers/core/strutils.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+import re
+
+
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.
    """
    if not isinstance(value, six.string_types):
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
        raise ValueError(msg)

    value = six.text_type(value).strip().lower()

    if value in ('y', 'yes', 'true', 't', 'on'):
        return True
    if value in ('n', 'no', 'false', 'f', 'off'):
        return False

    msg = "Unable to interpret string value '%s' as boolean" % (value)
    raise ValueError(msg)
+
+
def bytes_from_string(value):
    """Interpret a human-readable size string (e.g. '10M', '2GB') as bytes.

    Plain integer strings are taken to already be in bytes.

    :param value: size string such as '512', '10K' or '2GB'
    :returns: int number of bytes
    :raises ValueError: if the value is not a string, or the unit suffix is
        not a recognised power-of-1024 unit
    """
    BYTE_POWER = {
        'K': 1,
        'KB': 1,
        'M': 2,
        'MB': 2,
        'G': 3,
        'GB': 3,
        'T': 4,
        'TB': 4,
        'P': 5,
        'PB': 5,
    }
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
        raise ValueError(msg)
    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
    if matches:
        # Look the suffix up explicitly so an unknown unit (e.g. '10Q')
        # raises the documented ValueError rather than leaking a KeyError.
        power = BYTE_POWER.get(matches.group(2))
        if power is None:
            msg = "Unable to interpret string value '%s' as bytes" % (value)
            raise ValueError(msg)
        size = int(matches.group(1)) * (1024 ** power)
    else:
        # Assume that value passed in is bytes
        try:
            size = int(value)
        except ValueError:
            msg = "Unable to interpret string value '%s' as bytes" % (value)
            raise ValueError(msg)
    return size
+
+
class BasicStringComparator(object):
    """Compare strings by their position in a class-supplied ordering.

    Subclasses define ``_list``, an ordered sequence of valid strings (e.g.
    Ubuntu or OpenStack release names after the z-wrap, which do not sort
    alphabetically).  Instances then support ==, !=, <, <=, > and >= against
    either plain strings or other instances of the same class.
    """

    _list = None

    def __init__(self, item):
        if self._list is None:
            raise Exception("Must define the _list in the class definition!")
        try:
            self.index = self._list.index(item)
        except Exception:
            raise KeyError("Item '{}' is not in list '{}'"
                           .format(item, self._list))

    def _index_of(self, other):
        # Resolve a comparand (string or instance) to its ordering index.
        assert isinstance(other, str) or isinstance(other, self.__class__)
        return self._list.index(other)

    def __eq__(self, other):
        return self.index == self._index_of(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self.index < self._index_of(other)

    def __ge__(self, other):
        return not self.__lt__(other)

    def __gt__(self, other):
        return self.index > self._index_of(other)

    def __le__(self, other):
        return not self.__gt__(other)

    def __str__(self):
        """Return the underlying item string, so instances print naturally
        and can be used in messages, e.g.::

            s_mitaka = CompareOpenStack('mitaka')
            s_newton = CompareOpenStack('newton')

            assert s_newton > s_mitaka

        @returns: str, the item this instance was constructed with
        """
        return self._list[self.index]
diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py
new file mode 100644
index 0000000..6e413e3
--- /dev/null
+++ b/tests/charmhelpers/core/sysctl.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ ERROR,
+)
+
+__author__ = 'Jorge Niedbalski R. '
+
+
def create(sysctl_dict, sysctl_file):
    """Create a sysctl.conf file from a mapping and apply it via sysctl -p.

    :param sysctl_dict: a dict, or a YAML-formatted string of sysctl
        options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str or dict
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    if isinstance(sysctl_dict, dict):
        # Allow callers to pass a ready-made mapping directly.
        sysctl_dict_parsed = sysctl_dict
    else:
        try:
            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
        except yaml.YAMLError:
            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
                level=ERROR)
            return
        if not isinstance(sysctl_dict_parsed, dict):
            # e.g. an empty string parses to None; logging and returning
            # beats raising AttributeError on .items() below.
            log("Invalid YAML sysctl_dict (not a mapping): {}".format(
                sysctl_dict), level=ERROR)
            return

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
        level=DEBUG)

    check_call(["sysctl", "-p", sysctl_file])
diff --git a/tests/charmhelpers/core/templating.py b/tests/charmhelpers/core/templating.py
new file mode 100644
index 0000000..9014015
--- /dev/null
+++ b/tests/charmhelpers/core/templating.py
@@ -0,0 +1,93 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8',
           template_loader=None, config_template=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.  It can also be `None`, in which
    case no file will be written.

    The context should be a dict containing the values to be replaced in the
    template.

    config_template may be provided to render from a provided template instead
    of loading from a file.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    The rendered template will be written to the file as well as being returned
    as a string.

    Note: Using this requires python-jinja2 or python3-jinja2; if it is not
    installed, calling this will attempt to use charmhelpers.fetch.apt_install
    to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        # Install the interpreter-appropriate jinja2 package, then retry.
        if sys.version_info.major == 2:
            apt_install('python-jinja2', fatal=True)
        else:
            apt_install('python3-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))

    # load from a string if provided explicitly
    if config_template is not None:
        template = template_env.from_string(config_template)
    else:
        try:
            # (removed a no-op `source = source` self-assignment here)
            template = template_env.get_template(source)
        except exceptions.TemplateNotFound as e:
            hookenv.log('Could not load template %s from %s.' %
                        (source, templates_dir),
                        level=hookenv.ERROR)
            raise e
    content = template.render(context)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group, perms)
    return content
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
new file mode 100644
index 0000000..6d7b494
--- /dev/null
+++ b/tests/charmhelpers/core/unitdata.py
@@ -0,0 +1,520 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Authors:
+# Kapil Thangavelu
+#
+"""
+Intro
+-----
+
+A simple way to store state in units. This provides a key value
+storage with support for versioned, transactional operation,
+and can calculate deltas from previous values to simplify unit logic
+when processing changes.
+
+
+Hook Integration
+----------------
+
+There are several extant frameworks for hook execution, including
+
+ - charmhelpers.core.hookenv.Hooks
+ - charmhelpers.core.services.ServiceManager
+
+The storage classes are framework agnostic, one simple integration is
+via the HookData contextmanager. It will record the current hook
+execution environment (including relation data, config data, etc.),
+setup a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+ from charmhelper.core import hookenv, unitdata
+
+ hook_data = unitdata.HookData()
+ db = unitdata.kv()
+ hooks = hookenv.Hooks()
+
+ @hooks.hook
+ def config_changed():
+ # Print all changes to configuration from previously seen
+ # values.
+ for changed, (prev, cur) in hook_data.conf.items():
+ print('config changed', changed,
+ 'previous value', prev,
+ 'current value', cur)
+
+       # Get some unit specific bookkeeping
+ if not db.get('pkg_key'):
+ key = urllib.urlopen('https://example.com/pkg_key').read()
+ db.set('pkg_key', key)
+
+ # Directly access all charm config as a mapping.
+ conf = db.getrange('config', True)
+
+ # Directly access all relation data as a mapping
+ rels = db.getrange('rels', True)
+
+ if __name__ == '__main__':
+ with hook_data():
+ hook.execute()
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+ >>> from unitdata import kv
+ >>> db = kv()
+ >>> with db.hook_scope('install'):
+ ... # do work, in transactional scope.
+ ... db.set('x', 1)
+ >>> db.get('x')
+ 1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data struct capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+ >>> kv.set('y', True)
+ >>> kv.get('y')
+ True
+
+ # We can set complex values (dicts, lists) as a single key.
+ >>> kv.set('config', {'a': 1, 'b': True'})
+
+ # Also supports returning dictionaries as a record which
+ # provides attribute access.
+ >>> config = kv.get('config', record=True)
+ >>> config.b
+ True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+ >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+ >>> kv.getrange('gui.', strip=True)
+ {'z': 1, 'y': 2}
+
+When updating values, it's very helpful to understand which values
+have actually changed and how have they changed. The storage
+provides a delta method to provide for this::
+
+ >>> data = {'debug': True, 'option': 2}
+ >>> delta = kv.delta(data, 'config.')
+ >>> delta.debug.previous
+ None
+ >>> delta.debug.current
+ True
+ >>> delta
+ {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change, it needs to
+be explicitly saved via 'update' method::
+
+ >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated to the hookname.
+
+ >>> with db.hook_scope('config-changed'):
+ ... db.set('x', 42)
+ >>> db.gethistory('x')
+ [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+ (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu '
+
+
+class Storage(object):
+ """Simple key value database for local unit state within charms.
+
+ Modifications are not persisted unless :meth:`flush` is called.
+
+ To support dicts, lists, integer, floats, and booleans values
+ are automatically json encoded/decoded.
+ """
    def __init__(self, path=None):
        # Backing-file resolution order: explicit path argument, then
        # $UNIT_STATE_DB, then $CHARM_DIR/.unit-state.db.
        self.db_path = path
        if path is None:
            if 'UNIT_STATE_DB' in os.environ:
                self.db_path = os.environ['UNIT_STATE_DB']
            else:
                self.db_path = os.path.join(
                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
        with open(self.db_path, 'a') as f:
            # Ensure the file exists with owner-only permissions before
            # sqlite opens it; unit state may contain secrets.
            os.fchmod(f.fileno(), 0o600)
        self.conn = sqlite3.connect('%s' % self.db_path)
        self.cursor = self.conn.cursor()
        self.revision = None
        self._closed = False
        self._init()
+
    def close(self):
        # Idempotent shutdown: roll back any uncommitted work, release the
        # cursor and connection, and mark the store unusable.
        if self._closed:
            return
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True
+
+ def get(self, key, default=None, record=False):
+ self.cursor.execute('select data from kv where key=?', [key])
+ result = self.cursor.fetchone()
+ if not result:
+ return default
+ if record:
+ return Record(json.loads(result[0]))
+ return json.loads(result[0])
+
    def getrange(self, key_prefix, strip=False):
        """
        Get a range of keys starting with a common prefix as a mapping of
        keys to values.

        :param str key_prefix: Common prefix among all keys
        :param bool strip: Optionally strip the common prefix from the key
            names in the returned dict
        :return dict: A (possibly empty) dict of key-value mappings
        """
        # NOTE(review): key_prefix is interpolated into a LIKE pattern, so
        # '%' or '_' characters within it act as wildcards.
        self.cursor.execute("select key, data from kv where key like ?",
                            ['%s%%' % key_prefix])
        result = self.cursor.fetchall()

        if not result:
            return {}
        if not strip:
            # Reuse the slicing below with a zero-length prefix.
            key_prefix = ''
        return dict([
            (k[len(key_prefix):], json.loads(v)) for k, v in result])
+
+ def update(self, mapping, prefix=""):
+ """
+ Set the values of multiple keys at once.
+
+ :param dict mapping: Mapping of keys to values
+ :param str prefix: Optional prefix to apply to all keys in `mapping`
+ before setting
+ """
+ for k, v in mapping.items():
+ self.set("%s%s" % (prefix, k), v)
+
    def unset(self, key):
        """
        Remove a key from the database entirely.
        """
        self.cursor.execute('delete from kv where key=?', [key])
        if self.revision and self.cursor.rowcount:
            # Inside a hook_scope: record the deletion in the history table
            # with a 'DELETED' sentinel value.
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])
+
    def unsetrange(self, keys=None, prefix=""):
        """
        Remove a range of keys starting with a common prefix, from the database
        entirely.

        :param list keys: List of keys to remove.
        :param str prefix: Optional prefix to apply to all keys in ``keys``
            before removing.
        """
        if keys is not None:
            # NOTE(review): an *empty* keys list builds the invalid SQL
            # 'key in ()' - callers must pass None or a non-empty list.
            keys = ['%s%s' % (prefix, key) for key in keys]
            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
            if self.revision and self.cursor.rowcount:
                # Record one 'DELETED' history row per removed key.
                self.cursor.execute(
                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
        else:
            # No explicit keys: delete everything under the prefix, recording
            # a single history row keyed by the LIKE pattern itself.
            self.cursor.execute('delete from kv where key like ?',
                                ['%s%%' % prefix])
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values (?, ?, ?)',
                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
+
    def set(self, key, value):
        """
        Set a value in the database.

        :param str key: Key to set the value for
        :param value: Any JSON-serializable value to be set
        :return: the value that was set (convenience for chaining)
        """
        serialized = json.dumps(value)

        self.cursor.execute('select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Save
        if not self.revision:
            # Not inside a hook_scope: no per-revision history to record.
            return value

        # Within a hook_scope: upsert the value into the history table too.
        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                and revision = ?''',
                [serialized, key, self.revision])

        return value
+
+    def delta(self, mapping, prefix):
+        """
+        return a delta containing values that have changed.
+
+        :param dict mapping: Current values, keyed without the prefix.
+        :param str prefix: Namespace the previous values were stored
+            under; compared via getrange(prefix, strip=True).
+        :return: DeltaSet mapping each added, removed, or changed key to
+            a Delta(previous, current) pair (None marks absent values).
+        """
+        previous = self.getrange(prefix, strip=True)
+        if not previous:
+            pk = set()
+        else:
+            pk = set(previous.keys())
+        ck = set(mapping.keys())
+        delta = DeltaSet()
+
+        # added
+        for k in ck.difference(pk):
+            delta[k] = Delta(None, mapping[k])
+
+        # removed
+        for k in pk.difference(ck):
+            delta[k] = Delta(previous[k], None)
+
+        # changed
+        for k in pk.intersection(ck):
+            c = mapping[k]
+            p = previous[k]
+            if c != p:
+                delta[k] = Delta(p, c)
+
+        return delta
+
+    @contextlib.contextmanager
+    def hook_scope(self, name=""):
+        """Scope all future interactions to the current hook execution
+        revision."""
+        # Nested scopes are not supported.
+        assert not self.revision
+        self.cursor.execute(
+            'insert into hooks (hook, date) values (?, ?)',
+            (name or sys.argv[0],
+             datetime.datetime.utcnow().isoformat()))
+        # The autoincremented hooks row id becomes the revision tag for
+        # every kv_revisions entry written while this scope is active.
+        self.revision = self.cursor.lastrowid
+        try:
+            yield self.revision
+            self.revision = None
+        except Exception:
+            # Hook failed: roll back everything written in this scope.
+            self.flush(False)
+            self.revision = None
+            raise
+        else:
+            # Hook completed: commit its writes.
+            self.flush()
+
+    def flush(self, save=True):
+        """Commit (save=True) or roll back (save=False) the current
+        transaction. Rolling back after the connection has been closed
+        is a no-op."""
+        if save:
+            self.conn.commit()
+        elif self._closed:
+            # Only reached when save is falsy: nothing to roll back on
+            # a closed connection.
+            return
+        else:
+            self.conn.rollback()
+
+    def _init(self):
+        """Create the backing tables if they do not already exist."""
+        # Current key/value state.
+        self.cursor.execute('''
+            create table if not exists kv (
+               key text,
+               data text,
+               primary key (key)
+               )''')
+        # Per-revision history of values; written only inside a
+        # hook_scope (see set()).
+        self.cursor.execute('''
+            create table if not exists kv_revisions (
+               key text,
+               revision integer,
+               data text,
+               primary key (key, revision)
+               )''')
+        # One row per hook execution; its autoincrement id is used as
+        # the revision number by hook_scope().
+        self.cursor.execute('''
+            create table if not exists hooks (
+               version integer primary key autoincrement,
+               hook text,
+               date text
+               )''')
+        self.conn.commit()
+
+    def gethistory(self, key, deserialize=False):
+        """Return the recorded history of a key as
+        (revision, key, data, hook, date) tuples.
+
+        :param str key: Key to fetch history for.
+        :param bool deserialize: When True, each row is passed through
+            _parse_history (JSON-decodes data, parses the date).
+        """
+        self.cursor.execute(
+            '''
+            select kv.revision, kv.key, kv.data, h.hook, h.date
+            from kv_revisions kv,
+                 hooks h
+            where kv.key=?
+             and kv.revision = h.version
+            ''', [key])
+        if deserialize is False:
+            return self.cursor.fetchall()
+        return map(_parse_history, self.cursor.fetchall())
+
+    def debug(self, fh=sys.stderr):
+        """Pretty-print the kv and kv_revisions tables to ``fh`` for
+        debugging (defaults to stderr)."""
+        self.cursor.execute('select * from kv')
+        pprint.pprint(self.cursor.fetchall(), stream=fh)
+        self.cursor.execute('select * from kv_revisions')
+        pprint.pprint(self.cursor.fetchall(), stream=fh)
+
+
+def _parse_history(d):
+    # Decode a gethistory() row into
+    # (revision, key, deserialized data, hook name, parsed ISO-8601 date).
+    return (d[0], d[1], json.loads(d[2]), d[3],
+            datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
+
+
+class HookData(object):
+    """Simple integration for existing hook exec frameworks.
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value', cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute()
+
+    """
+    def __init__(self):
+        # Shared unit-local key/value store (module singleton).
+        self.kv = kv()
+        # Populated by _record_hook() when used as a context manager:
+        # deltas of config and relation data respectively.
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        """Open a hook scope on the kv store, record charm/config/
+        relation state, and yield (kv, config_delta, relation_delta)."""
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions.. charm revisions are meaningless
+        # to charm authors as they don't control the revision.
+        # so logic dependent on revision is not particularly
+        # useful, however it is useful for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        """Store the hook's execution environment in kv and return the
+        deltas for config (prefix 'config') and relations ('rels')."""
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+    """A dict whose keys are also readable as attributes
+    (record.key == record['key']); missing keys raise AttributeError."""
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+    """Record of changes: maps each key to a Delta(previous, current)
+    pair (see Storage.delta)."""
+
+    __slots__ = ()
+
+
+# (previous, current) value pair describing a single key's change.
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+# Lazily-created module-level Storage singleton; access via kv().
+_KV = None
+
+
+def kv():
+    """Return the shared unit-local Storage singleton, creating it on
+    first use."""
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV
diff --git a/tests/charmhelpers/osplatform.py b/tests/charmhelpers/osplatform.py
new file mode 100644
index 0000000..d9a4d5c
--- /dev/null
+++ b/tests/charmhelpers/osplatform.py
@@ -0,0 +1,25 @@
+import platform
+
+
+def get_platform():
+    """Return the current OS platform.
+
+    For example: if current os platform is Ubuntu then a string "ubuntu"
+    will be returned (which is the name of the module).
+    This string is used to decide which platform module should be imported.
+    """
+    # linux_distribution is deprecated and will be removed in Python 3.7
+    # Warnings *not* disabled, as we certainly need to fix this.
+    tuple_platform = platform.linux_distribution()
+    current_platform = tuple_platform[0]
+    if "Ubuntu" in current_platform:
+        return "ubuntu"
+    elif "CentOS" in current_platform:
+        return "centos"
+    elif "debian" in current_platform:
+        # Stock Python does not detect Ubuntu and instead returns debian.
+        # Or at least it does in some build environments like Travis CI
+        return "ubuntu"
+    else:
+        raise RuntimeError("This module is not supported on {}."
+                           .format(current_platform))
diff --git a/tests/gate-basic-bionic-queens b/tests/gate-basic-bionic-queens
new file mode 100755
index 0000000..41763ea
--- /dev/null
+++ b/tests/gate-basic-bionic-queens
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic Glance deployment on bionic-queens."""
+
+from basic_deployment import GlanceBasicDeployment
+
+if __name__ == '__main__':
+    deployment = GlanceBasicDeployment(series='bionic')  # queens from the bionic distro archive
+    deployment.run_tests()
diff --git a/tests/gate-basic-trusty-icehouse b/tests/gate-basic-trusty-icehouse
new file mode 100755
index 0000000..a336b99
--- /dev/null
+++ b/tests/gate-basic-trusty-icehouse
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic Glance deployment on trusty-icehouse."""
+
+from basic_deployment import GlanceBasicDeployment
+
+if __name__ == '__main__':
+    deployment = GlanceBasicDeployment(series='trusty')  # icehouse from the trusty distro archive
+    deployment.run_tests()
diff --git a/tests/gate-basic-trusty-mitaka b/tests/gate-basic-trusty-mitaka
new file mode 100755
index 0000000..6eeaa12
--- /dev/null
+++ b/tests/gate-basic-trusty-mitaka
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic glance deployment on trusty-mitaka."""
+
+from basic_deployment import GlanceBasicDeployment
+
+if __name__ == '__main__':
+    deployment = GlanceBasicDeployment(series='trusty',
+                                       openstack='cloud:trusty-mitaka',  # mitaka via Ubuntu Cloud Archive
+                                       source='cloud:trusty-updates/mitaka')
+    deployment.run_tests()
diff --git a/tests/gate-basic-xenial-mitaka b/tests/gate-basic-xenial-mitaka
new file mode 100755
index 0000000..98b343e
--- /dev/null
+++ b/tests/gate-basic-xenial-mitaka
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic Glance deployment on xenial-mitaka."""
+
+from basic_deployment import GlanceBasicDeployment
+
+if __name__ == '__main__':
+    deployment = GlanceBasicDeployment(series='xenial')  # mitaka from the xenial distro archive
+    deployment.run_tests()
diff --git a/tests/gate-basic-xenial-ocata b/tests/gate-basic-xenial-ocata
new file mode 100755
index 0000000..ce43ab0
--- /dev/null
+++ b/tests/gate-basic-xenial-ocata
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic glance deployment on xenial-ocata."""
+
+from basic_deployment import GlanceBasicDeployment
+
+if __name__ == '__main__':
+    deployment = GlanceBasicDeployment(series='xenial',
+                                       openstack='cloud:xenial-ocata',  # ocata via Ubuntu Cloud Archive
+                                       source='cloud:xenial-updates/ocata')
+    deployment.run_tests()
diff --git a/tests/gate-basic-xenial-pike b/tests/gate-basic-xenial-pike
new file mode 100755
index 0000000..2f91304
--- /dev/null
+++ b/tests/gate-basic-xenial-pike
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic glance deployment on xenial-pike."""
+
+from basic_deployment import GlanceBasicDeployment
+
+if __name__ == '__main__':
+    deployment = GlanceBasicDeployment(series='xenial',
+                                       openstack='cloud:xenial-pike',  # pike via Ubuntu Cloud Archive
+                                       source='cloud:xenial-updates/pike')
+    deployment.run_tests()
diff --git a/tests/gate-basic-xenial-queens b/tests/gate-basic-xenial-queens
new file mode 100755
index 0000000..44f06e6
--- /dev/null
+++ b/tests/gate-basic-xenial-queens
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic glance deployment on xenial-queens."""
+
+from basic_deployment import GlanceBasicDeployment
+
+if __name__ == '__main__':
+    deployment = GlanceBasicDeployment(series='xenial',
+                                       openstack='cloud:xenial-queens',  # queens via Ubuntu Cloud Archive
+                                       source='cloud:xenial-updates/queens')
+    deployment.run_tests()
diff --git a/tests/tests.yaml b/tests/tests.yaml
new file mode 100644
index 0000000..a03e7ba
--- /dev/null
+++ b/tests/tests.yaml
@@ -0,0 +1,18 @@
+# Bootstrap the model if necessary.
+bootstrap: True
+# Re-use bootstrap node.
+reset: True
+# Use tox/requirements to drive the venv instead of bundletester's venv feature.
+virtualenv: False
+# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
+makefile: []
+# Do not specify juju PPA sources. Juju is presumed to be pre-installed
+# and configured in all test runner environments.
+#sources:
+# Do not specify or rely on system packages.
+#packages:
+# Do not specify python packages here. Use test-requirements.txt
+# and tox instead. ie. The venv is constructed before bundletester
+# is invoked.
+#python-packages:
+reset_timeout: 600
diff --git a/tox.ini b/tox.ini
index 11ca8d9..4319064 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,3 +1,6 @@
+# Classic charm: ./tox.ini
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos.
[tox]
envlist = pep8,py27
skipsdist = True
@@ -5,20 +8,29 @@ skipsdist = True
[testenv]
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
+ CHARM_DIR={envdir}
+ AMULET_SETUP_TIMEOUT=5400
install_command =
- pip install --allow-unverified python-apt {opts} {packages}
+ pip install {opts} {packages}
commands = ostestr {posargs}
+whitelist_externals = juju
+passenv = HOME TERM AMULET_* CS_API_*
[testenv:py27]
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+[testenv:py35]
+basepython = python3.5
+deps = -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+
[testenv:pep8]
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests
+commands = flake8 {posargs} hooks unit_tests tests actions lib
charm-proof
[testenv:venv]
@@ -48,7 +60,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
- bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
+ bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
[testenv:func27-dfs]
# Charm Functional Test
@@ -70,4 +82,4 @@ commands =
[flake8]
ignore = E402,E226
-exclude = hooks/charmhelpers
+exclude = */charmhelpers