Merge pull request #2 from openstack-charmers/add-tests

Add functional tests and sync charm helpers

Commit: d8355af37c
@@ -1,7 +1,17 @@
*pyc
.bzr
bin
.coverage
.venv
.testrepository/
.tox/
bin
tags
.tox
.testrepository
*.sw[nop]
*.pyc
.unit-state.db
tests/*.img
.idea
.stestr
.local
.pydevproject
repo-info
results*.txt
func-results.json
__pycache__
@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/glance-simplestreams-sync</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
</pydev_project>
Makefile
@@ -1,23 +1,24 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python
CHARM_DIR := $(PWD)
HOOKS_DIR := $(PWD)/hooks
TEST_PREFIX := PYTHONPATH=$(HOOKS_DIR)

clean:
	find . -name '*.pyc' -delete

lint:
	@tox -e pep8

unit_tests:
test:
	@echo Starting unit tests...
	@tox -e py27

functional_test:
	@echo Starting functional tests...
	@tox -e func27

bin/charm_helpers_sync.py:
	@mkdir -p bin
	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
		> bin/charm_helpers_sync.py
	@curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py

sync: bin/charm_helpers_sync.py
	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml

all: test lint
@@ -1,3 +1,8 @@
# Notice

This charm has been tested with the Trusty and Xenial series. The Bionic
series is enabled, but is still under test and in development.

# Overview

This charm provides a service that syncs your OpenStack cloud's
@@ -0,0 +1,7 @@
repo: https://github.com/juju/charm-helpers
destination: tests/charmhelpers
include:
    - core
    - contrib.amulet
    - contrib.openstack.amulet
    - osplatform
@@ -30,6 +30,7 @@ import yaml

from charmhelpers.core.hookenv import (
    config,
    hook_name,
    local_unit,
    log,
    relation_ids,

@@ -285,7 +286,7 @@ class NRPE(object):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except:
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

@@ -302,7 +303,12 @@ class NRPE(object):
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')
        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing, causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
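For reference, a minimal sketch of how a charm consumes this helper; the check name and command below are illustrative assumptions, not taken from this diff:

# Hedged usage sketch of charmhelpers.contrib.charmsupport.nrpe.
# 'simplestreams_sync' and the check_cmd value are hypothetical examples.
from charmhelpers.contrib.charmsupport import nrpe

def update_nrpe_config():
    hostname = nrpe.get_nagios_hostname()
    checks = nrpe.NRPE(hostname=hostname)
    checks.add_check(
        shortname='simplestreams_sync',
        description='Check simplestreams sync status file age',
        check_cmd='check_file_age /var/run/glance-simplestreams-sync.status',
    )
    # With the change above, write() skips the nagios-nrpe-server restart
    # when running inside an update-status hook.
    checks.write()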
@ -65,7 +65,8 @@ def get_ca_cert():
|
|||
if ca_cert is None:
|
||||
log("Inspecting identity-service relations for CA SSL certificate.",
|
||||
level=INFO)
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for r_id in (relation_ids('identity-service') +
|
||||
relation_ids('identity-credentials')):
|
||||
for unit in relation_list(r_id):
|
||||
if ca_cert is None:
|
||||
ca_cert = relation_get('ca_cert',
|
||||
|
@ -90,6 +91,6 @@ def install_ca_cert(ca_cert):
|
|||
log("CA cert is the same as installed version", level=INFO)
|
||||
else:
|
||||
log("Installing new CA cert", level=INFO)
|
||||
with open(cert_file, 'w') as crt:
|
||||
with open(cert_file, 'wb') as crt:
|
||||
crt.write(ca_cert)
|
||||
subprocess.check_call(['update-ca-certificates', '--fresh'])
|
||||
|
|
|
@ -27,6 +27,7 @@ clustering-related helpers.
|
|||
|
||||
import subprocess
|
||||
import os
|
||||
import time
|
||||
|
||||
from socket import gethostname as get_unit_hostname
|
||||
|
||||
|
@ -45,6 +46,9 @@ from charmhelpers.core.hookenv import (
|
|||
is_leader as juju_is_leader,
|
||||
status_set,
|
||||
)
|
||||
from charmhelpers.core.host import (
|
||||
modulo_distribution,
|
||||
)
|
||||
from charmhelpers.core.decorators import (
|
||||
retry_on_exception,
|
||||
)
|
||||
|
@@ -361,3 +365,37 @@ def canonical_url(configs, vip_setting='vip'):
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)


def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    ''' Distribute operations by waiting based on modulo_distribution

    If modulo and/or wait are not set, check config_get for those values.
    If config values are not set, default to modulo=3 and wait=30.

    :param modulo: int The modulo number creates the group distribution
    :param wait: int The constant time wait value
    :param operation_name: string Operation name for status message
                           i.e. 'restart'
    :side effect: Calls config_get()
    :side effect: Calls log()
    :side effect: Calls status_set()
    :side effect: Calls time.sleep()
    '''
    if modulo is None:
        modulo = config_get('modulo-nodes') or 3
    if wait is None:
        wait = config_get('known-wait') or 30
    if juju_is_leader():
        # The leader should never wait
        calculated_wait = 0
    else:
        # non_zero_wait=True guarantees the non-leader who gets modulo 0
        # will still wait
        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
                                              non_zero_wait=True)
    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                 operation_name)
    log(msg, DEBUG)
    status_set('maintenance', msg)
    time.sleep(calculated_wait)
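A hedged example of how a charm might call the new helper to stagger restarts across peer units; the service name and modulo/wait values are assumptions for illustration:

# Illustrative only: spread restarts so peer units do not bounce a service
# at the same time. The leader restarts immediately (see above).
from charmhelpers.contrib.hahelpers.cluster import distributed_wait
from charmhelpers.core.host import service_restart

def staggered_restart(service_name='haproxy'):
    # Units are grouped modulo 3 and each group waits a 30s slot by default;
    # explicit values are shown here only for clarity.
    distributed_wait(modulo=3, wait=30, operation_name='restart')
    service_restart(service_name)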
@ -27,6 +27,7 @@ from charmhelpers.core.hookenv import (
|
|||
network_get_primary_address,
|
||||
unit_get,
|
||||
WARNING,
|
||||
NoNetworkBinding,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
|
@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False):
|
|||
_validate_cidr(network)
|
||||
network = netaddr.IPNetwork(network)
|
||||
for iface in netifaces.interfaces():
|
||||
addresses = netifaces.ifaddresses(iface)
|
||||
try:
|
||||
addresses = netifaces.ifaddresses(iface)
|
||||
except ValueError:
|
||||
# If an instance was deleted between
|
||||
# netifaces.interfaces() run and now, its interfaces are gone
|
||||
continue
|
||||
if network.version == 4 and netifaces.AF_INET in addresses:
|
||||
for addr in addresses[netifaces.AF_INET]:
|
||||
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
|
||||
|
@ -490,7 +496,7 @@ def get_host_ip(hostname, fallback=None):
|
|||
if not ip_addr:
|
||||
try:
|
||||
ip_addr = socket.gethostbyname(hostname)
|
||||
except:
|
||||
except Exception:
|
||||
log("Failed to resolve hostname '%s'" % (hostname),
|
||||
level=WARNING)
|
||||
return fallback
|
||||
|
@ -518,7 +524,7 @@ def get_hostname(address, fqdn=True):
|
|||
if not result:
|
||||
try:
|
||||
result = socket.gethostbyaddr(address)[0]
|
||||
except:
|
||||
except Exception:
|
||||
return None
|
||||
else:
|
||||
result = address
|
||||
|
@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None):
|
|||
except NotImplementedError:
|
||||
# If network-get is not available
|
||||
address = get_host_ip(unit_get('private-address'))
|
||||
except NoNetworkBinding:
|
||||
log("No network binding for {}".format(interface), WARNING)
|
||||
address = get_host_ip(unit_get('private-address'))
|
||||
|
||||
if config('prefer-ipv6'):
|
||||
# Currently IPv6 has priority, eventually we want IPv6 to just be
|
||||
|
|
|
@@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50):
        target, name, source, str(priority)
    ]
    subprocess.check_call(cmd)


def remove_alternative(name, source):
    """Remove an installed alternative configuration file

    :param name: string name of the alternative to remove
    :param source: string full path to alternative to remove
    """
    cmd = [
        'update-alternatives', '--remove',
        name, source
    ]
    subprocess.check_call(cmd)
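As a sketch, remove_alternative() mirrors the existing install_alternative() in the same module; a charm might pair them as below (the file paths are hypothetical):

# Hedged pairing of install_alternative()/remove_alternative().
# CONF and CHARM_CONF are made-up paths for illustration.
from charmhelpers.contrib.openstack.alternatives import (
    install_alternative,
    remove_alternative,
)

CONF = '/etc/example/example.conf'
CHARM_CONF = '/var/lib/charm/example/example.conf'

def register_conf():
    install_alternative('example.conf', CONF, CHARM_CONF, priority=60)

def unregister_conf():
    remove_alternative('example.conf', CHARM_CONF)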
|
@ -13,6 +13,7 @@
|
|||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import six
|
||||
|
@ -20,6 +21,9 @@ from collections import OrderedDict
|
|||
from charmhelpers.contrib.amulet.deployment import (
|
||||
AmuletDeployment
|
||||
)
|
||||
from charmhelpers.contrib.openstack.amulet.utils import (
|
||||
OPENSTACK_RELEASES_PAIRS
|
||||
)
|
||||
|
||||
DEBUG = logging.DEBUG
|
||||
ERROR = logging.ERROR
|
||||
|
@ -185,7 +189,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||
self.d.configure(service, config)
|
||||
|
||||
def _auto_wait_for_status(self, message=None, exclude_services=None,
|
||||
include_only=None, timeout=1800):
|
||||
include_only=None, timeout=None):
|
||||
"""Wait for all units to have a specific extended status, except
|
||||
for any defined as excluded. Unless specified via message, any
|
||||
status containing any case of 'ready' will be considered a match.
|
||||
|
@@ -215,7 +219,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        :param timeout: Maximum time in seconds to wait for status match
        :returns: None. Raises if timeout is hit.
        """
        self.log.info('Waiting for extended status on units...')
        if not timeout:
            timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
        self.log.info('Waiting for extended status on units for {}s...'
                      ''.format(timeout))

        all_services = self.d.services.keys()
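The wait timeout now defaults from the environment; a hedged example of overriding it for a slow test cloud (the value and the exclusion list are assumptions):

# Illustrative: AMULET_SETUP_TIMEOUT overrides the 1800s default used above.
import os

os.environ['AMULET_SETUP_TIMEOUT'] = '2700'

# Later, inside an amulet deployment subclass:
#     self._auto_wait_for_status(exclude_services=['nrpe'])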
@ -250,7 +257,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||
self.log.debug('Waiting up to {}s for extended status on services: '
|
||||
'{}'.format(timeout, services))
|
||||
service_messages = {service: message for service in services}
|
||||
|
||||
# Check for idleness
|
||||
self.d.sentry.wait(timeout=timeout)
|
||||
# Check for error states and bail early
|
||||
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
|
||||
# Check for ready messages
|
||||
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
|
||||
|
||||
self.log.info('OK')
|
||||
|
||||
def _get_openstack_release(self):
|
||||
|
@ -260,10 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||
release.
|
||||
"""
|
||||
# Must be ordered by OpenStack release (not by Ubuntu release):
|
||||
(self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
|
||||
self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
|
||||
self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
|
||||
self.xenial_pike, self.artful_pike) = range(11)
|
||||
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
|
||||
setattr(self, os_pair, i)
|
||||
|
||||
releases = {
|
||||
('trusty', None): self.trusty_icehouse,
|
||||
|
@ -274,9 +286,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
|
||||
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
|
||||
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
|
||||
('xenial', 'cloud:xenial-queens'): self.xenial_queens,
|
||||
('yakkety', None): self.yakkety_newton,
|
||||
('zesty', None): self.zesty_ocata,
|
||||
('artful', None): self.artful_pike,
|
||||
('bionic', None): self.bionic_queens,
|
||||
}
|
||||
return releases[(self.series, self.openstack)]
|
||||
|
||||
|
@ -291,6 +305,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||
('yakkety', 'newton'),
|
||||
('zesty', 'ocata'),
|
||||
('artful', 'pike'),
|
||||
('bionic', 'queens'),
|
||||
])
|
||||
if self.openstack:
|
||||
os_origin = self.openstack.split(':')[1]
|
||||
|
@ -303,20 +318,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||
test scenario, based on OpenStack release and whether ceph radosgw
|
||||
is flagged as present or not."""
|
||||
|
||||
if self._get_openstack_release() >= self.trusty_kilo:
|
||||
# Kilo or later
|
||||
pools = [
|
||||
'rbd',
|
||||
'cinder',
|
||||
'glance'
|
||||
]
|
||||
else:
|
||||
# Juno or earlier
|
||||
if self._get_openstack_release() == self.trusty_icehouse:
|
||||
# Icehouse
|
||||
pools = [
|
||||
'data',
|
||||
'metadata',
|
||||
'rbd',
|
||||
'cinder',
|
||||
'cinder-ceph',
|
||||
'glance'
|
||||
]
|
||||
elif (self.trusty_kilo <= self._get_openstack_release() <=
|
||||
self.zesty_ocata):
|
||||
# Kilo through Ocata
|
||||
pools = [
|
||||
'rbd',
|
||||
'cinder-ceph',
|
||||
'glance'
|
||||
]
|
||||
else:
|
||||
# Pike and later
|
||||
pools = [
|
||||
'cinder-ceph',
|
||||
'glance'
|
||||
]
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@ import urllib
|
|||
import urlparse
|
||||
|
||||
import cinderclient.v1.client as cinder_client
|
||||
import cinderclient.v2.client as cinder_clientv2
|
||||
import glanceclient.v1.client as glance_client
|
||||
import heatclient.v1.client as heat_client
|
||||
from keystoneclient.v2_0 import client as keystone_client
|
||||
|
@ -42,7 +43,6 @@ import swiftclient
|
|||
from charmhelpers.contrib.amulet.utils import (
|
||||
AmuletUtils
|
||||
)
|
||||
from charmhelpers.core.decorators import retry_on_exception
|
||||
from charmhelpers.core.host import CompareHostReleases
|
||||
|
||||
DEBUG = logging.DEBUG
|
||||
|
@@ -50,6 +50,13 @@ ERROR = logging.ERROR

NOVA_CLIENT_VERSION = "2"

OPENSTACK_RELEASES_PAIRS = [
    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
    'xenial_pike', 'artful_pike', 'xenial_queens',
    'bionic_queens']


class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.
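Because releases are now compared by list position, a test can gate behaviour on a release like this (a sketch using the names defined above):

# Sketch: release comparisons are integer index comparisons into
# OPENSTACK_RELEASES_PAIRS.
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')

def use_keystone_v3(current_release):
    # current_release would normally come from
    # OpenStackAmuletDeployment._get_openstack_release().
    return current_release >= xenial_queens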
@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
super(OpenStackAmuletUtils, self).__init__(log_level)
|
||||
|
||||
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
|
||||
public_port, expected):
|
||||
public_port, expected, openstack_release=None):
|
||||
"""Validate endpoint data. Pick the correct validator based on
|
||||
OpenStack release. Expected data should be in the v2 format:
|
||||
{
|
||||
'id': id,
|
||||
'region': region,
|
||||
'adminurl': adminurl,
|
||||
'internalurl': internalurl,
|
||||
'publicurl': publicurl,
|
||||
'service_id': service_id}
|
||||
|
||||
"""
|
||||
validation_function = self.validate_v2_endpoint_data
|
||||
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
|
||||
if openstack_release and openstack_release >= xenial_queens:
|
||||
validation_function = self.validate_v3_endpoint_data
|
||||
expected = {
|
||||
'id': expected['id'],
|
||||
'region': expected['region'],
|
||||
'region_id': 'RegionOne',
|
||||
'url': self.valid_url,
|
||||
'interface': self.not_null,
|
||||
'service_id': expected['service_id']}
|
||||
return validation_function(endpoints, admin_port, internal_port,
|
||||
public_port, expected)
|
||||
|
||||
def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
|
||||
public_port, expected):
|
||||
"""Validate endpoint data.
|
||||
|
||||
Validate actual endpoint data vs expected endpoint data. The ports
|
||||
|
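A hedged example of the updated call from inside a charm's amulet tests, where u is an OpenStackAmuletUtils instance and endpoints comes from the keystone client; the ports and expected dictionary are assumptions for the charm under test:

# Illustrative call; ports and expected data depend on the charm under test.
expected = {'id': u.not_null, 'region': 'RegionOne',
            'adminurl': u.valid_url, 'internalurl': u.valid_url,
            'publicurl': u.valid_url, 'service_id': u.not_null}
ret = u.validate_endpoint_data(
    endpoints, '9292', '9292', '9292', expected,
    openstack_release=self._get_openstack_release())
if ret:
    amulet.raise_status(amulet.FAIL, msg=ret)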
@ -92,7 +126,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
return 'endpoint not found'
|
||||
|
||||
def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
|
||||
public_port, expected):
|
||||
public_port, expected, expected_num_eps=3):
|
||||
"""Validate keystone v3 endpoint data.
|
||||
|
||||
Validate the v3 endpoint data which has changed from v2. The
|
||||
|
@ -138,10 +172,89 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
if ret:
|
||||
return 'unexpected endpoint data - {}'.format(ret)
|
||||
|
||||
if len(found) != 3:
|
||||
if len(found) != expected_num_eps:
|
||||
return 'Unexpected number of endpoints found'
|
||||
|
||||
def validate_svc_catalog_endpoint_data(self, expected, actual):
|
||||
def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
|
||||
"""Convert v2 endpoint data into v3.
|
||||
|
||||
{
|
||||
'service_name1': [
|
||||
{
|
||||
'adminURL': adminURL,
|
||||
'id': id,
|
||||
'region': region.
|
||||
'publicURL': publicURL,
|
||||
'internalURL': internalURL
|
||||
}],
|
||||
'service_name2': [
|
||||
{
|
||||
'adminURL': adminURL,
|
||||
'id': id,
|
||||
'region': region.
|
||||
'publicURL': publicURL,
|
||||
'internalURL': internalURL
|
||||
}],
|
||||
}
|
||||
"""
|
||||
self.log.warn("Endpoint ID and Region ID validation is limited to not "
|
||||
"null checks after v2 to v3 conversion")
|
||||
for svc in ep_data.keys():
|
||||
assert len(ep_data[svc]) == 1, "Unknown data format"
|
||||
svc_ep_data = ep_data[svc][0]
|
||||
ep_data[svc] = [
|
||||
{
|
||||
'url': svc_ep_data['adminURL'],
|
||||
'interface': 'admin',
|
||||
'region': svc_ep_data['region'],
|
||||
'region_id': self.not_null,
|
||||
'id': self.not_null},
|
||||
{
|
||||
'url': svc_ep_data['publicURL'],
|
||||
'interface': 'public',
|
||||
'region': svc_ep_data['region'],
|
||||
'region_id': self.not_null,
|
||||
'id': self.not_null},
|
||||
{
|
||||
'url': svc_ep_data['internalURL'],
|
||||
'interface': 'internal',
|
||||
'region': svc_ep_data['region'],
|
||||
'region_id': self.not_null,
|
||||
'id': self.not_null}]
|
||||
return ep_data
|
||||
|
||||
def validate_svc_catalog_endpoint_data(self, expected, actual,
|
||||
openstack_release=None):
|
||||
"""Validate service catalog endpoint data. Pick the correct validator
|
||||
for the OpenStack version. Expected data should be in the v2 format:
|
||||
{
|
||||
'service_name1': [
|
||||
{
|
||||
'adminURL': adminURL,
|
||||
'id': id,
|
||||
'region': region.
|
||||
'publicURL': publicURL,
|
||||
'internalURL': internalURL
|
||||
}],
|
||||
'service_name2': [
|
||||
{
|
||||
'adminURL': adminURL,
|
||||
'id': id,
|
||||
'region': region.
|
||||
'publicURL': publicURL,
|
||||
'internalURL': internalURL
|
||||
}],
|
||||
}
|
||||
|
||||
"""
|
||||
validation_function = self.validate_v2_svc_catalog_endpoint_data
|
||||
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
|
||||
if openstack_release and openstack_release >= xenial_queens:
|
||||
validation_function = self.validate_v3_svc_catalog_endpoint_data
|
||||
expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
|
||||
return validation_function(expected, actual)
|
||||
|
||||
def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
|
||||
"""Validate service catalog endpoint data.
|
||||
|
||||
Validate a list of actual service catalog endpoints vs a list of
|
||||
|
@ -310,7 +423,6 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
||||
return tenant in [t.name for t in keystone.tenants.list()]
|
||||
|
||||
@retry_on_exception(5, base_delay=10)
|
||||
def keystone_wait_for_propagation(self, sentry_relation_pairs,
|
||||
api_version):
|
||||
"""Iterate over list of sentry and relation tuples and verify that
|
||||
|
@ -326,10 +438,10 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
rel = sentry.relation('identity-service',
|
||||
relation_name)
|
||||
self.log.debug('keystone relation data: {}'.format(rel))
|
||||
if rel['api_version'] != str(api_version):
|
||||
if rel.get('api_version') != str(api_version):
|
||||
raise Exception("api_version not propagated through relation"
|
||||
" data yet ('{}' != '{}')."
|
||||
"".format(rel['api_version'], api_version))
|
||||
"".format(rel.get('api_version'), api_version))
|
||||
|
||||
def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
|
||||
api_version):
|
||||
|
@ -348,15 +460,16 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
|
||||
config = {'preferred-api-version': api_version}
|
||||
deployment.d.configure('keystone', config)
|
||||
deployment._auto_wait_for_status()
|
||||
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
|
||||
|
||||
def authenticate_cinder_admin(self, keystone_sentry, username,
|
||||
password, tenant):
|
||||
def authenticate_cinder_admin(self, keystone, api_version=2):
|
||||
"""Authenticates admin user with cinder."""
|
||||
# NOTE(beisner): cinder python client doesn't accept tokens.
|
||||
keystone_ip = keystone_sentry.info['public-address']
|
||||
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
|
||||
return cinder_client.Client(username, password, tenant, ept)
|
||||
self.log.debug('Authenticating cinder admin...')
|
||||
_clients = {
|
||||
1: cinder_client.Client,
|
||||
2: cinder_clientv2.Client}
|
||||
return _clients[api_version](session=keystone.session)
|
||||
|
||||
def authenticate_keystone(self, keystone_ip, username, password,
|
||||
api_version=False, admin_port=False,
|
||||
|
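The cinder helper now authenticates through a keystone session instead of raw credentials; a sketch of the new call pattern inside an amulet test, where u is an OpenStackAmuletUtils instance and the release lookup mirrors the docstring example added later in this diff:

# Sketch of the session-based flow that replaces the old
# username/password/tenant/endpoint arguments.
session, keystone = u.get_default_keystone_session(
    self.keystone_sentry,
    openstack_release=self._get_openstack_release())
cinder = u.authenticate_cinder_admin(keystone, api_version=2)
print(cinder.volumes.list())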
@ -364,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
project_domain_name=None, project_name=None):
|
||||
"""Authenticate with Keystone"""
|
||||
self.log.debug('Authenticating with keystone...')
|
||||
port = 5000
|
||||
if admin_port:
|
||||
port = 35357
|
||||
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
|
||||
port)
|
||||
if not api_version or api_version == 2:
|
||||
ep = base_ep + "/v2.0"
|
||||
if not api_version:
|
||||
api_version = 2
|
||||
sess, auth = self.get_keystone_session(
|
||||
keystone_ip=keystone_ip,
|
||||
username=username,
|
||||
password=password,
|
||||
api_version=api_version,
|
||||
admin_port=admin_port,
|
||||
user_domain_name=user_domain_name,
|
||||
domain_name=domain_name,
|
||||
project_domain_name=project_domain_name,
|
||||
project_name=project_name
|
||||
)
|
||||
if api_version == 2:
|
||||
client = keystone_client.Client(session=sess)
|
||||
else:
|
||||
client = keystone_client_v3.Client(session=sess)
|
||||
# This populates the client.service_catalog
|
||||
client.auth_ref = auth.get_access(sess)
|
||||
return client
|
||||
|
||||
def get_keystone_session(self, keystone_ip, username, password,
|
||||
api_version=False, admin_port=False,
|
||||
user_domain_name=None, domain_name=None,
|
||||
project_domain_name=None, project_name=None):
|
||||
"""Return a keystone session object"""
|
||||
ep = self.get_keystone_endpoint(keystone_ip,
|
||||
api_version=api_version,
|
||||
admin_port=admin_port)
|
||||
if api_version == 2:
|
||||
auth = v2.Password(
|
||||
username=username,
|
||||
password=password,
|
||||
|
@ -378,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
auth_url=ep
|
||||
)
|
||||
sess = keystone_session.Session(auth=auth)
|
||||
client = keystone_client.Client(session=sess)
|
||||
# This populates the client.service_catalog
|
||||
client.auth_ref = auth.get_access(sess)
|
||||
return client
|
||||
else:
|
||||
ep = base_ep + "/v3"
|
||||
auth = v3.Password(
|
||||
user_domain_name=user_domain_name,
|
||||
username=username,
|
||||
|
@ -394,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
auth_url=ep
|
||||
)
|
||||
sess = keystone_session.Session(auth=auth)
|
||||
client = keystone_client_v3.Client(session=sess)
|
||||
# This populates the client.service_catalog
|
||||
client.auth_ref = auth.get_access(sess)
|
||||
return client
|
||||
return (sess, auth)
|
||||
|
||||
def get_keystone_endpoint(self, keystone_ip, api_version=None,
|
||||
admin_port=False):
|
||||
"""Return keystone endpoint"""
|
||||
port = 5000
|
||||
if admin_port:
|
||||
port = 35357
|
||||
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
|
||||
port)
|
||||
if api_version == 2:
|
||||
ep = base_ep + "/v2.0"
|
||||
else:
|
||||
ep = base_ep + "/v3"
|
||||
return ep
|
||||
|
||||
def get_default_keystone_session(self, keystone_sentry,
|
||||
openstack_release=None):
|
||||
"""Return a keystone session object and client object assuming standard
|
||||
default settings
|
||||
|
||||
Example call in amulet tests:
|
||||
self.keystone_session, self.keystone = u.get_default_keystone_session(
|
||||
self.keystone_sentry,
|
||||
openstack_release=self._get_openstack_release())
|
||||
|
||||
The session can then be used to auth other clients:
|
||||
neutronclient.Client(session=session)
|
||||
aodh_client.Client(session=session)
|
||||
glance.Client(session=session)
|
||||
"""
|
||||
self.log.debug('Authenticating keystone admin...')
|
||||
api_version = 2
|
||||
client_class = keystone_client.Client
|
||||
# 11 => xenial_queens
|
||||
if openstack_release and openstack_release >= 11:
|
||||
api_version = 3
|
||||
client_class = keystone_client_v3.Client
|
||||
keystone_ip = keystone_sentry.info['public-address']
|
||||
session, auth = self.get_keystone_session(
|
||||
keystone_ip,
|
||||
api_version=api_version,
|
||||
username='admin',
|
||||
password='openstack',
|
||||
project_name='admin',
|
||||
user_domain_name='admin_domain',
|
||||
project_domain_name='admin_domain')
|
||||
client = client_class(session=session)
|
||||
# This populates the client.service_catalog
|
||||
client.auth_ref = auth.get_access(session)
|
||||
return session, client
|
||||
|
||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
||||
tenant=None, api_version=None,
|
||||
|
@ -617,13 +795,25 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
self.log.debug('Keypair ({}) already exists, '
|
||||
'using it.'.format(keypair_name))
|
||||
return _keypair
|
||||
except:
|
||||
except Exception:
|
||||
self.log.debug('Keypair ({}) does not exist, '
|
||||
'creating it.'.format(keypair_name))
|
||||
|
||||
_keypair = nova.keypairs.create(name=keypair_name)
|
||||
return _keypair
|
||||
|
||||
def _get_cinder_obj_name(self, cinder_object):
|
||||
"""Retrieve name of cinder object.
|
||||
|
||||
:param cinder_object: cinder snapshot or volume object
|
||||
:returns: str cinder object name
|
||||
"""
|
||||
# v1 objects store name in 'display_name' attr but v2+ use 'name'
|
||||
try:
|
||||
return cinder_object.display_name
|
||||
except AttributeError:
|
||||
return cinder_object.name
|
||||
|
||||
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
|
||||
img_id=None, src_vol_id=None, snap_id=None):
|
||||
"""Create cinder volume, optionally from a glance image, OR
|
||||
|
@ -674,6 +864,13 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
source_volid=src_vol_id,
|
||||
snapshot_id=snap_id)
|
||||
vol_id = vol_new.id
|
||||
except TypeError:
|
||||
vol_new = cinder.volumes.create(name=vol_name,
|
||||
imageRef=img_id,
|
||||
size=vol_size,
|
||||
source_volid=src_vol_id,
|
||||
snapshot_id=snap_id)
|
||||
vol_id = vol_new.id
|
||||
except Exception as e:
|
||||
msg = 'Failed to create volume: {}'.format(e)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
@ -688,7 +885,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
|
||||
# Re-validate new volume
|
||||
self.log.debug('Validating volume attributes...')
|
||||
val_vol_name = cinder.volumes.get(vol_id).display_name
|
||||
val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
|
||||
val_vol_boot = cinder.volumes.get(vol_id).bootable
|
||||
val_vol_stat = cinder.volumes.get(vol_id).status
|
||||
val_vol_size = cinder.volumes.get(vol_id).size
|
||||
|
@ -836,9 +1033,12 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
:returns: List of pool name, object count, kb disk space used
|
||||
"""
|
||||
df = self.get_ceph_df(sentry_unit)
|
||||
pool_name = df['pools'][pool_id]['name']
|
||||
obj_count = df['pools'][pool_id]['stats']['objects']
|
||||
kb_used = df['pools'][pool_id]['stats']['kb_used']
|
||||
for pool in df['pools']:
|
||||
if pool['id'] == pool_id:
|
||||
pool_name = pool['name']
|
||||
obj_count = pool['stats']['objects']
|
||||
kb_used = pool['stats']['kb_used']
|
||||
|
||||
self.log.debug('Ceph {} pool (ID {}): {} objects, '
|
||||
'{} kb used'.format(pool_name, pool_id,
|
||||
obj_count, kb_used))
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import collections
|
||||
import glob
|
||||
import json
|
||||
import math
|
||||
|
@ -92,14 +93,14 @@ from charmhelpers.contrib.network.ip import (
|
|||
format_ipv6_addr,
|
||||
is_bridge_member,
|
||||
is_ipv6_disabled,
|
||||
get_relation_ip,
|
||||
)
|
||||
from charmhelpers.contrib.openstack.utils import (
|
||||
config_flags_parser,
|
||||
get_host_ip,
|
||||
git_determine_usr_bin,
|
||||
git_determine_python_path,
|
||||
enable_memcache,
|
||||
snap_install_requested,
|
||||
CompareOpenStackReleases,
|
||||
os_release,
|
||||
)
|
||||
from charmhelpers.core.unitdata import kv
|
||||
|
||||
|
@ -292,7 +293,7 @@ class PostgresqlDBContext(OSContextGenerator):
|
|||
def db_ssl(rdata, ctxt, ssl_dir):
|
||||
if 'ssl_ca' in rdata and ssl_dir:
|
||||
ca_path = os.path.join(ssl_dir, 'db-client.ca')
|
||||
with open(ca_path, 'w') as fh:
|
||||
with open(ca_path, 'wb') as fh:
|
||||
fh.write(b64decode(rdata['ssl_ca']))
|
||||
|
||||
ctxt['database_ssl_ca'] = ca_path
|
||||
|
@ -307,12 +308,12 @@ def db_ssl(rdata, ctxt, ssl_dir):
|
|||
log("Waiting 1m for ssl client cert validity", level=INFO)
|
||||
time.sleep(60)
|
||||
|
||||
with open(cert_path, 'w') as fh:
|
||||
with open(cert_path, 'wb') as fh:
|
||||
fh.write(b64decode(rdata['ssl_cert']))
|
||||
|
||||
ctxt['database_ssl_cert'] = cert_path
|
||||
key_path = os.path.join(ssl_dir, 'db-client.key')
|
||||
with open(key_path, 'w') as fh:
|
||||
with open(key_path, 'wb') as fh:
|
||||
fh.write(b64decode(rdata['ssl_key']))
|
||||
|
||||
ctxt['database_ssl_key'] = key_path
|
||||
|
@ -331,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
|
|||
self.rel_name = rel_name
|
||||
self.interfaces = [self.rel_name]
|
||||
|
||||
def __call__(self):
|
||||
log('Generating template context for ' + self.rel_name, level=DEBUG)
|
||||
ctxt = {}
|
||||
|
||||
def _setup_pki_cache(self):
|
||||
if self.service and self.service_user:
|
||||
# This is required for pki token signing if we don't want /tmp to
|
||||
# be used.
|
||||
|
@ -344,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
|
|||
mkdir(path=cachedir, owner=self.service_user,
|
||||
group=self.service_user, perms=0o700)
|
||||
|
||||
return cachedir
|
||||
return None
|
||||
|
||||
def __call__(self):
|
||||
log('Generating template context for ' + self.rel_name, level=DEBUG)
|
||||
ctxt = {}
|
||||
|
||||
cachedir = self._setup_pki_cache()
|
||||
if cachedir:
|
||||
ctxt['signing_dir'] = cachedir
|
||||
|
||||
for rid in relation_ids(self.rel_name):
|
||||
|
@ -377,6 +384,63 @@ class IdentityServiceContext(OSContextGenerator):
|
|||
# so a missing value just indicates keystone needs
|
||||
# upgrading
|
||||
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
|
||||
ctxt['admin_domain_id'] = rdata.get('service_domain_id')
|
||||
return ctxt
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
class IdentityCredentialsContext(IdentityServiceContext):
|
||||
'''Context for identity-credentials interface type'''
|
||||
|
||||
def __init__(self,
|
||||
service=None,
|
||||
service_user=None,
|
||||
rel_name='identity-credentials'):
|
||||
super(IdentityCredentialsContext, self).__init__(service,
|
||||
service_user,
|
||||
rel_name)
|
||||
|
||||
def __call__(self):
|
||||
log('Generating template context for ' + self.rel_name, level=DEBUG)
|
||||
ctxt = {}
|
||||
|
||||
cachedir = self._setup_pki_cache()
|
||||
if cachedir:
|
||||
ctxt['signing_dir'] = cachedir
|
||||
|
||||
for rid in relation_ids(self.rel_name):
|
||||
self.related = True
|
||||
for unit in related_units(rid):
|
||||
rdata = relation_get(rid=rid, unit=unit)
|
||||
credentials_host = rdata.get('credentials_host')
|
||||
credentials_host = (
|
||||
format_ipv6_addr(credentials_host) or credentials_host
|
||||
)
|
||||
auth_host = rdata.get('auth_host')
|
||||
auth_host = format_ipv6_addr(auth_host) or auth_host
|
||||
svc_protocol = rdata.get('credentials_protocol') or 'http'
|
||||
auth_protocol = rdata.get('auth_protocol') or 'http'
|
||||
api_version = rdata.get('api_version') or '2.0'
|
||||
ctxt.update({
|
||||
'service_port': rdata.get('credentials_port'),
|
||||
'service_host': credentials_host,
|
||||
'auth_host': auth_host,
|
||||
'auth_port': rdata.get('auth_port'),
|
||||
'admin_tenant_name': rdata.get('credentials_project'),
|
||||
'admin_tenant_id': rdata.get('credentials_project_id'),
|
||||
'admin_user': rdata.get('credentials_username'),
|
||||
'admin_password': rdata.get('credentials_password'),
|
||||
'service_protocol': svc_protocol,
|
||||
'auth_protocol': auth_protocol,
|
||||
'api_version': api_version
|
||||
})
|
||||
|
||||
if float(api_version) > 2:
|
||||
ctxt.update({'admin_domain_name':
|
||||
rdata.get('domain')})
|
||||
|
||||
if self.context_complete(ctxt):
|
||||
return ctxt
|
||||
|
||||
return {}
|
||||
|
@ -458,7 +522,7 @@ class AMQPContext(OSContextGenerator):
|
|||
|
||||
ca_path = os.path.join(
|
||||
self.ssl_dir, 'rabbit-client-ca.pem')
|
||||
with open(ca_path, 'w') as fh:
|
||||
with open(ca_path, 'wb') as fh:
|
||||
fh.write(b64decode(ctxt['rabbit_ssl_ca']))
|
||||
ctxt['rabbit_ssl_ca'] = ca_path
|
||||
|
||||
|
@ -554,7 +618,9 @@ class HAProxyContext(OSContextGenerator):
|
|||
"""
|
||||
interfaces = ['cluster']
|
||||
|
||||
def __init__(self, singlenode_mode=False):
|
||||
def __init__(self, singlenode_mode=False,
|
||||
address_types=ADDRESS_TYPES):
|
||||
self.address_types = address_types
|
||||
self.singlenode_mode = singlenode_mode
|
||||
|
||||
def __call__(self):
|
||||
|
@ -563,41 +629,56 @@ class HAProxyContext(OSContextGenerator):
|
|||
if not relation_ids('cluster') and not self.singlenode_mode:
|
||||
return {}
|
||||
|
||||
if config('prefer-ipv6'):
|
||||
addr = get_ipv6_addr(exc_list=[config('vip')])[0]
|
||||
else:
|
||||
addr = get_host_ip(unit_get('private-address'))
|
||||
|
||||
l_unit = local_unit().replace('/', '-')
|
||||
cluster_hosts = {}
|
||||
|
||||
# NOTE(jamespage): build out map of configured network endpoints
|
||||
# and associated backends
|
||||
for addr_type in ADDRESS_TYPES:
|
||||
for addr_type in self.address_types:
|
||||
cfg_opt = 'os-{}-network'.format(addr_type)
|
||||
laddr = get_address_in_network(config(cfg_opt))
|
||||
# NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
|
||||
# than 'internal'
|
||||
if addr_type == 'internal':
|
||||
_addr_map_type = INTERNAL
|
||||
else:
|
||||
_addr_map_type = addr_type
|
||||
# Network spaces aware
|
||||
laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
|
||||
config(cfg_opt))
|
||||
if laddr:
|
||||
netmask = get_netmask_for_address(laddr)
|
||||
cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
|
||||
netmask),
|
||||
'backends': {l_unit: laddr}}
|
||||
cluster_hosts[laddr] = {
|
||||
'network': "{}/{}".format(laddr,
|
||||
netmask),
|
||||
'backends': collections.OrderedDict([(l_unit,
|
||||
laddr)])
|
||||
}
|
||||
for rid in relation_ids('cluster'):
|
||||
for unit in related_units(rid):
|
||||
for unit in sorted(related_units(rid)):
|
||||
# API Charms will need to set {addr_type}-address with
|
||||
# get_relation_ip(addr_type)
|
||||
_laddr = relation_get('{}-address'.format(addr_type),
|
||||
rid=rid, unit=unit)
|
||||
if _laddr:
|
||||
_unit = unit.replace('/', '-')
|
||||
cluster_hosts[laddr]['backends'][_unit] = _laddr
|
||||
|
||||
# NOTE(jamespage) add backend based on private address - this
|
||||
# with either be the only backend or the fallback if no acls
|
||||
# NOTE(jamespage) add backend based on get_relation_ip - this
|
||||
# will either be the only backend or the fallback if no acls
|
||||
# match in the frontend
|
||||
# Network spaces aware
|
||||
addr = get_relation_ip('cluster')
|
||||
cluster_hosts[addr] = {}
|
||||
netmask = get_netmask_for_address(addr)
|
||||
cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
|
||||
'backends': {l_unit: addr}}
|
||||
cluster_hosts[addr] = {
|
||||
'network': "{}/{}".format(addr, netmask),
|
||||
'backends': collections.OrderedDict([(l_unit,
|
||||
addr)])
|
||||
}
|
||||
for rid in relation_ids('cluster'):
|
||||
for unit in related_units(rid):
|
||||
for unit in sorted(related_units(rid)):
|
||||
# API Charms will need to set their private-address with
|
||||
# get_relation_ip('cluster')
|
||||
_laddr = relation_get('private-address',
|
||||
rid=rid, unit=unit)
|
||||
if _laddr:
|
||||
|
@ -628,6 +709,8 @@ class HAProxyContext(OSContextGenerator):
|
|||
ctxt['local_host'] = '127.0.0.1'
|
||||
ctxt['haproxy_host'] = '0.0.0.0'
|
||||
|
||||
ctxt['ipv6_enabled'] = not is_ipv6_disabled()
|
||||
|
||||
ctxt['stat_port'] = '8888'
|
||||
|
||||
db = kv()
|
||||
|
@ -802,8 +885,9 @@ class ApacheSSLContext(OSContextGenerator):
|
|||
else:
|
||||
# Expect cert/key provided in config (currently assumed that ca
|
||||
# uses ip for cn)
|
||||
cn = resolve_address(endpoint_type=INTERNAL)
|
||||
self.configure_cert(cn)
|
||||
for net_type in (INTERNAL, ADMIN, PUBLIC):
|
||||
cn = resolve_address(endpoint_type=net_type)
|
||||
self.configure_cert(cn)
|
||||
|
||||
addresses = self.get_network_addresses()
|
||||
for address, endpoint in addresses:
|
||||
|
@ -843,15 +927,6 @@ class NeutronContext(OSContextGenerator):
|
|||
for pkgs in self.packages:
|
||||
ensure_packages(pkgs)
|
||||
|
||||
def _save_flag_file(self):
|
||||
if self.network_manager == 'quantum':
|
||||
_file = '/etc/nova/quantum_plugin.conf'
|
||||
else:
|
||||
_file = '/etc/nova/neutron_plugin.conf'
|
||||
|
||||
with open(_file, 'wb') as out:
|
||||
out.write(self.plugin + '\n')
|
||||
|
||||
def ovs_ctxt(self):
|
||||
driver = neutron_plugin_attribute(self.plugin, 'driver',
|
||||
self.network_manager)
|
||||
|
@ -996,7 +1071,6 @@ class NeutronContext(OSContextGenerator):
|
|||
flags = config_flags_parser(alchemy_flags)
|
||||
ctxt['neutron_alchemy_flags'] = flags
|
||||
|
||||
self._save_flag_file()
|
||||
return ctxt
|
||||
|
||||
|
||||
|
@ -1176,7 +1250,7 @@ class SubordinateConfigContext(OSContextGenerator):
|
|||
if sub_config and sub_config != '':
|
||||
try:
|
||||
sub_config = json.loads(sub_config)
|
||||
except:
|
||||
except Exception:
|
||||
log('Could not parse JSON from '
|
||||
'subordinate_configuration setting from %s'
|
||||
% rid, level=ERROR)
|
||||
|
@ -1321,8 +1395,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
|
|||
"public_processes": int(math.ceil(self.public_process_weight *
|
||||
total_processes)),
|
||||
"threads": 1,
|
||||
"usr_bin": git_determine_usr_bin(),
|
||||
"python_path": git_determine_python_path(),
|
||||
}
|
||||
return ctxt
|
||||
|
||||
|
@ -1570,6 +1642,82 @@ class InternalEndpointContext(OSContextGenerator):
|
|||
return {'use_internal_endpoints': config('use-internal-endpoints')}
|
||||
|
||||
|
||||
class VolumeAPIContext(InternalEndpointContext):
|
||||
"""Volume API context.
|
||||
|
||||
This context provides information regarding the volume endpoint to use
|
||||
when communicating between services. It determines which version of the
|
||||
API is appropriate for use.
|
||||
|
||||
This value will be determined in the resulting context dictionary
|
||||
returned from calling the VolumeAPIContext object. Information provided
|
||||
by this context is as follows:
|
||||
|
||||
volume_api_version: the volume api version to use, currently
|
||||
'v2' or 'v3'
|
||||
volume_catalog_info: the information to use for a cinder client
|
||||
configuration that consumes API endpoints from the keystone
|
||||
catalog. This is defined as the type:name:endpoint_type string.
|
||||
"""
|
||||
# FIXME(wolsen) This implementation is based on the provider being able
|
||||
# to specify the package version to check but does not guarantee that the
|
||||
# volume service api version selected is available. In practice, it is
|
||||
# quite likely the volume service *is* providing the v3 volume service.
|
||||
# This should be resolved when the service-discovery spec is implemented.
|
||||
def __init__(self, pkg):
|
||||
"""
|
||||
Creates a new VolumeAPIContext for use in determining which version
|
||||
of the Volume API should be used for communication. A package codename
|
||||
should be supplied for determining the currently installed OpenStack
|
||||
version.
|
||||
|
||||
:param pkg: the package codename to use in order to determine the
|
||||
component version (e.g. nova-common). See
|
||||
charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
|
||||
"""
|
||||
super(VolumeAPIContext, self).__init__()
|
||||
self._ctxt = None
|
||||
if not pkg:
|
||||
raise ValueError('package name must be provided in order to '
|
||||
'determine current OpenStack version.')
|
||||
self.pkg = pkg
|
||||
|
||||
@property
|
||||
def ctxt(self):
|
||||
if self._ctxt is not None:
|
||||
return self._ctxt
|
||||
self._ctxt = self._determine_ctxt()
|
||||
return self._ctxt
|
||||
|
||||
def _determine_ctxt(self):
|
||||
"""Determines the Volume API endpoint information.
|
||||
|
||||
Determines the appropriate version of the API that should be used
|
||||
as well as the catalog_info string that would be supplied. Returns
|
||||
a dict containing the volume_api_version and the volume_catalog_info.
|
||||
"""
|
||||
rel = os_release(self.pkg, base='icehouse')
|
||||
version = '2'
|
||||
if CompareOpenStackReleases(rel) >= 'pike':
|
||||
version = '3'
|
||||
|
||||
service_type = 'volumev{version}'.format(version=version)
|
||||
service_name = 'cinderv{version}'.format(version=version)
|
||||
endpoint_type = 'publicURL'
|
||||
if config('use-internal-endpoints'):
|
||||
endpoint_type = 'internalURL'
|
||||
catalog_info = '{type}:{name}:{endpoint}'.format(
|
||||
type=service_type, name=service_name, endpoint=endpoint_type)
|
||||
|
||||
return {
|
||||
'volume_api_version': version,
|
||||
'volume_catalog_info': catalog_info,
|
||||
}
|
||||
|
||||
def __call__(self):
|
||||
return self.ctxt
|
||||
|
||||
|
||||
class AppArmorContext(OSContextGenerator):
|
||||
"""Base class for apparmor contexts."""
|
||||
|
||||
|
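A hedged sketch of consuming VolumeAPIContext from a charm; 'nova-common' is the package example given in the docstring, and the returned keys match _determine_ctxt() above:

# Sketch: resolve the volume API version and catalog string for templating.
from charmhelpers.contrib.openstack.context import VolumeAPIContext

ctxt = VolumeAPIContext('nova-common')()
# e.g. {'volume_api_version': '3',
#       'volume_catalog_info': 'volumev3:cinderv3:publicURL'}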
@ -1705,3 +1853,30 @@ class MemcacheContext(OSContextGenerator):
|
|||
ctxt['memcache_server_formatted'],
|
||||
ctxt['memcache_port'])
|
||||
return ctxt
|
||||
|
||||
|
||||
class EnsureDirContext(OSContextGenerator):
|
||||
'''
|
||||
Serves as a generic context to create a directory as a side-effect.
|
||||
|
||||
Useful for software that supports drop-in files (.d) in conjunction
|
||||
with config option-based templates. Examples include:
|
||||
* OpenStack oslo.policy drop-in files;
|
||||
* systemd drop-in config files;
|
||||
* other software that supports overriding defaults with .d files
|
||||
|
||||
Another use-case is when a subordinate generates a configuration for
|
||||
primary to render in a separate directory.
|
||||
|
||||
Some software requires a user to create a target directory to be
|
||||
scanned for drop-in files with a specific format. This is why this
|
||||
context is needed to do that before rendering a template.
|
||||
'''
|
||||
|
||||
def __init__(self, dirname):
|
||||
'''Used merely to ensure that a given directory exists.'''
|
||||
self.dirname = dirname
|
||||
|
||||
def __call__(self):
|
||||
mkdir(self.dirname)
|
||||
return {}
|
||||
|
|
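A short sketch of registering EnsureDirContext so a drop-in directory exists before the template that needs it is rendered; the paths, template set-up and release are assumptions:

# Illustrative registration; the directory and config file are hypothetical.
from charmhelpers.contrib.openstack.context import EnsureDirContext
from charmhelpers.contrib.openstack.templating import OSConfigRenderer

configs = OSConfigRenderer(templates_dir='templates/',
                           openstack_release='pike')
configs.register('/etc/example/policy.d/10-charm.yaml',
                 [EnsureDirContext('/etc/example/policy.d/')])
configs.write_all()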
|
@ -9,7 +9,7 @@
|
|||
CRITICAL=0
|
||||
NOTACTIVE=''
|
||||
LOGFILE=/var/log/nagios/check_haproxy.log
|
||||
AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
|
||||
AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
|
||||
|
||||
typeset -i N_INSTANCES=0
|
||||
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
CURRQthrsh=0
|
||||
MAXQthrsh=100
|
||||
|
||||
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
|
||||
AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
|
||||
|
||||
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
|
||||
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
Helpers for high availability.
|
||||
"""
|
||||
|
||||
import json
|
||||
|
||||
import re
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
|
@ -32,6 +34,7 @@ from charmhelpers.core.hookenv import (
|
|||
config,
|
||||
status_set,
|
||||
DEBUG,
|
||||
WARNING,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
|
@ -40,6 +43,23 @@ from charmhelpers.core.host import (
|
|||
|
||||
from charmhelpers.contrib.openstack.ip import (
|
||||
resolve_address,
|
||||
is_ipv6,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.network.ip import (
|
||||
get_iface_for_address,
|
||||
get_netmask_for_address,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.hahelpers.cluster import (
|
||||
get_hacluster_config
|
||||
)
|
||||
|
||||
JSON_ENCODE_OPTIONS = dict(
|
||||
sort_keys=True,
|
||||
allow_nan=False,
|
||||
indent=None,
|
||||
separators=(',', ':'),
|
||||
)
|
||||
|
||||
|
||||
|
@ -53,8 +73,8 @@ class DNSHAException(Exception):
|
|||
def update_dns_ha_resource_params(resources, resource_params,
|
||||
relation_id=None,
|
||||
crm_ocf='ocf:maas:dns'):
|
||||
""" Check for os-*-hostname settings and update resource dictionaries for
|
||||
the HA relation.
|
||||
""" Configure DNS-HA resources based on provided configuration and
|
||||
update resource dictionaries for the HA relation.
|
||||
|
||||
@param resources: Pointer to dictionary of resources.
|
||||
Usually instantiated in ha_joined().
|
||||
|
@ -64,61 +84,20 @@ def update_dns_ha_resource_params(resources, resource_params,
|
|||
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
|
||||
DNS HA
|
||||
"""
|
||||
|
||||
# Validate the charm environment for DNS HA
|
||||
assert_charm_supports_dns_ha()
|
||||
|
||||
settings = ['os-admin-hostname', 'os-internal-hostname',
|
||||
'os-public-hostname', 'os-access-hostname']
|
||||
|
||||
# Check which DNS settings are set and update dictionaries
|
||||
hostname_group = []
|
||||
for setting in settings:
|
||||
hostname = config(setting)
|
||||
if hostname is None:
|
||||
log('DNS HA: Hostname setting {} is None. Ignoring.'
|
||||
''.format(setting),
|
||||
DEBUG)
|
||||
continue
|
||||
m = re.search('os-(.+?)-hostname', setting)
|
||||
if m:
|
||||
networkspace = m.group(1)
|
||||
else:
|
||||
msg = ('Unexpected DNS hostname setting: {}. '
|
||||
'Cannot determine network space name'
|
||||
''.format(setting))
|
||||
status_set('blocked', msg)
|
||||
raise DNSHAException(msg)
|
||||
|
||||
hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
|
||||
if hostname_key in hostname_group:
|
||||
log('DNS HA: Resource {}: {} already exists in '
|
||||
'hostname group - skipping'.format(hostname_key, hostname),
|
||||
DEBUG)
|
||||
continue
|
||||
|
||||
hostname_group.append(hostname_key)
|
||||
resources[hostname_key] = crm_ocf
|
||||
resource_params[hostname_key] = (
|
||||
'params fqdn="{}" ip_address="{}" '
|
||||
''.format(hostname, resolve_address(endpoint_type=networkspace,
|
||||
override=False)))
|
||||
|
||||
if len(hostname_group) >= 1:
|
||||
log('DNS HA: Hostname group is set with {} as members. '
|
||||
'Informing the ha relation'.format(' '.join(hostname_group)),
|
||||
DEBUG)
|
||||
relation_set(relation_id=relation_id, groups={
|
||||
'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
|
||||
else:
|
||||
msg = 'DNS HA: Hostname group has no members.'
|
||||
status_set('blocked', msg)
|
||||
raise DNSHAException(msg)
|
||||
_relation_data = {'resources': {}, 'resource_params': {}}
|
||||
update_hacluster_dns_ha(charm_name(),
|
||||
_relation_data,
|
||||
crm_ocf)
|
||||
resources.update(_relation_data['resources'])
|
||||
resource_params.update(_relation_data['resource_params'])
|
||||
relation_set(relation_id=relation_id, groups=_relation_data['groups'])
|
||||
|
||||
|
||||
def assert_charm_supports_dns_ha():
|
||||
"""Validate prerequisites for DNS HA
|
||||
The MAAS client is only available on Xenial or greater
|
||||
|
||||
:raises DNSHAException: if release is < 16.04
|
||||
"""
|
||||
if lsb_release().get('DISTRIB_RELEASE') < '16.04':
|
||||
msg = ('DNS HA is only supported on 16.04 and greater '
|
||||
|
@ -137,3 +116,150 @@ def expect_ha():
|
|||
@returns boolean
|
||||
"""
|
||||
return config('vip') or config('dns-ha')
|
||||
|
||||
|
||||
def generate_ha_relation_data(service):
|
||||
""" Generate relation data for ha relation
|
||||
|
||||
Based on configuration options and unit interfaces, generate a json
|
||||
encoded dict of relation data items for the hacluster relation,
|
||||
providing configuration for DNS HA or VIP's + haproxy clone sets.
|
||||
|
||||
@returns dict: json encoded data for use with relation_set
|
||||
"""
|
||||
_haproxy_res = 'res_{}_haproxy'.format(service)
|
||||
_relation_data = {
|
||||
'resources': {
|
||||
_haproxy_res: 'lsb:haproxy',
|
||||
},
|
||||
'resource_params': {
|
||||
_haproxy_res: 'op monitor interval="5s"'
|
||||
},
|
||||
'init_services': {
|
||||
_haproxy_res: 'haproxy'
|
||||
},
|
||||
'clones': {
|
||||
'cl_{}_haproxy'.format(service): _haproxy_res
|
||||
},
|
||||
}
|
||||
|
||||
if config('dns-ha'):
|
||||
update_hacluster_dns_ha(service, _relation_data)
|
||||
else:
|
||||
update_hacluster_vip(service, _relation_data)
|
||||
|
||||
return {
|
||||
'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
|
||||
for k, v in _relation_data.items() if v
|
||||
}
|
||||
|
||||
|
||||
def update_hacluster_dns_ha(service, relation_data,
|
||||
crm_ocf='ocf:maas:dns'):
|
||||
""" Configure DNS-HA resources based on provided configuration
|
||||
|
||||
@param service: Name of the service being configured
|
||||
@param relation_data: Pointer to dictionary of relation data.
|
||||
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
|
||||
DNS HA
|
||||
"""
|
||||
# Validate the charm environment for DNS HA
|
||||
assert_charm_supports_dns_ha()
|
||||
|
||||
settings = ['os-admin-hostname', 'os-internal-hostname',
|
||||
'os-public-hostname', 'os-access-hostname']
|
||||
|
||||
# Check which DNS settings are set and update dictionaries
|
||||
hostname_group = []
|
||||
for setting in settings:
|
||||
hostname = config(setting)
|
||||
if hostname is None:
|
||||
log('DNS HA: Hostname setting {} is None. Ignoring.'
|
||||
''.format(setting),
|
||||
DEBUG)
|
||||
continue
|
||||
m = re.search('os-(.+?)-hostname', setting)
|
||||
if m:
|
||||
endpoint_type = m.group(1)
|
||||
# resolve_address's ADDRESS_MAP uses 'int' not 'internal'
|
||||
if endpoint_type == 'internal':
|
||||
endpoint_type = 'int'
|
||||
else:
|
||||
msg = ('Unexpected DNS hostname setting: {}. '
|
||||
'Cannot determine endpoint_type name'
|
||||
''.format(setting))
|
||||
status_set('blocked', msg)
|
||||
raise DNSHAException(msg)
|
||||
|
||||
hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
|
||||
if hostname_key in hostname_group:
|
||||
log('DNS HA: Resource {}: {} already exists in '
|
||||
'hostname group - skipping'.format(hostname_key, hostname),
|
||||
DEBUG)
|
||||
continue
|
||||
|
||||
hostname_group.append(hostname_key)
|
||||
relation_data['resources'][hostname_key] = crm_ocf
|
||||
relation_data['resource_params'][hostname_key] = (
|
||||
'params fqdn="{}" ip_address="{}"'
|
||||
.format(hostname, resolve_address(endpoint_type=endpoint_type,
|
||||
override=False)))
|
||||
|
||||
if len(hostname_group) >= 1:
|
||||
log('DNS HA: Hostname group is set with {} as members. '
|
||||
'Informing the ha relation'.format(' '.join(hostname_group)),
|
||||
DEBUG)
|
||||
relation_data['groups'] = {
|
||||
'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
|
||||
}
|
||||
else:
|
||||
msg = 'DNS HA: Hostname group has no members.'
|
||||
status_set('blocked', msg)
|
||||
raise DNSHAException(msg)
|
||||
|
||||
|
||||
def update_hacluster_vip(service, relation_data):
|
||||
""" Configure VIP resources based on provided configuration
|
||||
|
||||
@param service: Name of the service being configured
|
||||
@param relation_data: Pointer to dictionary of relation data.
|
||||
"""
|
||||
cluster_config = get_hacluster_config()
|
||||
vip_group = []
|
||||
for vip in cluster_config['vip'].split():
|
||||
if is_ipv6(vip):
|
||||
res_neutron_vip = 'ocf:heartbeat:IPv6addr'
|
||||
vip_params = 'ipv6addr'
|
||||
else:
|
||||
res_neutron_vip = 'ocf:heartbeat:IPaddr2'
|
||||
vip_params = 'ip'
|
||||
|
||||
iface = (get_iface_for_address(vip) or
|
||||
config('vip_iface'))
|
||||
netmask = (get_netmask_for_address(vip) or
|
||||
config('vip_cidr'))
|
||||
|
||||
if iface is not None:
|
||||
vip_key = 'res_{}_{}_vip'.format(service, iface)
|
||||
if vip_key in vip_group:
|
||||
if vip not in relation_data['resource_params'][vip_key]:
|
||||
vip_key = '{}_{}'.format(vip_key, vip_params)
|
||||
else:
|
||||
log("Resource '%s' (vip='%s') already exists in "
|
||||
"vip group - skipping" % (vip_key, vip), WARNING)
|
||||
continue
|
||||
|
||||
relation_data['resources'][vip_key] = res_neutron_vip
|
||||
relation_data['resource_params'][vip_key] = (
|
||||
'params {ip}="{vip}" cidr_netmask="{netmask}" '
|
||||
'nic="{iface}"'.format(ip=vip_params,
|
||||
vip=vip,
|
||||
iface=iface,
|
||||
netmask=netmask)
|
||||
)
|
||||
vip_group.append(vip_key)
|
||||
|
||||
if len(vip_group) >= 1:
|
||||
relation_data['groups'] = {
|
||||
'grp_{}_vips'.format(service): ' '.join(vip_group)
|
||||
}
|
||||
|
|
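A minimal sketch of a charm's ha-relation-joined hook using the new helper; the service name is an assumption for illustration:

# Illustrative ha-relation-joined handler built on generate_ha_relation_data().
from charmhelpers.contrib.openstack.ha.utils import generate_ha_relation_data
from charmhelpers.core.hookenv import relation_set

def ha_joined(relation_id=None):
    settings = generate_ha_relation_data('glance-simplestreams-sync')
    relation_set(relation_id=relation_id, **settings)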
|
@ -59,18 +59,13 @@ def determine_dkms_package():
|
|||
|
||||
|
||||
def quantum_plugins():
|
||||
from charmhelpers.contrib.openstack import context
|
||||
return {
|
||||
'ovs': {
|
||||
'config': '/etc/quantum/plugins/openvswitch/'
|
||||
'ovs_quantum_plugin.ini',
|
||||
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
|
||||
'OVSQuantumPluginV2',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=QUANTUM_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': ['quantum-plugin-openvswitch-agent'],
|
||||
'packages': [determine_dkms_package(),
|
||||
['quantum-plugin-openvswitch-agent']],
|
||||
|
@ -82,11 +77,7 @@ def quantum_plugins():
|
|||
'config': '/etc/quantum/plugins/nicira/nvp.ini',
|
||||
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
|
||||
'QuantumPlugin.NvpPluginV2',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=QUANTUM_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': [],
|
||||
'packages': [],
|
||||
'server_packages': ['quantum-server',
|
||||
|
@ -100,7 +91,6 @@ NEUTRON_CONF_DIR = '/etc/neutron'
|
|||
|
||||
|
||||
def neutron_plugins():
|
||||
from charmhelpers.contrib.openstack import context
|
||||
release = os_release('nova-common')
|
||||
plugins = {
|
||||
'ovs': {
|
||||
|
@ -108,11 +98,7 @@ def neutron_plugins():
|
|||
'ovs_neutron_plugin.ini',
|
||||
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
|
||||
'OVSNeutronPluginV2',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': ['neutron-plugin-openvswitch-agent'],
|
||||
'packages': [determine_dkms_package(),
|
||||
['neutron-plugin-openvswitch-agent']],
|
||||
|
@ -124,11 +110,7 @@ def neutron_plugins():
|
|||
'config': '/etc/neutron/plugins/nicira/nvp.ini',
|
||||
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
|
||||
'NeutronPlugin.NvpPluginV2',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': [],
|
||||
'packages': [],
|
||||
'server_packages': ['neutron-server',
|
||||
|
@ -138,11 +120,7 @@ def neutron_plugins():
|
|||
'nsx': {
|
||||
'config': '/etc/neutron/plugins/vmware/nsx.ini',
|
||||
'driver': 'vmware',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': [],
|
||||
'packages': [],
|
||||
'server_packages': ['neutron-server',
|
||||
|
@ -152,11 +130,7 @@ def neutron_plugins():
|
|||
'n1kv': {
|
||||
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
|
||||
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': [],
|
||||
'packages': [determine_dkms_package(),
|
||||
['neutron-plugin-cisco']],
|
||||
|
@ -167,11 +141,7 @@ def neutron_plugins():
|
|||
'Calico': {
|
||||
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
|
||||
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': ['calico-felix',
|
||||
'bird',
|
||||
'neutron-dhcp-agent',
|
||||
|
@ -189,11 +159,7 @@ def neutron_plugins():
|
|||
'vsp': {
|
||||
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
|
||||
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': [],
|
||||
'packages': [],
|
||||
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
|
||||
|
@ -203,10 +169,7 @@ def neutron_plugins():
|
|||
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
|
||||
'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
|
||||
'.plumgrid_plugin.NeutronPluginPLUMgridV2'),
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('database-user'),
|
||||
database=config('database'),
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': [],
|
||||
'packages': ['plumgrid-lxc',
|
||||
'iovisor-dkms'],
|
||||
|
@ -217,11 +180,7 @@ def neutron_plugins():
|
|||
'midonet': {
|
||||
'config': '/etc/neutron/plugins/midonet/midonet.ini',
|
||||
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
|
||||
'contexts': [
|
||||
context.SharedDBContext(user=config('neutron-database-user'),
|
||||
database=config('neutron-database'),
|
||||
relation_prefix='neutron',
|
||||
ssl_dir=NEUTRON_CONF_DIR)],
|
||||
'contexts': [],
|
||||
'services': [],
|
||||
'packages': [determine_dkms_package()],
|
||||
'server_packages': ['neutron-server',
|
||||
|
|
|
@ -18,7 +18,7 @@ rbd default features = {{ rbd_features }}

[client]
{% if rbd_client_cache_settings -%}
{% for key, value in rbd_client_cache_settings.iteritems() -%}
{% for key, value in rbd_client_cache_settings.items() -%}
{{ key }} = {{ value }}
{% endfor -%}
{%- endif %}
{%- endif %}

@ -17,22 +17,22 @@ defaults

{%- if haproxy_queue_timeout %}
    timeout queue {{ haproxy_queue_timeout }}
{%- else %}
    timeout queue 5000
    timeout queue 9000
{%- endif %}
{%- if haproxy_connect_timeout %}
    timeout connect {{ haproxy_connect_timeout }}
{%- else %}
    timeout connect 5000
    timeout connect 9000
{%- endif %}
{%- if haproxy_client_timeout %}
    timeout client {{ haproxy_client_timeout }}
{%- else %}
    timeout client 30000
    timeout client 90000
{%- endif %}
{%- if haproxy_server_timeout %}
    timeout server {{ haproxy_server_timeout }}
{%- else %}
    timeout server 30000
    timeout server 90000
{%- endif %}

listen stats

@ -48,7 +48,9 @@ listen stats

{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
    bind *:{{ ports[0] }}
    {% if ipv6_enabled -%}
    bind :::{{ ports[0] }}
    {% endif -%}
    {% for frontend in frontends -%}
    acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
    use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}

@ -0,0 +1,6 @@

[cache]
{% if memcache_url %}
enabled = true
backend = oslo_cache.memcache_pool
memcache_servers = {{ memcache_url }}
{% endif %}
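A rough sketch of how this new section renders; the memcache_url value is hypothetical (the charm itself derives it from its own context generators):

    # Illustrative only: render the new [cache] section with a made-up URL.
    import jinja2
    section = ("[cache]\n"
               "{% if memcache_url %}"
               "enabled = true\n"
               "backend = oslo_cache.memcache_pool\n"
               "memcache_servers = {{ memcache_url }}\n"
               "{% endif %}")
    print(jinja2.Template(section).render(memcache_url='127.0.0.1:11211'))
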
@ -15,9 +15,6 @@ Listen {{ public_port }}
|
|||
{% if port -%}
|
||||
<VirtualHost *:{{ port }}>
|
||||
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
|
||||
{% if python_path -%}
|
||||
python-path={{ python_path }} \
|
||||
{% endif -%}
|
||||
display-name=%{GROUP}
|
||||
WSGIProcessGroup {{ service_name }}
|
||||
WSGIScriptAlias / {{ script }}
|
||||
|
@ -29,7 +26,7 @@ Listen {{ public_port }}
|
|||
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||
|
||||
<Directory {{ usr_bin }}>
|
||||
<Directory /usr/bin>
|
||||
<IfVersion >= 2.4>
|
||||
Require all granted
|
||||
</IfVersion>
|
||||
|
@ -44,9 +41,6 @@ Listen {{ public_port }}
|
|||
{% if admin_port -%}
|
||||
<VirtualHost *:{{ admin_port }}>
|
||||
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
|
||||
{% if python_path -%}
|
||||
python-path={{ python_path }} \
|
||||
{% endif -%}
|
||||
display-name=%{GROUP}
|
||||
WSGIProcessGroup {{ service_name }}-admin
|
||||
WSGIScriptAlias / {{ admin_script }}
|
||||
|
@ -58,7 +52,7 @@ Listen {{ public_port }}
|
|||
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||
|
||||
<Directory {{ usr_bin }}>
|
||||
<Directory /usr/bin>
|
||||
<IfVersion >= 2.4>
|
||||
Require all granted
|
||||
</IfVersion>
|
||||
|
@ -73,9 +67,6 @@ Listen {{ public_port }}
|
|||
{% if public_port -%}
|
||||
<VirtualHost *:{{ public_port }}>
|
||||
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
|
||||
{% if python_path -%}
|
||||
python-path={{ python_path }} \
|
||||
{% endif -%}
|
||||
display-name=%{GROUP}
|
||||
WSGIProcessGroup {{ service_name }}-public
|
||||
WSGIScriptAlias / {{ public_script }}
|
||||
|
@ -87,7 +78,7 @@ Listen {{ public_port }}
|
|||
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||
|
||||
<Directory {{ usr_bin }}>
|
||||
<Directory /usr/bin>
|
||||
<IfVersion >= 2.4>
|
||||
Require all granted
|
||||
</IfVersion>
|
||||
|
|
|
@ -93,7 +93,8 @@ class OSConfigTemplate(object):

    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those generators.
    """
    def __init__(self, config_file, contexts):

    def __init__(self, config_file, contexts, config_template=None):
        self.config_file = config_file

        if hasattr(contexts, '__call__'):

@ -103,6 +104,8 @@ class OSConfigTemplate(object):

        self._complete_contexts = []

        self.config_template = config_template

    def context(self):
        ctxt = {}
        for context in self.contexts:

@ -124,6 +127,11 @@ class OSConfigTemplate(object):
        self.context()
        return self._complete_contexts

    @property
    def is_string_template(self):
        """:returns: Boolean if this instance is a template initialised with a string"""
        return self.config_template is not None


class OSConfigRenderer(object):
    """

@ -148,6 +156,10 @@ class OSConfigRenderer(object):
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
                         contexts=[context.ExtraPolicyContext()
                                   context.KeystoneContext()],
                         config_template=hookenv.config('extra-policy'))
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs

@ -218,14 +230,23 @@ class OSConfigRenderer(object):
        else:
            apt_install('python3-jinja2')

    def register(self, config_file, contexts):
    def register(self, config_file, contexts, config_template=None):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        config_template can be used to load a template from a string instead of
        using template loaders and template files.
        :param config_file (str): a path where a config file will be rendered
        :param contexts (list): a list of context dictionaries with kv pairs
        :param config_template (str): an optional template string to use
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)
        self.templates[config_file] = OSConfigTemplate(
            config_file=config_file,
            contexts=contexts,
            config_template=config_template
        )
        log('Registered config file: {}'.format(config_file),
            level=INFO)

    def _get_tmpl_env(self):
        if not self._tmpl_env:
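A rough, hypothetical sketch of how a charm could use the new config_template argument; the context classes here are stand-ins mirroring the docstring example above and may be charm-local rather than charm-helpers classes:

    # Illustrative only: register a config whose template text comes from a
    # charm config option instead of a file in templates/.
    from charmhelpers.contrib.openstack import templating, context
    from charmhelpers.core import hookenv

    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='pike')
    configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
                     contexts=[context.IdentityServiceContext()],
                     config_template=hookenv.config('extra-policy'))
    configs.write('/etc/keystone/policy.d/extra.cfg')
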
@ -235,32 +256,58 @@ class OSConfigRenderer(object):
|
|||
def _get_template(self, template):
|
||||
self._get_tmpl_env()
|
||||
template = self._tmpl_env.get_template(template)
|
||||
log('Loaded template from %s' % template.filename, level=INFO)
|
||||
log('Loaded template from {}'.format(template.filename),
|
||||
level=INFO)
|
||||
return template
|
||||
|
||||
def _get_template_from_string(self, ostmpl):
|
||||
'''
|
||||
Get a jinja2 template object from a string.
|
||||
:param ostmpl: OSConfigTemplate to use as a data source.
|
||||
'''
|
||||
self._get_tmpl_env()
|
||||
template = self._tmpl_env.from_string(ostmpl.config_template)
|
||||
log('Loaded a template from a string for {}'.format(
|
||||
ostmpl.config_file),
|
||||
level=INFO)
|
||||
return template
|
||||
|
||||
def render(self, config_file):
|
||||
if config_file not in self.templates:
|
||||
log('Config not registered: %s' % config_file, level=ERROR)
|
||||
log('Config not registered: {}'.format(config_file), level=ERROR)
|
||||
raise OSConfigException
|
||||
ctxt = self.templates[config_file].context()
|
||||
|
||||
_tmpl = os.path.basename(config_file)
|
||||
try:
|
||||
template = self._get_template(_tmpl)
|
||||
except exceptions.TemplateNotFound:
|
||||
# if no template is found with basename, try looking for it
|
||||
# using a munged full path, eg:
|
||||
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
|
||||
_tmpl = '_'.join(config_file.split('/')[1:])
|
||||
ostmpl = self.templates[config_file]
|
||||
ctxt = ostmpl.context()
|
||||
|
||||
if ostmpl.is_string_template:
|
||||
template = self._get_template_from_string(ostmpl)
|
||||
log('Rendering from a string template: '
|
||||
'{}'.format(config_file),
|
||||
level=INFO)
|
||||
else:
|
||||
_tmpl = os.path.basename(config_file)
|
||||
try:
|
||||
template = self._get_template(_tmpl)
|
||||
except exceptions.TemplateNotFound as e:
|
||||
log('Could not load template from %s by %s or %s.' %
|
||||
(self.templates_dir, os.path.basename(config_file), _tmpl),
|
||||
level=ERROR)
|
||||
raise e
|
||||
except exceptions.TemplateNotFound:
|
||||
# if no template is found with basename, try looking
|
||||
# for it using a munged full path, eg:
|
||||
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
|
||||
_tmpl = '_'.join(config_file.split('/')[1:])
|
||||
try:
|
||||
template = self._get_template(_tmpl)
|
||||
except exceptions.TemplateNotFound as e:
|
||||
log('Could not load template from {} by {} or {}.'
|
||||
''.format(
|
||||
self.templates_dir,
|
||||
os.path.basename(config_file),
|
||||
_tmpl
|
||||
),
|
||||
level=ERROR)
|
||||
raise e
|
||||
|
||||
log('Rendering from template: %s' % _tmpl, level=INFO)
|
||||
log('Rendering from template: {}'.format(config_file),
|
||||
level=INFO)
|
||||
return template.render(ctxt)
|
||||
|
||||
def write(self, config_file):
|
||||
|
@ -272,6 +319,8 @@ class OSConfigRenderer(object):
|
|||
raise OSConfigException
|
||||
|
||||
_out = self.render(config_file)
|
||||
if six.PY3:
|
||||
_out = _out.encode('UTF-8')
|
||||
|
||||
with open(config_file, 'wb') as out:
|
||||
out.write(_out)
|
||||
|
|
|
@ -23,7 +23,6 @@ import sys
|
|||
import re
|
||||
import itertools
|
||||
import functools
|
||||
import shutil
|
||||
|
||||
import six
|
||||
import traceback
|
||||
|
@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
|
|||
related_units,
|
||||
relation_ids,
|
||||
relation_set,
|
||||
service_name,
|
||||
status_set,
|
||||
hook_name,
|
||||
application_version_set,
|
||||
|
@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
|
|||
port_has_listener,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.python.packages import (
|
||||
pip_create_virtualenv,
|
||||
pip_install,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
lsb_release,
|
||||
mounts,
|
||||
|
@ -84,7 +77,6 @@ from charmhelpers.core.host import (
|
|||
)
|
||||
from charmhelpers.fetch import (
|
||||
apt_cache,
|
||||
install_remote,
|
||||
import_key as fetch_import_key,
|
||||
add_source as fetch_add_source,
|
||||
SourceConfigError,
|
||||
|
@ -95,7 +87,7 @@ from charmhelpers.fetch import (
|
|||
from charmhelpers.fetch.snap import (
|
||||
snap_install,
|
||||
snap_refresh,
|
||||
SNAP_CHANNELS,
|
||||
valid_snap_channel,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
||||
|
@ -140,6 +132,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
|||
('yakkety', 'newton'),
|
||||
('zesty', 'ocata'),
|
||||
('artful', 'pike'),
|
||||
('bionic', 'queens'),
|
||||
])
|
||||
|
||||
|
||||
|
@ -157,6 +150,7 @@ OPENSTACK_CODENAMES = OrderedDict([
|
|||
('2016.2', 'newton'),
|
||||
('2017.1', 'ocata'),
|
||||
('2017.2', 'pike'),
|
||||
('2018.1', 'queens'),
|
||||
])
|
||||
|
||||
# The ugly duckling - must list releases oldest to newest
|
||||
|
@ -187,6 +181,8 @@ SWIFT_CODENAMES = OrderedDict([
|
|||
['2.11.0', '2.12.0', '2.13.0']),
|
||||
('pike',
|
||||
['2.13.0', '2.15.0']),
|
||||
('queens',
|
||||
['2.16.0', '2.17.0']),
|
||||
])
|
||||
|
||||
# >= Liberty version->codename mapping
|
||||
|
@ -274,27 +270,6 @@ PACKAGE_CODENAMES = {
|
|||
]),
|
||||
}
|
||||
|
||||
GIT_DEFAULT_REPOS = {
|
||||
'requirements': 'git://github.com/openstack/requirements',
|
||||
'cinder': 'git://github.com/openstack/cinder',
|
||||
'glance': 'git://github.com/openstack/glance',
|
||||
'horizon': 'git://github.com/openstack/horizon',
|
||||
'keystone': 'git://github.com/openstack/keystone',
|
||||
'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
|
||||
'neutron': 'git://github.com/openstack/neutron',
|
||||
'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
|
||||
'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
|
||||
'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
|
||||
'nova': 'git://github.com/openstack/nova',
|
||||
}
|
||||
|
||||
GIT_DEFAULT_BRANCHES = {
|
||||
'liberty': 'stable/liberty',
|
||||
'mitaka': 'stable/mitaka',
|
||||
'newton': 'stable/newton',
|
||||
'master': 'master',
|
||||
}
|
||||
|
||||
DEFAULT_LOOPBACK_SIZE = '5G'
|
||||
|
||||
|
||||
|
@ -388,6 +363,8 @@ def get_swift_codename(version):
|
|||
releases = UBUNTU_OPENSTACK_RELEASE
|
||||
release = [k for k, v in six.iteritems(releases) if codename in v]
|
||||
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
|
||||
if six.PY3:
|
||||
ret = ret.decode('UTF-8')
|
||||
if codename in ret or release[0] in ret:
|
||||
return codename
|
||||
elif len(codenames) == 1:
|
||||
|
@ -412,6 +389,8 @@ def get_os_codename_package(package, fatal=True):
|
|||
cmd = ['snap', 'list', package]
|
||||
try:
|
||||
out = subprocess.check_output(cmd)
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
except subprocess.CalledProcessError as e:
|
||||
return None
|
||||
lines = out.split('\n')
|
||||
|
@ -426,7 +405,7 @@ def get_os_codename_package(package, fatal=True):
|
|||
|
||||
try:
|
||||
pkg = cache[package]
|
||||
except:
|
||||
except Exception:
|
||||
if not fatal:
|
||||
return None
|
||||
# the package is unknown to the current apt cache.
|
||||
|
@ -522,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
|
|||
if _os_rel:
|
||||
return _os_rel
|
||||
_os_rel = (
|
||||
git_os_codename_install_source(config('openstack-origin-git')) or
|
||||
get_os_codename_package(package, fatal=False) or
|
||||
get_os_codename_install_source(config('openstack-origin')) or
|
||||
base)
|
||||
|
@ -579,6 +557,9 @@ def configure_installation_source(source_plus_key):
|
|||
Note that the behaviour on error is to log the error to the juju log and
|
||||
then call sys.exit(1).
|
||||
"""
|
||||
if source_plus_key.startswith('snap'):
|
||||
# Do nothing for snap installs
|
||||
return
|
||||
# extract the key if there is one, denoted by a '|' in the rel
|
||||
source, key = get_source_and_pgp_key(source_plus_key)
|
||||
|
||||
|
@ -615,7 +596,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
|
|||
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
|
||||
if not os.path.exists(os.path.dirname(juju_rc_path)):
|
||||
os.mkdir(os.path.dirname(juju_rc_path))
|
||||
with open(juju_rc_path, 'wb') as rc_script:
|
||||
with open(juju_rc_path, 'wt') as rc_script:
|
||||
rc_script.write(
|
||||
"#!/bin/bash\n")
|
||||
[rc_script.write('export %s=%s\n' % (u, p))
|
||||
|
@ -645,11 +626,6 @@ def openstack_upgrade_available(package):
|
|||
else:
|
||||
avail_vers = get_os_version_install_source(src)
|
||||
apt.init()
|
||||
if "swift" in package:
|
||||
major_cur_vers = cur_vers.split('.', 1)[0]
|
||||
major_avail_vers = avail_vers.split('.', 1)[0]
|
||||
major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
|
||||
return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
|
||||
return apt.version_compare(avail_vers, cur_vers) == 1
|
||||
|
||||
|
||||
|
@ -760,417 +736,6 @@ def os_requires_version(ostack_release, pkg):
|
|||
return wrap
|
||||
|
||||
|
||||
def git_install_requested():
|
||||
"""
|
||||
Returns true if openstack-origin-git is specified.
|
||||
"""
|
||||
return config('openstack-origin-git') is not None
|
||||
|
||||
|
||||
def git_os_codename_install_source(projects_yaml):
|
||||
"""
|
||||
Returns OpenStack codename of release being installed from source.
|
||||
"""
|
||||
if git_install_requested():
|
||||
projects = _git_yaml_load(projects_yaml)
|
||||
|
||||
if projects in GIT_DEFAULT_BRANCHES.keys():
|
||||
if projects == 'master':
|
||||
return 'ocata'
|
||||
return projects
|
||||
|
||||
if 'release' in projects:
|
||||
if projects['release'] == 'master':
|
||||
return 'ocata'
|
||||
return projects['release']
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def git_default_repos(projects_yaml):
|
||||
"""
|
||||
Returns default repos if a default openstack-origin-git value is specified.
|
||||
"""
|
||||
service = service_name()
|
||||
core_project = service
|
||||
|
||||
for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
|
||||
if projects_yaml == default:
|
||||
|
||||
# add the requirements repo first
|
||||
repo = {
|
||||
'name': 'requirements',
|
||||
'repository': GIT_DEFAULT_REPOS['requirements'],
|
||||
'branch': branch,
|
||||
}
|
||||
repos = [repo]
|
||||
|
||||
# neutron-* and nova-* charms require some additional repos
|
||||
if service in ['neutron-api', 'neutron-gateway',
|
||||
'neutron-openvswitch']:
|
||||
core_project = 'neutron'
|
||||
if service == 'neutron-api':
|
||||
repo = {
|
||||
'name': 'networking-hyperv',
|
||||
'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
|
||||
'branch': branch,
|
||||
}
|
||||
repos.append(repo)
|
||||
for project in ['neutron-fwaas', 'neutron-lbaas',
|
||||
'neutron-vpnaas', 'nova']:
|
||||
repo = {
|
||||
'name': project,
|
||||
'repository': GIT_DEFAULT_REPOS[project],
|
||||
'branch': branch,
|
||||
}
|
||||
repos.append(repo)
|
||||
|
||||
elif service in ['nova-cloud-controller', 'nova-compute']:
|
||||
core_project = 'nova'
|
||||
repo = {
|
||||
'name': 'neutron',
|
||||
'repository': GIT_DEFAULT_REPOS['neutron'],
|
||||
'branch': branch,
|
||||
}
|
||||
repos.append(repo)
|
||||
elif service == 'openstack-dashboard':
|
||||
core_project = 'horizon'
|
||||
|
||||
# finally add the current service's core project repo
|
||||
repo = {
|
||||
'name': core_project,
|
||||
'repository': GIT_DEFAULT_REPOS[core_project],
|
||||
'branch': branch,
|
||||
}
|
||||
repos.append(repo)
|
||||
|
||||
return yaml.dump(dict(repositories=repos, release=default))
|
||||
|
||||
return projects_yaml
|
||||
|
||||
|
||||
def _git_yaml_load(projects_yaml):
|
||||
"""
|
||||
Load the specified yaml into a dictionary.
|
||||
"""
|
||||
if not projects_yaml:
|
||||
return None
|
||||
|
||||
return yaml.load(projects_yaml)
|
||||
|
||||
|
||||
requirements_dir = None
|
||||
|
||||
|
||||
def git_clone_and_install(projects_yaml, core_project):
|
||||
"""
|
||||
Clone/install all specified OpenStack repositories.
|
||||
|
||||
The expected format of projects_yaml is:
|
||||
|
||||
repositories:
|
||||
- {name: keystone,
|
||||
repository: 'git://git.openstack.org/openstack/keystone.git',
|
||||
branch: 'stable/icehouse'}
|
||||
- {name: requirements,
|
||||
repository: 'git://git.openstack.org/openstack/requirements.git',
|
||||
branch: 'stable/icehouse'}
|
||||
|
||||
directory: /mnt/openstack-git
|
||||
http_proxy: squid-proxy-url
|
||||
https_proxy: squid-proxy-url
|
||||
|
||||
The directory, http_proxy, and https_proxy keys are optional.
|
||||
|
||||
"""
|
||||
global requirements_dir
|
||||
parent_dir = '/mnt/openstack-git'
|
||||
http_proxy = None
|
||||
|
||||
projects = _git_yaml_load(projects_yaml)
|
||||
_git_validate_projects_yaml(projects, core_project)
|
||||
|
||||
old_environ = dict(os.environ)
|
||||
|
||||
if 'http_proxy' in projects.keys():
|
||||
http_proxy = projects['http_proxy']
|
||||
os.environ['http_proxy'] = projects['http_proxy']
|
||||
if 'https_proxy' in projects.keys():
|
||||
os.environ['https_proxy'] = projects['https_proxy']
|
||||
|
||||
if 'directory' in projects.keys():
|
||||
parent_dir = projects['directory']
|
||||
|
||||
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
|
||||
|
||||
# Upgrade setuptools and pip from default virtualenv versions. The default
|
||||
# versions in trusty break master OpenStack branch deployments.
|
||||
for p in ['pip', 'setuptools']:
|
||||
pip_install(p, upgrade=True, proxy=http_proxy,
|
||||
venv=os.path.join(parent_dir, 'venv'))
|
||||
|
||||
constraints = None
|
||||
for p in projects['repositories']:
|
||||
repo = p['repository']
|
||||
branch = p['branch']
|
||||
depth = '1'
|
||||
if 'depth' in p.keys():
|
||||
depth = p['depth']
|
||||
if p['name'] == 'requirements':
|
||||
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
||||
parent_dir, http_proxy,
|
||||
update_requirements=False)
|
||||
requirements_dir = repo_dir
|
||||
constraints = os.path.join(repo_dir, "upper-constraints.txt")
|
||||
# upper-constraints didn't exist until after icehouse
|
||||
if not os.path.isfile(constraints):
|
||||
constraints = None
|
||||
# use constraints unless project yaml sets use_constraints to false
|
||||
if 'use_constraints' in projects.keys():
|
||||
if not projects['use_constraints']:
|
||||
constraints = None
|
||||
else:
|
||||
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
||||
parent_dir, http_proxy,
|
||||
update_requirements=True,
|
||||
constraints=constraints)
|
||||
|
||||
os.environ = old_environ
|
||||
|
||||
|
||||
def _git_validate_projects_yaml(projects, core_project):
|
||||
"""
|
||||
Validate the projects yaml.
|
||||
"""
|
||||
_git_ensure_key_exists('repositories', projects)
|
||||
|
||||
for project in projects['repositories']:
|
||||
_git_ensure_key_exists('name', project.keys())
|
||||
_git_ensure_key_exists('repository', project.keys())
|
||||
_git_ensure_key_exists('branch', project.keys())
|
||||
|
||||
if projects['repositories'][0]['name'] != 'requirements':
|
||||
error_out('{} git repo must be specified first'.format('requirements'))
|
||||
|
||||
if projects['repositories'][-1]['name'] != core_project:
|
||||
error_out('{} git repo must be specified last'.format(core_project))
|
||||
|
||||
_git_ensure_key_exists('release', projects)
|
||||
|
||||
|
||||
def _git_ensure_key_exists(key, keys):
|
||||
"""
|
||||
Ensure that key exists in keys.
|
||||
"""
|
||||
if key not in keys:
|
||||
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
||||
|
||||
|
||||
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
|
||||
update_requirements, constraints=None):
|
||||
"""
|
||||
Clone and install a single git repository.
|
||||
"""
|
||||
if not os.path.exists(parent_dir):
|
||||
juju_log('Directory already exists at {}. '
|
||||
'No need to create directory.'.format(parent_dir))
|
||||
os.mkdir(parent_dir)
|
||||
|
||||
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
|
||||
repo_dir = install_remote(
|
||||
repo, dest=parent_dir, branch=branch, depth=depth)
|
||||
|
||||
venv = os.path.join(parent_dir, 'venv')
|
||||
|
||||
if update_requirements:
|
||||
if not requirements_dir:
|
||||
error_out('requirements repo must be cloned before '
|
||||
'updating from global requirements.')
|
||||
_git_update_requirements(venv, repo_dir, requirements_dir)
|
||||
|
||||
juju_log('Installing git repo from dir: {}'.format(repo_dir))
|
||||
if http_proxy:
|
||||
pip_install(repo_dir, proxy=http_proxy, venv=venv,
|
||||
constraints=constraints)
|
||||
else:
|
||||
pip_install(repo_dir, venv=venv, constraints=constraints)
|
||||
|
||||
return repo_dir
|
||||
|
||||
|
||||
def _git_update_requirements(venv, package_dir, reqs_dir):
|
||||
"""
|
||||
Update from global requirements.
|
||||
|
||||
Update an OpenStack git directory's requirements.txt and
|
||||
test-requirements.txt from global-requirements.txt.
|
||||
"""
|
||||
orig_dir = os.getcwd()
|
||||
os.chdir(reqs_dir)
|
||||
python = os.path.join(venv, 'bin/python')
|
||||
cmd = [python, 'update.py', package_dir]
|
||||
try:
|
||||
subprocess.check_call(cmd)
|
||||
except subprocess.CalledProcessError:
|
||||
package = os.path.basename(package_dir)
|
||||
error_out("Error updating {} from "
|
||||
"global-requirements.txt".format(package))
|
||||
os.chdir(orig_dir)
|
||||
|
||||
|
||||
def git_pip_venv_dir(projects_yaml):
|
||||
"""
|
||||
Return the pip virtualenv path.
|
||||
"""
|
||||
parent_dir = '/mnt/openstack-git'
|
||||
|
||||
projects = _git_yaml_load(projects_yaml)
|
||||
|
||||
if 'directory' in projects.keys():
|
||||
parent_dir = projects['directory']
|
||||
|
||||
return os.path.join(parent_dir, 'venv')
|
||||
|
||||
|
||||
def git_src_dir(projects_yaml, project):
|
||||
"""
|
||||
Return the directory where the specified project's source is located.
|
||||
"""
|
||||
parent_dir = '/mnt/openstack-git'
|
||||
|
||||
projects = _git_yaml_load(projects_yaml)
|
||||
|
||||
if 'directory' in projects.keys():
|
||||
parent_dir = projects['directory']
|
||||
|
||||
for p in projects['repositories']:
|
||||
if p['name'] == project:
|
||||
return os.path.join(parent_dir, os.path.basename(p['repository']))
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def git_yaml_value(projects_yaml, key):
|
||||
"""
|
||||
Return the value in projects_yaml for the specified key.
|
||||
"""
|
||||
projects = _git_yaml_load(projects_yaml)
|
||||
|
||||
if key in projects.keys():
|
||||
return projects[key]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def git_generate_systemd_init_files(templates_dir):
|
||||
"""
|
||||
Generate systemd init files.
|
||||
|
||||
Generates and installs systemd init units and script files based on the
|
||||
*.init.in files contained in the templates_dir directory.
|
||||
|
||||
This code is based on the openstack-pkg-tools package and its init
|
||||
script generation, which is used by the OpenStack packages.
|
||||
"""
|
||||
for f in os.listdir(templates_dir):
|
||||
# Create the init script and systemd unit file from the template
|
||||
if f.endswith(".init.in"):
|
||||
init_in_file = f
|
||||
init_file = f[:-8]
|
||||
service_file = "{}.service".format(init_file)
|
||||
|
||||
init_in_source = os.path.join(templates_dir, init_in_file)
|
||||
init_source = os.path.join(templates_dir, init_file)
|
||||
service_source = os.path.join(templates_dir, service_file)
|
||||
|
||||
init_dest = os.path.join('/etc/init.d', init_file)
|
||||
service_dest = os.path.join('/lib/systemd/system', service_file)
|
||||
|
||||
shutil.copyfile(init_in_source, init_source)
|
||||
with open(init_source, 'a') as outfile:
|
||||
template = ('/usr/share/openstack-pkg-tools/'
|
||||
'init-script-template')
|
||||
with open(template) as infile:
|
||||
outfile.write('\n\n{}'.format(infile.read()))
|
||||
|
||||
cmd = ['pkgos-gen-systemd-unit', init_in_source]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
if os.path.exists(init_dest):
|
||||
os.remove(init_dest)
|
||||
if os.path.exists(service_dest):
|
||||
os.remove(service_dest)
|
||||
shutil.copyfile(init_source, init_dest)
|
||||
shutil.copyfile(service_source, service_dest)
|
||||
os.chmod(init_dest, 0o755)
|
||||
|
||||
for f in os.listdir(templates_dir):
|
||||
# If there's a service.in file, use it instead of the generated one
|
||||
if f.endswith(".service.in"):
|
||||
service_in_file = f
|
||||
service_file = f[:-3]
|
||||
|
||||
service_in_source = os.path.join(templates_dir, service_in_file)
|
||||
service_source = os.path.join(templates_dir, service_file)
|
||||
service_dest = os.path.join('/lib/systemd/system', service_file)
|
||||
|
||||
shutil.copyfile(service_in_source, service_source)
|
||||
|
||||
if os.path.exists(service_dest):
|
||||
os.remove(service_dest)
|
||||
shutil.copyfile(service_source, service_dest)
|
||||
|
||||
for f in os.listdir(templates_dir):
|
||||
# Generate the systemd unit if there's no existing .service.in
|
||||
if f.endswith(".init.in"):
|
||||
init_in_file = f
|
||||
init_file = f[:-8]
|
||||
service_in_file = "{}.service.in".format(init_file)
|
||||
service_file = "{}.service".format(init_file)
|
||||
|
||||
init_in_source = os.path.join(templates_dir, init_in_file)
|
||||
service_in_source = os.path.join(templates_dir, service_in_file)
|
||||
service_source = os.path.join(templates_dir, service_file)
|
||||
service_dest = os.path.join('/lib/systemd/system', service_file)
|
||||
|
||||
if not os.path.exists(service_in_source):
|
||||
cmd = ['pkgos-gen-systemd-unit', init_in_source]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
if os.path.exists(service_dest):
|
||||
os.remove(service_dest)
|
||||
shutil.copyfile(service_source, service_dest)
|
||||
|
||||
|
||||
def git_determine_usr_bin():
|
||||
"""Return the /usr/bin path for Apache2 config.
|
||||
|
||||
The /usr/bin path will be located in the virtualenv if the charm
|
||||
is configured to deploy from source.
|
||||
"""
|
||||
if git_install_requested():
|
||||
projects_yaml = config('openstack-origin-git')
|
||||
projects_yaml = git_default_repos(projects_yaml)
|
||||
return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
|
||||
else:
|
||||
return '/usr/bin'
|
||||
|
||||
|
||||
def git_determine_python_path():
|
||||
"""Return the python-path for Apache2 config.
|
||||
|
||||
Returns 'None' unless the charm is configured to deploy from source,
|
||||
in which case the path of the virtualenv's site-packages is returned.
|
||||
"""
|
||||
if git_install_requested():
|
||||
projects_yaml = config('openstack-origin-git')
|
||||
projects_yaml = git_default_repos(projects_yaml)
|
||||
return os.path.join(git_pip_venv_dir(projects_yaml),
|
||||
'lib/python2.7/site-packages')
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def os_workload_status(configs, required_interfaces, charm_func=None):
|
||||
"""
|
||||
Decorator to set workload status based on complete contexts
|
||||
|
@ -1604,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
|
|||
"""
|
||||
ret = False
|
||||
|
||||
if git_install_requested():
|
||||
action_set({'outcome': 'installed from source, skipped upgrade.'})
|
||||
else:
|
||||
if openstack_upgrade_available(package):
|
||||
if config('action-managed-upgrade'):
|
||||
juju_log('Upgrading OpenStack release')
|
||||
if openstack_upgrade_available(package):
|
||||
if config('action-managed-upgrade'):
|
||||
juju_log('Upgrading OpenStack release')
|
||||
|
||||
try:
|
||||
upgrade_callback(configs=configs)
|
||||
action_set({'outcome': 'success, upgrade completed.'})
|
||||
ret = True
|
||||
except:
|
||||
action_set({'outcome': 'upgrade failed, see traceback.'})
|
||||
action_set({'traceback': traceback.format_exc()})
|
||||
action_fail('do_openstack_upgrade resulted in an '
|
||||
'unexpected error')
|
||||
else:
|
||||
action_set({'outcome': 'action-managed-upgrade config is '
|
||||
'False, skipped upgrade.'})
|
||||
try:
|
||||
upgrade_callback(configs=configs)
|
||||
action_set({'outcome': 'success, upgrade completed.'})
|
||||
ret = True
|
||||
except Exception:
|
||||
action_set({'outcome': 'upgrade failed, see traceback.'})
|
||||
action_set({'traceback': traceback.format_exc()})
|
||||
action_fail('do_openstack_upgrade resulted in an '
|
||||
'unexpected error')
|
||||
else:
|
||||
action_set({'outcome': 'no upgrade available.'})
|
||||
action_set({'outcome': 'action-managed-upgrade config is '
|
||||
'False, skipped upgrade.'})
|
||||
else:
|
||||
action_set({'outcome': 'no upgrade available.'})
|
||||
|
||||
return ret
|
||||
|
||||
|
@ -1720,7 +1282,7 @@ def is_unit_paused_set():
|
|||
kv = t[0]
|
||||
# transform something truth-y into a Boolean.
|
||||
return not(not(kv.get('unit-paused')))
|
||||
except:
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
|
@ -2034,21 +1596,32 @@ def token_cache_pkgs(source=None, release=None):

def update_json_file(filename, items):
    """Updates the json `filename` with a given dict.
    :param filename: json filename (i.e.: /etc/glance/policy.json)
    :param filename: path to json file (e.g. /etc/glance/policy.json)
    :param items: dict of items to update
    """
    if not items:
        return

    with open(filename) as fd:
        policy = json.load(fd)

    # Compare before and after and if nothing has changed don't write the file
    # since that could cause unnecessary service restarts.
    before = json.dumps(policy, indent=4, sort_keys=True)
    policy.update(items)
    after = json.dumps(policy, indent=4, sort_keys=True)
    if before == after:
        return

    with open(filename, "w") as fd:
        fd.write(json.dumps(policy, indent=4))
        fd.write(after)

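A small, hypothetical usage sketch of the helper above; the file path and policy key are placeholders:

    # Illustrative call site: merge an extra rule into glance's policy.json.
    # update_json_file() now skips the write when the merged result is
    # unchanged, so repeated hook runs do not trigger needless restarts.
    from charmhelpers.contrib.openstack.utils import update_json_file

    update_json_file('/etc/glance/policy.json',
                     {'get_image_location': 'role:admin'})
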
@cached
def snap_install_requested():
    """ Determine if installing from snaps

    If openstack-origin is of the form snap:channel-series-release
    If openstack-origin is of the form snap:track/channel[/branch]
    and channel is in SNAPS_CHANNELS return True.
    """
    origin = config('openstack-origin') or ""

@ -2056,10 +1629,12 @@ def snap_install_requested():
        return False

    _src = origin[5:]
    channel, series, release = _src.split('-')
    if channel.lower() in SNAP_CHANNELS:
        return True
    return False
    if '/' in _src:
        channel = _src.split('/')[1]
    else:
        # Handle snap:track with no channel
        channel = 'stable'
    return valid_snap_channel(channel)
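For illustration, the new parsing accepts origins of the form shown below (all values hypothetical); only the channel part is validated against the known snap channels:

    # Illustrative origins and the channel each one resolves to:
    #   'snap:ocata/stable' -> 'stable'
    #   'snap:pike/edge'    -> 'edge'
    #   'snap:pike'         -> defaults to 'stable'
    for origin in ('snap:ocata/stable', 'snap:pike/edge', 'snap:pike'):
        _src = origin[5:]
        channel = _src.split('/')[1] if '/' in _src else 'stable'
        print(origin, '->', channel)
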
|
||||
|
||||
|
||||
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
|
||||
|
@ -2067,7 +1642,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
|
|||
|
||||
@param snaps: List of snaps
|
||||
@param src: String of openstack-origin or source of the form
|
||||
snap:channel-series-track
|
||||
snap:track/channel
|
||||
@param mode: String classic, devmode or jailmode
|
||||
@returns: Dictionary of snaps with channels and modes
|
||||
"""
|
||||
|
@ -2077,8 +1652,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
|
|||
return {}
|
||||
|
||||
_src = src[5:]
|
||||
_channel, _series, _release = _src.split('-')
|
||||
channel = '--channel={}/{}'.format(_release, _channel)
|
||||
channel = '--channel={}'.format(_src)
|
||||
|
||||
return {snap: {'channel': channel, 'mode': mode}
|
||||
for snap in snaps}
|
||||
|
@ -2090,8 +1664,8 @@ def install_os_snaps(snaps, refresh=False):
|
|||
@param snaps: Dictionary of snaps with channels and modes of the form:
|
||||
{'snap_name': {'channel': 'snap_channel',
|
||||
'mode': 'snap_mode'}}
|
||||
Where channel a snapstore channel and mode is --classic, --devmode or
|
||||
--jailmode.
|
||||
Where channel is a snapstore channel and mode is --classic, --devmode
|
||||
or --jailmode.
|
||||
@param post_snap_install: Callback function to run after snaps have been
|
||||
installed
|
||||
"""
|
||||
|
|
|
@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
|
|||
assert isinstance(valid_range, list), \
|
||||
"valid_range must be a list, was given {}".format(valid_range)
|
||||
# If we're dealing with strings
|
||||
if valid_type is six.string_types:
|
||||
if isinstance(value, six.string_types):
|
||||
assert value in valid_range, \
|
||||
"{} is not in the list {}".format(value, valid_range)
|
||||
# Integer, float should have a min and max
|
||||
|
@ -370,18 +370,19 @@ def get_mon_map(service):
|
|||
Also raises CalledProcessError if our ceph command fails
|
||||
"""
|
||||
try:
|
||||
mon_status = check_output(
|
||||
['ceph', '--id', service,
|
||||
'mon_status', '--format=json'])
|
||||
mon_status = check_output(['ceph', '--id', service,
|
||||
'mon_status', '--format=json'])
|
||||
if six.PY3:
|
||||
mon_status = mon_status.decode('UTF-8')
|
||||
try:
|
||||
return json.loads(mon_status)
|
||||
except ValueError as v:
|
||||
log("Unable to parse mon_status json: {}. Error: {}".format(
|
||||
mon_status, v.message))
|
||||
log("Unable to parse mon_status json: {}. Error: {}"
|
||||
.format(mon_status, str(v)))
|
||||
raise
|
||||
except CalledProcessError as e:
|
||||
log("mon_status command failed with message: {}".format(
|
||||
e.message))
|
||||
log("mon_status command failed with message: {}"
|
||||
.format(str(e)))
|
||||
raise
|
||||
|
||||
|
||||
|
@ -457,7 +458,7 @@ def monitor_key_get(service, key):
|
|||
try:
|
||||
output = check_output(
|
||||
['ceph', '--id', service,
|
||||
'config-key', 'get', str(key)])
|
||||
'config-key', 'get', str(key)]).decode('UTF-8')
|
||||
return output
|
||||
except CalledProcessError as e:
|
||||
log("Monitor config-key get failed with message: {}".format(
|
||||
|
@ -500,6 +501,8 @@ def get_erasure_profile(service, name):
|
|||
out = check_output(['ceph', '--id', service,
|
||||
'osd', 'erasure-code-profile', 'get',
|
||||
name, '--format=json'])
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
return json.loads(out)
|
||||
except (CalledProcessError, OSError, ValueError):
|
||||
return None
|
||||
|
@ -514,7 +517,8 @@ def pool_set(service, pool_name, key, value):
|
|||
:param value:
|
||||
:return: None. Can raise CalledProcessError
|
||||
"""
|
||||
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
|
||||
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
|
||||
str(value).lower()]
|
||||
try:
|
||||
check_call(cmd)
|
||||
except CalledProcessError:
|
||||
|
@ -618,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
|
|||
:param durability_estimator: int
|
||||
:return: None. Can raise CalledProcessError
|
||||
"""
|
||||
version = ceph_version()
|
||||
|
||||
# Ensure this failure_domain is allowed by Ceph
|
||||
validator(failure_domain, six.string_types,
|
||||
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
|
||||
|
||||
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
|
||||
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
|
||||
'ruleset_failure_domain=' + failure_domain]
|
||||
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
|
||||
]
|
||||
if locality is not None and durability_estimator is not None:
|
||||
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
|
||||
|
||||
# failure_domain changed in luminous
|
||||
if version and version >= '12.0.0':
|
||||
cmd.append('crush-failure-domain=' + failure_domain)
|
||||
else:
|
||||
cmd.append('ruleset-failure-domain=' + failure_domain)
|
||||
|
||||
# Add plugin specific information
|
||||
if locality is not None:
|
||||
# For local erasure codes
|
||||
|
@ -686,7 +698,10 @@ def get_cache_mode(service, pool_name):
|
|||
"""
|
||||
validator(value=service, valid_type=six.string_types)
|
||||
validator(value=pool_name, valid_type=six.string_types)
|
||||
out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
|
||||
out = check_output(['ceph', '--id', service,
|
||||
'osd', 'dump', '--format=json'])
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
try:
|
||||
osd_json = json.loads(out)
|
||||
for pool in osd_json['pools']:
|
||||
|
@ -700,8 +715,9 @@ def get_cache_mode(service, pool_name):
|
|||
def pool_exists(service, name):
|
||||
"""Check to see if a RADOS pool already exists."""
|
||||
try:
|
||||
out = check_output(['rados', '--id', service,
|
||||
'lspools']).decode('UTF-8')
|
||||
out = check_output(['rados', '--id', service, 'lspools'])
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
except CalledProcessError:
|
||||
return False
|
||||
|
||||
|
@ -714,9 +730,12 @@ def get_osds(service):
|
|||
"""
|
||||
version = ceph_version()
|
||||
if version and version >= '0.56':
|
||||
return json.loads(check_output(['ceph', '--id', service,
|
||||
'osd', 'ls',
|
||||
'--format=json']).decode('UTF-8'))
|
||||
out = check_output(['ceph', '--id', service,
|
||||
'osd', 'ls',
|
||||
'--format=json'])
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
return json.loads(out)
|
||||
|
||||
return None
|
||||
|
||||
|
@ -734,7 +753,9 @@ def rbd_exists(service, pool, rbd_img):
|
|||
"""Check to see if a RADOS block device exists."""
|
||||
try:
|
||||
out = check_output(['rbd', 'list', '--id',
|
||||
service, '--pool', pool]).decode('UTF-8')
|
||||
service, '--pool', pool])
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
except CalledProcessError:
|
||||
return False
|
||||
|
||||
|
@ -859,7 +880,9 @@ def configure(service, key, auth, use_syslog):
|
|||
def image_mapped(name):
|
||||
"""Determine whether a RADOS block device is mapped locally."""
|
||||
try:
|
||||
out = check_output(['rbd', 'showmapped']).decode('UTF-8')
|
||||
out = check_output(['rbd', 'showmapped'])
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
except CalledProcessError:
|
||||
return False
|
||||
|
||||
|
@ -1018,7 +1041,9 @@ def ceph_version():
|
|||
"""Retrieve the local version of ceph."""
|
||||
if os.path.exists('/usr/bin/ceph'):
|
||||
cmd = ['ceph', '-v']
|
||||
output = check_output(cmd).decode('US-ASCII')
|
||||
output = check_output(cmd)
|
||||
if six.PY3:
|
||||
output = output.decode('UTF-8')
|
||||
output = output.split()
|
||||
if len(output) > 3:
|
||||
return output[2]
|
||||
|
@ -1048,14 +1073,24 @@ class CephBrokerRq(object):
        self.ops = []

    def add_op_request_access_to_group(self, name, namespace=None,
                                       permission=None, key_name=None):
                                       permission=None, key_name=None,
                                       object_prefix_permissions=None):
        """
        Adds the requested permissions to the current service's Ceph key,
        allowing the key to access only the specified pools
        allowing the key to access only the specified pools or
        object prefixes. object_prefix_permissions should be a dictionary
        keyed on the permission with the corresponding value being a list
        of prefixes to apply that permission to.
            {
                'rwx': ['prefix1', 'prefix2'],
                'class-read': ['prefix3']}
        """
        self.ops.append({'op': 'add-permissions-to-key', 'group': name,
                         'namespace': namespace, 'name': key_name or service_name(),
                         'group-permission': permission})
        self.ops.append({
            'op': 'add-permissions-to-key', 'group': name,
            'namespace': namespace,
            'name': key_name or service_name(),
            'group-permission': permission,
            'object-prefix-permissions': object_prefix_permissions})

    def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                           weight=None, group=None, namespace=None):
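A hypothetical example of the new parameter in use; the group name, prefixes and pool name are placeholders:

    # Illustrative broker request using the new object-prefix support.
    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    rq.add_op_request_access_to_group(
        name='objects',
        permission='rwx',
        object_prefix_permissions={'rwx': ['images/'],
                                   'class-read': ['meta/']})
    rq.add_op_create_pool(name='glance', replica_count=3)
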
|
||||
|
@ -1091,7 +1126,10 @@ class CephBrokerRq(object):
|
|||
def _ops_equal(self, other):
|
||||
if len(self.ops) == len(other.ops):
|
||||
for req_no in range(0, len(self.ops)):
|
||||
for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
|
||||
for key in [
|
||||
'replicas', 'name', 'op', 'pg_num', 'weight',
|
||||
'group', 'group-namespace', 'group-permission',
|
||||
'object-prefix-permissions']:
|
||||
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
|
||||
return False
|
||||
else:
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
from subprocess import (
|
||||
CalledProcessError,
|
||||
check_call,
|
||||
|
@ -74,10 +75,10 @@ def list_lvm_volume_group(block_device):
|
|||
'''
|
||||
vg = None
|
||||
pvd = check_output(['pvdisplay', block_device]).splitlines()
|
||||
for l in pvd:
|
||||
l = l.decode('UTF-8')
|
||||
if l.strip().startswith('VG Name'):
|
||||
vg = ' '.join(l.strip().split()[2:])
|
||||
for lvm in pvd:
|
||||
lvm = lvm.decode('UTF-8')
|
||||
if lvm.strip().startswith('VG Name'):
|
||||
vg = ' '.join(lvm.strip().split()[2:])
|
||||
return vg
|
||||
|
||||
|
||||
|
@ -101,3 +102,81 @@ def create_lvm_volume_group(volume_group, block_device):
|
|||
:block_device: str: Full path of PV-initialized block device.
|
||||
'''
|
||||
check_call(['vgcreate', volume_group, block_device])
|
||||
|
||||
|
||||
def list_logical_volumes(select_criteria=None, path_mode=False):
|
||||
'''
|
||||
List logical volumes
|
||||
|
||||
:param select_criteria: str: Limit list to those volumes matching this
|
||||
criteria (see 'lvs -S help' for more details)
|
||||
:param path_mode: bool: return logical volume name in 'vg/lv' format, this
|
||||
format is required for some commands like lvextend
|
||||
:returns: [str]: List of logical volumes
|
||||
'''
|
||||
lv_diplay_attr = 'lv_name'
|
||||
if path_mode:
|
||||
# Parsing output logic relies on the column order
|
||||
lv_diplay_attr = 'vg_name,' + lv_diplay_attr
|
||||
cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings']
|
||||
if select_criteria:
|
||||
cmd.extend(['--select', select_criteria])
|
||||
lvs = []
|
||||
for lv in check_output(cmd).decode('UTF-8').splitlines():
|
||||
if not lv:
|
||||
continue
|
||||
if path_mode:
|
||||
lvs.append('/'.join(lv.strip().split()))
|
||||
else:
|
||||
lvs.append(lv.strip())
|
||||
return lvs
|
||||
|
||||
|
||||
list_thin_logical_volume_pools = functools.partial(
|
||||
list_logical_volumes,
|
||||
select_criteria='lv_attr =~ ^t')
|
||||
|
||||
list_thin_logical_volumes = functools.partial(
|
||||
list_logical_volumes,
|
||||
select_criteria='lv_attr =~ ^V')
|
||||
|
||||
|
||||
def extend_logical_volume_by_device(lv_name, block_device):
|
||||
'''
|
||||
Extends the size of logical volume lv_name by the amount of free space on
|
||||
physical volume block_device.
|
||||
|
||||
:param lv_name: str: name of logical volume to be extended (vg/lv format)
|
||||
:param block_device: str: name of block_device to be allocated to lv_name
|
||||
'''
|
||||
cmd = ['lvextend', lv_name, block_device]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def create_logical_volume(lv_name, volume_group, size=None):
|
||||
'''
|
||||
Create a new logical volume in an existing volume group
|
||||
|
||||
:param lv_name: str: name of logical volume to be created.
|
||||
:param volume_group: str: Name of volume group to use for the new volume.
|
||||
:param size: str: Size of logical volume to create (100% if not supplied)
|
||||
:raises subprocess.CalledProcessError: in the event that the lvcreate fails.
|
||||
'''
|
||||
if size:
|
||||
check_call([
|
||||
'lvcreate',
|
||||
'--yes',
|
||||
'-L',
|
||||
'{}'.format(size),
|
||||
'-n', lv_name, volume_group
|
||||
])
|
||||
# create the lv with all the space available, this is needed because the
|
||||
# system call is different for LVM
|
||||
else:
|
||||
check_call([
|
||||
'lvcreate',
|
||||
'--yes',
|
||||
'-l',
|
||||
'100%FREE',
|
||||
'-n', lv_name, volume_group
|
||||
])
|
||||
|
|
|
@ -64,6 +64,6 @@ def is_device_mounted(device):
|
|||
'''
|
||||
try:
|
||||
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
|
||||
except:
|
||||
except Exception:
|
||||
return False
|
||||
return bool(re.search(r'MOUNTPOINT=".+"', out))
|
||||
|
|
|
@ -22,10 +22,12 @@ from __future__ import print_function
|
|||
import copy
|
||||
from distutils.version import LooseVersion
|
||||
from functools import wraps
|
||||
from collections import namedtuple
|
||||
import glob
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import errno
|
||||
|
@ -38,6 +40,7 @@ if not six.PY3:
|
|||
else:
|
||||
from collections import UserDict
|
||||
|
||||
|
||||
CRITICAL = "CRITICAL"
|
||||
ERROR = "ERROR"
|
||||
WARNING = "WARNING"
|
||||
|
@ -65,7 +68,7 @@ def cached(func):
|
|||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
global cache
|
||||
key = str((func, args, kwargs))
|
||||
key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
|
||||
try:
|
||||
return cache[key]
|
||||
except KeyError:
|
||||
|
@ -343,6 +346,7 @@ class Config(dict):
|
|||
|
||||
"""
|
||||
with open(self.path, 'w') as f:
|
||||
os.fchmod(f.fileno(), 0o600)
|
||||
json.dump(self, f)
|
||||
|
||||
def _implicit_save(self):
|
||||
|
@ -644,18 +648,31 @@ def is_relation_made(relation, keys='private-address'):
    return False


def _port_op(op_name, port, protocol="TCP"):
    """Open or close a service network port"""
    _args = [op_name]
    icmp = protocol.upper() == "ICMP"
    if icmp:
        _args.append(protocol)
    else:
        _args.append('{}/{}'.format(port, protocol))
    try:
        subprocess.check_call(_args)
    except subprocess.CalledProcessError:
        # Older Juju pre 2.3 doesn't support ICMP
        # so treat it as a no-op if it fails.
        if not icmp:
            raise


def open_port(port, protocol="TCP"):
    """Open a service network port"""
    _args = ['open-port']
    _args.append('{}/{}'.format(port, protocol))
    subprocess.check_call(_args)
    _port_op('open-port', port, protocol)


def close_port(port, protocol="TCP"):
    """Close a service network port"""
    _args = ['close-port']
    _args.append('{}/{}'.format(port, protocol))
    subprocess.check_call(_args)
    _port_op('close-port', port, protocol)
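A hedged usage sketch of the ICMP handling added above; the port number is ignored for ICMP, and on Juju releases before 2.3 the ICMP call silently becomes a no-op:

    # Illustrative only: open a TCP port and, additionally, ICMP.
    from charmhelpers.core.hookenv import open_port

    open_port(8774)                    # runs: open-port 8774/TCP
    open_port(None, protocol='ICMP')   # runs: open-port ICMP (port unused)
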
|
||||
|
||||
|
||||
def open_ports(start, end, protocol="TCP"):
|
||||
|
@ -672,6 +689,17 @@ def close_ports(start, end, protocol="TCP"):
|
|||
subprocess.check_call(_args)
|
||||
|
||||
|
||||
def opened_ports():
|
||||
"""Get the opened ports
|
||||
|
||||
*Note that this will only show ports opened in a previous hook*
|
||||
|
||||
:returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
|
||||
"""
|
||||
_args = ['opened-ports', '--format=json']
|
||||
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
|
||||
|
||||
|
||||
@cached
|
||||
def unit_get(attribute):
|
||||
"""Get the unit ID for the remote unit"""
|
||||
|
@ -793,6 +821,10 @@ class Hooks(object):
|
|||
return wrapper
|
||||
|
||||
|
||||
class NoNetworkBinding(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def charm_dir():
|
||||
"""Return the root directory of the current charm"""
|
||||
d = os.environ.get('JUJU_CHARM_DIR')
|
||||
|
@ -1012,7 +1044,6 @@ def juju_version():
|
|||
universal_newlines=True).strip()
|
||||
|
||||
|
||||
@cached
|
||||
def has_juju_version(minimum_version):
|
||||
"""Return True if the Juju version is at least the provided version"""
|
||||
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
|
||||
|
@ -1072,6 +1103,8 @@ def _run_atexit():
|
|||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||
def network_get_primary_address(binding):
|
||||
'''
|
||||
Deprecated since Juju 2.3; use network_get()
|
||||
|
||||
Retrieve the primary network address for a named binding
|
||||
|
||||
:param binding: string. The name of a relation of extra-binding
|
||||
|
@ -1079,7 +1112,41 @@ def network_get_primary_address(binding):
|
|||
:raise: NotImplementedError if run on Juju < 2.0
|
||||
'''
|
||||
cmd = ['network-get', '--primary-address', binding]
|
||||
return subprocess.check_output(cmd).decode('UTF-8').strip()
|
||||
try:
|
||||
response = subprocess.check_output(
|
||||
cmd,
|
||||
stderr=subprocess.STDOUT).decode('UTF-8').strip()
|
||||
except CalledProcessError as e:
|
||||
if 'no network config found for binding' in e.output.decode('UTF-8'):
|
||||
raise NoNetworkBinding("No network binding for {}"
|
||||
.format(binding))
|
||||
else:
|
||||
raise
|
||||
return response
|
||||
|
||||
|
||||
def network_get(endpoint, relation_id=None):
|
||||
"""
|
||||
Retrieve the network details for a relation endpoint
|
||||
|
||||
:param endpoint: string. The name of a relation endpoint
|
||||
:param relation_id: int. The ID of the relation for the current context.
|
||||
:return: dict. The loaded YAML output of the network-get query.
|
||||
:raise: NotImplementedError if request not supported by the Juju version.
|
||||
"""
|
||||
if not has_juju_version('2.2'):
|
||||
raise NotImplementedError(juju_version()) # earlier versions require --primary-address
|
||||
if relation_id and not has_juju_version('2.3'):
|
||||
raise NotImplementedError # 2.3 added the -r option
|
||||
|
||||
cmd = ['network-get', endpoint, '--format', 'yaml']
|
||||
if relation_id:
|
||||
cmd.append('-r')
|
||||
cmd.append(relation_id)
|
||||
response = subprocess.check_output(
|
||||
cmd,
|
||||
stderr=subprocess.STDOUT).decode('UTF-8').strip()
|
||||
return yaml.safe_load(response)
|
||||
|
||||
|
||||
def add_metric(*args, **kwargs):
|
||||
|
@ -1111,3 +1178,93 @@ def meter_info():
    """Get the meter status information, if running in the meter-status-changed
    hook."""
    return os.environ.get('JUJU_METER_INFO')


def iter_units_for_relation_name(relation_name):
    """Iterate through all units in a relation

    Generator that iterates through all the units in a relation and yields
    a named tuple with rid and unit field names.

    Usage:
    data = [(u.rid, u.unit)
            for u in iter_units_for_relation_name(relation_name)]

    :param relation_name: string relation name
    :yield: Named Tuple with rid and unit field names
    """
    RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
    for rid in relation_ids(relation_name):
        for unit in related_units(rid):
            yield RelatedUnit(rid, unit)


def ingress_address(rid=None, unit=None):
    """
    Retrieve the ingress-address from a relation when available.
    Otherwise, return the private-address.

    When used on the consuming side of the relation (unit is a remote
    unit), the ingress-address is the IP address that this unit needs
    to use to reach the provided service on the remote unit.

    When used on the providing side of the relation (unit == local_unit()),
    the ingress-address is the IP address that is advertised to remote
    units on this relation. Remote units need to use this address to
    reach the local provided service on this unit.

    Note that charms may document some other method to use in
    preference to the ingress_address(), such as an address provided
    on a different relation attribute or a service discovery mechanism.
    This allows charms to redirect inbound connections to their peers
    or different applications such as load balancers.

    Usage:
    addresses = [ingress_address(rid=u.rid, unit=u.unit)
                 for u in iter_units_for_relation_name(relation_name)]

    :param rid: string relation id
    :param unit: string unit name
    :side effect: calls relation_get
    :return: string IP address
    """
    settings = relation_get(rid=rid, unit=unit)
    return (settings.get('ingress-address') or
            settings.get('private-address'))


def egress_subnets(rid=None, unit=None):
    """
    Retrieve the egress-subnets from a relation.

    This function is to be used on the providing side of the
    relation, and provides the ranges of addresses that client
    connections may come from. The result is uninteresting on
    the consuming side of a relation (unit == local_unit()).

    Returns a stable list of subnets in CIDR format.
    eg. ['192.168.1.0/24', '2001::F00F/128']

    If egress-subnets is not available, falls back to using the published
    ingress-address, or finally private-address.

    :param rid: string relation id
    :param unit: string unit name
    :side effect: calls relation_get
    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
    """
    def _to_range(addr):
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
            addr += '/32'
        elif ':' in addr and '/' not in addr:  # IPv6
            addr += '/128'
        return addr

    settings = relation_get(rid=rid, unit=unit)
    if 'egress-subnets' in settings:
        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
    if 'ingress-address' in settings:
        return [_to_range(settings['ingress-address'])]
    if 'private-address' in settings:
        return [_to_range(settings['private-address'])]
    return []  # Should never happen

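
A sketch (not part of the diff) of how a charm might combine the three helpers above to build per-unit address data; the 'shared-db' relation name is a made-up example.

# Illustrative only: map each remote unit's ingress address to its egress
# subnets, e.g. for building firewall allow-lists on the providing side.
from charmhelpers.core.hookenv import (
    egress_subnets,
    ingress_address,
    iter_units_for_relation_name,
)


def collect_peer_networks(relation_name='shared-db'):
    acls = {}
    for u in iter_units_for_relation_name(relation_name):
        addr = ingress_address(rid=u.rid, unit=u.unit)       # falls back to private-address
        acls[addr] = egress_subnets(rid=u.rid, unit=u.unit)   # stable list of CIDRs
    return acls
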
@ -34,7 +34,7 @@ import six

from contextlib import contextmanager
from collections import OrderedDict
from .hookenv import log, DEBUG
from .hookenv import log, DEBUG, local_unit
from .fstab import Fstab
from charmhelpers.osplatform import get_platform

@ -441,6 +441,49 @@ def add_user_to_group(username, group):
|
|||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def chage(username, lastday=None, expiredate=None, inactive=None,
|
||||
mindays=None, maxdays=None, root=None, warndays=None):
|
||||
"""Change user password expiry information
|
||||
|
||||
:param str username: User to update
|
||||
:param str lastday: Set when password was changed in YYYY-MM-DD format
|
||||
:param str expiredate: Set when user's account will no longer be
|
||||
accessible in YYYY-MM-DD format.
|
||||
-1 will remove an account expiration date.
|
||||
:param str inactive: Set the number of days of inactivity after a password
|
||||
has expired before the account is locked.
|
||||
-1 will remove an account's inactivity.
|
||||
:param str mindays: Set the minimum number of days between password
|
||||
changes to MIN_DAYS.
|
||||
0 indicates the password can be changed anytime.
|
||||
:param str maxdays: Set the maximum number of days during which a
|
||||
password is valid.
|
||||
-1 as MAX_DAYS will remove checking maxdays
|
||||
:param str root: Apply changes in the CHROOT_DIR directory
|
||||
:param str warndays: Set the number of days of warning before a password
|
||||
change is required
|
||||
:raises subprocess.CalledProcessError: if call to chage fails
|
||||
"""
|
||||
cmd = ['chage']
|
||||
if root:
|
||||
cmd.extend(['--root', root])
|
||||
if lastday:
|
||||
cmd.extend(['--lastday', lastday])
|
||||
if expiredate:
|
||||
cmd.extend(['--expiredate', expiredate])
|
||||
if inactive:
|
||||
cmd.extend(['--inactive', inactive])
|
||||
if mindays:
|
||||
cmd.extend(['--mindays', mindays])
|
||||
if maxdays:
|
||||
cmd.extend(['--maxdays', maxdays])
|
||||
if warndays:
|
||||
cmd.extend(['--warndays', warndays])
|
||||
cmd.append(username)
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
|
||||
|
||||
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
|
||||
"""Replicate the contents of a path"""
|
||||
options = options or ['--delete', '--executability']
|
||||
|
@ -506,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
|
|||
with open(path, 'wb') as target:
|
||||
os.fchown(target.fileno(), uid, gid)
|
||||
os.fchmod(target.fileno(), perms)
|
||||
if six.PY3 and isinstance(content, six.string_types):
|
||||
content = content.encode('UTF-8')
|
||||
target.write(content)
|
||||
return
|
||||
# the contents were the same, but we might still need to change the
|
||||
|
@ -946,3 +991,38 @@ def updatedb(updatedb_text, new_path):
                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
    output = "\n".join(lines)
    return output


def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
    """ Modulo distribution

    This helper uses the unit number, a modulo value and a constant wait time
    to produce a calculated wait time distribution. This is useful in large
    scale deployments to distribute load during an expensive operation such as
    service restarts.

    If you have 1000 nodes that need to restart 100 at a time 1 minute at a
    time:

      time.wait(modulo_distribution(modulo=100, wait=60))
      restart()

    If you need restarts to happen serially set modulo to the exact number of
    nodes and set a high constant wait time:

      time.wait(modulo_distribution(modulo=10, wait=120))
      restart()

    @param modulo: int The modulo number creates the group distribution
    @param wait: int The constant time wait value
    @param non_zero_wait: boolean Override unit % modulo == 0,
                          return modulo * wait. Used to avoid collisions with
                          leader nodes which are often given priority.
    @return: int Calculated time to wait for unit operation
    """
    unit_number = int(local_unit().split('/')[1])
    calculated_wait_time = (unit_number % modulo) * wait
    if non_zero_wait and calculated_wait_time == 0:
        return modulo * wait
    else:
        return calculated_wait_time

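
A hedged usage sketch (not part of the diff) for modulo_distribution(); the docstring's time.wait() reads as shorthand for Python's time.sleep(), which is what this example calls, and the service name is an example.

# Illustrative only: stagger a restart so that units in the same modulo group
# wait the same amount of time, avoiding a thundering herd of restarts.
import time

from charmhelpers.core.hookenv import log
from charmhelpers.core.host import modulo_distribution, service_restart


def staggered_restart(service='glance-simplestreams-sync'):
    delay = modulo_distribution(modulo=5, wait=30, non_zero_wait=True)
    log('Waiting {}s before restarting {}'.format(delay, service))
    time.sleep(delay)
    service_restart(service)
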
@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
    'yakkety',
    'zesty',
    'artful',
    'bionic',
)

@ -313,17 +313,26 @@ class PortManagerCallback(ManagerCallback):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
                if bool(old_port) and not self.ports_contains(old_port, new_ports):
                    hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            # A port is either a number or 'ICMP'
            protocol = 'TCP'
            if str(port).upper() == 'ICMP':
                protocol = 'ICMP'
            if event_name == 'start':
                hookenv.open_port(port)
                hookenv.open_port(port, protocol)
            elif event_name == 'stop':
                hookenv.close_port(port)
                hookenv.close_port(port, protocol)

    def ports_contains(self, port, ports):
        if not bool(port):
            return False
        if str(port).upper() != 'ICMP':
            port = int(port)
        return port in ports


def service_stop(service_name):
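
The new ports_contains() logic restated standalone (not part of the diff) to show the mixed string/int handling it adds for ports read back from the .ports file; the values are examples.

# Illustrative only: stored ports come back from the file as strings, and a
# port is either a number or the literal 'ICMP'.
def ports_contains(port, ports):
    if not bool(port):
        return False
    if str(port).upper() != 'ICMP':
        port = int(port)
    return port in ports


new_ports = [8080, 'ICMP', 9292]
assert ports_contains('8080', new_ports)      # numeric string compares as int
assert ports_contains('ICMP', new_ports)      # ICMP entries pass through as-is
assert not ports_contains('', new_ports)      # empty entries are ignored
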
@ -61,13 +61,19 @@ def bytes_from_string(value):
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
        raise ValueError(msg)
    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
    if not matches:
        msg = "Unable to interpret string value '%s' as bytes" % (value)
        raise ValueError(msg)
    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
    if matches:
        size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
    else:
        # Assume that value passed in is bytes
        try:
            size = int(value)
        except ValueError:
            msg = "Unable to interpret string value '%s' as bytes" % (value)
            raise ValueError(msg)
    return size


class BasicStringComparator(object):
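
A few expected results (not part of the diff) for the reworked bytes_from_string(), assuming the usual charm-helpers K/M/G suffix table.

# Illustrative only: suffixed values behave as before; bare numbers are the
# new fallback path and now parse as plain bytes instead of raising.
from charmhelpers.core.strutils import bytes_from_string

assert bytes_from_string('1K') == 1024
assert bytes_from_string('3M') == 3 * 1024 ** 2
assert bytes_from_string('2048') == 2048
try:
    bytes_from_string('lots')  # still rejected, with a bytes-specific message
except ValueError as err:
    print(err)
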
@ -20,7 +20,8 @@ from charmhelpers.core import hookenv
|
|||
|
||||
|
||||
def render(source, target, context, owner='root', group='root',
|
||||
perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
|
||||
perms=0o444, templates_dir=None, encoding='UTF-8',
|
||||
template_loader=None, config_template=None):
|
||||
"""
|
||||
Render a template.
|
||||
|
||||
|
@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root',
|
|||
The context should be a dict containing the values to be replaced in the
|
||||
template.
|
||||
|
||||
config_template may be provided to render from a provided template instead
|
||||
of loading from a file.
|
||||
|
||||
The `owner`, `group`, and `perms` options will be passed to `write_file`.
|
||||
|
||||
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
|
||||
|
@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root',
|
|||
if templates_dir is None:
|
||||
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
|
||||
template_env = Environment(loader=FileSystemLoader(templates_dir))
|
||||
try:
|
||||
source = source
|
||||
template = template_env.get_template(source)
|
||||
except exceptions.TemplateNotFound as e:
|
||||
hookenv.log('Could not load template %s from %s.' %
|
||||
(source, templates_dir),
|
||||
level=hookenv.ERROR)
|
||||
raise e
|
||||
|
||||
# load from a string if provided explicitly
|
||||
if config_template is not None:
|
||||
template = template_env.from_string(config_template)
|
||||
else:
|
||||
try:
|
||||
source = source
|
||||
template = template_env.get_template(source)
|
||||
except exceptions.TemplateNotFound as e:
|
||||
hookenv.log('Could not load template %s from %s.' %
|
||||
(source, templates_dir),
|
||||
level=hookenv.ERROR)
|
||||
raise e
|
||||
content = template.render(context)
|
||||
if target is not None:
|
||||
target_dir = os.path.dirname(target)
|
||||
|
|
|
@ -175,6 +175,8 @@ class Storage(object):
|
|||
else:
|
||||
self.db_path = os.path.join(
|
||||
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
|
||||
with open(self.db_path, 'a') as f:
|
||||
os.fchmod(f.fileno(), 0o600)
|
||||
self.conn = sqlite3.connect('%s' % self.db_path)
|
||||
self.cursor = self.conn.cursor()
|
||||
self.revision = None
|
||||
|
@ -358,7 +360,7 @@ class Storage(object):
|
|||
try:
|
||||
yield self.revision
|
||||
self.revision = None
|
||||
except:
|
||||
except Exception:
|
||||
self.flush(False)
|
||||
self.revision = None
|
||||
raise
|
||||
|
|
|
@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception):
|
|||
pass
|
||||
|
||||
|
||||
class InvalidSnapChannel(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _snap_exec(commands):
|
||||
"""
|
||||
Execute snap commands.
|
||||
|
@ -132,3 +136,15 @@ def snap_refresh(packages, *flags):
|
|||
|
||||
log(message, level='INFO')
|
||||
return _snap_exec(['refresh'] + flags + packages)
|
||||
|
||||
|
||||
def valid_snap_channel(channel):
|
||||
""" Validate snap channel exists
|
||||
|
||||
:raises InvalidSnapChannel: When channel does not exist
|
||||
:return: Boolean
|
||||
"""
|
||||
if channel.lower() in SNAP_CHANNELS:
|
||||
return True
|
||||
else:
|
||||
raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
|
||||
|
|
|
@ -572,7 +572,7 @@ def get_upstream_version(package):
|
|||
cache = apt_cache()
|
||||
try:
|
||||
pkg = cache[package]
|
||||
except:
|
||||
except Exception:
|
||||
# the package is unknown to the current apt cache.
|
||||
return None
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
#!/usr/bin/env python2.7
#!/usr/bin/env python
#
# Copyright 2014 Canonical Ltd. released under AGPL
#

@ -113,6 +113,15 @@ class MirrorsConfigServiceContext(OSContextGenerator):
            hypervisor_mapping=config['hypervisor_mapping'])


def ensure_perms():
    """Ensure gss file permissions."""
    if os.path.isfile(ID_CONF_FILE_NAME):
        os.chmod(ID_CONF_FILE_NAME, 0o640)

    if os.path.isfile(MIRRORS_CONF_FILE_NAME,):
        os.chmod(MIRRORS_CONF_FILE_NAME, 0o640)


def get_release():
    return get_os_codename_package('glance-common', fatal=False) or 'icehouse'

@ -198,6 +207,7 @@ def identity_service_joined(relation_id=None):
def identity_service_changed():
    configs = get_configs()
    configs.write(ID_CONF_FILE_NAME)
    ensure_perms()


@hooks.hook('install.real')

@ -230,6 +240,7 @@ def config_changed():
    hookenv.log('begin config-changed hook.')
    configs = get_configs()
    configs.write(MIRRORS_CONF_FILE_NAME)
    ensure_perms()

    update_nrpe_config()


@ -259,6 +270,7 @@ def upgrade_charm():
    update_nrpe_config()
    configs = get_configs()
    configs.write_all()
    ensure_perms()


@hooks.hook('amqp-relation-joined')

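
A unit-test-style sketch (not part of the diff) for the new ensure_perms() helper; the module name hooks and the patch targets are assumptions about how the charm's hook code is imported in its unit tests.

# Illustrative only: both rendered config files should end up 0o640
# (owner rw, group r) whenever they exist on disk.
import mock

import hooks


@mock.patch('hooks.os.chmod')
@mock.patch('hooks.os.path.isfile')
def test_ensure_perms_tightens_config_files(isfile, chmod):
    isfile.return_value = True
    hooks.ensure_perms()
    chmod.assert_any_call(hooks.ID_CONF_FILE_NAME, 0o640)
    chmod.assert_any_call(hooks.MIRRORS_CONF_FILE_NAME, 0o640)
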
@ -10,7 +10,7 @@ tags:
series:
  - trusty
  - xenial
  - yakkety
  - bionic
subordinate: false
provides:
  simplestreams-image-service:

@ -10,4 +10,3 @@ Jinja2>=2.6 # BSD License (3 clause)
six>=1.9.0
dnspython>=1.12.0
psutil>=1.1.1,<2.0.0
python-neutronclient>=2.6.0

@ -11,13 +11,17 @@ requests==2.6.0
# Liberty client lower constraints
amulet>=1.14.3,<2.0
bundletester>=0.6.1,<1.0
python-ceilometerclient>=1.5.0,<2.0
python-cinderclient>=1.4.0,<2.0
python-glanceclient>=1.1.0,<2.0
python-heatclient>=0.8.0,<1.0
python-novaclient>=2.30.1,<3.0
python-openstackclient>=1.7.0,<2.0
python-swiftclient>=2.6.0,<3.0
python-ceilometerclient>=1.5.0
python-cinderclient>=1.4.0
python-glanceclient>=1.1.0
python-heatclient>=0.8.0
python-keystoneclient>=1.7.1
python-neutronclient>=3.1.0
python-novaclient>=2.30.1
python-openstackclient>=1.7.0
python-swiftclient>=2.6.0
pika>=0.10.0,<1.0
distro-info
# END: Amulet OpenStack Charm Helper Requirements
# NOTE: workaround for 14.04 pip/tox
pytz

@ -0,0 +1,9 @@
# Overview

This directory provides Amulet tests to verify basic deployment functionality
from the perspective of this charm, its requirements and its features, as
exercised in a subset of the full OpenStack deployment test bundle topology.

For full details on functional testing of OpenStack charms please refer to
the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
section of the OpenStack Charm Guide.
@ -0,0 +1,237 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Basic glance-simplestreams-sync functional tests.
|
||||
"""
|
||||
|
||||
import time
|
||||
import amulet
|
||||
|
||||
from charmhelpers.contrib.openstack.amulet.deployment import (
|
||||
OpenStackAmuletDeployment
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.amulet.utils import (
|
||||
OpenStackAmuletUtils,
|
||||
DEBUG,
|
||||
# ERROR
|
||||
)
|
||||
|
||||
# Use DEBUG to turn on debug logging
|
||||
u = OpenStackAmuletUtils(DEBUG)
|
||||
|
||||
|
||||
class GlanceBasicDeployment(OpenStackAmuletDeployment):
|
||||
"""Amulet tests on a basic file-backed glance deployment. Verify
|
||||
relations, service status, endpoint service catalog, create and
|
||||
delete new image."""
|
||||
|
||||
SERVICES = ('apache2', 'haproxy', 'glance-api', 'glance-registry')
|
||||
|
||||
def __init__(self, series=None, openstack=None, source=None,
|
||||
stable=False):
|
||||
"""Deploy the entire test environment."""
|
||||
super(GlanceBasicDeployment, self).__init__(series, openstack,
|
||||
source, stable)
|
||||
self._add_services()
|
||||
self._add_relations()
|
||||
self._configure_services()
|
||||
self._deploy()
|
||||
|
||||
u.log.info('Waiting on extended status checks...')
|
||||
|
||||
# NOTE(beisner): This charm lacks support for juju workload status.
|
||||
# That means that this charm will not affirm a ready state for those
|
||||
# waiting for a ready state through tools like juju-wait, leading to
|
||||
# anticipated race conditions and automation blocking conditions.
|
||||
exclude_services = ['glance-simplestreams-sync']
|
||||
|
||||
self._auto_wait_for_status(exclude_services=exclude_services)
|
||||
|
||||
self.d.sentry.wait()
|
||||
self._initialize_tests()
|
||||
|
||||
def _assert_services(self, should_run):
|
||||
u.get_unit_process_ids(
|
||||
{self.glance_sentry: self.SERVICES},
|
||||
expect_success=should_run)
|
||||
|
||||
def _add_services(self):
|
||||
"""Add services
|
||||
|
||||
Add the services that we're testing, where glance is local,
|
||||
and the rest of the service are from lp branches that are
|
||||
compatible with the local charm (e.g. stable or next).
|
||||
"""
|
||||
this_service = {'name': 'glance-simplestreams-sync'}
|
||||
other_services = [
|
||||
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
|
||||
{'name': 'glance'},
|
||||
{'name': 'rabbitmq-server'},
|
||||
{'name': 'keystone'},
|
||||
]
|
||||
super(GlanceBasicDeployment, self)._add_services(
|
||||
this_service,
|
||||
other_services,
|
||||
use_source=['glance-simplestreams-sync'],
|
||||
)
|
||||
|
||||
def _add_relations(self):
|
||||
"""Add relations for the services."""
|
||||
relations = {
|
||||
'glance:identity-service': 'keystone:identity-service',
|
||||
'glance:shared-db': 'percona-cluster:shared-db',
|
||||
'keystone:shared-db': 'percona-cluster:shared-db',
|
||||
'glance:amqp': 'rabbitmq-server:amqp',
|
||||
'glance-simplestreams-sync:identity-service':
|
||||
'keystone:identity-service',
|
||||
'glance-simplestreams-sync:amqp':
|
||||
'rabbitmq-server:amqp',
|
||||
}
|
||||
|
||||
super(GlanceBasicDeployment, self)._add_relations(relations)
|
||||
|
||||
def _configure_services(self):
|
||||
"""Configure all of the services."""
|
||||
gss_config = {
|
||||
# https://bugs.launchpad.net/bugs/1686437
|
||||
'source': 'ppa:simplestreams-dev/trunk',
|
||||
'use_swift': 'False',
|
||||
}
|
||||
glance_config = {}
|
||||
keystone_config = {
|
||||
'admin-password': 'openstack',
|
||||
'admin-token': 'ubuntutesting',
|
||||
}
|
||||
pxc_config = {
|
||||
'dataset-size': '25%',
|
||||
'max-connections': 1000,
|
||||
'root-password': 'ChangeMe123',
|
||||
'sst-password': 'ChangeMe123',
|
||||
}
|
||||
configs = {
|
||||
'glance-simplestreams-sync': gss_config,
|
||||
'glance': glance_config,
|
||||
'keystone': keystone_config,
|
||||
'percona-cluster': pxc_config,
|
||||
}
|
||||
super(GlanceBasicDeployment, self)._configure_services(configs)
|
||||
|
||||
def _initialize_tests(self):
|
||||
"""Perform final initialization before tests get run."""
|
||||
# Access the sentries for inspecting service units
|
||||
self.gss_sentry = self.d.sentry['glance-simplestreams-sync'][0]
|
||||
self.pxc_sentry = self.d.sentry['percona-cluster'][0]
|
||||
self.glance_sentry = self.d.sentry['glance'][0]
|
||||
self.keystone_sentry = self.d.sentry['keystone'][0]
|
||||
self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
|
||||
u.log.debug('openstack release val: {}'.format(
|
||||
self._get_openstack_release()))
|
||||
u.log.debug('openstack release str: {}'.format(
|
||||
self._get_openstack_release_string()))
|
||||
|
||||
# Authenticate admin with keystone
|
||||
self.keystone_session, self.keystone = u.get_default_keystone_session(
|
||||
self.keystone_sentry,
|
||||
openstack_release=self._get_openstack_release())
|
||||
|
||||
# Authenticate admin with glance endpoint
|
||||
self.glance = u.authenticate_glance_admin(self.keystone)
|
||||
|
||||
def test_001_wait_for_image_sync(self):
|
||||
"""Wait for images to be synced. Expect at least one."""
|
||||
|
||||
max_image_wait = 600
|
||||
retry_sleep = 2
|
||||
images = []
|
||||
|
||||
time_start = time.time()
|
||||
while not images:
|
||||
images = [image.name for image in self.glance.images.list()]
|
||||
u.log.debug('Images: {}'.format(images))
|
||||
if images:
|
||||
break
|
||||
|
||||
time_now = time.time()
|
||||
if time_now - time_start >= max_image_wait:
|
||||
raise Exception('Images not synced within '
|
||||
'{}s'.format(time_now - time_start))
|
||||
else:
|
||||
u.log.debug('Waiting {}s'.format(retry_sleep))
|
||||
time.sleep(retry_sleep)
|
||||
retry_sleep = retry_sleep + 4 if retry_sleep < 30 else 30
|
||||
|
||||
def test_050_gss_permissions_regression_check_lp1611987(self):
|
||||
"""Assert the intended file permissions on gss config files
|
||||
https://bugs.launchpad.net/bugs/1611987"""
|
||||
|
||||
perm_check = [
|
||||
{
|
||||
'file_path': '/etc/glance-simplestreams-sync/identity.yaml',
|
||||
'expected_perms': '640',
|
||||
'unit_sentry': self.gss_sentry
|
||||
},
|
||||
{
|
||||
'file_path': '/etc/glance-simplestreams-sync/mirrors.yaml',
|
||||
'expected_perms': '640',
|
||||
'unit_sentry': self.gss_sentry
|
||||
},
|
||||
]
|
||||
|
||||
for _check in perm_check:
|
||||
cmd = 'stat -c %a {}'.format(_check['file_path'])
|
||||
output, _ = u.run_cmd_unit(_check['unit_sentry'], cmd)
|
||||
|
||||
assert output == _check['expected_perms'], \
|
||||
'{} perms not as expected'.format(_check['file_path'])
|
||||
|
||||
u.log.debug('Permissions on {}: {}'.format(
|
||||
_check['file_path'], output))
|
||||
|
||||
def test_102_service_catalog(self):
|
||||
"""Verify that the service catalog endpoint data is valid."""
|
||||
u.log.debug('Checking keystone service catalog...')
|
||||
endpoint_check = {
|
||||
'adminURL': u.valid_url,
|
||||
'id': u.not_null,
|
||||
'region': 'RegionOne',
|
||||
'publicURL': u.valid_url,
|
||||
'internalURL': u.valid_url
|
||||
}
|
||||
expected = {
|
||||
'product-streams': [endpoint_check],
|
||||
'image': [endpoint_check],
|
||||
'identity': [endpoint_check]
|
||||
}
|
||||
actual = self.keystone.service_catalog.get_endpoints()
|
||||
|
||||
ret = u.validate_svc_catalog_endpoint_data(
|
||||
expected,
|
||||
actual,
|
||||
openstack_release=self._get_openstack_release())
|
||||
if ret:
|
||||
amulet.raise_status(amulet.FAIL, msg=ret)
|
||||
|
||||
def test_115_memcache(self):
|
||||
u.validate_memcache(self.glance_sentry,
|
||||
'/etc/glance/glance-api.conf',
|
||||
self._get_openstack_release(),
|
||||
earliest_release=self.trusty_mitaka)
|
||||
u.validate_memcache(self.glance_sentry,
|
||||
'/etc/glance/glance-registry.conf',
|
||||
self._get_openstack_release(),
|
||||
earliest_release=self.trusty_mitaka)
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Bootstrap charm-helpers, installing its dependencies if necessary using
|
||||
# only standard libraries.
|
||||
from __future__ import print_function
|
||||
from __future__ import absolute_import
|
||||
|
||||
import functools
|
||||
import inspect
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
try:
|
||||
import six # flake8: noqa
|
||||
except ImportError:
|
||||
if sys.version_info.major == 2:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
|
||||
else:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
|
||||
import six # flake8: noqa
|
||||
|
||||
try:
|
||||
import yaml # flake8: noqa
|
||||
except ImportError:
|
||||
if sys.version_info.major == 2:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
|
||||
else:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
|
||||
import yaml # flake8: noqa
|
||||
|
||||
|
||||
# Holds a list of mapping of mangled function names that have been deprecated
|
||||
# using the @deprecate decorator below. This is so that the warning is only
|
||||
# printed once for each usage of the function.
|
||||
__deprecated_functions = {}
|
||||
|
||||
|
||||
def deprecate(warning, date=None, log=None):
|
||||
"""Add a deprecation warning the first time the function is used.
|
||||
The date, which is a string in semi-ISO8601 format, indicates the year-month
|
||||
that the function is officially going to be removed.
|
||||
|
||||
usage:
|
||||
|
||||
@deprecate('use core/fetch/add_source() instead', '2017-04')
|
||||
def contributed_add_source_thing(...):
|
||||
...
|
||||
|
||||
And it then prints to the log ONCE that the function is deprecated.
|
||||
The reason for passing the logging function (log) is so that hookenv.log
|
||||
can be used for a charm if needed.
|
||||
|
||||
:param warning: String to indicate where it has moved to.
|
||||
:param date: optional string, in YYYY-MM format to indicate when the
|
||||
function will definitely (probably) be removed.
|
||||
:param log: The log function to call to log. If not, logs to stdout
|
||||
"""
|
||||
def wrap(f):
|
||||
|
||||
@functools.wraps(f)
|
||||
def wrapped_f(*args, **kwargs):
|
||||
try:
|
||||
module = inspect.getmodule(f)
|
||||
file = inspect.getsourcefile(f)
|
||||
lines = inspect.getsourcelines(f)
|
||||
f_name = "{}-{}-{}..{}-{}".format(
|
||||
module.__name__, file, lines[0], lines[-1], f.__name__)
|
||||
except (IOError, TypeError):
|
||||
# assume it was local, so just use the name of the function
|
||||
f_name = f.__name__
|
||||
if f_name not in __deprecated_functions:
|
||||
__deprecated_functions[f_name] = True
|
||||
s = "DEPRECATION WARNING: Function {} is being removed".format(
|
||||
f.__name__)
|
||||
if date:
|
||||
s = "{} on/around {}".format(s, date)
|
||||
if warning:
|
||||
s = "{} : {}".format(s, warning)
|
||||
if log:
|
||||
log(s)
|
||||
else:
|
||||
print(s)
|
||||
return f(*args, **kwargs)
|
||||
return wrapped_f
|
||||
return wrap
|
|
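
A usage sketch (not part of the diff) for the @deprecate decorator defined above; the decorated function and the suggested replacement are made-up examples.

# Illustrative only: the wrapped function behaves as before, but its first
# call logs a deprecation warning (here via hookenv.log); later calls stay quiet.
from charmhelpers import deprecate
from charmhelpers.core.hookenv import log


@deprecate('use charmhelpers.core.host.rsync() instead', date='2018-09', log=log)
def legacy_copy_files(src, dst):
    pass
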
@ -0,0 +1,13 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import amulet
|
||||
import os
|
||||
import six
|
||||
|
||||
|
||||
class AmuletDeployment(object):
|
||||
"""Amulet deployment.
|
||||
|
||||
This class provides generic Amulet deployment and test runner
|
||||
methods.
|
||||
"""
|
||||
|
||||
def __init__(self, series=None):
|
||||
"""Initialize the deployment environment."""
|
||||
self.series = None
|
||||
|
||||
if series:
|
||||
self.series = series
|
||||
self.d = amulet.Deployment(series=self.series)
|
||||
else:
|
||||
self.d = amulet.Deployment()
|
||||
|
||||
def _add_services(self, this_service, other_services):
|
||||
"""Add services.
|
||||
|
||||
Add services to the deployment where this_service is the local charm
|
||||
that we're testing and other_services are the other services that
|
||||
are being used in the local amulet tests.
|
||||
"""
|
||||
if this_service['name'] != os.path.basename(os.getcwd()):
|
||||
s = this_service['name']
|
||||
msg = "The charm's root directory name needs to be {}".format(s)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
if 'units' not in this_service:
|
||||
this_service['units'] = 1
|
||||
|
||||
self.d.add(this_service['name'], units=this_service['units'],
|
||||
constraints=this_service.get('constraints'))
|
||||
|
||||
for svc in other_services:
|
||||
if 'location' in svc:
|
||||
branch_location = svc['location']
|
||||
elif self.series:
|
||||
branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
|
||||
else:
|
||||
branch_location = None
|
||||
|
||||
if 'units' not in svc:
|
||||
svc['units'] = 1
|
||||
|
||||
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
|
||||
constraints=svc.get('constraints'))
|
||||
|
||||
def _add_relations(self, relations):
|
||||
"""Add all of the relations for the services."""
|
||||
for k, v in six.iteritems(relations):
|
||||
self.d.relate(k, v)
|
||||
|
||||
def _configure_services(self, configs):
|
||||
"""Configure all of the services."""
|
||||
for service, config in six.iteritems(configs):
|
||||
self.d.configure(service, config)
|
||||
|
||||
def _deploy(self):
|
||||
"""Deploy environment and wait for all hooks to finish executing."""
|
||||
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
|
||||
try:
|
||||
self.d.setup(timeout=timeout)
|
||||
self.d.sentry.wait(timeout=timeout)
|
||||
except amulet.helpers.TimeoutError:
|
||||
amulet.raise_status(
|
||||
amulet.FAIL,
|
||||
msg="Deployment timed out ({}s)".format(timeout)
|
||||
)
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
def run_tests(self):
|
||||
"""Run all of the methods that are prefixed with 'test_'."""
|
||||
for test in dir(self):
|
||||
if test.startswith('test_'):
|
||||
getattr(self, test)()
|
|
@ -0,0 +1,821 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import amulet
|
||||
import distro_info
|
||||
import six
|
||||
from six.moves import configparser
|
||||
if six.PY3:
|
||||
from urllib import parse as urlparse
|
||||
else:
|
||||
import urlparse
|
||||
|
||||
|
||||
class AmuletUtils(object):
|
||||
"""Amulet utilities.
|
||||
|
||||
This class provides common utility functions that are used by Amulet
|
||||
tests.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level=logging.ERROR):
|
||||
self.log = self.get_logger(level=log_level)
|
||||
self.ubuntu_releases = self.get_ubuntu_releases()
|
||||
|
||||
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
|
||||
"""Get a logger object that will log to stdout."""
|
||||
log = logging
|
||||
logger = log.getLogger(name)
|
||||
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
||||
"%(levelname)s: %(message)s")
|
||||
|
||||
handler = log.StreamHandler(stream=sys.stdout)
|
||||
handler.setLevel(level)
|
||||
handler.setFormatter(fmt)
|
||||
|
||||
logger.addHandler(handler)
|
||||
logger.setLevel(level)
|
||||
|
||||
return logger
|
||||
|
||||
def valid_ip(self, ip):
|
||||
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def valid_url(self, url):
|
||||
p = re.compile(
|
||||
r'^(?:http|ftp)s?://'
|
||||
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
|
||||
r'localhost|'
|
||||
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
|
||||
r'(?::\d+)?'
|
||||
r'(?:/?|[/?]\S+)$',
|
||||
re.IGNORECASE)
|
||||
if p.match(url):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def get_ubuntu_release_from_sentry(self, sentry_unit):
|
||||
"""Get Ubuntu release codename from sentry unit.
|
||||
|
||||
:param sentry_unit: amulet sentry/service unit pointer
|
||||
:returns: list of strings - release codename, failure message
|
||||
"""
|
||||
msg = None
|
||||
cmd = 'lsb_release -cs'
|
||||
release, code = sentry_unit.run(cmd)
|
||||
if code == 0:
|
||||
self.log.debug('{} lsb_release: {}'.format(
|
||||
sentry_unit.info['unit_name'], release))
|
||||
else:
|
||||
msg = ('{} `{}` returned {} '
|
||||
'{}'.format(sentry_unit.info['unit_name'],
|
||||
cmd, release, code))
|
||||
if release not in self.ubuntu_releases:
|
||||
msg = ("Release ({}) not found in Ubuntu releases "
|
||||
"({})".format(release, self.ubuntu_releases))
|
||||
return release, msg
|
||||
|
||||
def validate_services(self, commands):
|
||||
"""Validate that lists of commands succeed on service units. Can be
|
||||
used to verify system services are running on the corresponding
|
||||
service units.
|
||||
|
||||
:param commands: dict with sentry keys and arbitrary command list vals
|
||||
:returns: None if successful, Failure string message otherwise
|
||||
"""
|
||||
self.log.debug('Checking status of system services...')
|
||||
|
||||
# /!\ DEPRECATION WARNING (beisner):
|
||||
# New and existing tests should be rewritten to use
|
||||
# validate_services_by_name() as it is aware of init systems.
|
||||
self.log.warn('DEPRECATION WARNING: use '
|
||||
'validate_services_by_name instead of validate_services '
|
||||
'due to init system differences.')
|
||||
|
||||
for k, v in six.iteritems(commands):
|
||||
for cmd in v:
|
||||
output, code = k.run(cmd)
|
||||
self.log.debug('{} `{}` returned '
|
||||
'{}'.format(k.info['unit_name'],
|
||||
cmd, code))
|
||||
if code != 0:
|
||||
return "command `{}` returned {}".format(cmd, str(code))
|
||||
return None
|
||||
|
||||
def validate_services_by_name(self, sentry_services):
|
||||
"""Validate system service status by service name, automatically
|
||||
detecting init system based on Ubuntu release codename.
|
||||
|
||||
:param sentry_services: dict with sentry keys and svc list values
|
||||
:returns: None if successful, Failure string message otherwise
|
||||
"""
|
||||
self.log.debug('Checking status of system services...')
|
||||
|
||||
# Point at which systemd became a thing
|
||||
systemd_switch = self.ubuntu_releases.index('vivid')
|
||||
|
||||
for sentry_unit, services_list in six.iteritems(sentry_services):
|
||||
# Get lsb_release codename from unit
|
||||
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
|
||||
if ret:
|
||||
return ret
|
||||
|
||||
for service_name in services_list:
|
||||
if (self.ubuntu_releases.index(release) >= systemd_switch or
|
||||
service_name in ['rabbitmq-server', 'apache2',
|
||||
'memcached']):
|
||||
# init is systemd (or regular sysv)
|
||||
cmd = 'sudo service {} status'.format(service_name)
|
||||
output, code = sentry_unit.run(cmd)
|
||||
service_running = code == 0
|
||||
elif self.ubuntu_releases.index(release) < systemd_switch:
|
||||
# init is upstart
|
||||
cmd = 'sudo status {}'.format(service_name)
|
||||
output, code = sentry_unit.run(cmd)
|
||||
service_running = code == 0 and "start/running" in output
|
||||
|
||||
self.log.debug('{} `{}` returned '
|
||||
'{}'.format(sentry_unit.info['unit_name'],
|
||||
cmd, code))
|
||||
if not service_running:
|
||||
return u"command `{}` returned {} {}".format(
|
||||
cmd, output, str(code))
|
||||
return None
|
||||
|
||||
def _get_config(self, unit, filename):
|
||||
"""Get a ConfigParser object for parsing a unit's config file."""
|
||||
file_contents = unit.file_contents(filename)
|
||||
|
||||
# NOTE(beisner): by default, ConfigParser does not handle options
|
||||
# with no value, such as the flags used in the mysql my.cnf file.
|
||||
# https://bugs.python.org/issue7005
|
||||
config = configparser.ConfigParser(allow_no_value=True)
|
||||
config.readfp(io.StringIO(file_contents))
|
||||
return config
|
||||
|
||||
def validate_config_data(self, sentry_unit, config_file, section,
|
||||
expected):
|
||||
"""Validate config file data.
|
||||
|
||||
Verify that the specified section of the config file contains
|
||||
the expected option key:value pairs.
|
||||
|
||||
Compare expected dictionary data vs actual dictionary data.
|
||||
The values in the 'expected' dictionary can be strings, bools, ints,
|
||||
longs, or can be a function that evaluates a variable and returns a
|
||||
bool.
|
||||
"""
|
||||
self.log.debug('Validating config file data ({} in {} on {})'
|
||||
'...'.format(section, config_file,
|
||||
sentry_unit.info['unit_name']))
|
||||
config = self._get_config(sentry_unit, config_file)
|
||||
|
||||
if section != 'DEFAULT' and not config.has_section(section):
|
||||
return "section [{}] does not exist".format(section)
|
||||
|
||||
for k in expected.keys():
|
||||
if not config.has_option(section, k):
|
||||
return "section [{}] is missing option {}".format(section, k)
|
||||
|
||||
actual = config.get(section, k)
|
||||
v = expected[k]
|
||||
if (isinstance(v, six.string_types) or
|
||||
isinstance(v, bool) or
|
||||
isinstance(v, six.integer_types)):
|
||||
# handle explicit values
|
||||
if actual != v:
|
||||
return "section [{}] {}:{} != expected {}:{}".format(
|
||||
section, k, actual, k, expected[k])
|
||||
# handle function pointers, such as not_null or valid_ip
|
||||
elif not v(actual):
|
||||
return "section [{}] {}:{} != expected {}:{}".format(
|
||||
section, k, actual, k, expected[k])
|
||||
return None
|
||||
|
||||
def _validate_dict_data(self, expected, actual):
|
||||
"""Validate dictionary data.
|
||||
|
||||
Compare expected dictionary data vs actual dictionary data.
|
||||
The values in the 'expected' dictionary can be strings, bools, ints,
|
||||
longs, or can be a function that evaluates a variable and returns a
|
||||
bool.
|
||||
"""
|
||||
self.log.debug('actual: {}'.format(repr(actual)))
|
||||
self.log.debug('expected: {}'.format(repr(expected)))
|
||||
|
||||
for k, v in six.iteritems(expected):
|
||||
if k in actual:
|
||||
if (isinstance(v, six.string_types) or
|
||||
isinstance(v, bool) or
|
||||
isinstance(v, six.integer_types)):
|
||||
# handle explicit values
|
||||
if v != actual[k]:
|
||||
return "{}:{}".format(k, actual[k])
|
||||
# handle function pointers, such as not_null or valid_ip
|
||||
elif not v(actual[k]):
|
||||
return "{}:{}".format(k, actual[k])
|
||||
else:
|
||||
return "key '{}' does not exist".format(k)
|
||||
return None
|
||||
|
||||
def validate_relation_data(self, sentry_unit, relation, expected):
|
||||
"""Validate actual relation data based on expected relation data."""
|
||||
actual = sentry_unit.relation(relation[0], relation[1])
|
||||
return self._validate_dict_data(expected, actual)
|
||||
|
||||
def _validate_list_data(self, expected, actual):
|
||||
"""Compare expected list vs actual list data."""
|
||||
for e in expected:
|
||||
if e not in actual:
|
||||
return "expected item {} not found in actual list".format(e)
|
||||
return None
|
||||
|
||||
def not_null(self, string):
|
||||
if string is not None:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _get_file_mtime(self, sentry_unit, filename):
|
||||
"""Get last modification time of file."""
|
||||
return sentry_unit.file_stat(filename)['mtime']
|
||||
|
||||
def _get_dir_mtime(self, sentry_unit, directory):
|
||||
"""Get last modification time of directory."""
|
||||
return sentry_unit.directory_stat(directory)['mtime']
|
||||
|
||||
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
|
||||
"""Get start time of a process based on the last modification time
|
||||
of the /proc/pid directory.
|
||||
|
||||
:sentry_unit: The sentry unit to check for the service on
|
||||
:service: service name to look for in process table
|
||||
:pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||
:returns: epoch time of service process start
|
||||
:param commands: list of bash commands
|
||||
:param sentry_units: list of sentry unit pointers
|
||||
:returns: None if successful; Failure message otherwise
|
||||
"""
|
||||
if pgrep_full is not None:
|
||||
# /!\ DEPRECATION WARNING (beisner):
|
||||
# No longer implemented, as pidof is now used instead of pgrep.
|
||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||
self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
|
||||
'longer implemented re: lp 1474030.')
|
||||
|
||||
pid_list = self.get_process_id_list(sentry_unit, service)
|
||||
pid = pid_list[0]
|
||||
proc_dir = '/proc/{}'.format(pid)
|
||||
self.log.debug('Pid for {} on {}: {}'.format(
|
||||
service, sentry_unit.info['unit_name'], pid))
|
||||
|
||||
return self._get_dir_mtime(sentry_unit, proc_dir)
|
||||
|
||||
def service_restarted(self, sentry_unit, service, filename,
|
||||
pgrep_full=None, sleep_time=20):
|
||||
"""Check if service was restarted.
|
||||
|
||||
Compare a service's start time vs a file's last modification time
|
||||
(such as a config file for that service) to determine if the service
|
||||
has been restarted.
|
||||
"""
|
||||
# /!\ DEPRECATION WARNING (beisner):
|
||||
# This method is prone to races in that no before-time is known.
|
||||
# Use validate_service_config_changed instead.
|
||||
|
||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||
# deprecation WARNS. lp1474030
|
||||
self.log.warn('DEPRECATION WARNING: use '
|
||||
'validate_service_config_changed instead of '
|
||||
'service_restarted due to known races.')
|
||||
|
||||
time.sleep(sleep_time)
|
||||
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
|
||||
self._get_file_mtime(sentry_unit, filename)):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def service_restarted_since(self, sentry_unit, mtime, service,
|
||||
pgrep_full=None, sleep_time=20,
|
||||
retry_count=30, retry_sleep_time=10):
|
||||
"""Check if service was been started after a given time.
|
||||
|
||||
Args:
|
||||
sentry_unit (sentry): The sentry unit to check for the service on
|
||||
mtime (float): The epoch time to check against
|
||||
service (string): service name to look for in process table
|
||||
pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||
sleep_time (int): Initial sleep time (s) before looking for file
|
||||
retry_sleep_time (int): Time (s) to sleep between retries
|
||||
retry_count (int): If file is not found, how many times to retry
|
||||
|
||||
Returns:
|
||||
bool: True if service found and its start time is newer than mtime,
|
||||
False if service is older than mtime or if service was
|
||||
not found.
|
||||
"""
|
||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||
# deprecation WARNS. lp1474030
|
||||
|
||||
unit_name = sentry_unit.info['unit_name']
|
||||
self.log.debug('Checking that %s service restarted since %s on '
|
||||
'%s' % (service, mtime, unit_name))
|
||||
time.sleep(sleep_time)
|
||||
proc_start_time = None
|
||||
tries = 0
|
||||
while tries <= retry_count and not proc_start_time:
|
||||
try:
|
||||
proc_start_time = self._get_proc_start_time(sentry_unit,
|
||||
service,
|
||||
pgrep_full)
|
||||
self.log.debug('Attempt {} to get {} proc start time on {} '
|
||||
'OK'.format(tries, service, unit_name))
|
||||
except IOError as e:
|
||||
# NOTE(beisner) - race avoidance, proc may not exist yet.
|
||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||
self.log.debug('Attempt {} to get {} proc start time on {} '
|
||||
'failed\n{}'.format(tries, service,
|
||||
unit_name, e))
|
||||
time.sleep(retry_sleep_time)
|
||||
tries += 1
|
||||
|
||||
if not proc_start_time:
|
||||
self.log.warn('No proc start time found, assuming service did '
|
||||
'not start')
|
||||
return False
|
||||
if proc_start_time >= mtime:
|
||||
self.log.debug('Proc start time is newer than provided mtime'
|
||||
'(%s >= %s) on %s (OK)' % (proc_start_time,
|
||||
mtime, unit_name))
|
||||
return True
|
||||
else:
|
||||
self.log.warn('Proc start time (%s) is older than provided mtime '
|
||||
'(%s) on %s, service did not '
|
||||
'restart' % (proc_start_time, mtime, unit_name))
|
||||
return False
|
||||
|
||||
def config_updated_since(self, sentry_unit, filename, mtime,
|
||||
sleep_time=20, retry_count=30,
|
||||
retry_sleep_time=10):
|
||||
"""Check if file was modified after a given time.
|
||||
|
||||
Args:
|
||||
sentry_unit (sentry): The sentry unit to check the file mtime on
|
||||
filename (string): The file to check mtime of
|
||||
mtime (float): The epoch time to check against
|
||||
sleep_time (int): Initial sleep time (s) before looking for file
|
||||
retry_sleep_time (int): Time (s) to sleep between retries
|
||||
retry_count (int): If file is not found, how many times to retry
|
||||
|
||||
Returns:
|
||||
bool: True if file was modified more recently than mtime, False if
|
||||
file was modified before mtime, or if file not found.
|
||||
"""
|
||||
unit_name = sentry_unit.info['unit_name']
|
||||
self.log.debug('Checking that %s updated since %s on '
|
||||
'%s' % (filename, mtime, unit_name))
|
||||
time.sleep(sleep_time)
|
||||
file_mtime = None
|
||||
tries = 0
|
||||
while tries <= retry_count and not file_mtime:
|
||||
try:
|
||||
file_mtime = self._get_file_mtime(sentry_unit, filename)
|
||||
self.log.debug('Attempt {} to get {} file mtime on {} '
|
||||
'OK'.format(tries, filename, unit_name))
|
||||
except IOError as e:
|
||||
# NOTE(beisner) - race avoidance, file may not exist yet.
|
||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||
self.log.debug('Attempt {} to get {} file mtime on {} '
|
||||
'failed\n{}'.format(tries, filename,
|
||||
unit_name, e))
|
||||
time.sleep(retry_sleep_time)
|
||||
tries += 1
|
||||
|
||||
if not file_mtime:
|
||||
self.log.warn('Could not determine file mtime, assuming '
|
||||
'file does not exist')
|
||||
return False
|
||||
|
||||
if file_mtime >= mtime:
|
||||
self.log.debug('File mtime is newer than provided mtime '
|
||||
'(%s >= %s) on %s (OK)' % (file_mtime,
|
||||
mtime, unit_name))
|
||||
return True
|
||||
else:
|
||||
self.log.warn('File mtime is older than provided mtime'
|
||||
'(%s < on %s) on %s' % (file_mtime,
|
||||
mtime, unit_name))
|
||||
return False
|
||||
|
||||
def validate_service_config_changed(self, sentry_unit, mtime, service,
|
||||
filename, pgrep_full=None,
|
||||
sleep_time=20, retry_count=30,
|
||||
retry_sleep_time=10):
|
||||
"""Check service and file were updated after mtime
|
||||
|
||||
Args:
|
||||
sentry_unit (sentry): The sentry unit to check for the service on
|
||||
mtime (float): The epoch time to check against
|
||||
service (string): service name to look for in process table
|
||||
filename (string): The file to check mtime of
|
||||
pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||
sleep_time (int): Initial sleep in seconds to pass to test helpers
|
||||
retry_count (int): If service is not found, how many times to retry
|
||||
retry_sleep_time (int): Time in seconds to wait between retries
|
||||
|
||||
Typical Usage:
|
||||
u = OpenStackAmuletUtils(ERROR)
|
||||
...
|
||||
mtime = u.get_sentry_time(self.cinder_sentry)
|
||||
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
|
||||
if not u.validate_service_config_changed(self.cinder_sentry,
|
||||
mtime,
|
||||
'cinder-api',
|
||||
'/etc/cinder/cinder.conf')
|
||||
amulet.raise_status(amulet.FAIL, msg='update failed')
|
||||
Returns:
|
||||
bool: True if both service and file where updated/restarted after
|
||||
mtime, False if service is older than mtime or if service was
|
||||
not found or if filename was modified before mtime.
|
||||
"""
|
||||
|
||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||
# deprecation WARNS. lp1474030
|
||||
|
||||
service_restart = self.service_restarted_since(
|
||||
sentry_unit, mtime,
|
||||
service,
|
||||
pgrep_full=pgrep_full,
|
||||
sleep_time=sleep_time,
|
||||
retry_count=retry_count,
|
||||
retry_sleep_time=retry_sleep_time)
|
||||
|
||||
config_update = self.config_updated_since(
|
||||
sentry_unit,
|
||||
filename,
|
||||
mtime,
|
||||
sleep_time=sleep_time,
|
||||
retry_count=retry_count,
|
||||
retry_sleep_time=retry_sleep_time)
|
||||
|
||||
return service_restart and config_update
|
||||
|
||||
def get_sentry_time(self, sentry_unit):
|
||||
"""Return current epoch time on a sentry"""
|
||||
cmd = "date +'%s'"
|
||||
return float(sentry_unit.run(cmd)[0])
|
||||
|
||||
def relation_error(self, name, data):
|
||||
return 'unexpected relation data in {} - {}'.format(name, data)
|
||||
|
||||
def endpoint_error(self, name, data):
|
||||
return 'unexpected endpoint data in {} - {}'.format(name, data)
|
||||
|
||||
def get_ubuntu_releases(self):
|
||||
"""Return a list of all Ubuntu releases in order of release."""
|
||||
_d = distro_info.UbuntuDistroInfo()
|
||||
_release_list = _d.all
|
||||
return _release_list
|
||||
|
||||
def file_to_url(self, file_rel_path):
|
||||
"""Convert a relative file path to a file URL."""
|
||||
_abs_path = os.path.abspath(file_rel_path)
|
||||
return urlparse.urlparse(_abs_path, scheme='file').geturl()
|
||||
|
||||
def check_commands_on_units(self, commands, sentry_units):
|
||||
"""Check that all commands in a list exit zero on all
|
||||
sentry units in a list.
|
||||
|
||||
:param commands: list of bash commands
|
||||
:param sentry_units: list of sentry unit pointers
|
||||
:returns: None if successful; Failure message otherwise
|
||||
"""
|
||||
self.log.debug('Checking exit codes for {} commands on {} '
|
||||
'sentry units...'.format(len(commands),
|
||||
len(sentry_units)))
|
||||
for sentry_unit in sentry_units:
|
||||
for cmd in commands:
|
||||
output, code = sentry_unit.run(cmd)
|
||||
if code == 0:
|
||||
self.log.debug('{} `{}` returned {} '
|
||||
'(OK)'.format(sentry_unit.info['unit_name'],
|
||||
cmd, code))
|
||||
else:
|
||||
return ('{} `{}` returned {} '
|
||||
'{}'.format(sentry_unit.info['unit_name'],
|
||||
cmd, code, output))
|
||||
return None
|
||||
|
||||
def get_process_id_list(self, sentry_unit, process_name,
|
||||
expect_success=True):
|
||||
"""Get a list of process ID(s) from a single sentry juju unit
|
||||
for a single process name.
|
||||
|
||||
:param sentry_unit: Amulet sentry instance (juju unit)
|
||||
:param process_name: Process name
|
||||
:param expect_success: If False, expect the PID to be missing,
|
||||
raise if it is present.
|
||||
:returns: List of process IDs
|
||||
"""
|
||||
cmd = 'pidof -x "{}"'.format(process_name)
|
||||
if not expect_success:
|
||||
cmd += " || exit 0 && exit 1"
|
||||
output, code = sentry_unit.run(cmd)
|
||||
if code != 0:
|
||||
msg = ('{} `{}` returned {} '
|
||||
'{}'.format(sentry_unit.info['unit_name'],
|
||||
cmd, code, output))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
return str(output).split()
|
||||
|
||||
def get_unit_process_ids(self, unit_processes, expect_success=True):
|
||||
"""Construct a dict containing unit sentries, process names, and
|
||||
process IDs.
|
||||
|
||||
:param unit_processes: A dictionary of Amulet sentry instance
|
||||
to list of process names.
|
||||
:param expect_success: if False expect the processes to not be
|
||||
running, raise if they are.
|
||||
:returns: Dictionary of Amulet sentry instance to dictionary
|
||||
of process names to PIDs.
|
||||
"""
|
||||
pid_dict = {}
|
||||
for sentry_unit, process_list in six.iteritems(unit_processes):
|
||||
pid_dict[sentry_unit] = {}
|
||||
for process in process_list:
|
||||
pids = self.get_process_id_list(
|
||||
sentry_unit, process, expect_success=expect_success)
|
||||
pid_dict[sentry_unit].update({process: pids})
|
||||
return pid_dict
|
||||
|
||||
def validate_unit_process_ids(self, expected, actual):
|
||||
"""Validate process id quantities for services on units."""
|
||||
self.log.debug('Checking units for running processes...')
|
||||
self.log.debug('Expected PIDs: {}'.format(expected))
|
||||
self.log.debug('Actual PIDs: {}'.format(actual))
|
||||
|
||||
if len(actual) != len(expected):
|
||||
return ('Unit count mismatch. expected, actual: {}, '
|
||||
'{} '.format(len(expected), len(actual)))
|
||||
|
||||
for (e_sentry, e_proc_names) in six.iteritems(expected):
|
||||
e_sentry_name = e_sentry.info['unit_name']
|
||||
if e_sentry in actual.keys():
|
||||
a_proc_names = actual[e_sentry]
|
||||
else:
|
||||
return ('Expected sentry ({}) not found in actual dict data.'
|
||||
'{}'.format(e_sentry_name, e_sentry))
|
||||
|
||||
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
|
||||
return ('Process name count mismatch. expected, actual: {}, '
|
||||
'{}'.format(len(expected), len(actual)))
|
||||
|
||||
for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
|
||||
zip(e_proc_names.items(), a_proc_names.items()):
|
||||
if e_proc_name != a_proc_name:
|
||||
return ('Process name mismatch. expected, actual: {}, '
|
||||
'{}'.format(e_proc_name, a_proc_name))
|
||||
|
||||
a_pids_length = len(a_pids)
|
||||
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
|
||||
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
||||
e_pids, a_pids_length,
|
||||
a_pids))
|
||||
|
||||
# If expected is a list, ensure at least one PID quantity match
|
||||
if isinstance(e_pids, list) and \
|
||||
a_pids_length not in e_pids:
|
||||
return fail_msg
|
||||
# If expected is not bool and not list,
|
||||
# ensure PID quantities match
|
||||
elif not isinstance(e_pids, bool) and \
|
||||
not isinstance(e_pids, list) and \
|
||||
a_pids_length != e_pids:
|
||||
return fail_msg
|
||||
# If expected is bool True, ensure 1 or more PIDs exist
|
||||
elif isinstance(e_pids, bool) and \
|
||||
e_pids is True and a_pids_length < 1:
|
||||
return fail_msg
|
||||
# If expected is bool False, ensure 0 PIDs exist
|
||||
elif isinstance(e_pids, bool) and \
|
||||
e_pids is False and a_pids_length != 0:
|
||||
return fail_msg
|
||||
else:
|
||||
self.log.debug('PID check OK: {} {} {}: '
|
||||
'{}'.format(e_sentry_name, e_proc_name,
|
||||
e_pids, a_pids))
|
||||
return None
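# Illustrative usage sketch: combining the two PID helpers above in a test.
# The sentry argument and the process names/counts are assumptions for the
# example only.
def example_validate_processes(self, keystone_sentry):
    expected = {keystone_sentry: {'keystone': True,       # one or more PIDs
                                  'apache2': [2, 4, 6]}}  # any of these counts
    actual = self.get_unit_process_ids(
        {unit: list(procs.keys()) for unit, procs in expected.items()})
    return self.validate_unit_process_ids(expected, actual)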
|
||||
|
||||
def validate_list_of_identical_dicts(self, list_of_dicts):
|
||||
"""Check that all dicts within a list are identical."""
|
||||
hashes = []
|
||||
for _dict in list_of_dicts:
|
||||
hashes.append(hash(frozenset(_dict.items())))
|
||||
|
||||
self.log.debug('Hashes: {}'.format(hashes))
|
||||
if len(set(hashes)) == 1:
|
||||
self.log.debug('Dicts within list are identical')
|
||||
else:
|
||||
return 'Dicts within list are not identical'
|
||||
|
||||
return None
|
||||
|
||||
def validate_sectionless_conf(self, file_contents, expected):
|
||||
"""A crude conf parser. Useful to inspect configuration files which
|
||||
do not have section headers (as would be necessary in order to use
|
||||
the configparser), such as openstack-dashboard or rabbitmq confs."""
|
||||
for line in file_contents.split('\n'):
|
||||
if '=' in line:
|
||||
args = line.split('=')
|
||||
if len(args) <= 1:
|
||||
continue
|
||||
key = args[0].strip()
|
||||
value = args[1].strip()
|
||||
if key in expected.keys():
|
||||
if expected[key] != value:
|
||||
msg = ('Config mismatch. Expected, actual: {}, '
|
||||
'{}'.format(expected[key], value))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
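# Illustrative usage sketch: checking a section-less config file fetched from
# a unit. The file path and the expected key/value are assumptions for the
# example only.
def example_check_rabbitmq_env(self, sentry_unit):
    contents = sentry_unit.file_contents('/etc/rabbitmq/rabbitmq-env.conf')
    self.validate_sectionless_conf(contents, {'NODENAME': 'rabbit@juju-0'})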
|
||||
|
||||
def get_unit_hostnames(self, units):
|
||||
"""Return a dict of juju unit names to hostnames."""
|
||||
host_names = {}
|
||||
for unit in units:
|
||||
host_names[unit.info['unit_name']] = \
|
||||
str(unit.file_contents('/etc/hostname').strip())
|
||||
self.log.debug('Unit host names: {}'.format(host_names))
|
||||
return host_names
|
||||
|
||||
def run_cmd_unit(self, sentry_unit, cmd):
|
||||
"""Run a command on a unit, return the output and exit code."""
|
||||
output, code = sentry_unit.run(cmd)
|
||||
if code == 0:
|
||||
self.log.debug('{} `{}` command returned {} '
|
||||
'(OK)'.format(sentry_unit.info['unit_name'],
|
||||
cmd, code))
|
||||
else:
|
||||
msg = ('{} `{}` command returned {} '
|
||||
'{}'.format(sentry_unit.info['unit_name'],
|
||||
cmd, code, output))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
return str(output), code
|
||||
|
||||
def file_exists_on_unit(self, sentry_unit, file_name):
|
||||
"""Check if a file exists on a unit."""
|
||||
try:
|
||||
sentry_unit.file_stat(file_name)
|
||||
return True
|
||||
except IOError:
|
||||
return False
|
||||
except Exception as e:
|
||||
msg = 'Error checking file {}: {}'.format(file_name, e)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
def file_contents_safe(self, sentry_unit, file_name,
|
||||
max_wait=60, fatal=False):
|
||||
"""Get file contents from a sentry unit. Wrap amulet file_contents
|
||||
with retry logic to address races where a file checks as existing,
|
||||
but no longer exists by the time file_contents is called.
|
||||
Return None if file not found. Optionally raise if fatal is True."""
|
||||
unit_name = sentry_unit.info['unit_name']
|
||||
file_contents = False
|
||||
tries = 0
|
||||
while not file_contents and tries < (max_wait / 4):
|
||||
try:
|
||||
file_contents = sentry_unit.file_contents(file_name)
|
||||
except IOError:
|
||||
self.log.debug('Attempt {} to open file {} from {} '
|
||||
'failed'.format(tries, file_name,
|
||||
unit_name))
|
||||
time.sleep(4)
|
||||
tries += 1
|
||||
|
||||
if file_contents:
|
||||
return file_contents
|
||||
elif not fatal:
|
||||
return None
|
||||
elif fatal:
|
||||
msg = 'Failed to get file contents from unit.'
|
||||
amulet.raise_status(amulet.FAIL, msg)
|
||||
|
||||
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
|
||||
"""Open a TCP socket to check for a listening sevice on a host.
|
||||
|
||||
:param host: host name or IP address, default to localhost
|
||||
:param port: TCP port number, default to 22
|
||||
:param timeout: Connect timeout, default to 15 seconds
|
||||
:returns: True if successful, False if connect failed
|
||||
"""
|
||||
|
||||
# Resolve host name if possible
|
||||
try:
|
||||
connect_host = socket.gethostbyname(host)
|
||||
host_human = "{} ({})".format(connect_host, host)
|
||||
except socket.error as e:
|
||||
self.log.warn('Unable to resolve address: '
|
||||
'{} ({}) Trying anyway!'.format(host, e))
|
||||
connect_host = host
|
||||
host_human = connect_host
|
||||
|
||||
# Attempt socket connection
|
||||
try:
|
||||
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
knock.settimeout(timeout)
|
||||
knock.connect((connect_host, port))
|
||||
knock.close()
|
||||
self.log.debug('Socket connect OK for host '
|
||||
'{} on port {}.'.format(host_human, port))
|
||||
return True
|
||||
except socket.error as e:
|
||||
self.log.debug('Socket connect FAIL for'
|
||||
' {} port {} ({})'.format(host_human, port, e))
|
||||
return False
|
||||
|
||||
def port_knock_units(self, sentry_units, port=22,
|
||||
timeout=15, expect_success=True):
|
||||
"""Open a TCP socket to check for a listening sevice on each
|
||||
listed juju unit.
|
||||
|
||||
:param sentry_units: list of sentry unit pointers
|
||||
:param port: TCP port number, default to 22
|
||||
:param timeout: Connect timeout, default to 15 seconds
|
||||
:expect_success: True by default, set False to invert logic
|
||||
:returns: None if successful, Failure message otherwise
|
||||
"""
|
||||
for unit in sentry_units:
|
||||
host = unit.info['public-address']
|
||||
connected = self.port_knock_tcp(host, port, timeout)
|
||||
if not connected and expect_success:
|
||||
return 'Socket connect failed.'
|
||||
elif connected and not expect_success:
|
||||
return 'Socket connected unexpectedly.'
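# Illustrative usage sketch: confirming SSH is listening on every unit while
# an assumed closed port is not. The port numbers are assumptions for the
# example only.
def example_check_listeners(self, sentry_units):
    ret = (self.port_knock_units(sentry_units, port=22) or
           self.port_knock_units(sentry_units, port=8776,
                                 expect_success=False))
    if ret:
        raise AssertionError(ret)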
|
||||
|
||||
def get_uuid_epoch_stamp(self):
|
||||
"""Returns a stamp string based on uuid4 and epoch time. Useful in
|
||||
generating test messages which need to be unique-ish."""
|
||||
return '[{}-{}]'.format(uuid.uuid4(), time.time())
|
||||
|
||||
# amulet juju action helpers:
|
||||
def run_action(self, unit_sentry, action,
|
||||
_check_output=subprocess.check_output,
|
||||
params=None):
|
||||
"""Translate to amulet's built in run_action(). Deprecated.
|
||||
|
||||
Run the named action on a given unit sentry.
|
||||
|
||||
params a dict of parameters to use
|
||||
_check_output parameter is no longer used
|
||||
|
||||
@return action_id.
|
||||
"""
|
||||
self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
|
||||
'deprecated for amulet.run_action')
|
||||
return unit_sentry.run_action(action, action_args=params)
|
||||
|
||||
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
|
||||
"""Wait for a given action, returning if it completed or not.
|
||||
|
||||
action_id a string action uuid
|
||||
_check_output parameter is no longer used
|
||||
"""
|
||||
data = amulet.actions.get_action_output(action_id, full_output=True)
|
||||
return data.get(u"status") == "completed"
|
||||
|
||||
def status_get(self, unit):
|
||||
"""Return the current service status of this unit."""
|
||||
raw_status, return_code = unit.run(
|
||||
"status-get --format=json --include-data")
|
||||
if return_code != 0:
|
||||
return ("unknown", "")
|
||||
status = json.loads(raw_status)
|
||||
return (status["status"], status["message"])
|
|
@@ -0,0 +1,13 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
|
@@ -0,0 +1,13 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
|
@@ -0,0 +1,354 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import six
|
||||
from collections import OrderedDict
|
||||
from charmhelpers.contrib.amulet.deployment import (
|
||||
AmuletDeployment
|
||||
)
|
||||
from charmhelpers.contrib.openstack.amulet.utils import (
|
||||
OPENSTACK_RELEASES_PAIRS
|
||||
)
|
||||
|
||||
DEBUG = logging.DEBUG
|
||||
ERROR = logging.ERROR
|
||||
|
||||
|
||||
class OpenStackAmuletDeployment(AmuletDeployment):
|
||||
"""OpenStack amulet deployment.
|
||||
|
||||
This class inherits from AmuletDeployment and has additional support
|
||||
that is specifically for use by OpenStack charms.
|
||||
"""
|
||||
|
||||
def __init__(self, series=None, openstack=None, source=None,
|
||||
stable=True, log_level=DEBUG):
|
||||
"""Initialize the deployment environment."""
|
||||
super(OpenStackAmuletDeployment, self).__init__(series)
|
||||
self.log = self.get_logger(level=log_level)
|
||||
self.log.info('OpenStackAmuletDeployment: init')
|
||||
self.openstack = openstack
|
||||
self.source = source
|
||||
self.stable = stable
|
||||
|
||||
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
|
||||
"""Get a logger object that will log to stdout."""
|
||||
log = logging
|
||||
logger = log.getLogger(name)
|
||||
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
||||
"%(levelname)s: %(message)s")
|
||||
|
||||
handler = log.StreamHandler(stream=sys.stdout)
|
||||
handler.setLevel(level)
|
||||
handler.setFormatter(fmt)
|
||||
|
||||
logger.addHandler(handler)
|
||||
logger.setLevel(level)
|
||||
|
||||
return logger
|
||||
|
||||
def _determine_branch_locations(self, other_services):
|
||||
"""Determine the branch locations for the other services.
|
||||
|
||||
Determine if the local branch being tested is derived from its
|
||||
stable or next (dev) branch, and based on this, use the corresponding
|
||||
stable or next branches for the other_services."""
|
||||
|
||||
self.log.info('OpenStackAmuletDeployment: determine branch locations')
|
||||
|
||||
# Charms outside the ~openstack-charmers
|
||||
base_charms = {
|
||||
'mysql': ['trusty'],
|
||||
'mongodb': ['trusty'],
|
||||
'nrpe': ['trusty', 'xenial'],
|
||||
}
|
||||
|
||||
for svc in other_services:
|
||||
# If a location has been explicitly set, use it
|
||||
if svc.get('location'):
|
||||
continue
|
||||
if svc['name'] in base_charms:
|
||||
# NOTE: not all charms have support for all series we
|
||||
# want/need to test against, so fix to most recent
|
||||
# that each base charm supports
|
||||
target_series = self.series
|
||||
if self.series not in base_charms[svc['name']]:
|
||||
target_series = base_charms[svc['name']][-1]
|
||||
svc['location'] = 'cs:{}/{}'.format(target_series,
|
||||
svc['name'])
|
||||
elif self.stable:
|
||||
svc['location'] = 'cs:{}/{}'.format(self.series,
|
||||
svc['name'])
|
||||
else:
|
||||
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
|
||||
self.series,
|
||||
svc['name']
|
||||
)
|
||||
|
||||
return other_services
|
||||
|
||||
def _add_services(self, this_service, other_services, use_source=None,
|
||||
no_origin=None):
|
||||
"""Add services to the deployment and optionally set
|
||||
openstack-origin/source.
|
||||
|
||||
:param this_service dict: Service dictionary describing the service
|
||||
whose amulet tests are being run
|
||||
:param other_services dict: List of service dictionaries describing
|
||||
the services needed to support the target
|
||||
service
|
||||
:param use_source list: List of services which use the 'source' config
|
||||
option rather than 'openstack-origin'
|
||||
:param no_origin list: List of services which do not support setting
|
||||
the Cloud Archive.
|
||||
Service Dict:
|
||||
{
|
||||
'name': str charm-name,
|
||||
'units': int number of units,
|
||||
'constraints': dict of juju constraints,
|
||||
'location': str location of charm,
|
||||
}
|
||||
eg
|
||||
this_service = {
|
||||
'name': 'openvswitch-odl',
|
||||
'constraints': {'mem': '8G'},
|
||||
}
|
||||
other_services = [
|
||||
{
|
||||
'name': 'nova-compute',
|
||||
'units': 2,
|
||||
'constraints': {'mem': '4G'},
|
||||
'location': cs:~bob/xenial/nova-compute
|
||||
},
|
||||
{
|
||||
'name': 'mysql',
|
||||
'constraints': {'mem': '2G'},
|
||||
},
|
||||
{'neutron-api-odl'}]
|
||||
use_source = ['mysql']
|
||||
no_origin = ['neutron-api-odl']
|
||||
"""
|
||||
self.log.info('OpenStackAmuletDeployment: adding services')
|
||||
|
||||
other_services = self._determine_branch_locations(other_services)
|
||||
|
||||
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
||||
other_services)
|
||||
|
||||
services = other_services
|
||||
services.append(this_service)
|
||||
|
||||
use_source = use_source or []
|
||||
no_origin = no_origin or []
|
||||
|
||||
# Charms which should use the source config option
|
||||
use_source = list(set(
|
||||
use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||
'ceph-osd', 'ceph-radosgw', 'ceph-mon',
|
||||
'ceph-proxy', 'percona-cluster', 'lxd']))
|
||||
|
||||
# Charms which can not use openstack-origin, ie. many subordinates
|
||||
no_origin = list(set(
|
||||
no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
|
||||
'nrpe', 'openvswitch-odl', 'neutron-api-odl',
|
||||
'odl-controller', 'cinder-backup', 'nexentaedge-data',
|
||||
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
|
||||
'cinder-nexentaedge', 'nexentaedge-mgmt']))
|
||||
|
||||
if self.openstack:
|
||||
for svc in services:
|
||||
if svc['name'] not in use_source + no_origin:
|
||||
config = {'openstack-origin': self.openstack}
|
||||
self.d.configure(svc['name'], config)
|
||||
|
||||
if self.source:
|
||||
for svc in services:
|
||||
if svc['name'] in use_source and svc['name'] not in no_origin:
|
||||
config = {'source': self.source}
|
||||
self.d.configure(svc['name'], config)
|
||||
|
||||
def _configure_services(self, configs):
|
||||
"""Configure all of the services."""
|
||||
self.log.info('OpenStackAmuletDeployment: configure services')
|
||||
for service, config in six.iteritems(configs):
|
||||
self.d.configure(service, config)
|
||||
|
||||
def _auto_wait_for_status(self, message=None, exclude_services=None,
|
||||
include_only=None, timeout=None):
|
||||
"""Wait for all units to have a specific extended status, except
|
||||
for any defined as excluded. Unless specified via message, any
|
||||
status containing any case of 'ready' will be considered a match.
|
||||
|
||||
Examples of message usage:
|
||||
|
||||
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
|
||||
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
|
||||
|
||||
Wait for all units to reach this status (exact match):
|
||||
message = re.compile('^Unit is ready and clustered$')
|
||||
|
||||
Wait for all units to reach any one of these (exact match):
|
||||
message = re.compile('Unit is ready|OK|Ready')
|
||||
|
||||
Wait for at least one unit to reach this status (exact match):
|
||||
message = {'ready'}
|
||||
|
||||
See Amulet's sentry.wait_for_messages() for message usage detail.
|
||||
https://github.com/juju/amulet/blob/master/amulet/sentry.py
|
||||
|
||||
:param message: Expected status match
|
||||
:param exclude_services: List of juju service names to ignore,
|
||||
not to be used in conjunction with include_only.
|
||||
:param include_only: List of juju service names to exclusively check,
|
||||
not to be used in conjunction with exclude_services.
|
||||
:param timeout: Maximum time in seconds to wait for status match
|
||||
:returns: None. Raises if timeout is hit.
|
||||
"""
|
||||
if not timeout:
|
||||
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
|
||||
self.log.info('Waiting for extended status on units for {}s...'
|
||||
''.format(timeout))
|
||||
|
||||
all_services = self.d.services.keys()
|
||||
|
||||
if exclude_services and include_only:
|
||||
raise ValueError('exclude_services can not be used '
|
||||
'with include_only')
|
||||
|
||||
if message:
|
||||
if isinstance(message, re._pattern_type):
|
||||
match = message.pattern
|
||||
else:
|
||||
match = message
|
||||
|
||||
self.log.debug('Custom extended status wait match: '
|
||||
'{}'.format(match))
|
||||
else:
|
||||
self.log.debug('Default extended status wait match: contains '
|
||||
'READY (case-insensitive)')
|
||||
message = re.compile('.*ready.*', re.IGNORECASE)
|
||||
|
||||
if exclude_services:
|
||||
self.log.debug('Excluding services from extended status match: '
|
||||
'{}'.format(exclude_services))
|
||||
else:
|
||||
exclude_services = []
|
||||
|
||||
if include_only:
|
||||
services = include_only
|
||||
else:
|
||||
services = list(set(all_services) - set(exclude_services))
|
||||
|
||||
self.log.debug('Waiting up to {}s for extended status on services: '
|
||||
'{}'.format(timeout, services))
|
||||
service_messages = {service: message for service in services}
|
||||
|
||||
# Check for idleness
|
||||
self.d.sentry.wait(timeout=timeout)
|
||||
# Check for error states and bail early
|
||||
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
|
||||
# Check for ready messages
|
||||
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
|
||||
|
||||
self.log.info('OK')
|
||||
|
||||
def _get_openstack_release(self):
|
||||
"""Get openstack release.
|
||||
|
||||
Return an integer representing the enum value of the openstack
|
||||
release.
|
||||
"""
|
||||
# Must be ordered by OpenStack release (not by Ubuntu release):
|
||||
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
|
||||
setattr(self, os_pair, i)
|
||||
|
||||
releases = {
|
||||
('trusty', None): self.trusty_icehouse,
|
||||
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
||||
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
||||
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
|
||||
('xenial', None): self.xenial_mitaka,
|
||||
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
|
||||
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
|
||||
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
|
||||
('xenial', 'cloud:xenial-queens'): self.xenial_queens,
|
||||
('yakkety', None): self.yakkety_newton,
|
||||
('zesty', None): self.zesty_ocata,
|
||||
('artful', None): self.artful_pike,
|
||||
('bionic', None): self.bionic_queens,
|
||||
}
|
||||
return releases[(self.series, self.openstack)]
|
||||
|
||||
def _get_openstack_release_string(self):
|
||||
"""Get openstack release string.
|
||||
|
||||
Return a string representing the openstack release.
|
||||
"""
|
||||
releases = OrderedDict([
|
||||
('trusty', 'icehouse'),
|
||||
('xenial', 'mitaka'),
|
||||
('yakkety', 'newton'),
|
||||
('zesty', 'ocata'),
|
||||
('artful', 'pike'),
|
||||
('bionic', 'queens'),
|
||||
])
|
||||
if self.openstack:
|
||||
os_origin = self.openstack.split(':')[1]
|
||||
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
||||
else:
|
||||
return releases[self.series]
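# Illustrative sketch of the parsing above: for an assumed deployment with
# series='xenial' and openstack='cloud:xenial-ocata/updates', the release
# string reduces to 'ocata'.
def example_release_string(self):
    origin = 'cloud:xenial-ocata/updates'       # assumed value for the example
    os_origin = origin.split(':')[1]            # 'xenial-ocata/updates'
    return os_origin.split('xenial-')[1].split('/')[0]   # 'ocata'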
|
||||
|
||||
def get_ceph_expected_pools(self, radosgw=False):
|
||||
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
||||
test scenario, based on OpenStack release and whether ceph radosgw
|
||||
is flagged as present or not."""
|
||||
|
||||
if self._get_openstack_release() == self.trusty_icehouse:
|
||||
# Icehouse
|
||||
pools = [
|
||||
'data',
|
||||
'metadata',
|
||||
'rbd',
|
||||
'cinder-ceph',
|
||||
'glance'
|
||||
]
|
||||
elif (self.trusty_kilo <= self._get_openstack_release() <=
|
||||
self.zesty_ocata):
|
||||
# Kilo through Ocata
|
||||
pools = [
|
||||
'rbd',
|
||||
'cinder-ceph',
|
||||
'glance'
|
||||
]
|
||||
else:
|
||||
# Pike and later
|
||||
pools = [
|
||||
'cinder-ceph',
|
||||
'glance'
|
||||
]
|
||||
|
||||
if radosgw:
|
||||
pools.extend([
|
||||
'.rgw.root',
|
||||
'.rgw.control',
|
||||
'.rgw',
|
||||
'.rgw.gc',
|
||||
'.users.uid'
|
||||
])
|
||||
|
||||
return pools
|
File diff suppressed because it is too large
|
@@ -0,0 +1,13 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
|
@@ -0,0 +1,55 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
#
|
||||
# Copyright 2014 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Edward Hope-Morley <opentastic@gmail.com>
|
||||
#
|
||||
|
||||
import time
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
INFO,
|
||||
)
|
||||
|
||||
|
||||
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
|
||||
"""If the decorated function raises exception exc_type, allow num_retries
|
||||
retry attempts before raising the exception.
|
||||
"""
|
||||
def _retry_on_exception_inner_1(f):
|
||||
def _retry_on_exception_inner_2(*args, **kwargs):
|
||||
retries = num_retries
|
||||
multiplier = 1
|
||||
while True:
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except exc_type:
|
||||
if not retries:
|
||||
raise
|
||||
|
||||
delay = base_delay * multiplier
|
||||
multiplier += 1
|
||||
log("Retrying '%s' %d more times (delay=%s)" %
|
||||
(f.__name__, retries, delay), level=INFO)
|
||||
retries -= 1
|
||||
if delay:
|
||||
time.sleep(delay)
|
||||
|
||||
return _retry_on_exception_inner_2
|
||||
|
||||
return _retry_on_exception_inner_1
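# Illustrative usage sketch: retrying a flaky read with a growing delay. The
# decorated function and the file path are assumptions for the example only.
@retry_on_exception(num_retries=3, base_delay=5, exc_type=IOError)
def _example_read_state(path='/var/lib/example/state'):
    with open(path) as f:
        return f.read().strip()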
|
|
@@ -0,0 +1,43 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
|
||||
def sed(filename, before, after, flags='g'):
|
||||
"""
|
||||
Search and replaces the given pattern on filename.
|
||||
|
||||
:param filename: relative or absolute file path.
|
||||
:param before: expression to be replaced (see 'man sed')
|
||||
:param after: expression to replace with (see 'man sed')
|
||||
:param flags: sed-compatible regex flags; for example, to make
|
||||
the search and replace case insensitive, specify ``flags="i"``.
|
||||
The ``g`` flag is always specified regardless, so you do not
|
||||
need to remember to include it when overriding this parameter.
|
||||
:returns: If the sed command exit code was zero then return,
|
||||
otherwise raise CalledProcessError.
|
||||
"""
|
||||
expression = r's/{0}/{1}/{2}'.format(before,
|
||||
after, flags)
|
||||
|
||||
return subprocess.check_call(["sed", "-i", "-r", "-e",
|
||||
expression,
|
||||
os.path.expanduser(filename)])
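# Illustrative usage sketch: rewriting an assumed option in place with sed().
# The config path and option name are assumptions for the example only.
def _example_disable_debug(conf='/etc/example/example.conf'):
    sed(conf, r'^debug\s*=.*', 'debug = False', flags='i')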
|
|
@@ -0,0 +1,132 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import io
|
||||
import os
|
||||
|
||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||
|
||||
|
||||
class Fstab(io.FileIO):
|
||||
"""This class extends file in order to implement a file reader/writer
|
||||
for file `/etc/fstab`
|
||||
"""
|
||||
|
||||
class Entry(object):
|
||||
"""Entry class represents a non-comment line on the `/etc/fstab` file
|
||||
"""
|
||||
def __init__(self, device, mountpoint, filesystem,
|
||||
options, d=0, p=0):
|
||||
self.device = device
|
||||
self.mountpoint = mountpoint
|
||||
self.filesystem = filesystem
|
||||
|
||||
if not options:
|
||||
options = "defaults"
|
||||
|
||||
self.options = options
|
||||
self.d = int(d)
|
||||
self.p = int(p)
|
||||
|
||||
def __eq__(self, o):
|
||||
return str(self) == str(o)
|
||||
|
||||
def __str__(self):
|
||||
return "{} {} {} {} {} {}".format(self.device,
|
||||
self.mountpoint,
|
||||
self.filesystem,
|
||||
self.options,
|
||||
self.d,
|
||||
self.p)
|
||||
|
||||
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
|
||||
|
||||
def __init__(self, path=None):
|
||||
if path:
|
||||
self._path = path
|
||||
else:
|
||||
self._path = self.DEFAULT_PATH
|
||||
super(Fstab, self).__init__(self._path, 'rb+')
|
||||
|
||||
def _hydrate_entry(self, line):
|
||||
# NOTE: use split with no arguments to split on any
|
||||
# whitespace including tabs
|
||||
return Fstab.Entry(*filter(
|
||||
lambda x: x not in ('', None),
|
||||
line.strip("\n").split()))
|
||||
|
||||
@property
|
||||
def entries(self):
|
||||
self.seek(0)
|
||||
for line in self.readlines():
|
||||
line = line.decode('us-ascii')
|
||||
try:
|
||||
if line.strip() and not line.strip().startswith("#"):
|
||||
yield self._hydrate_entry(line)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def get_entry_by_attr(self, attr, value):
|
||||
for entry in self.entries:
|
||||
e_attr = getattr(entry, attr)
|
||||
if e_attr == value:
|
||||
return entry
|
||||
return None
|
||||
|
||||
def add_entry(self, entry):
|
||||
if self.get_entry_by_attr('device', entry.device):
|
||||
return False
|
||||
|
||||
self.write((str(entry) + '\n').encode('us-ascii'))
|
||||
self.truncate()
|
||||
return entry
|
||||
|
||||
def remove_entry(self, entry):
|
||||
self.seek(0)
|
||||
|
||||
lines = [l.decode('us-ascii') for l in self.readlines()]
|
||||
|
||||
found = False
|
||||
for index, line in enumerate(lines):
|
||||
if line.strip() and not line.strip().startswith("#"):
|
||||
if self._hydrate_entry(line) == entry:
|
||||
found = True
|
||||
break
|
||||
|
||||
if not found:
|
||||
return False
|
||||
|
||||
lines.remove(line)
|
||||
|
||||
self.seek(0)
|
||||
self.write(''.join(lines).encode('us-ascii'))
|
||||
self.truncate()
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def remove_by_mountpoint(cls, mountpoint, path=None):
|
||||
fstab = cls(path=path)
|
||||
entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
|
||||
if entry:
|
||||
return fstab.remove_entry(entry)
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def add(cls, device, mountpoint, filesystem, options=None, path=None):
|
||||
return cls(path=path).add_entry(Fstab.Entry(device,
|
||||
mountpoint, filesystem,
|
||||
options=options))
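# Illustrative usage sketch: adding and then removing an entry via the class
# helpers above. The device, mount point and fstab path are assumptions for
# the example only (the target file must already exist, since Fstab opens it
# in 'rb+' mode).
def _example_manage_entry(path='/tmp/fstab-example'):
    Fstab.add('/dev/vdb', '/mnt/data', 'ext4', options='defaults', path=path)
    return Fstab.remove_by_mountpoint('/mnt/data', path=path)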
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@@ -0,0 +1,72 @@
|
|||
import subprocess
|
||||
import yum
|
||||
import os
|
||||
|
||||
from charmhelpers.core.strutils import BasicStringComparator
|
||||
|
||||
|
||||
class CompareHostReleases(BasicStringComparator):
|
||||
"""Provide comparisons of Host releases.
|
||||
|
||||
Use in the form of
|
||||
|
||||
if CompareHostReleases(release) > 'trusty':
|
||||
# do something with mitaka
|
||||
"""
|
||||
|
||||
def __init__(self, item):
|
||||
raise NotImplementedError(
|
||||
"CompareHostReleases() is not implemented for CentOS")
|
||||
|
||||
|
||||
def service_available(service_name):
|
||||
# """Determine whether a system service is available."""
|
||||
if os.path.isdir('/run/systemd/system'):
|
||||
cmd = ['systemctl', 'is-enabled', service_name]
|
||||
else:
|
||||
cmd = ['service', service_name, 'is-enabled']
|
||||
return subprocess.call(cmd) == 0
|
||||
|
||||
|
||||
def add_new_group(group_name, system_group=False, gid=None):
|
||||
cmd = ['groupadd']
|
||||
if gid:
|
||||
cmd.extend(['--gid', str(gid)])
|
||||
if system_group:
|
||||
cmd.append('-r')
|
||||
cmd.append(group_name)
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def lsb_release():
|
||||
"""Return /etc/os-release in a dict."""
|
||||
d = {}
|
||||
with open('/etc/os-release', 'r') as lsb:
|
||||
for l in lsb:
|
||||
s = l.split('=')
|
||||
if len(s) != 2:
|
||||
continue
|
||||
d[s[0].strip()] = s[1].strip()
|
||||
return d
|
||||
|
||||
|
||||
def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||
"""Compare supplied revno with the revno of the installed package.
|
||||
|
||||
* 1 => Installed revno is greater than supplied arg
|
||||
* 0 => Installed revno is the same as supplied arg
|
||||
* -1 => Installed revno is less than supplied arg
|
||||
|
||||
This function uses yum's YumBase to build a package cache if the pkgcache argument
|
||||
is None.
|
||||
"""
|
||||
if not pkgcache:
|
||||
y = yum.YumBase()
|
||||
packages = y.doPackageLists()
|
||||
pkgcache = {i.Name: i.version for i in packages['installed']}
|
||||
pkg = pkgcache[package]
|
||||
if pkg > revno:
|
||||
return 1
|
||||
if pkg < revno:
|
||||
return -1
|
||||
return 0
|
|
@@ -0,0 +1,90 @@
|
|||
import subprocess
|
||||
|
||||
from charmhelpers.core.strutils import BasicStringComparator
|
||||
|
||||
|
||||
UBUNTU_RELEASES = (
|
||||
'lucid',
|
||||
'maverick',
|
||||
'natty',
|
||||
'oneiric',
|
||||
'precise',
|
||||
'quantal',
|
||||
'raring',
|
||||
'saucy',
|
||||
'trusty',
|
||||
'utopic',
|
||||
'vivid',
|
||||
'wily',
|
||||
'xenial',
|
||||
'yakkety',
|
||||
'zesty',
|
||||
'artful',
|
||||
'bionic',
|
||||
)
|
||||
|
||||
|
||||
class CompareHostReleases(BasicStringComparator):
|
||||
"""Provide comparisons of Ubuntu releases.
|
||||
|
||||
Use in the form of
|
||||
|
||||
if CompareHostReleases(release) > 'trusty':
|
||||
# do something with mitaka
|
||||
"""
|
||||
_list = UBUNTU_RELEASES
|
||||
|
||||
|
||||
def service_available(service_name):
|
||||
"""Determine whether a system service is available"""
|
||||
try:
|
||||
subprocess.check_output(
|
||||
['service', service_name, 'status'],
|
||||
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||
except subprocess.CalledProcessError as e:
|
||||
return b'unrecognized service' not in e.output
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def add_new_group(group_name, system_group=False, gid=None):
|
||||
cmd = ['addgroup']
|
||||
if gid:
|
||||
cmd.extend(['--gid', str(gid)])
|
||||
if system_group:
|
||||
cmd.append('--system')
|
||||
else:
|
||||
cmd.extend([
|
||||
'--group',
|
||||
])
|
||||
cmd.append(group_name)
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def lsb_release():
|
||||
"""Return /etc/lsb-release in a dict"""
|
||||
d = {}
|
||||
with open('/etc/lsb-release', 'r') as lsb:
|
||||
for l in lsb:
|
||||
k, v = l.split('=')
|
||||
d[k.strip()] = v.strip()
|
||||
return d
|
||||
|
||||
|
||||
def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||
"""Compare supplied revno with the revno of the installed package.
|
||||
|
||||
* 1 => Installed revno is greater than supplied arg
|
||||
* 0 => Installed revno is the same as supplied arg
|
||||
* -1 => Installed revno is less than supplied arg
|
||||
|
||||
This function imports apt_cache function from charmhelpers.fetch if
|
||||
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
|
||||
you call this function, or pass an apt_pkg.Cache() instance.
|
||||
"""
|
||||
import apt_pkg
|
||||
if not pkgcache:
|
||||
from charmhelpers.fetch import apt_cache
|
||||
pkgcache = apt_cache()
|
||||
pkg = pkgcache[package]
|
||||
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
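# Illustrative usage sketch: gating charm behaviour on an installed package
# version. The package name and version string are assumptions for the
# example only (python-apt must be available and the package installed).
def _example_supports_feature():
    return cmp_pkgrevno('openssh-server', '1:7.2') >= 0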
|
|
@@ -0,0 +1,69 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import yaml
|
||||
from charmhelpers.core import fstab
|
||||
from charmhelpers.core import sysctl
|
||||
from charmhelpers.core.host import (
|
||||
add_group,
|
||||
add_user_to_group,
|
||||
fstab_mount,
|
||||
mkdir,
|
||||
)
|
||||
from charmhelpers.core.strutils import bytes_from_string
|
||||
from subprocess import check_output
|
||||
|
||||
|
||||
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
|
||||
max_map_count=65536, mnt_point='/run/hugepages/kvm',
|
||||
pagesize='2MB', mount=True, set_shmmax=False):
|
||||
"""Enable hugepages on system.
|
||||
|
||||
Args:
|
||||
user (str) -- Username to allow access to hugepages to
|
||||
group (str) -- Group name to own hugepages
|
||||
nr_hugepages (int) -- Number of pages to reserve
|
||||
max_map_count (int) -- Number of Virtual Memory Areas a process can own
|
||||
mnt_point (str) -- Directory to mount hugepages on
|
||||
pagesize (str) -- Size of hugepages
|
||||
mount (bool) -- Whether to mount hugepages
|
||||
"""
|
||||
group_info = add_group(group)
|
||||
gid = group_info.gr_gid
|
||||
add_user_to_group(user, group)
|
||||
if max_map_count < 2 * nr_hugepages:
|
||||
max_map_count = 2 * nr_hugepages
|
||||
sysctl_settings = {
|
||||
'vm.nr_hugepages': nr_hugepages,
|
||||
'vm.max_map_count': max_map_count,
|
||||
'vm.hugetlb_shm_group': gid,
|
||||
}
|
||||
if set_shmmax:
|
||||
shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
|
||||
shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
|
||||
if shmmax_minsize > shmmax_current:
|
||||
sysctl_settings['kernel.shmmax'] = shmmax_minsize
|
||||
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
|
||||
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
|
||||
lfstab = fstab.Fstab()
|
||||
fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
|
||||
if fstab_entry:
|
||||
lfstab.remove_entry(fstab_entry)
|
||||
entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
|
||||
'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
|
||||
lfstab.add_entry(entry)
|
||||
if mount:
|
||||
fstab_mount(mnt_point)
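# Illustrative usage sketch: reserving 1024 2MB hugepages for an assumed
# hypervisor user. The user name and page count are assumptions for the
# example only.
def _example_enable_hugepages():
    hugepage_support('libvirt-qemu', group='hugetlb', nr_hugepages=1024,
                     mnt_point='/run/hugepages/kvm', pagesize='2MB',
                     mount=True)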
|
|
@@ -0,0 +1,72 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
from charmhelpers.osplatform import get_platform
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
INFO
|
||||
)
|
||||
|
||||
__platform__ = get_platform()
|
||||
if __platform__ == "ubuntu":
|
||||
from charmhelpers.core.kernel_factory.ubuntu import (
|
||||
persistent_modprobe,
|
||||
update_initramfs,
|
||||
) # flake8: noqa -- ignore F401 for this import
|
||||
elif __platform__ == "centos":
|
||||
from charmhelpers.core.kernel_factory.centos import (
|
||||
persistent_modprobe,
|
||||
update_initramfs,
|
||||
) # flake8: noqa -- ignore F401 for this import
|
||||
|
||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||
|
||||
|
||||
def modprobe(module, persist=True):
|
||||
"""Load a kernel module and configure for auto-load on reboot."""
|
||||
cmd = ['modprobe', module]
|
||||
|
||||
log('Loading kernel module %s' % module, level=INFO)
|
||||
|
||||
subprocess.check_call(cmd)
|
||||
if persist:
|
||||
persistent_modprobe(module)
|
||||
|
||||
|
||||
def rmmod(module, force=False):
|
||||
"""Remove a module from the linux kernel"""
|
||||
cmd = ['rmmod']
|
||||
if force:
|
||||
cmd.append('-f')
|
||||
cmd.append(module)
|
||||
log('Removing kernel module %s' % module, level=INFO)
|
||||
return subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def lsmod():
|
||||
"""Shows what kernel modules are currently loaded"""
|
||||
return subprocess.check_output(['lsmod'],
|
||||
universal_newlines=True)
|
||||
|
||||
|
||||
def is_module_loaded(module):
|
||||
"""Checks if a kernel module is already loaded"""
|
||||
matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
|
||||
return len(matches) > 0
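# Illustrative usage sketch: loading a module only when it is not already
# present. The module name is an assumption for the example only.
def _example_ensure_bonding():
    if not is_module_loaded('bonding'):
        modprobe('bonding', persist=True)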
|
|
@@ -0,0 +1,17 @@
|
|||
import subprocess
|
||||
import os
|
||||
|
||||
|
||||
def persistent_modprobe(module):
|
||||
"""Load a kernel module and configure for auto-load on reboot."""
|
||||
if not os.path.exists('/etc/rc.modules'):
|
||||
open('/etc/rc.modules', 'a')
|
||||
os.chmod('/etc/rc.modules', 111)
|
||||
with open('/etc/rc.modules', 'r+') as modules:
|
||||
if module not in modules.read():
|
||||
modules.write('modprobe %s\n' % module)
|
||||
|
||||
|
||||
def update_initramfs(version='all'):
|
||||
"""Updates an initramfs image."""
|
||||
return subprocess.check_call(["dracut", "-f", version])
|
|
@@ -0,0 +1,13 @@
|
|||
import subprocess
|
||||
|
||||
|
||||
def persistent_modprobe(module):
|
||||
"""Load a kernel module and configure for auto-load on reboot."""
|
||||
with open('/etc/modules', 'r+') as modules:
|
||||
if module not in modules.read():
|
||||
modules.write(module + "\n")
|
||||
|
||||
|
||||
def update_initramfs(version='all'):
|
||||
"""Updates an initramfs image."""
|
||||
return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
|
|
@@ -0,0 +1,16 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from .base import * # NOQA
|
||||
from .helpers import * # NOQA
|
|
@@ -0,0 +1,360 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import json
|
||||
from inspect import getargspec
|
||||
from collections import Iterable, OrderedDict
|
||||
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
__all__ = ['ServiceManager', 'ManagerCallback',
|
||||
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
|
||||
'service_restart', 'service_stop']
|
||||
|
||||
|
||||
class ServiceManager(object):
|
||||
def __init__(self, services=None):
|
||||
"""
|
||||
Register a list of services, given their definitions.
|
||||
|
||||
Service definitions are dicts in the following formats (all keys except
|
||||
'service' are optional)::
|
||||
|
||||
{
|
||||
"service": <service name>,
|
||||
"required_data": <list of required data contexts>,
|
||||
"provided_data": <list of provided data contexts>,
|
||||
"data_ready": <one or more callbacks>,
|
||||
"data_lost": <one or more callbacks>,
|
||||
"start": <one or more callbacks>,
|
||||
"stop": <one or more callbacks>,
|
||||
"ports": <list of ports to manage>,
|
||||
}
|
||||
|
||||
The 'required_data' list should contain dicts of required data (or
|
||||
dependency managers that act like dicts and know how to collect the data).
|
||||
Only when all items in the 'required_data' list are populated are the list
|
||||
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
|
||||
information.
|
||||
|
||||
The 'provided_data' list should contain relation data providers, most likely
|
||||
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
|
||||
that will indicate a set of data to set on a given relation.
|
||||
|
||||
The 'data_ready' value should be either a single callback, or a list of
|
||||
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
|
||||
Each callback will be called with the service name as the only parameter.
|
||||
After all of the 'data_ready' callbacks are called, the 'start' callbacks
|
||||
are fired.
|
||||
|
||||
The 'data_lost' value should be either a single callback, or a list of
|
||||
callbacks, to be called when a 'required_data' item no longer passes
|
||||
`is_ready()`. Each callback will be called with the service name as the
|
||||
only parameter. After all of the 'data_lost' callbacks are called,
|
||||
the 'stop' callbacks are fired.
|
||||
|
||||
The 'start' value should be either a single callback, or a list of
|
||||
callbacks, to be called when starting the service, after the 'data_ready'
|
||||
callbacks are complete. Each callback will be called with the service
|
||||
name as the only parameter. This defaults to
|
||||
`[host.service_start, services.open_ports]`.
|
||||
|
||||
The 'stop' value should be either a single callback, or a list of
|
||||
callbacks, to be called when stopping the service. If the service is
|
||||
being stopped because it no longer has all of its 'required_data', this
|
||||
will be called after all of the 'data_lost' callbacks are complete.
|
||||
Each callback will be called with the service name as the only parameter.
|
||||
This defaults to `[services.close_ports, host.service_stop]`.
|
||||
|
||||
The 'ports' value should be a list of ports to manage. The default
|
||||
'start' handler will open the ports after the service is started,
|
||||
and the default 'stop' handler will close the ports prior to stopping
|
||||
the service.
|
||||
|
||||
|
||||
Examples:
|
||||
|
||||
The following registers an Upstart service called bingod that depends on
|
||||
a mongodb relation and which runs a custom `db_migrate` function prior to
|
||||
restarting the service, and a Runit service called spadesd::
|
||||
|
||||
manager = services.ServiceManager([
|
||||
{
|
||||
'service': 'bingod',
|
||||
'ports': [80, 443],
|
||||
'required_data': [MongoRelation(), config(), {'my': 'data'}],
|
||||
'data_ready': [
|
||||
services.template(source='bingod.conf'),
|
||||
services.template(source='bingod.ini',
|
||||
target='/etc/bingod.ini',
|
||||
owner='bingo', perms=0400),
|
||||
],
|
||||
},
|
||||
{
|
||||
'service': 'spadesd',
|
||||
'data_ready': services.template(source='spadesd_run.j2',
|
||||
target='/etc/sv/spadesd/run',
|
||||
perms=0555),
|
||||
'start': runit_start,
|
||||
'stop': runit_stop,
|
||||
},
|
||||
])
|
||||
manager.manage()
|
||||
"""
|
||||
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
||||
self._ready = None
|
||||
self.services = OrderedDict()
|
||||
for service in services or []:
|
||||
service_name = service['service']
|
||||
self.services[service_name] = service
|
||||
|
||||
def manage(self):
|
||||
"""
|
||||
Handle the current hook by doing The Right Thing with the registered services.
|
||||
"""
|
||||
hookenv._run_atstart()
|
||||
try:
|
||||
hook_name = hookenv.hook_name()
|
||||
if hook_name == 'stop':
|
||||
self.stop_services()
|
||||
else:
|
||||
self.reconfigure_services()
|
||||
self.provide_data()
|
||||
except SystemExit as x:
|
||||
if x.code is None or x.code == 0:
|
||||
hookenv._run_atexit()
|
||||
hookenv._run_atexit()
|
||||
|
||||
def provide_data(self):
|
||||
"""
|
||||
Set the relation data for each provider in the ``provided_data`` list.
|
||||
|
||||
A provider must have a `name` attribute, which indicates which relation
|
||||
to set data on, and a `provide_data()` method, which returns a dict of
|
||||
data to set.
|
||||
|
||||
The `provide_data()` method can optionally accept two parameters:
|
||||
|
||||
* ``remote_service`` The name of the remote service that the data will
|
||||
be provided to. The `provide_data()` method will be called once
|
||||
for each connected service (not unit). This allows the method to
|
||||
tailor its data to the given service.
|
||||
* ``service_ready`` Whether or not the service definition had all of
|
||||
its requirements met, and thus the ``data_ready`` callbacks run.
|
||||
|
||||
Note that the ``provided_data`` methods are now called **after** the
|
||||
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
|
||||
a chance to generate any data necessary for the providing to the remote
|
||||
services.
|
||||
"""
|
||||
for service_name, service in self.services.items():
|
||||
service_ready = self.is_ready(service_name)
|
||||
for provider in service.get('provided_data', []):
|
||||
for relid in hookenv.relation_ids(provider.name):
|
||||
units = hookenv.related_units(relid)
|
||||
if not units:
|
||||
continue
|
||||
remote_service = units[0].split('/')[0]
|
||||
argspec = getargspec(provider.provide_data)
|
||||
if len(argspec.args) > 1:
|
||||
data = provider.provide_data(remote_service, service_ready)
|
||||
else:
|
||||
data = provider.provide_data()
|
||||
if data:
|
||||
hookenv.relation_set(relid, data)
|
||||
|
||||
def reconfigure_services(self, *service_names):
|
||||
"""
|
||||
Update all files for one or more registered services, and,
|
||||
if ready, optionally restart them.
|
||||
|
||||
If no service names are given, reconfigures all registered services.
|
||||
"""
|
||||
for service_name in service_names or self.services.keys():
|
||||
if self.is_ready(service_name):
|
||||
self.fire_event('data_ready', service_name)
|
||||
self.fire_event('start', service_name, default=[
|
||||
service_restart,
|
||||
manage_ports])
|
||||
self.save_ready(service_name)
|
||||
else:
|
||||
if self.was_ready(service_name):
|
||||
self.fire_event('data_lost', service_name)
|
||||
self.fire_event('stop', service_name, default=[
|
||||
manage_ports,
|
||||
service_stop])
|
||||
self.save_lost(service_name)
|
||||
|
||||
def stop_services(self, *service_names):
|
||||
"""
|
||||
Stop one or more registered services, by name.
|
||||
|
||||
If no service names are given, stops all registered services.
|
||||
"""
|
||||
for service_name in service_names or self.services.keys():
|
||||
self.fire_event('stop', service_name, default=[
|
||||
manage_ports,
|
||||
service_stop])
|
||||
|
||||
def get_service(self, service_name):
|
||||
"""
|
||||
Given the name of a registered service, return its service definition.
|
||||
"""
|
||||
service = self.services.get(service_name)
|
||||
if not service:
|
||||
raise KeyError('Service not registered: %s' % service_name)
|
||||
return service
|
||||
|
||||
def fire_event(self, event_name, service_name, default=None):
|
||||
"""
|
||||
Fire a data_ready, data_lost, start, or stop event on a given service.
|
||||
"""
|
||||
service = self.get_service(service_name)
|
||||
callbacks = service.get(event_name, default)
|
||||
if not callbacks:
|
||||
return
|
||||
if not isinstance(callbacks, Iterable):
|
||||
callbacks = [callbacks]
|
||||
for callback in callbacks:
|
||||
if isinstance(callback, ManagerCallback):
|
||||
callback(self, service_name, event_name)
|
||||
else:
|
||||
callback(service_name)
|
||||
|
||||
def is_ready(self, service_name):
|
||||
"""
|
||||
Determine if a registered service is ready, by checking its 'required_data'.
|
||||
|
||||
A 'required_data' item can be any mapping type, and is considered ready
|
||||
if `bool(item)` evaluates as True.
|
||||
"""
|
||||
service = self.get_service(service_name)
|
||||
reqs = service.get('required_data', [])
|
||||
return all(bool(req) for req in reqs)
|
||||
|
||||
def _load_ready_file(self):
|
||||
if self._ready is not None:
|
||||
return
|
||||
if os.path.exists(self._ready_file):
|
||||
with open(self._ready_file) as fp:
|
||||
self._ready = set(json.load(fp))
|
||||
else:
|
||||
self._ready = set()
|
||||
|
||||
def _save_ready_file(self):
|
||||
if self._ready is None:
|
||||
return
|
||||
with open(self._ready_file, 'w') as fp:
|
||||
json.dump(list(self._ready), fp)
|
||||
|
||||
def save_ready(self, service_name):
|
||||
"""
|
||||
Save an indicator that the given service is now data_ready.
|
||||
"""
|
||||
self._load_ready_file()
|
||||
self._ready.add(service_name)
|
||||
self._save_ready_file()
|
||||
|
||||
def save_lost(self, service_name):
|
||||
"""
|
||||
Save an indicator that the given service is no longer data_ready.
|
||||
"""
|
||||
self._load_ready_file()
|
||||
self._ready.discard(service_name)
|
||||
self._save_ready_file()
|
||||
|
||||
def was_ready(self, service_name):
|
||||
"""
|
||||
Determine if the given service was previously data_ready.
|
||||
"""
|
||||
self._load_ready_file()
|
||||
return service_name in self._ready
|
||||
|
||||
|
||||
class ManagerCallback(object):
|
||||
"""
|
||||
Special case of a callback that takes the `ServiceManager` instance
|
||||
in addition to the service name.
|
||||
|
||||
Subclasses should implement `__call__` which should accept three parameters:
|
||||
|
||||
* `manager` The `ServiceManager` instance
|
||||
* `service_name` The name of the service it's being triggered for
|
||||
* `event_name` The name of the event that this callback is handling
|
||||
"""
|
||||
def __call__(self, manager, service_name, event_name):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class PortManagerCallback(ManagerCallback):
|
||||
"""
|
||||
Callback class that will open or close ports, for use as either
|
||||
a start or stop action.
|
||||
"""
|
||||
def __call__(self, manager, service_name, event_name):
|
||||
service = manager.get_service(service_name)
|
||||
new_ports = service.get('ports', [])
|
||||
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
|
||||
if os.path.exists(port_file):
|
||||
with open(port_file) as fp:
|
||||
old_ports = fp.read().split(',')
|
||||
for old_port in old_ports:
|
||||
if bool(old_port) and not self.ports_contains(old_port, new_ports):
|
||||
hookenv.close_port(old_port)
|
||||
with open(port_file, 'w') as fp:
|
||||
fp.write(','.join(str(port) for port in new_ports))
|
||||
for port in new_ports:
|
||||
# A port is either a number or 'ICMP'
|
||||
protocol = 'TCP'
|
||||
if str(port).upper() == 'ICMP':
|
||||
protocol = 'ICMP'
|
||||
if event_name == 'start':
|
||||
hookenv.open_port(port, protocol)
|
||||
elif event_name == 'stop':
|
||||
hookenv.close_port(port, protocol)
|
||||
|
||||
def ports_contains(self, port, ports):
|
||||
if not bool(port):
|
||||
return False
|
||||
if str(port).upper() != 'ICMP':
|
||||
port = int(port)
|
||||
return port in ports
|
||||
|
||||
|
||||
def service_stop(service_name):
|
||||
"""
|
||||
Wrapper around host.service_stop to prevent spurious "unknown service"
|
||||
messages in the logs.
|
||||
"""
|
||||
if host.service_running(service_name):
|
||||
host.service_stop(service_name)
|
||||
|
||||
|
||||
def service_restart(service_name):
|
||||
"""
|
||||
Wrapper around host.service_restart to prevent spurious "unknown service"
|
||||
messages in the logs.
|
||||
"""
|
||||
if host.service_available(service_name):
|
||||
if host.service_running(service_name):
|
||||
host.service_restart(service_name)
|
||||
else:
|
||||
host.service_start(service_name)
|
||||
|
||||
|
||||
# Convenience aliases
|
||||
open_ports = close_ports = manage_ports = PortManagerCallback()
|
|
@@ -0,0 +1,290 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import yaml
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core import templating
|
||||
|
||||
from charmhelpers.core.services.base import ManagerCallback
|
||||
|
||||
|
||||
__all__ = ['RelationContext', 'TemplateCallback',
|
||||
'render_template', 'template']
|
||||
|
||||
|
||||
class RelationContext(dict):
|
||||
"""
|
||||
Base class for a context generator that gets relation data from juju.
|
||||
|
||||
Subclasses must provide the attributes `name`, which is the name of the
|
||||
interface of interest, `interface`, which is the type of the interface of
|
||||
interest, and `required_keys`, which is the set of keys required for the
|
||||
relation to be considered complete. The data for all interfaces matching
|
||||
the `name` attribute that are complete will be used to populate the dictionary
|
||||
values (see `get_data`, below).
|
||||
|
||||
The generated context will be namespaced under the relation :attr:`name`,
|
||||
to prevent potential naming conflicts.
|
||||
|
||||
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||
"""
|
||||
name = None
|
||||
interface = None
|
||||
|
||||
def __init__(self, name=None, additional_required_keys=None):
|
||||
if not hasattr(self, 'required_keys'):
|
||||
self.required_keys = []
|
||||
|
||||
if name is not None:
|
||||
self.name = name
|
||||
if additional_required_keys:
|
||||
self.required_keys.extend(additional_required_keys)
|
||||
self.get_data()
|
||||
|
||||
def __bool__(self):
|
||||
"""
|
||||
Returns True if all of the required_keys are available.
|
||||
"""
|
||||
return self.is_ready()
|
||||
|
||||
__nonzero__ = __bool__
|
||||
|
||||
def __repr__(self):
|
||||
return super(RelationContext, self).__repr__()
|
||||
|
||||
def is_ready(self):
|
||||
"""
|
||||
Returns True if all of the `required_keys` are available from any units.
|
||||
"""
|
||||
ready = len(self.get(self.name, [])) > 0
|
||||
if not ready:
|
||||
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
|
||||
return ready
|
||||
|
||||
def _is_ready(self, unit_data):
|
||||
"""
|
||||
Helper method that tests a set of relation data and returns True if
|
||||
all of the `required_keys` are present.
|
||||
"""
|
||||
return set(unit_data.keys()).issuperset(set(self.required_keys))
|
||||
|
||||
def get_data(self):
|
||||
"""
|
||||
Retrieve the relation data for each unit involved in a relation and,
|
||||
if complete, store it in a list under `self[self.name]`. This
|
||||
is automatically called when the RelationContext is instantiated.
|
||||
|
||||
The units are sorted lexicographically first by the service ID, then by
|
||||
the unit ID. Thus, if an interface has two other services, 'db:1'
|
||||
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
|
||||
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
|
||||
set of data, the relation data for the units will be stored in the
|
||||
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
|
||||
|
||||
If you only care about a single unit on the relation, you can just
|
||||
access it as `{{ interface[0]['key'] }}`. However, if you can at all
|
||||
support multiple units on a relation, you should iterate over the list,
|
||||
like::
|
||||
|
||||
{% for unit in interface -%}
|
||||
{{ unit['key'] }}{% if not loop.last %},{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
Note that since all sets of relation data from all related services and
|
||||
units are in a single list, if you need to know which service or unit a
|
||||
set of data came from, you'll need to extend this class to preserve
|
||||
that information.
|
||||
"""
|
||||
if not hookenv.relation_ids(self.name):
|
||||
return
|
||||
|
||||
ns = self.setdefault(self.name, [])
|
||||
for rid in sorted(hookenv.relation_ids(self.name)):
|
||||
for unit in sorted(hookenv.related_units(rid)):
|
||||
reldata = hookenv.relation_get(rid=rid, unit=unit)
|
||||
if self._is_ready(reldata):
|
||||
ns.append(reldata)
|
||||
|
||||
def provide_data(self):
|
||||
"""
|
||||
Return data to be relation_set for this interface.
|
||||
"""
|
||||
return {}
|
||||
|
||||
|
||||
class MysqlRelation(RelationContext):
|
||||
"""
|
||||
Relation context for the `mysql` interface.
|
||||
|
||||
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||
"""
|
||||
name = 'db'
|
||||
interface = 'mysql'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.required_keys = ['host', 'user', 'password', 'database']
|
||||
RelationContext.__init__(self, *args, **kwargs)
|
||||
|
||||
|
||||
class HttpRelation(RelationContext):
|
||||
"""
|
||||
Relation context for the `http` interface.
|
||||
|
||||
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||
"""
|
||||
name = 'website'
|
||||
interface = 'http'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.required_keys = ['host', 'port']
|
||||
RelationContext.__init__(self, *args, **kwargs)
|
||||
|
||||
def provide_data(self):
|
||||
return {
|
||||
'host': hookenv.unit_get('private-address'),
|
||||
'port': 80,
|
||||
}
|
||||
|
||||
|
||||
class RequiredConfig(dict):
|
||||
"""
|
||||
Data context that loads config options with one or more mandatory options.
|
||||
|
||||
Once the required options have been changed from their default values, all
|
||||
config options will be available, namespaced under `config` to prevent
|
||||
potential naming conflicts (for example, between a config option and a
|
||||
relation property).
|
||||
|
||||
:param list *args: List of options that must be changed from their default values.
|
||||
"""
|
||||
|
||||
def __init__(self, *args):
|
||||
self.required_options = args
|
||||
self['config'] = hookenv.config()
|
||||
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
|
||||
self.config = yaml.load(fp).get('options', {})
|
||||
|
||||
def __bool__(self):
|
||||
for option in self.required_options:
|
||||
if option not in self['config']:
|
||||
return False
|
||||
current_value = self['config'][option]
|
||||
default_value = self.config[option].get('default')
|
||||
if current_value == default_value:
|
||||
return False
|
||||
if current_value in (None, '') and default_value in (None, ''):
|
||||
return False
|
||||
return True
|
||||
|
||||
def __nonzero__(self):
|
||||
return self.__bool__()
|
||||
|
||||
|
||||
class StoredContext(dict):
|
||||
"""
|
||||
A data context that always returns the data that it was first created with.
|
||||
|
||||
This is useful to do a one-time generation of things like passwords, that
|
||||
will thereafter use the same value that was originally generated, instead
|
||||
of generating a new value each time it is run.
|
||||
"""
|
||||
def __init__(self, file_name, config_data):
|
||||
"""
|
||||
If the file exists, populate `self` with the data from the file.
|
||||
Otherwise, populate with the given data and persist it to the file.
|
||||
"""
|
||||
if os.path.exists(file_name):
|
||||
self.update(self.read_context(file_name))
|
||||
else:
|
||||
self.store_context(file_name, config_data)
|
||||
self.update(config_data)
|
||||
|
||||
def store_context(self, file_name, config_data):
|
||||
if not os.path.isabs(file_name):
|
||||
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||
with open(file_name, 'w') as file_stream:
|
||||
os.fchmod(file_stream.fileno(), 0o600)
|
||||
yaml.dump(config_data, file_stream)
|
||||
|
||||
def read_context(self, file_name):
|
||||
if not os.path.isabs(file_name):
|
||||
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||
with open(file_name, 'r') as file_stream:
|
||||
data = yaml.load(file_stream)
|
||||
if not data:
|
||||
raise OSError("%s is empty" % file_name)
|
||||
return data
|
||||
|
||||
|
||||
class TemplateCallback(ManagerCallback):
|
||||
"""
|
||||
Callback class that will render a Jinja2 template, for use as a ready
|
||||
action.
|
||||
|
||||
:param str source: The template source file, relative to
|
||||
`$CHARM_DIR/templates`
|
||||
|
||||
:param str target: The target to write the rendered template to (or None)
|
||||
:param str owner: The owner of the rendered file
|
||||
:param str group: The group of the rendered file
|
||||
:param int perms: The permissions of the rendered file
|
||||
:param partial on_change_action: functools partial to be executed when
|
||||
rendered file changes
|
||||
:param jinja2 loader template_loader: A jinja2 template loader
|
||||
|
||||
:return str: The rendered template
|
||||
"""
|
||||
def __init__(self, source, target,
|
||||
owner='root', group='root', perms=0o444,
|
||||
on_change_action=None, template_loader=None):
|
||||
self.source = source
|
||||
self.target = target
|
||||
self.owner = owner
|
||||
self.group = group
|
||||
self.perms = perms
|
||||
self.on_change_action = on_change_action
|
||||
self.template_loader = template_loader
|
||||
|
||||
def __call__(self, manager, service_name, event_name):
|
||||
pre_checksum = ''
|
||||
if self.on_change_action and os.path.isfile(self.target):
|
||||
pre_checksum = host.file_hash(self.target)
|
||||
service = manager.get_service(service_name)
|
||||
context = {'ctx': {}}
|
||||
for ctx in service.get('required_data', []):
|
||||
context.update(ctx)
|
||||
context['ctx'].update(ctx)
|
||||
|
||||
result = templating.render(self.source, self.target, context,
|
||||
self.owner, self.group, self.perms,
|
||||
template_loader=self.template_loader)
|
||||
if self.on_change_action:
|
||||
if pre_checksum == host.file_hash(self.target):
|
||||
hookenv.log(
|
||||
'No change detected: {}'.format(self.target),
|
||||
hookenv.DEBUG)
|
||||
else:
|
||||
self.on_change_action()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
# Convenience aliases for templates
|
||||
render_template = template = TemplateCallback
|
|
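As a hedged illustration of how these helpers combine (the service name, template and target path are made up), a RelationContext can gate a TemplateCallback inside a services definition:

from charmhelpers.core.services.base import ServiceManager
from charmhelpers.core.services.helpers import HttpRelation, render_template

manager = ServiceManager([
    {
        'service': 'my-app',                    # hypothetical service name
        'required_data': [HttpRelation()],      # complete once host/port are set
        'data_ready': [
            # Renders $CHARM_DIR/templates/my-app.conf.j2 with the relation
            # data namespaced under 'website' (paths are illustrative).
            render_template(source='my-app.conf.j2',
                            target='/etc/my-app/my-app.conf'),
        ],
    },
])
manager.manage()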
@ -0,0 +1,129 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import six
|
||||
import re
|
||||
|
||||
|
||||
def bool_from_string(value):
|
||||
"""Interpret string value as boolean.
|
||||
|
||||
Returns True if value translates to True otherwise False.
|
||||
"""
|
||||
if isinstance(value, six.string_types):
|
||||
value = six.text_type(value)
|
||||
else:
|
||||
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
|
||||
raise ValueError(msg)
|
||||
|
||||
value = value.strip().lower()
|
||||
|
||||
if value in ['y', 'yes', 'true', 't', 'on']:
|
||||
return True
|
||||
elif value in ['n', 'no', 'false', 'f', 'off']:
|
||||
return False
|
||||
|
||||
msg = "Unable to interpret string value '%s' as boolean" % (value)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
def bytes_from_string(value):
|
||||
"""Interpret human readable string value as bytes.
|
||||
|
||||
Returns int
|
||||
"""
|
||||
BYTE_POWER = {
|
||||
'K': 1,
|
||||
'KB': 1,
|
||||
'M': 2,
|
||||
'MB': 2,
|
||||
'G': 3,
|
||||
'GB': 3,
|
||||
'T': 4,
|
||||
'TB': 4,
|
||||
'P': 5,
|
||||
'PB': 5,
|
||||
}
|
||||
if isinstance(value, six.string_types):
|
||||
value = six.text_type(value)
|
||||
else:
|
||||
msg = "Unable to interpret non-string value '%s' as bytes" % (value)
|
||||
raise ValueError(msg)
|
||||
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
|
||||
if matches:
|
||||
size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
|
||||
else:
|
||||
# Assume that value passed in is bytes
|
||||
try:
|
||||
size = int(value)
|
||||
except ValueError:
|
||||
msg = "Unable to interpret string value '%s' as bytes" % (value)
|
||||
raise ValueError(msg)
|
||||
return size
|
||||
|
||||
|
||||
class BasicStringComparator(object):
|
||||
"""Provides a class that will compare strings from an iterator type object.
|
||||
Used to provide > and < comparisons on strings that may not necessarily be
|
||||
alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
|
||||
z-wrap.
|
||||
"""
|
||||
|
||||
_list = None
|
||||
|
||||
def __init__(self, item):
|
||||
if self._list is None:
|
||||
raise Exception("Must define the _list in the class definition!")
|
||||
try:
|
||||
self.index = self._list.index(item)
|
||||
except Exception:
|
||||
raise KeyError("Item '{}' is not in list '{}'"
|
||||
.format(item, self._list))
|
||||
|
||||
def __eq__(self, other):
|
||||
assert isinstance(other, str) or isinstance(other, self.__class__)
|
||||
return self.index == self._list.index(other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def __lt__(self, other):
|
||||
assert isinstance(other, str) or isinstance(other, self.__class__)
|
||||
return self.index < self._list.index(other)
|
||||
|
||||
def __ge__(self, other):
|
||||
return not self.__lt__(other)
|
||||
|
||||
def __gt__(self, other):
|
||||
assert isinstance(other, str) or isinstance(other, self.__class__)
|
||||
return self.index > self._list.index(other)
|
||||
|
||||
def __le__(self, other):
|
||||
return not self.__gt__(other)
|
||||
|
||||
def __str__(self):
|
||||
"""Always give back the item at the index so it can be used in
|
||||
comparisons like:
|
||||
|
||||
s_mitaka = CompareOpenStack('mitaka')
|
||||
s_newton = CompareOpenStack('newton')
|
||||
|
||||
assert s_newton > s_mitaka
|
||||
|
||||
@returns: <string>
|
||||
"""
|
||||
return self._list[self.index]
|
|
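A few hedged usage examples for these string helpers; the release list in the comparator subclass is illustrative only.

from charmhelpers.core.strutils import (
    BasicStringComparator,
    bool_from_string,
    bytes_from_string,
)

assert bool_from_string('Yes') is True
assert bool_from_string('off') is False
assert bytes_from_string('10MB') == 10 * 1024 ** 2
assert bytes_from_string('512') == 512      # bare numbers are taken as bytes


class CompareRelease(BasicStringComparator):
    # Subclasses must define _list; this ordering is just an example.
    _list = ['mitaka', 'newton', 'ocata', 'pike', 'queens']


assert CompareRelease('queens') > CompareRelease('mitaka')
assert str(CompareRelease('pike')) == 'pike'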
@ -0,0 +1,54 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import yaml
|
||||
|
||||
from subprocess import check_call
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
DEBUG,
|
||||
ERROR,
|
||||
)
|
||||
|
||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||
|
||||
|
||||
def create(sysctl_dict, sysctl_file):
|
||||
"""Creates a sysctl.conf file from a YAML associative array
|
||||
|
||||
:param sysctl_dict: a YAML-formatted string of sysctl options, e.g. "{ 'kernel.max_pid': 1337 }"
|
||||
:type sysctl_dict: str
|
||||
:param sysctl_file: path to the sysctl file to be saved
|
||||
:type sysctl_file: str or unicode
|
||||
:returns: None
|
||||
"""
|
||||
try:
|
||||
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
|
||||
except yaml.YAMLError:
|
||||
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
|
||||
level=ERROR)
|
||||
return
|
||||
|
||||
with open(sysctl_file, "w") as fd:
|
||||
for key, value in sysctl_dict_parsed.items():
|
||||
fd.write("{}={}\n".format(key, value))
|
||||
|
||||
log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
|
||||
level=DEBUG)
|
||||
|
||||
check_call(["sysctl", "-p", sysctl_file])
|
|
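A hedged example of calling create(); the sysctl keys and target path are illustrative, and it must run as root because the helper finishes with `sysctl -p <file>`.

from charmhelpers.core.sysctl import create

# Writes /etc/sysctl.d/50-example.conf and applies it with `sysctl -p`.
create("{ 'net.ipv4.ip_forward': 1, 'vm.swappiness': 10 }",
       '/etc/sysctl.d/50-example.conf')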
@ -0,0 +1,93 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
def render(source, target, context, owner='root', group='root',
|
||||
perms=0o444, templates_dir=None, encoding='UTF-8',
|
||||
template_loader=None, config_template=None):
|
||||
"""
|
||||
Render a template.
|
||||
|
||||
The `source` path, if not absolute, is relative to the `templates_dir`.
|
||||
|
||||
The `target` path should be absolute. It can also be `None`, in which
|
||||
case no file will be written.
|
||||
|
||||
The context should be a dict containing the values to be replaced in the
|
||||
template.
|
||||
|
||||
config_template may be provided to render from a provided template instead
|
||||
of loading from a file.
|
||||
|
||||
The `owner`, `group`, and `perms` options will be passed to `write_file`.
|
||||
|
||||
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
|
||||
|
||||
The rendered template will be written to the file as well as being returned
|
||||
as a string.
|
||||
|
||||
Note: Using this requires python-jinja2 or python3-jinja2; if it is not
|
||||
installed, calling this will attempt to use charmhelpers.fetch.apt_install
|
||||
to install it.
|
||||
"""
|
||||
try:
|
||||
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||
except ImportError:
|
||||
try:
|
||||
from charmhelpers.fetch import apt_install
|
||||
except ImportError:
|
||||
hookenv.log('Could not import jinja2, and could not import '
|
||||
'charmhelpers.fetch to install it',
|
||||
level=hookenv.ERROR)
|
||||
raise
|
||||
if sys.version_info.major == 2:
|
||||
apt_install('python-jinja2', fatal=True)
|
||||
else:
|
||||
apt_install('python3-jinja2', fatal=True)
|
||||
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||
|
||||
if template_loader:
|
||||
template_env = Environment(loader=template_loader)
|
||||
else:
|
||||
if templates_dir is None:
|
||||
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
|
||||
template_env = Environment(loader=FileSystemLoader(templates_dir))
|
||||
|
||||
# load from a string if provided explicitly
|
||||
if config_template is not None:
|
||||
template = template_env.from_string(config_template)
|
||||
else:
|
||||
try:
|
||||
source = source
|
||||
template = template_env.get_template(source)
|
||||
except exceptions.TemplateNotFound as e:
|
||||
hookenv.log('Could not load template %s from %s.' %
|
||||
(source, templates_dir),
|
||||
level=hookenv.ERROR)
|
||||
raise e
|
||||
content = template.render(context)
|
||||
if target is not None:
|
||||
target_dir = os.path.dirname(target)
|
||||
if not os.path.exists(target_dir):
|
||||
# This is a terrible default directory permission, as the file
|
||||
# or its siblings will often contain secrets.
|
||||
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
|
||||
host.write_file(target, content.encode(encoding), owner, group, perms)
|
||||
return content
|
|
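A minimal sketch of calling render(); the template name, target path and context keys are hypothetical, and it assumes execution inside a hook so that hookenv.charm_dir() resolves and jinja2 can be installed if missing.

from charmhelpers.core.templating import render

render(source='example.conf.j2',             # under $CHARM_DIR/templates
       target='/etc/example/example.conf',   # written with the perms below
       context={'bind_host': '0.0.0.0', 'workers': 4},
       perms=0o640)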
@ -0,0 +1,520 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Authors:
|
||||
# Kapil Thangavelu <kapil.foss@gmail.com>
|
||||
#
|
||||
"""
|
||||
Intro
|
||||
-----
|
||||
|
||||
A simple way to store state in units. This provides a key value
|
||||
storage with support for versioned, transactional operation,
|
||||
and can calculate deltas from previous values to simplify unit logic
|
||||
when processing changes.
|
||||
|
||||
|
||||
Hook Integration
|
||||
----------------
|
||||
|
||||
There are several extant frameworks for hook execution, including
|
||||
|
||||
- charmhelpers.core.hookenv.Hooks
|
||||
- charmhelpers.core.services.ServiceManager
|
||||
|
||||
The storage classes are framework-agnostic; one simple integration is
|
||||
via the HookData contextmanager. It will record the current hook
|
||||
execution environment (including relation data, config data, etc.),
|
||||
setup a transaction and allow easy access to the changes from
|
||||
previously seen values. One consequence of the integration is the
|
||||
reservation of particular keys ('rels', 'unit', 'env', 'config',
|
||||
'charm_revisions') for their respective values.
|
||||
|
||||
Here's a fully worked integration example using hookenv.Hooks::
|
||||
|
||||
from charmhelper.core import hookenv, unitdata
|
||||
|
||||
hook_data = unitdata.HookData()
|
||||
db = unitdata.kv()
|
||||
hooks = hookenv.Hooks()
|
||||
|
||||
@hooks.hook
|
||||
def config_changed():
|
||||
# Print all changes to configuration from previously seen
|
||||
# values.
|
||||
for changed, (prev, cur) in hook_data.conf.items():
|
||||
print('config changed', changed,
|
||||
'previous value', prev,
|
||||
'current value', cur)
|
||||
|
||||
# Get some unit-specific bookkeeping
|
||||
if not db.get('pkg_key'):
|
||||
key = urllib.urlopen('https://example.com/pkg_key').read()
|
||||
db.set('pkg_key', key)
|
||||
|
||||
# Directly access all charm config as a mapping.
|
||||
conf = db.getrange('config', True)
|
||||
|
||||
# Directly access all relation data as a mapping
|
||||
rels = db.getrange('rels', True)
|
||||
|
||||
if __name__ == '__main__':
|
||||
with hook_data():
|
||||
hook.execute()
|
||||
|
||||
|
||||
A more basic integration is via the hook_scope context manager which simply
|
||||
manages transaction scope (and records hook name, and timestamp)::
|
||||
|
||||
>>> from unitdata import kv
|
||||
>>> db = kv()
|
||||
>>> with db.hook_scope('install'):
|
||||
... # do work, in transactional scope.
|
||||
... db.set('x', 1)
|
||||
>>> db.get('x')
|
||||
1
|
||||
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
Values are automatically json de/serialized to preserve basic typing
|
||||
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
|
||||
|
||||
Individual values can be manipulated via get/set::
|
||||
|
||||
>>> kv.set('y', True)
|
||||
>>> kv.get('y')
|
||||
True
|
||||
|
||||
# We can set complex values (dicts, lists) as a single key.
|
||||
>>> kv.set('config', {'a': 1, 'b': True})
|
||||
|
||||
# Also supports returning dictionaries as a record which
|
||||
# provides attribute access.
|
||||
>>> config = kv.get('config', record=True)
|
||||
>>> config.b
|
||||
True
|
||||
|
||||
|
||||
Groups of keys can be manipulated with update/getrange::
|
||||
|
||||
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
|
||||
>>> kv.getrange('gui.', strip=True)
|
||||
{'z': 1, 'y': 2}
|
||||
|
||||
When updating values, it's very helpful to understand which values
|
||||
have actually changed and how they have changed. The storage
|
||||
provides a delta method to provide for this::
|
||||
|
||||
>>> data = {'debug': True, 'option': 2}
|
||||
>>> delta = kv.delta(data, 'config.')
|
||||
>>> delta.debug.previous
|
||||
None
|
||||
>>> delta.debug.current
|
||||
True
|
||||
>>> delta
|
||||
{'debug': (None, True), 'option': (None, 2)}
|
||||
|
||||
Note that the delta method does not persist the actual change; it needs to
|
||||
be explicitly saved via the 'update' method::
|
||||
|
||||
>>> kv.update(data, 'config.')
|
||||
|
||||
Values modified in the context of a hook scope retain historical values
|
||||
associated with the hook name.
|
||||
|
||||
>>> with db.hook_scope('config-changed'):
|
||||
... db.set('x', 42)
|
||||
>>> db.gethistory('x')
|
||||
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
|
||||
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
|
||||
|
||||
"""
|
||||
|
||||
import collections
|
||||
import contextlib
|
||||
import datetime
|
||||
import itertools
|
||||
import json
|
||||
import os
|
||||
import pprint
|
||||
import sqlite3
|
||||
import sys
|
||||
|
||||
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
||||
|
||||
|
||||
class Storage(object):
|
||||
"""Simple key value database for local unit state within charms.
|
||||
|
||||
Modifications are not persisted unless :meth:`flush` is called.
|
||||
|
||||
To support dicts, lists, integers, floats, and booleans, values
|
||||
are automatically json encoded/decoded.
|
||||
"""
|
||||
def __init__(self, path=None):
|
||||
self.db_path = path
|
||||
if path is None:
|
||||
if 'UNIT_STATE_DB' in os.environ:
|
||||
self.db_path = os.environ['UNIT_STATE_DB']
|
||||
else:
|
||||
self.db_path = os.path.join(
|
||||
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
|
||||
with open(self.db_path, 'a') as f:
|
||||
os.fchmod(f.fileno(), 0o600)
|
||||
self.conn = sqlite3.connect('%s' % self.db_path)
|
||||
self.cursor = self.conn.cursor()
|
||||
self.revision = None
|
||||
self._closed = False
|
||||
self._init()
|
||||
|
||||
def close(self):
|
||||
if self._closed:
|
||||
return
|
||||
self.flush(False)
|
||||
self.cursor.close()
|
||||
self.conn.close()
|
||||
self._closed = True
|
||||
|
||||
def get(self, key, default=None, record=False):
|
||||
self.cursor.execute('select data from kv where key=?', [key])
|
||||
result = self.cursor.fetchone()
|
||||
if not result:
|
||||
return default
|
||||
if record:
|
||||
return Record(json.loads(result[0]))
|
||||
return json.loads(result[0])
|
||||
|
||||
def getrange(self, key_prefix, strip=False):
|
||||
"""
|
||||
Get a range of keys starting with a common prefix as a mapping of
|
||||
keys to values.
|
||||
|
||||
:param str key_prefix: Common prefix among all keys
|
||||
:param bool strip: Optionally strip the common prefix from the key
|
||||
names in the returned dict
|
||||
:return dict: A (possibly empty) dict of key-value mappings
|
||||
"""
|
||||
self.cursor.execute("select key, data from kv where key like ?",
|
||||
['%s%%' % key_prefix])
|
||||
result = self.cursor.fetchall()
|
||||
|
||||
if not result:
|
||||
return {}
|
||||
if not strip:
|
||||
key_prefix = ''
|
||||
return dict([
|
||||
(k[len(key_prefix):], json.loads(v)) for k, v in result])
|
||||
|
||||
def update(self, mapping, prefix=""):
|
||||
"""
|
||||
Set the values of multiple keys at once.
|
||||
|
||||
:param dict mapping: Mapping of keys to values
|
||||
:param str prefix: Optional prefix to apply to all keys in `mapping`
|
||||
before setting
|
||||
"""
|
||||
for k, v in mapping.items():
|
||||
self.set("%s%s" % (prefix, k), v)
|
||||
|
||||
def unset(self, key):
|
||||
"""
|
||||
Remove a key from the database entirely.
|
||||
"""
|
||||
self.cursor.execute('delete from kv where key=?', [key])
|
||||
if self.revision and self.cursor.rowcount:
|
||||
self.cursor.execute(
|
||||
'insert into kv_revisions values (?, ?, ?)',
|
||||
[key, self.revision, json.dumps('DELETED')])
|
||||
|
||||
def unsetrange(self, keys=None, prefix=""):
|
||||
"""
|
||||
Remove a range of keys starting with a common prefix, from the database
|
||||
entirely.
|
||||
|
||||
:param list keys: List of keys to remove.
|
||||
:param str prefix: Optional prefix to apply to all keys in ``keys``
|
||||
before removing.
|
||||
"""
|
||||
if keys is not None:
|
||||
keys = ['%s%s' % (prefix, key) for key in keys]
|
||||
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
|
||||
if self.revision and self.cursor.rowcount:
|
||||
self.cursor.execute(
|
||||
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
|
||||
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
|
||||
else:
|
||||
self.cursor.execute('delete from kv where key like ?',
|
||||
['%s%%' % prefix])
|
||||
if self.revision and self.cursor.rowcount:
|
||||
self.cursor.execute(
|
||||
'insert into kv_revisions values (?, ?, ?)',
|
||||
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
|
||||
|
||||
def set(self, key, value):
|
||||
"""
|
||||
Set a value in the database.
|
||||
|
||||
:param str key: Key to set the value for
|
||||
:param value: Any JSON-serializable value to be set
|
||||
"""
|
||||
serialized = json.dumps(value)
|
||||
|
||||
self.cursor.execute('select data from kv where key=?', [key])
|
||||
exists = self.cursor.fetchone()
|
||||
|
||||
# Skip mutations to the same value
|
||||
if exists:
|
||||
if exists[0] == serialized:
|
||||
return value
|
||||
|
||||
if not exists:
|
||||
self.cursor.execute(
|
||||
'insert into kv (key, data) values (?, ?)',
|
||||
(key, serialized))
|
||||
else:
|
||||
self.cursor.execute('''
|
||||
update kv
|
||||
set data = ?
|
||||
where key = ?''', [serialized, key])
|
||||
|
||||
# Save
|
||||
if not self.revision:
|
||||
return value
|
||||
|
||||
self.cursor.execute(
|
||||
'select 1 from kv_revisions where key=? and revision=?',
|
||||
[key, self.revision])
|
||||
exists = self.cursor.fetchone()
|
||||
|
||||
if not exists:
|
||||
self.cursor.execute(
|
||||
'''insert into kv_revisions (
|
||||
revision, key, data) values (?, ?, ?)''',
|
||||
(self.revision, key, serialized))
|
||||
else:
|
||||
self.cursor.execute(
|
||||
'''
|
||||
update kv_revisions
|
||||
set data = ?
|
||||
where key = ?
|
||||
and revision = ?''',
|
||||
[serialized, key, self.revision])
|
||||
|
||||
return value
|
||||
|
||||
def delta(self, mapping, prefix):
|
||||
"""
|
||||
return a delta containing values that have changed.
|
||||
"""
|
||||
previous = self.getrange(prefix, strip=True)
|
||||
if not previous:
|
||||
pk = set()
|
||||
else:
|
||||
pk = set(previous.keys())
|
||||
ck = set(mapping.keys())
|
||||
delta = DeltaSet()
|
||||
|
||||
# added
|
||||
for k in ck.difference(pk):
|
||||
delta[k] = Delta(None, mapping[k])
|
||||
|
||||
# removed
|
||||
for k in pk.difference(ck):
|
||||
delta[k] = Delta(previous[k], None)
|
||||
|
||||
# changed
|
||||
for k in pk.intersection(ck):
|
||||
c = mapping[k]
|
||||
p = previous[k]
|
||||
if c != p:
|
||||
delta[k] = Delta(p, c)
|
||||
|
||||
return delta
|
||||
|
||||
@contextlib.contextmanager
|
||||
def hook_scope(self, name=""):
|
||||
"""Scope all future interactions to the current hook execution
|
||||
revision."""
|
||||
assert not self.revision
|
||||
self.cursor.execute(
|
||||
'insert into hooks (hook, date) values (?, ?)',
|
||||
(name or sys.argv[0],
|
||||
datetime.datetime.utcnow().isoformat()))
|
||||
self.revision = self.cursor.lastrowid
|
||||
try:
|
||||
yield self.revision
|
||||
self.revision = None
|
||||
except Exception:
|
||||
self.flush(False)
|
||||
self.revision = None
|
||||
raise
|
||||
else:
|
||||
self.flush()
|
||||
|
||||
def flush(self, save=True):
|
||||
if save:
|
||||
self.conn.commit()
|
||||
elif self._closed:
|
||||
return
|
||||
else:
|
||||
self.conn.rollback()
|
||||
|
||||
def _init(self):
|
||||
self.cursor.execute('''
|
||||
create table if not exists kv (
|
||||
key text,
|
||||
data text,
|
||||
primary key (key)
|
||||
)''')
|
||||
self.cursor.execute('''
|
||||
create table if not exists kv_revisions (
|
||||
key text,
|
||||
revision integer,
|
||||
data text,
|
||||
primary key (key, revision)
|
||||
)''')
|
||||
self.cursor.execute('''
|
||||
create table if not exists hooks (
|
||||
version integer primary key autoincrement,
|
||||
hook text,
|
||||
date text
|
||||
)''')
|
||||
self.conn.commit()
|
||||
|
||||
def gethistory(self, key, deserialize=False):
|
||||
self.cursor.execute(
|
||||
'''
|
||||
select kv.revision, kv.key, kv.data, h.hook, h.date
|
||||
from kv_revisions kv,
|
||||
hooks h
|
||||
where kv.key=?
|
||||
and kv.revision = h.version
|
||||
''', [key])
|
||||
if deserialize is False:
|
||||
return self.cursor.fetchall()
|
||||
return map(_parse_history, self.cursor.fetchall())
|
||||
|
||||
def debug(self, fh=sys.stderr):
|
||||
self.cursor.execute('select * from kv')
|
||||
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
||||
self.cursor.execute('select * from kv_revisions')
|
||||
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
||||
|
||||
|
||||
def _parse_history(d):
|
||||
return (d[0], d[1], json.loads(d[2]), d[3],
|
||||
datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
|
||||
|
||||
|
||||
class HookData(object):
|
||||
"""Simple integration for existing hook exec frameworks.
|
||||
|
||||
Records all unit information, and stores deltas for processing
|
||||
by the hook.
|
||||
|
||||
Sample::
|
||||
|
||||
from charmhelper.core import hookenv, unitdata
|
||||
|
||||
changes = unitdata.HookData()
|
||||
db = unitdata.kv()
|
||||
hooks = hookenv.Hooks()
|
||||
|
||||
@hooks.hook
|
||||
def config_changed():
|
||||
# View all changes to configuration
|
||||
for changed, (prev, cur) in changes.conf.items():
|
||||
print('config changed', changed,
|
||||
'previous value', prev,
|
||||
'current value', cur)
|
||||
|
||||
# Get some unit-specific bookkeeping
|
||||
if not db.get('pkg_key'):
|
||||
key = urllib.urlopen('https://example.com/pkg_key').read()
|
||||
db.set('pkg_key', key)
|
||||
|
||||
if __name__ == '__main__':
|
||||
with changes():
|
||||
hook.execute()
|
||||
|
||||
"""
|
||||
def __init__(self):
|
||||
self.kv = kv()
|
||||
self.conf = None
|
||||
self.rels = None
|
||||
|
||||
@contextlib.contextmanager
|
||||
def __call__(self):
|
||||
from charmhelpers.core import hookenv
|
||||
hook_name = hookenv.hook_name()
|
||||
|
||||
with self.kv.hook_scope(hook_name):
|
||||
self._record_charm_version(hookenv.charm_dir())
|
||||
delta_config, delta_relation = self._record_hook(hookenv)
|
||||
yield self.kv, delta_config, delta_relation
|
||||
|
||||
def _record_charm_version(self, charm_dir):
|
||||
# Record revisions. Charm revisions are meaningless
|
||||
# to charm authors as they don't control the revision.
|
||||
# so logic dependent on revision is not particularly
|
||||
# useful, however it is useful for debugging analysis.
|
||||
charm_rev = open(
|
||||
os.path.join(charm_dir, 'revision')).read().strip()
|
||||
charm_rev = charm_rev or '0'
|
||||
revs = self.kv.get('charm_revisions', [])
|
||||
if charm_rev not in revs:
|
||||
revs.append(charm_rev.strip() or '0')
|
||||
self.kv.set('charm_revisions', revs)
|
||||
|
||||
def _record_hook(self, hookenv):
|
||||
data = hookenv.execution_environment()
|
||||
self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
|
||||
self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
|
||||
self.kv.set('env', dict(data['env']))
|
||||
self.kv.set('unit', data['unit'])
|
||||
self.kv.set('relid', data.get('relid'))
|
||||
return conf_delta, rels_delta
|
||||
|
||||
|
||||
class Record(dict):
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def __getattr__(self, k):
|
||||
if k in self:
|
||||
return self[k]
|
||||
raise AttributeError(k)
|
||||
|
||||
|
||||
class DeltaSet(Record):
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
|
||||
Delta = collections.namedtuple('Delta', ['previous', 'current'])
|
||||
|
||||
|
||||
_KV = None
|
||||
|
||||
|
||||
def kv():
|
||||
global _KV
|
||||
if _KV is None:
|
||||
_KV = Storage()
|
||||
return _KV
|
|
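Beyond the docstring examples above, a hedged sketch of the kv() store in practice; the key name is illustrative, and without CHARM_DIR set the database is created as ./.unit-state.db.

from charmhelpers.core import unitdata

db = unitdata.kv()
with db.hook_scope('config-changed'):        # opens a transaction, records the hook
    count = db.get('sync-count', default=0)  # hypothetical key
    db.set('sync-count', count + 1)
# The transaction is committed when the with-block exits without error.
print(db.get('sync-count'))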
@ -0,0 +1,25 @@
|
|||
import platform
|
||||
|
||||
|
||||
def get_platform():
|
||||
"""Return the current OS platform.
|
||||
|
||||
For example: if the current OS platform is Ubuntu then the string "ubuntu"
|
||||
will be returned (which is the name of the module).
|
||||
This string is used to decide which platform module should be imported.
|
||||
"""
|
||||
# linux_distribution is deprecated and will be removed in Python 3.7
|
||||
# Warnings *not* disabled, as we certainly need to fix this.
|
||||
tuple_platform = platform.linux_distribution()
|
||||
current_platform = tuple_platform[0]
|
||||
if "Ubuntu" in current_platform:
|
||||
return "ubuntu"
|
||||
elif "CentOS" in current_platform:
|
||||
return "centos"
|
||||
elif "debian" in current_platform:
|
||||
# Stock Python does not detect Ubuntu and instead returns debian.
|
||||
# Or at least it does in some build environments like Travis CI.
|
||||
return "ubuntu"
|
||||
else:
|
||||
raise RuntimeError("This module is not supported on {}."
|
||||
.format(current_platform))
|
|
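A hedged example of get_platform(); the dispatch target below is an assumption about how a caller might use the returned string, mirroring the platform-specific module layout in charm-helpers.

from charmhelpers.osplatform import get_platform

platform = get_platform()                  # e.g. "ubuntu" or "centos"
# Hypothetical use: pick a platform-specific module name the way
# charm-helpers itself dispatches its fetch/ helpers.
module_name = 'charmhelpers.fetch.%s' % platform
print('would dispatch to', module_name)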
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic Glance deployment on bionic-queens."""
|
||||
|
||||
from basic_deployment import GlanceBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = GlanceBasicDeployment(series='bionic')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic Glance deployment on trusty-icehouse."""
|
||||
|
||||
from basic_deployment import GlanceBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = GlanceBasicDeployment(series='trusty')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic glance deployment on trusty-mitaka."""
|
||||
|
||||
from basic_deployment import GlanceBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = GlanceBasicDeployment(series='trusty',
|
||||
openstack='cloud:trusty-mitaka',
|
||||
source='cloud:trusty-updates/mitaka')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic Glance deployment on xenial-mitaka."""
|
||||
|
||||
from basic_deployment import GlanceBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = GlanceBasicDeployment(series='xenial')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic glance deployment on xenial-ocata."""
|
||||
|
||||
from basic_deployment import GlanceBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = GlanceBasicDeployment(series='xenial',
|
||||
openstack='cloud:xenial-ocata',
|
||||
source='cloud:xenial-updates/ocata')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic glance deployment on xenial-pike."""
|
||||
|
||||
from basic_deployment import GlanceBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = GlanceBasicDeployment(series='xenial',
|
||||
openstack='cloud:xenial-pike',
|
||||
source='cloud:xenial-updates/pike')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic glance deployment on xenial-queens."""
|
||||
|
||||
from basic_deployment import GlanceBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = GlanceBasicDeployment(series='xenial',
|
||||
openstack='cloud:xenial-queens',
|
||||
source='cloud:xenial-updates/queens')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,18 @@
|
|||
# Bootstrap the model if necessary.
|
||||
bootstrap: True
|
||||
# Re-use bootstrap node.
|
||||
reset: True
|
||||
# Use tox/requirements to drive the venv instead of bundletester's venv feature.
|
||||
virtualenv: False
|
||||
# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
|
||||
makefile: []
|
||||
# Do not specify juju PPA sources. Juju is presumed to be pre-installed
|
||||
# and configured in all test runner environments.
|
||||
#sources:
|
||||
# Do not specify or rely on system packages.
|
||||
#packages:
|
||||
# Do not specify python packages here. Use test-requirements.txt
|
||||
# and tox instead, i.e. the venv is constructed before bundletester
|
||||
# is invoked.
|
||||
#python-packages:
|
||||
reset_timeout: 600
|
20
tox.ini
20
tox.ini
|
@ -1,3 +1,6 @@
|
|||
# Classic charm: ./tox.ini
|
||||
# This file is managed centrally by release-tools and should not be modified
|
||||
# within individual charm repos.
|
||||
[tox]
|
||||
envlist = pep8,py27
|
||||
skipsdist = True
|
||||
|
@ -5,20 +8,29 @@ skipsdist = True
|
|||
[testenv]
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
PYTHONHASHSEED=0
|
||||
CHARM_DIR={envdir}
|
||||
AMULET_SETUP_TIMEOUT=5400
|
||||
install_command =
|
||||
pip install --allow-unverified python-apt {opts} {packages}
|
||||
pip install {opts} {packages}
|
||||
commands = ostestr {posargs}
|
||||
whitelist_externals = juju
|
||||
passenv = HOME TERM AMULET_* CS_API_*
|
||||
|
||||
[testenv:py27]
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
|
||||
[testenv:py35]
|
||||
basepython = python3.5
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
|
||||
[testenv:pep8]
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands = flake8 {posargs} hooks unit_tests
|
||||
commands = flake8 {posargs} hooks unit_tests tests actions lib
|
||||
charm-proof
|
||||
|
||||
[testenv:venv]
|
||||
|
@ -48,7 +60,7 @@ basepython = python2.7
|
|||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
|
||||
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
|
||||
|
||||
[testenv:func27-dfs]
|
||||
# Charm Functional Test
|
||||
|
@ -70,4 +82,4 @@ commands =
|
|||
|
||||
[flake8]
|
||||
ignore = E402,E226
|
||||
exclude = hooks/charmhelpers
|
||||
exclude = */charmhelpers
|
||||
|
|