Rebase on next branch

james.page@ubuntu.com 2015-01-23 08:23:05 +00:00
commit 0304fc4880
53 changed files with 4035 additions and 723 deletions


@ -1,2 +1,4 @@
bin
revision
.coverage
.venv

.coveragerc (new file)

@ -0,0 +1,6 @@
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
if __name__ == .__main__.:
include=
hooks/rabbit*


@ -1,8 +1,22 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python
CHARM_DIR := $(PWD)
HOOKS_DIR := $(PWD)/hooks
TEST_PREFIX := PYTHONPATH=$(HOOKS_DIR)
clean:
rm -f .coverage
find . -name '*.pyc' -delete
rm -rf .venv
(which dh_clean && dh_clean) || true
.venv:
sudo apt-get install -y gcc python-dev python-virtualenv python-apt
virtualenv .venv --system-site-packages
.venv/bin/pip install -I -r test-requirements.txt
lint:
@flake8 --exclude hooks/charmhelpers hooks
@flake8 --exclude hooks/charmhelpers hooks unit_tests
@charm proof
bin/charm_helpers_sync.py:
@ -16,3 +30,11 @@ sync: bin/charm_helpers_sync.py
publish: lint
bzr push lp:charms/rabbitmq-server
bzr push lp:charms/trusty/rabbitmq-server
unit_test: clean .venv
@echo Starting tests...
env CHARM_DIR=$(CHARM_DIR) $(TEST_PREFIX) .venv/bin/nosetests unit_tests/
functional_test:
@echo Starting amulet tests...
@juju test -v -p AMULET_HTTP_PROXY --timeout 900


@ -7,6 +7,7 @@ include:
- contrib.openstack
- contrib.storage
- contrib.peerstorage
- contrib.python.packages
- contrib.ssl
- contrib.hahelpers.cluster
- contrib.network.ip


@ -80,6 +80,12 @@ options:
hacluster charm will keep rabbit in active/active setup, but in addition
it will deploy a VIP that can be used by services that cannot work
with multiple AMQPs (like Glance in pre-Icehouse).
mirroring-queues:
type: boolean
default: True
description: |
When set to true, the 'ha-mode: all' policy is applied to all the exchanges
that match the expression '^(?!amq\.).*'
rbd-size:
type: string
default: 5G
@ -94,7 +100,7 @@ options:
image name exists in Ceph, it will be re-used and the data will be
overwritten.
ceph-osd-replication-count:
default: 2
default: 3
type: int
description: |
This value dictates the number of replicas ceph must make of any
@ -133,12 +139,15 @@ options:
description: |
Key ID to import to the apt keyring to support use with arbitrary source
configuration from outside of Launchpad archives or PPAs.
# Network configuration options
# by default all access is over 'private-address'
access-network:
type: string
default:
prefer-ipv6:
type: boolean
default: False
description: |
The IP address and netmask of the 'access' network (e.g., 192.168.0.0/24)
If True enables IPv6 support. The charm will expect network interfaces
to be configured with an IPv6 address. If set to False (default) IPv4
is expected.
.
This network will be used for access to RabbitMQ messaging services.
NOTE: these charms do not currently support IPv6 privacy extension. In
order for this charm to function correctly, the privacy extension must be
disabled and a non-temporary address must be configured/available on
your network interface.


@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa
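
As a usage sketch (assuming this bootstrap lives in charmhelpers/__init__.py, as it does upstream), any hook script that imports the package first gets six and yaml installed as a side effect, before anything else uses them:

# minimal sketch of a hook relying on the bootstrap above
import charmhelpers  # noqa: importing the package runs the six/yaml bootstrap

import six
import yaml

print(six.PY2, yaml.safe_load("a: 1"))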


@ -54,6 +54,12 @@ from charmhelpers.core.host import service
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
# nagios_servicegroups:
# default: ""
# type: string
# description: |
# A comma-separated list of nagios servicegroups.
# If left empty, the nagios_context will be used as the servicegroup
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
@ -138,7 +144,7 @@ define service {{
log('Check command not found: {}'.format(parts[0]))
return ''
def write(self, nagios_context, hostname):
def write(self, nagios_context, hostname, nagios_servicegroups=None):
nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
self.command)
with open(nrpe_check_file, 'w') as nrpe_check_config:
@ -150,16 +156,21 @@ define service {{
log('Not writing service config as {} is not accessible'.format(
NRPE.nagios_exportdir))
else:
self.write_service_config(nagios_context, hostname)
self.write_service_config(nagios_context, hostname,
nagios_servicegroups)
def write_service_config(self, nagios_context, hostname):
def write_service_config(self, nagios_context, hostname,
nagios_servicegroups=None):
for f in os.listdir(NRPE.nagios_exportdir):
if re.search('.*{}.cfg'.format(self.command), f):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
if not nagios_servicegroups:
nagios_servicegroups = nagios_context
templ_vars = {
'nagios_hostname': hostname,
'nagios_servicegroup': nagios_context,
'nagios_servicegroup': nagios_servicegroups,
'description': self.description,
'shortname': self.shortname,
'command': self.command,
@ -183,6 +194,10 @@ class NRPE(object):
super(NRPE, self).__init__()
self.config = config()
self.nagios_context = self.config['nagios_context']
if 'nagios_servicegroups' in self.config:
self.nagios_servicegroups = self.config['nagios_servicegroups']
else:
self.nagios_servicegroups = 'juju'
self.unit_name = local_unit().replace('/', '-')
if hostname:
self.hostname = hostname
@ -208,7 +223,8 @@ class NRPE(object):
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
nrpecheck.write(self.nagios_context, self.hostname)
nrpecheck.write(self.nagios_context, self.hostname,
self.nagios_servicegroups)
nrpe_monitors[nrpecheck.shortname] = {
"command": nrpecheck.command,
}
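
A hedged sketch of how a charm consumes the new servicegroup support: add_check() and write() are the existing NRPE entry points, while the plugin name and its arguments are illustrative.

from charmhelpers.contrib.charmsupport.nrpe import NRPE

nrpe = NRPE()  # picks up nagios_context and nagios_servicegroups from config
nrpe.add_check(
    shortname='rabbitmq',
    description='Check RabbitMQ server',
    check_cmd='check_rabbitmq.py --user nagios',  # hypothetical plugin
)
nrpe.write()  # each check config now carries the configured servicegroup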


@ -6,11 +6,17 @@
# Adam Gandelman <adamg@ubuntu.com>
#
"""
Helpers for clustering and determining "cluster leadership".
"""
import subprocess
import os
from socket import gethostname as get_unit_hostname
import six
from charmhelpers.core.hookenv import (
log,
relation_ids,
@ -19,6 +25,7 @@ from charmhelpers.core.hookenv import (
config as config_get,
INFO,
ERROR,
WARNING,
unit_get,
)
@ -27,6 +34,29 @@ class HAIncompleteConfig(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
It relies on two mechanisms to determine leadership:
1. If the charm is part of a corosync cluster, call corosync to
determine leadership.
2. If the charm is not part of a corosync cluster, the leader is
determined as being "the alive unit with the lowest unit number". In
other words, the oldest surviving unit.
"""
if is_clustered():
if not is_crm_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
@ -38,13 +68,17 @@ def is_clustered():
return False
def is_leader(resource):
def is_crm_leader(resource):
"""
Returns True if the charm calling this is the elected corosync leader,
as returned by calling the external "crm" command.
"""
cmd = [
"crm", "resource",
"show", resource
]
try:
status = subprocess.check_output(cmd)
status = subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
@ -54,15 +88,31 @@ def is_leader(resource):
return False
def peer_units():
def is_leader(resource):
log("is_leader is deprecated. Please consider using is_crm_leader "
"instead.", level=WARNING)
return is_crm_leader(resource)
def peer_units(peer_relation="cluster"):
peers = []
for r_id in (relation_ids('cluster') or []):
for r_id in (relation_ids(peer_relation) or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def peer_ips(peer_relation='cluster', addr_key='private-address'):
'''Return a dict of peers and their private-address'''
peers = {}
for r_id in relation_ids(peer_relation):
for unit in relation_list(r_id):
peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
return peers
def oldest_peer(peers):
"""Determines who the oldest peer is by comparing unit numbers."""
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
@ -72,16 +122,9 @@ def oldest_peer(peers):
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
log("eligible_leader is deprecated. Please consider using "
"is_elected_leader instead.", level=WARNING)
return is_elected_leader(resource)
def https():
@ -97,10 +140,9 @@ def https():
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
# TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]
# NOTE: works around (LP: #1203241)
@ -109,34 +151,42 @@ def https():
return False
def determine_api_port(public_port):
def determine_api_port(public_port, singlenode_mode=False):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_apache_port(public_port):
def determine_apache_port(public_port, singlenode_mode=False):
'''
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
@ -156,7 +206,7 @@ def get_hacluster_config():
for setting in settings:
conf[setting] = config_get(setting)
missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None]
[missing.append(s) for s, v in six.iteritems(conf) if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
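
A minimal sketch of the intended calling pattern for is_elected_leader(); the corosync resource name and the guarded action are illustrative.

from charmhelpers.contrib.hahelpers.cluster import is_elected_leader


def migrate_database():
    """Hypothetical one-time action that only one unit should perform."""


if is_elected_leader('res_rabbitmq_vip'):  # resource name is an assumption
    migrate_database()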


@ -1,10 +1,13 @@
import sys
import glob
import re
import subprocess
from functools import partial
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
ERROR, log,
log
)
try:
@ -28,29 +31,28 @@ def _validate_cidr(network):
network)
def no_ip_found_error_out(network):
errmsg = ("No IP address found in network: %s" % network)
raise ValueError(errmsg)
def get_address_in_network(network, fallback=None, fatal=False):
"""
Get an IPv4 or IPv6 address within the network from the host.
"""Get an IPv4 or IPv6 address within the network from the host.
:param network (str): CIDR presentation format. For example,
'192.168.1.0/24'.
:param fallback (str): If no address is found, return fallback.
:param fatal (boolean): If no address is found, fallback is not
set and fatal is True then exit(1).
"""
def not_found_error_out():
log("No IP address found in network: %s" % network,
level=ERROR)
sys.exit(1)
if network is None:
if fallback is not None:
return fallback
if fatal:
no_ip_found_error_out(network)
else:
if fatal:
not_found_error_out()
return None
_validate_cidr(network)
network = netaddr.IPNetwork(network)
@ -62,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
@ -74,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False):
return fallback
if fatal:
not_found_error_out()
no_ip_found_error_out(network)
return None
def is_ipv6(address):
'''Determine whether provided address is IPv6 or not'''
"""Determine whether provided address is IPv6 or not."""
try:
address = netaddr.IPAddress(address)
except netaddr.AddrFormatError:
# probably a hostname - so not an address at all!
return False
else:
return address.version == 6
return address.version == 6
def is_address_in_network(network, address):
@ -105,11 +108,13 @@ def is_address_in_network(network, address):
except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Network (%s) is not in CIDR presentation format" %
network)
try:
address = netaddr.IPAddress(address)
except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Address (%s) is not in correct presentation format" %
address)
if address in network:
return True
else:
@ -132,25 +137,215 @@ def _get_for_address(address, key):
if address.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask']
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
cidr = network.cidr
if address in cidr:
if key == 'iface':
return iface
else:
return addresses[netifaces.AF_INET][0][key]
if address.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
cidr = network.cidr
if address in cidr:
if key == 'iface':
return iface
elif key == 'netmask' and cidr:
return str(cidr).split('/')[1]
else:
return addr[key]
return None
get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
def format_ipv6_addr(address):
"""If address is IPv6, wrap it in '[]' otherwise return None.
This is required by most configuration files when specifying IPv6
addresses.
"""
if is_ipv6(address):
return "[%s]" % address
return None
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None):
"""Return the assigned IP address for a given interface, if any."""
# Extract nic if passed /dev/ethX
if '/' in iface:
iface = iface.split('/')[-1]
if not exc_list:
exc_list = []
try:
inet_num = getattr(netifaces, inet_type)
except AttributeError:
raise Exception("Unknown inet type '%s'" % str(inet_type))
interfaces = netifaces.interfaces()
if inc_aliases:
ifaces = []
for _iface in interfaces:
if iface == _iface or _iface.split(':')[0] == iface:
ifaces.append(_iface)
if fatal and not ifaces:
raise Exception("Invalid interface '%s'" % iface)
ifaces.sort()
else:
if iface not in interfaces:
if fatal:
raise Exception("Interface '%s' not found " % (iface))
else:
return []
else:
ifaces = [iface]
addresses = []
for netiface in ifaces:
net_info = netifaces.ifaddresses(netiface)
if inet_num in net_info:
for entry in net_info[inet_num]:
if 'addr' in entry and entry['addr'] not in exc_list:
addresses.append(entry['addr'])
if fatal and not addresses:
raise Exception("Interface '%s' doesn't have any %s addresses." %
(iface, inet_type))
return sorted(addresses)
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
def get_iface_from_addr(addr):
"""Work out on which interface the provided address is configured."""
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
for inet_type in addresses:
for _addr in addresses[inet_type]:
_addr = _addr['addr']
# link local
ll_key = re.compile("(.+)%.*")
raw = re.match(ll_key, _addr)
if raw:
_addr = raw.group(1)
if _addr == addr:
log("Address '%s' is configured on iface '%s'" %
(addr, iface))
return iface
msg = "Unable to infer net iface on which '%s' is configured" % (addr)
raise Exception(msg)
def sniff_iface(f):
"""Ensure decorated function is called with a value for iface.
If no iface provided, inject net iface inferred from unit private address.
"""
def iface_sniffer(*args, **kwargs):
if not kwargs.get('iface', None):
kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
return f(*args, **kwargs)
return iface_sniffer
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
dynamic_only=True):
"""Get assigned IPv6 address for a given interface.
Returns list of addresses found. If no address found, returns empty list.
If iface is None, we infer the current primary interface by doing a reverse
lookup on the unit private-address.
We currently only support scope global IPv6 addresses i.e. non-temporary
addresses. If no global IPv6 address is found, return the first one found
in the ipv6 address list.
"""
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
inc_aliases=inc_aliases, fatal=fatal,
exc_list=exc_list)
if addresses:
global_addrs = []
for addr in addresses:
key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
m = re.match(key_scope_link_local, addr)
if m:
eui_64_mac = m.group(1)
iface = m.group(2)
else:
global_addrs.append(addr)
if global_addrs:
# Make sure any found global addresses are not temporary
cmd = ['ip', 'addr', 'show', iface]
out = subprocess.check_output(cmd).decode('UTF-8')
if dynamic_only:
key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
else:
key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
addrs = []
for line in out.split('\n'):
line = line.strip()
m = re.match(key, line)
if m and 'temporary' not in line:
# Return the first valid address we find
for addr in global_addrs:
if m.group(1) == addr:
if not dynamic_only or \
m.group(1).endswith(eui_64_mac):
addrs.append(addr)
if addrs:
return addrs
if fatal:
raise Exception("Interface '%s' does not have a scope global "
"non-temporary ipv6 address." % iface)
return []
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
"""Return a list of bridges on the system."""
b_regex = "%s/*/bridge" % vnic_dir
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
"""Return a list of nics comprising a given bridge on the system."""
brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
return [x.split('/')[-1] for x in glob.glob(brif_regex)]
def is_bridge_member(nic):
"""Check if a given nic is a member of a bridge."""
for bridge in get_bridges():
if nic in get_bridge_nics(bridge):
return True
return False
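
A short, hedged sketch of the new address helpers in use; the CIDR, fallback address and interface are illustrative values.

from charmhelpers.contrib.network.ip import (
    format_ipv6_addr,
    get_address_in_network,
    get_ipv6_addr,
)

# Pick the address to advertise, preferring the configured network.
addr = get_address_in_network('192.168.0.0/24', fallback='10.0.0.5')

# First scope-global, non-temporary IPv6 address on eth0 (raises if none).
v6 = get_ipv6_addr(iface='eth0')[0]
print(addr, format_ipv6_addr(v6))  # IPv6 comes back wrapped in [] for configs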


@ -1,50 +1,87 @@
import six
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms."""
"""OpenStack amulet deployment.
def __init__(self, series=None, openstack=None, source=None):
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
if self.stable:
for svc in other_services:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
for svc in other_services:
if svc['name'] in base_charms:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin."""
"""Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
name = 0
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
if self.openstack:
for svc in services:
if svc[name] not in use_source:
if svc['name'] not in use_source:
config = {'openstack-origin': self.openstack}
self.d.configure(svc[name], config)
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc[name] in use_source:
if svc['name'] in use_source:
config = {'source': self.source}
self.d.configure(svc[name], config)
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _get_openstack_release(self):
"""Return an integer representing the enum value of the openstack
release."""
self.precise_essex, self.precise_folsom, self.precise_grizzly, \
self.precise_havana, self.precise_icehouse, \
self.trusty_icehouse = range(6)
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse) = range(6)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,


@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
@ -16,8 +18,11 @@ ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms."""
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
@ -25,13 +30,17 @@ class OpenStackAmuletUtils(AmuletUtils):
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint."""
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if admin_port in ep.adminurl and internal_port in ep.internalurl \
and public_port in ep.publicurl:
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
@ -47,10 +56,13 @@ class OpenStackAmuletUtils(AmuletUtils):
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints."""
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems():
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
@ -60,8 +72,11 @@ class OpenStackAmuletUtils(AmuletUtils):
return ret
def validate_tenant_data(self, expected, actual):
"""Validate a list of actual tenant data vs list of expected tenant
data."""
"""Validate tenant data.
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@ -78,8 +93,11 @@ class OpenStackAmuletUtils(AmuletUtils):
return ret
def validate_role_data(self, expected, actual):
"""Validate a list of actual role data vs a list of expected role
data."""
"""Validate role data.
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@ -95,8 +113,11 @@ class OpenStackAmuletUtils(AmuletUtils):
return ret
def validate_user_data(self, expected, actual):
"""Validate a list of actual user data vs a list of expected user
data."""
"""Validate user data.
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@ -114,21 +135,24 @@ class OpenStackAmuletUtils(AmuletUtils):
return ret
def validate_flavor_data(self, expected, actual):
"""Validate a list of actual flavors vs a list of expected flavors."""
"""Validate flavor data.
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists"""
"""Return True if tenant exists."""
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
service_ip = \
keystone_sentry.relation('shared-db',
'mysql:shared-db')['private-address']
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
@ -165,24 +189,53 @@ class OpenStackAmuletUtils(AmuletUtils):
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(cirros_img):
if not os.path.exists(local_path):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
with open(cirros_img) as f:
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
count = 1
status = image.status
while status != 'active' and count < 10:
time.sleep(3)
image = glance.images.get(image.id)
status = image.status
self.log.debug('image status: {}'.format(status))
count += 1
if status != 'active':
self.log.error('image creation timed out')
return None
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
num_before = len(list(glance.images.list()))
glance.images.delete(image)
count = 1
num_after = len(list(glance.images.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(glance.images.list()))
self.log.debug('number of images: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('image deletion timed out')
return False
return True
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
image = nova.images.find(name=image_name)
@ -199,11 +252,27 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('instance status: {}'.format(status))
count += 1
if status == 'BUILD':
if status != 'ACTIVE':
self.log.error('instance creation timed out')
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
count = 1
num_after = len(list(nova.servers.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(nova.servers.list()))
self.log.debug('number of instances: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('instance deletion timed out')
return False
return True
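
create_image(), delete_image(), create_instance() and delete_instance() above all share the same poll-until-done loop; a standalone sketch of that pattern:

import time


def wait_for(predicate, tries=10, delay=3):
    """Poll predicate() every `delay` seconds, up to `tries` times."""
    for _ in range(tries):
        if predicate():
            return True
        time.sleep(delay)
    return False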

File diff suppressed because it is too large.


@ -0,0 +1,93 @@
from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'config': 'os-public-network',
'fallback': 'public-address'
},
INTERNAL: {
'config': 'os-internal-network',
'fallback': 'private-address'
},
ADMIN: {
'config': 'os-admin-network',
'fallback': 'private-address'
}
}
def canonical_url(configs, endpoint_type=PUBLIC):
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:param returns: str base URL for services on the current service unit.
"""
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def resolve_address(endpoint_type=PUBLIC):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured.
:param endpoint_type: Network endpoint type
"""
resolved_address = None
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
clustered = is_clustered()
if clustered:
if not net_addr:
# If no net-splits defined, we expect a single vip
resolved_address = vips[0]
else:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
resolved_address = get_address_in_network(net_addr, fallback_addr)
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address
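
A hedged sketch of building an endpoint URL on top of resolve_address(); it is only meaningful inside a running hook, the stub configs object and the port are illustrative.

from charmhelpers.contrib.openstack.ip import PUBLIC, canonical_url


class FakeConfigs(object):
    """Stand-in for the charm's OSConfigRenderer; no https contexts."""
    def complete_contexts(self):
        return []


base = canonical_url(FakeConfigs(), endpoint_type=PUBLIC)
url = '{}:5672'.format(base)  # e.g. http://10.0.0.5:5672 or https://[::1]:5672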


@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).strip()
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
@ -138,10 +138,25 @@ def neutron_plugins():
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['neutron-plugin-cisco']],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
},
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['calico-compute', 'bird', 'neutron-dhcp-agent']],
'server_packages': ['neutron-server', 'calico-control'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
@ -162,7 +177,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log('Error: Network manager does not support plugins.')
log("Network manager '%s' does not support plugins." % (manager),
level=ERROR)
raise Exception
try:
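
A hedged sketch of querying the new Calico entry through the existing accessor; the attribute names follow the plugin table above.

from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

pkgs = neutron_plugin_attribute('Calico', 'packages', net_manager='neutron')
svcs = neutron_plugin_attribute('Calico', 'services', net_manager='neutron')
print(pkgs, svcs)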


@ -1,13 +1,13 @@
import os
from charmhelpers.fetch import apt_install
import six
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log,
ERROR,
INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release):
order by OpenStack release.
"""
tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
for rel in OPENSTACK_CODENAMES.itervalues()]
for rel in six.itervalues(OPENSTACK_CODENAMES)]
if not os.path.isdir(templates_dir):
log('Templates directory not found @ %s.' % templates_dir,
@ -258,7 +258,7 @@ class OSConfigRenderer(object):
"""
Write out all registered config files.
"""
[self.write(k) for k in self.templates.iterkeys()]
[self.write(k) for k in six.iterkeys(self.templates)]
def set_release(self, openstack_release):
"""
@ -275,5 +275,5 @@ class OSConfigRenderer(object):
'''
interfaces = []
[interfaces.extend(i.complete_contexts())
for i in self.templates.itervalues()]
for i in six.itervalues(self.templates)]
return interfaces


@ -2,18 +2,24 @@
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from functools import wraps
import subprocess
import json
import os
import socket
import sys
import six
import yaml
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
ERROR,
INFO
INFO,
relation_ids,
relation_set
)
from charmhelpers.contrib.storage.linux.lvm import (
@ -22,8 +28,13 @@ from charmhelpers.contrib.storage.linux.lvm import (
remove_lvm_physical_volume,
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@ -70,6 +81,9 @@ SWIFT_CODENAMES = OrderedDict([
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
('2.0.0', 'juno'),
('2.1.0', 'juno'),
('2.2.0', 'juno'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
@ -102,7 +116,7 @@ def get_os_codename_install_source(src):
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in OPENSTACK_CODENAMES.iteritems():
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v in src:
return v
@ -123,7 +137,7 @@ def get_os_codename_version(vers):
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in OPENSTACK_CODENAMES.iteritems():
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
@ -134,13 +148,8 @@ def get_os_version_codename(codename):
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
import apt_pkg as apt
apt.init()
# Tell apt to build an in-memory cache to prevent race conditions (if
# another process is already building the cache).
apt.config.set("Dir::Cache::pkgcache", "")
cache = apt.Cache()
cache = apt_cache()
try:
pkg = cache[package]
@ -188,7 +197,7 @@ def get_os_version_package(pkg, fatal=True):
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in vers_map.iteritems():
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version
# e = "Could not determine OpenStack version for package: %s" % pkg
@ -312,7 +321,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"]
for u, p in six.iteritems(env_vars) if u != "script_path"]
def openstack_upgrade_available(package):
@ -345,8 +354,8 @@ def ensure_block_device(block_device):
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: '
'block_device=%s.' % block_device, level=ERROR)
error_out('prepare_storage(): Missing required input: block_device=%s.'
% block_device)
if block_device.startswith('/dev/'):
bdev = block_device
@ -362,8 +371,7 @@ def ensure_block_device(block_device):
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev,
level=ERROR)
error_out('Failed to locate valid block device at %s' % bdev)
return bdev
@ -412,7 +420,7 @@ def ns_query(address):
if isinstance(address, dns.name.Name):
rtype = 'PTR'
elif isinstance(address, basestring):
elif isinstance(address, six.string_types):
rtype = 'A'
else:
return None
@ -461,3 +469,151 @@ def get_hostname(address, fqdn=True):
return result
else:
return result.split('.')[0]
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
mm_map = {}
if os.path.isfile(mm_file):
with open(mm_file, 'r') as f:
mm_map = json.load(f)
return mm_map
def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False)
kwargs = {'database': database,
'username': database_user,
'hostname': json.dumps(hosts)}
if relation_prefix:
for key in list(kwargs.keys()):
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
del kwargs[key]
for rid in relation_ids('shared-db'):
relation_set(relation_id=rid, **kwargs)
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap
def git_install_requested():
"""Returns true if openstack-origin-git is specified."""
return config('openstack-origin-git') != "None"
requirements_dir = None
def git_clone_and_install(file_name, core_project):
"""Clone/install all OpenStack repos specified in yaml config file."""
global requirements_dir
if file_name == "None":
return
yaml_file = os.path.join(charm_dir(), file_name)
# clone/install the requirements project first
installed = _git_clone_and_install_subset(yaml_file,
whitelist=['requirements'])
if 'requirements' not in installed:
error_out('requirements git repository must be specified')
# clone/install all other projects except requirements and the core project
blacklist = ['requirements', core_project]
_git_clone_and_install_subset(yaml_file, blacklist=blacklist,
update_requirements=True)
# clone/install the core project
whitelist = [core_project]
installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
update_requirements=True)
if core_project not in installed:
error_out('{} git repository must be specified'.format(core_project))
def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
update_requirements=False):
"""Clone/install subset of OpenStack repos specified in yaml config file."""
global requirements_dir
installed = []
with open(yaml_file, 'r') as fd:
projects = yaml.load(fd)
for proj, val in projects.items():
# The project subset is chosen based on the following 3 rules:
# 1) If project is in blacklist, we don't clone/install it, period.
# 2) If whitelist is empty, we clone/install everything else.
# 3) If whitelist is not empty, we clone/install everything in the
# whitelist.
if proj in blacklist:
continue
if whitelist and proj not in whitelist:
continue
repo = val['repository']
branch = val['branch']
repo_dir = _git_clone_and_install_single(repo, branch,
update_requirements)
if proj == 'requirements':
requirements_dir = repo_dir
installed.append(proj)
return installed
def _git_clone_and_install_single(repo, branch, update_requirements=False):
"""Clone and install a single git repository."""
dest_parent_dir = "/mnt/openstack-git/"
dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
if not os.path.exists(dest_parent_dir):
juju_log('Host dir not mounted at {}. '
'Creating directory there instead.'.format(dest_parent_dir))
os.mkdir(dest_parent_dir)
if not os.path.exists(dest_dir):
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
else:
repo_dir = dest_dir
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
pip_install(repo_dir)
return repo_dir
def _git_update_requirements(package_dir, reqs_dir):
"""Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt."""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
cmd = "python update.py {}".format(package_dir)
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from global-requirements.txt".format(package))
os.chdir(orig_dir)
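
A short usage sketch for the new os_requires_version decorator; the package used to derive the running release is illustrative.

from charmhelpers.contrib.openstack.utils import os_requires_version


@os_requires_version('icehouse', 'python-keystoneclient')
def config_changed():
    """Hook body that must not run on releases older than icehouse."""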


@ -1,44 +1,45 @@
import six
from charmhelpers.core.hookenv import relation_id as current_relation_id
from charmhelpers.core.hookenv import (
is_relation_made,
relation_ids,
relation_get,
local_unit,
relation_set,
)
"""
This helper provides functions to support use of a peer relation
for basic key/value storage, with the added benefit that all storage
can be replicated across peer units, so this is really useful for
services that issue usernames/passwords to remote services.
can be replicated across peer units.
def shared_db_changed()
# Only the lead unit should create passwords
if not is_leader():
return
username = relation_get('username')
key = '{}.password'.format(username)
# Attempt to retrieve any existing password for this user
password = peer_retrieve(key)
if password is None:
# New user, create password and store
password = pwgen(length=64)
peer_store(key, password)
create_access(username, password)
relation_set(password=password)
Requirement to use:
To use this, the "peer_echo()" method has to be called form the peer
relation's relation-changed hook:
def cluster_changed()
# Echo any relation data other that *-address
# back onto the peer relation so all units have
# all *.password keys stored on their local relation
# for later retrieval.
@hooks.hook("cluster-relation-changed") # Adapt this to your peer relation name
def cluster_relation_changed():
peer_echo()
Once this is done, you can use peer storage from anywhere:
@hooks.hook("some-hook")
def some_hook():
# You can store and retrieve key/values this way:
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
# There are peers available so we can work with peer storage
peer_store("mykey", "myvalue")
value = peer_retrieve("mykey")
print value
else:
print "No peers joind the relation, cannot share key/values :("
"""
def peer_retrieve(key, relation_name='cluster'):
""" Retrieve a named key from peer relation relation_name """
"""Retrieve a named key from peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
@ -49,8 +50,26 @@ def peer_retrieve(key, relation_name='cluster'):
'peer relation {}'.format(relation_name))
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
inc_list=None, exc_list=None):
""" Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
inc_list = inc_list if inc_list else []
exc_list = exc_list if exc_list else []
peerdb_settings = peer_retrieve('-', relation_name=relation_name)
matched = {}
for k, v in peerdb_settings.items():
full_prefix = prefix + delimiter
if k.startswith(full_prefix):
new_key = k.replace(full_prefix, '')
if new_key in exc_list:
continue
if new_key in inc_list or len(inc_list) == 0:
matched[new_key] = v
return matched
def peer_store(key, value, relation_name='cluster'):
""" Store the key/value pair on the named peer relation relation_name """
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
@ -62,10 +81,10 @@ def peer_store(key, value, relation_name='cluster'):
def peer_echo(includes=None):
"""Echo filtered attributes back onto the same relation for storage
"""Echo filtered attributes back onto the same relation for storage.
Note that this helper must only be called within a peer relation
changed hook
This is a requirement to use the peerstorage module - it needs to be called
from the peer relation's changed hook.
"""
rdata = relation_get()
echo_data = {}
@ -75,9 +94,39 @@ def peer_echo(includes=None):
if ex in echo_data:
echo_data.pop(ex)
else:
for attribute, value in rdata.iteritems():
for attribute, value in six.iteritems(rdata):
for include in includes:
if include in attribute:
echo_data[attribute] = value
if len(echo_data) > 0:
relation_set(relation_settings=echo_data)
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
peer_store_fatal=False, relation_settings=None,
delimiter='_', **kwargs):
"""Store passed-in arguments both in argument relation and in peer storage.
It functions like doing relation_set() and peer_store() at the same time,
with the same data.
@param relation_id: the id of the relation to store the data on. Defaults
to the current relation.
@param peer_store_fatal: If set to True, the function will raise an
exception should the peer storage not be available."""
relation_settings = relation_settings if relation_settings else {}
relation_set(relation_id=relation_id,
relation_settings=relation_settings,
**kwargs)
if is_relation_made(peer_relation_name):
for key, value in six.iteritems(dict(list(kwargs.items()) +
list(relation_settings.items()))):
key_prefix = relation_id or current_relation_id()
peer_store(key_prefix + delimiter + key,
value,
relation_name=peer_relation_name)
else:
if peer_store_fatal:
raise ValueError('Unable to detect '
'peer relation {}'.format(peer_relation_name))
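
A hedged sketch of the new helpers working together (hook environment required; the relation id and key are illustrative): peer_store_and_set() mirrors relation data into peer storage keyed by relation id, and peer_retrieve_by_prefix() recovers it.

from charmhelpers.contrib.peerstorage import (
    peer_retrieve_by_prefix,
    peer_store_and_set,
)

peer_store_and_set(relation_id='amqp:1', password='s3cr3t')
# Later, on any peer: returns {'password': 's3cr3t'}
settings = peer_retrieve_by_prefix('amqp:1')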


@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log
try:
from pip import main as pip_execute
except ImportError:
apt_update()
apt_install('python-pip')
from pip import main as pip_execute
def parse_options(given, available):
"""Given a set of options, check if available"""
for key, value in sorted(given.items()):
if key in available:
yield "--{0}={1}".format(key, value)
def pip_install_requirements(requirements, **options):
"""Install a requirements file """
command = ["install"]
available_options = ('proxy', 'src', 'log', )
for option in parse_options(options, available_options):
command.append(option)
command.append("-r {0}".format(requirements))
log("Installing from file: {} with options: {}".format(requirements,
command))
pip_execute(command)
def pip_install(package, fatal=False, **options):
"""Install a python package"""
command = ["install"]
available_options = ('proxy', 'src', 'log', "index-url", )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Installing {} package with options: {}".format(package,
command))
pip_execute(command)
def pip_uninstall(package, **options):
"""Uninstall a python package"""
command = ["uninstall", "-q", "-y"]
available_options = ('proxy', 'log', )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Uninstalling {} package with options: {}".format(package,
command))
pip_execute(command)
def pip_list():
"""Returns the list of current python installed packages
"""
return pip_execute(["list"])
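
A usage sketch: only the options whitelisted in each helper ('proxy', 'src', 'log', 'index-url') are translated into pip flags by parse_options(); anything else is silently dropped. The proxy URL and file paths are illustrative.

from charmhelpers.contrib.python.packages import (
    pip_install,
    pip_install_requirements,
)

pip_install('six', proxy='http://squid.internal:3128')
pip_install_requirements('requirements.txt', log='/tmp/pip.log')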


@ -74,5 +74,5 @@ def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=
subprocess.check_call(cmd)
return True
except Exception as e:
print "Execution of openssl command failed:\n{}".format(e)
print("Execution of openssl command failed:\n{}".format(e))
return False


@ -16,19 +16,18 @@ import time
from subprocess import (
check_call,
check_output,
CalledProcessError
CalledProcessError,
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
DEBUG,
INFO,
WARNING,
ERROR
ERROR,
)
from charmhelpers.core.host import (
mount,
mounts,
@ -37,7 +36,6 @@ from charmhelpers.core.host import (
service_running,
umount,
)
from charmhelpers.fetch import (
apt_install,
)
@ -56,99 +54,85 @@ CEPH_CONF = """[global]
def install():
''' Basic Ceph client installation '''
"""Basic Ceph client installation."""
ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
''' Check to see if a RADOS block device exists '''
"""Check to see if a RADOS block device exists."""
try:
out = check_output(['rbd', 'list', '--id', service,
'--pool', pool])
out = check_output(['rbd', 'list', '--id',
service, '--pool', pool]).decode('UTF-8')
except CalledProcessError:
return False
else:
return rbd_img in out
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
''' Create a new RADOS block device '''
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
"""Create a new RADOS block device."""
cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
'--pool', pool]
check_call(cmd)
def pool_exists(service, name):
''' Check to see if a RADOS pool already exists '''
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service, 'lspools'])
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
except CalledProcessError:
return False
else:
return name in out
return name in out
def get_osds(service):
'''
Return a list of all Ceph Object Storage Daemons
currently in the cluster
'''
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls', '--format=json']))
else:
return None
'osd', 'ls',
'--format=json']).decode('UTF-8'))
return None
def create_pool(service, name, replicas=2):
''' Create a new RADOS pool '''
def create_pool(service, name, replicas=3):
"""Create a new RADOS pool."""
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 / replicas)
pgnum = (len(osds) * 100 // replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'create',
name, str(pgnum)
]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
check_call(cmd)
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'set', name,
'size', str(replicas)
]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
str(replicas)]
check_call(cmd)
def delete_pool(service, name):
''' Delete a RADOS pool from ceph '''
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'delete',
name, '--yes-i-really-really-mean-it'
]
"""Delete a RADOS pool from ceph."""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
'--yes-i-really-really-mean-it']
check_call(cmd)
@ -161,44 +145,43 @@ def _keyring_path(service):
def create_keyring(service, key):
''' Create a new Ceph keyring containing key'''
"""Create a new Ceph keyring containing key."""
keyring = _keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
log('Ceph keyring exists at %s.' % keyring, level=WARNING)
return
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.{}'.format(service),
'--add-key={}'.format(key)
]
cmd = ['ceph-authtool', keyring, '--create-keyring',
'--name=client.{}'.format(service), '--add-key={}'.format(key)]
check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO)
log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
def create_key_file(service, key):
''' Create a file containing key '''
"""Create a file containing key."""
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
log('Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
log('Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
''' Query named relation 'ceph' to detemine current nodes '''
"""Query named relation 'ceph' to determine current nodes."""
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth, use_syslog):
''' Perform basic configuration of Ceph '''
"""Perform basic configuration of Ceph."""
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name):
''' Determine whether a RADOS block device is mapped locally '''
"""Determine whether a RADOS block device is mapped locally."""
try:
out = check_output(['rbd', 'showmapped'])
out = check_output(['rbd', 'showmapped']).decode('UTF-8')
except CalledProcessError:
return False
else:
return name in out
return name in out
def map_block_storage(service, pool, image):
''' Map a RADOS block device for local use '''
"""Map a RADOS block device for local use."""
cmd = [
'rbd',
'map',
@ -235,31 +218,32 @@ def map_block_storage(service, pool, image):
def filesystem_mounted(fs):
''' Determine whether a filesytems is already mounted '''
"""Determine whether a filesytems is already mounted."""
return fs in [f for f, m in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
''' Make a new filesystem on the specified block device '''
"""Make a new filesystem on the specified block device."""
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
log('Gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
log('Waiting for block device %s to appear' % blk_device,
level=DEBUG)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
log('Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst):
''' Migrate data in data_src_dst to blk_device and then remount '''
"""Migrate data in data_src_dst to blk_device and then remount."""
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst):
# TODO: re-use
def modprobe(module):
''' Load a kernel module and configure for auto-load on reboot '''
log('ceph: Loading kernel module', level=INFO)
"""Load a kernel module and configure for auto-load on reboot."""
log('Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
@ -289,7 +273,7 @@ def modprobe(module):
def copy_files(src, dst, symlinks=False, ignore=None):
''' Copy files from src to dst '''
"""Copy files from src to dst."""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
@ -300,9 +284,9 @@ def copy_files(src, dst, symlinks=False, ignore=None):
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
NOTE: This function must only be called from a single service unit for
blk_device, fstype, system_services=[],
replicas=3):
"""NOTE: This function must only be called from a single service unit for
the same rbd_img, otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device,
@ -316,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool {}.'.format(pool))
create_pool(service, pool)
log('Creating new pool {}.'.format(pool), level=INFO)
create_pool(service, pool, replicas=replicas)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image ({}).'.format(rbd_img))
log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
level=INFO)
map_block_storage(service, pool, rbd_img)
# make file system
@ -339,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
for svc in system_services:
if service_running(svc):
log('ceph: Stopping services {} prior to migrating data.'
.format(svc))
log('Stopping service {} prior to migrating data.'
.format(svc), level=DEBUG)
service_stop(svc)
place_data_on_block_device(blk_device, mount_point)
for svc in system_services:
log('ceph: Starting service {} after migrating data.'
.format(svc))
log('Starting service {} after migrating data.'
.format(svc), level=DEBUG)
service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None):
'''
Ensures a ceph keyring is created for a named service
and optionally ensures user and group ownership.
"""Ensures a ceph keyring is created for a named service and optionally
ensures user and group ownership.
Returns False if no ceph key is available in relation state.
'''
"""
key = None
for rid in relation_ids('ceph'):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
return False
create_keyring(service=service, key=key)
keyring = _keyring_path(service)
if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring])
return True
def ceph_version():
''' Retrieve the local version of ceph '''
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
output = check_output(cmd).decode('US-ASCII')
output = output.split()
if len(output) > 3:
return output[2]
@ -385,3 +372,46 @@ def ceph_version():
return None
else:
return None
class CephBrokerRq(object):
"""Ceph broker request.
Multiple operations can be added to a request and sent to the Ceph broker
to be executed.
Request is json-encoded for sending over the wire.
The API is versioned and defaults to version 1.
"""
def __init__(self, api_version=1):
self.api_version = api_version
self.ops = []
def add_op_create_pool(self, name, replica_count=3):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count})
@property
def request(self):
return json.dumps({'api-version': self.api_version, 'ops': self.ops})
class CephBrokerRsp(object):
"""Ceph broker response.
Response is json-decoded and contents provided as methods/properties.
The API is versioned and defaults to version 1.
"""
def __init__(self, encoded_rsp):
self.api_version = None
self.rsp = json.loads(encoded_rsp)
@property
def exit_code(self):
return self.rsp.get('exit-code')
@property
def exit_msg(self):
return self.rsp.get('stderr')
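A rough sketch of how the request/response classes pair up over a 'ceph' relation; the relation keys 'broker_req' and 'broker_rsp' are assumptions for illustration, not part of this API:

from charmhelpers.core.hookenv import (relation_ids, relation_set,
                                       relation_get, log, ERROR)

rq = CephBrokerRq()
rq.add_op_create_pool(name='rabbitmq', replica_count=3)
for rid in relation_ids('ceph'):
    # Ship the JSON-encoded request to the broker over the relation.
    relation_set(relation_id=rid, broker_req=rq.request)

# ...later, in ceph-relation-changed:
encoded = relation_get('broker_rsp')
if encoded:
    rsp = CephBrokerRsp(encoded)
    if rsp.exit_code != 0:
        log('Ceph broker request failed: %s' % rsp.exit_msg, level=ERROR)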

View File

@ -1,12 +1,12 @@
import os
import re
from subprocess import (
check_call,
check_output,
)
import six
##################################################
# loopback device helpers.
@ -37,7 +37,7 @@ def create_loopback(file_path):
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in loopback_devices().iteritems():
for d, f in six.iteritems(loopback_devices()):
if f == file_path:
return d
@ -51,7 +51,7 @@ def ensure_loopback_device(path, size):
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in loopback_devices().iteritems():
for d, f in six.iteritems(loopback_devices()):
if f == path:
return d

View File

@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device):
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
l = l.decode('UTF-8')
if l.strip().startswith('VG Name'):
vg = ' '.join(l.strip().split()[2:])
return vg

View File

@ -30,7 +30,8 @@ def zap_disk(block_device):
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
call(['sgdisk', '--zap-all', '--mbrtogpt',
'--clear', block_device])
dev_end = check_output(['blockdev', '--getsz', block_device])
dev_end = check_output(['blockdev', '--getsz',
block_device]).decode('UTF-8')
gpt_end = int(dev_end.split()[0]) - 100
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
'bs=1M', 'count=1'])
@ -46,5 +47,8 @@ def is_device_mounted(device):
:returns: boolean: True if the path represents a mounted device, False if
it doesn't.
'''
out = check_output(['mount'])
is_partition = bool(re.search(r".*[0-9]+\b", device))
out = check_output(['mount']).decode('UTF-8')
if is_partition:
return bool(re.search(device + r"\b", out))
return bool(re.search(device + r"[0-9]+\b", out))

View File

@ -3,10 +3,11 @@
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import io
import os
class Fstab(file):
class Fstab(io.FileIO):
"""This class extends file in order to implement a file reader/writer
for file `/etc/fstab`
"""
@ -24,8 +25,8 @@ class Fstab(file):
options = "defaults"
self.options = options
self.d = d
self.p = p
self.d = int(d)
self.p = int(p)
def __eq__(self, o):
return str(self) == str(o)
@ -45,7 +46,7 @@ class Fstab(file):
self._path = path
else:
self._path = self.DEFAULT_PATH
file.__init__(self, self._path, 'r+')
super(Fstab, self).__init__(self._path, 'rb+')
def _hydrate_entry(self, line):
# NOTE: use split with no arguments to split on any
@ -58,8 +59,9 @@ class Fstab(file):
def entries(self):
self.seek(0)
for line in self.readlines():
line = line.decode('us-ascii')
try:
if not line.startswith("#"):
if line.strip() and not line.startswith("#"):
yield self._hydrate_entry(line)
except ValueError:
pass
@ -75,14 +77,14 @@ class Fstab(file):
if self.get_entry_by_attr('device', entry.device):
return False
self.write(str(entry) + '\n')
self.write((str(entry) + '\n').encode('us-ascii'))
self.truncate()
return entry
def remove_entry(self, entry):
self.seek(0)
lines = self.readlines()
lines = [l.decode('us-ascii') for l in self.readlines()]
found = False
for index, line in enumerate(lines):
@ -97,7 +99,7 @@ class Fstab(file):
lines.remove(line)
self.seek(0)
self.write(''.join(lines))
self.write(''.join(lines).encode('us-ascii'))
self.truncate()
return True
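A minimal sketch of driving the ported Fstab class, assuming the Entry constructor takes (device, mountpoint, filesystem, options, d, p); use a scratch copy rather than the real /etc/fstab when experimenting:

from charmhelpers.core.fstab import Fstab

# The file is opened 'rb+', so it must already exist.
open('/tmp/fstab', 'a').close()
fstab = Fstab(path='/tmp/fstab')
entry = Fstab.Entry('/dev/loop0', '/mnt/data', 'ext4', 'defaults', 0, 0)
fstab.add_entry(entry)             # written back encoded as us-ascii
for e in fstab.entries:            # blank lines and comments are skipped
    print('%s on %s' % (e.device, e.mountpoint))
fstab.remove_entry(entry)
fstab.close()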

View File

@ -9,9 +9,14 @@ import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
import six
if not six.PY3:
from UserDict import UserDict
else:
from collections import UserDict
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@ -63,16 +68,18 @@ def log(message, level=None):
command = ['juju-log']
if level:
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message]
subprocess.call(command)
class Serializable(UserDict.IterableUserDict):
class Serializable(UserDict):
"""Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj):
# wrap the object
UserDict.IterableUserDict.__init__(self)
UserDict.__init__(self)
self.data = obj
def __getattr__(self, attr):
@ -156,12 +163,15 @@ def hook_name():
class Config(dict):
"""A Juju charm config dictionary that can write itself to
disk (as json) and track which values have changed since
the previous hook invocation.
"""A dictionary representation of the charm's config.yaml, with some
extra features:
Do not instantiate this object directly - instead call
``hookenv.config()``
- See which values in the dictionary have changed since the previous hook.
- For values that have changed, see what the previous value was.
- Store arbitrary data for use in a later hook.
NOTE: Do not instantiate this object directly - instead call
``hookenv.config()``, which will return an instance of :class:`Config`.
Example usage::
@ -170,8 +180,8 @@ class Config(dict):
>>> config = hookenv.config()
>>> config['foo']
'bar'
>>> # store a new key/value for later use
>>> config['mykey'] = 'myval'
>>> config.save()
>>> # user runs `juju set mycharm foo=baz`
@ -188,22 +198,40 @@ class Config(dict):
>>> # keys/values that we add are preserved across hooks
>>> config['mykey']
'myval'
>>> # don't forget to save at the end of hook!
>>> config.save()
"""
CONFIG_FILE_NAME = '.juju-persistent-config'
def __init__(self, *args, **kw):
super(Config, self).__init__(*args, **kw)
self.implicit_save = True
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
self.load_previous()
def __getitem__(self, key):
"""For regular dict lookups, check the current juju config first,
then the previous (saved) copy. This ensures that user-saved values
will be returned by a dict lookup.
"""
try:
return dict.__getitem__(self, key)
except KeyError:
return (self._prev_dict or {})[key]
def keys(self):
prev_keys = []
if self._prev_dict is not None:
prev_keys = self._prev_dict.keys()
return list(set(prev_keys + list(dict.keys(self))))
def load_previous(self, path=None):
"""Load previous copy of config from disk so that current values
can be compared to previous values.
"""Load previous copy of config from disk.
In normal usage you don't need to call this method directly - it
is called automatically at object initialization.
:param path:
@ -218,8 +246,8 @@ class Config(dict):
self._prev_dict = json.load(f)
def changed(self, key):
"""Return true if the value for this key has changed since
the last save.
"""Return True if the current value for this key is different from
the previous value.
"""
if self._prev_dict is None:
@ -228,7 +256,7 @@ class Config(dict):
def previous(self, key):
"""Return previous value for this key, or None if there
is no "previous" value.
is no previous value.
"""
if self._prev_dict:
@ -238,11 +266,17 @@ class Config(dict):
def save(self):
"""Save this config to disk.
Preserves items in _prev_dict that do not exist in self.
If the charm is using the :mod:`Services Framework <services.base>`
or :meth:`@hook <Hooks.hook>` decorator, this
is called automatically at the end of successful hook execution.
Otherwise, it should be called directly by user code.
To disable automatic saves, set ``implicit_save=False`` on this
instance.
"""
if self._prev_dict:
for k, v in self._prev_dict.iteritems():
for k, v in six.iteritems(self._prev_dict):
if k not in self:
self[k] = v
with open(self.path, 'w') as f:
@ -257,7 +291,8 @@ def config(scope=None):
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
config_data = json.loads(subprocess.check_output(config_cmd_line))
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
if scope is not None:
return config_data
return Config(config_data)
@ -276,21 +311,22 @@ def relation_get(attribute=None, unit=None, rid=None):
if unit:
_args.append(unit)
try:
return json.loads(subprocess.check_output(_args))
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
except CalledProcessError, e:
except CalledProcessError as e:
if e.returncode == 2:
return None
raise
def relation_set(relation_id=None, relation_settings={}, **kwargs):
def relation_set(relation_id=None, relation_settings=None, **kwargs):
"""Set relation information for the current unit"""
relation_settings = relation_settings if relation_settings else {}
relation_cmd_line = ['relation-set']
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
for k, v in (relation_settings.items() + kwargs.items()):
for k, v in (list(relation_settings.items()) + list(kwargs.items())):
if v is None:
relation_cmd_line.append('{}='.format(k))
else:
@ -307,7 +343,8 @@ def relation_ids(reltype=None):
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
return json.loads(subprocess.check_output(relid_cmd_line)) or []
return json.loads(
subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
return []
@ -318,7 +355,8 @@ def related_units(relid=None):
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
units_cmd_line.extend(('-r', relid))
return json.loads(subprocess.check_output(units_cmd_line)) or []
return json.loads(
subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
@cached
@ -357,21 +395,31 @@ def relations_of_type(reltype=None):
return relation_data
@cached
def metadata():
"""Get the current charm metadata.yaml contents as a python object"""
with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
return yaml.safe_load(md)
@cached
def relation_types():
"""Get a list of relation types supported by this charm"""
charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf)
rel_types = []
md = metadata()
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
mdf.close()
return rel_types
@cached
def charm_name():
"""Get the name of the current charm as is specified on metadata.yaml"""
return metadata().get('name')
@cached
def relations():
"""Get a nested dictionary of relation data for all related units"""
@ -427,7 +475,7 @@ def unit_get(attribute):
"""Get the unit ID for the remote unit"""
_args = ['unit-get', '--format=json', attribute]
try:
return json.loads(subprocess.check_output(_args))
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
@ -464,9 +512,10 @@ class Hooks(object):
hooks.execute(sys.argv)
"""
def __init__(self):
def __init__(self, config_save=True):
super(Hooks, self).__init__()
self._hooks = {}
self._config_save = config_save
def register(self, name, function):
"""Register a hook"""
@ -477,6 +526,10 @@ class Hooks(object):
hook_name = os.path.basename(args[0])
if hook_name in self._hooks:
self._hooks[hook_name]()
if self._config_save:
cfg = config()
if cfg.implicit_save:
cfg.save()
else:
raise UnregisteredHookError(hook_name)
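With the new config_save plumbing, a hook script no longer needs an explicit cfg.save(); a sketch (the 'source' option is just an example key):

import sys
from charmhelpers.core.hookenv import Hooks, config

hooks = Hooks()  # pass config_save=False to opt out of the implicit save

@hooks.hook('config-changed')
def config_changed():
    cfg = config()
    if cfg.changed('source'):
        # react to the new value; cfg.previous('source') holds the old one
        pass

if __name__ == '__main__':
    hooks.execute(sys.argv)  # saves config automatically when implicit_save is True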

View File

@ -6,17 +6,20 @@
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import os
import re
import pwd
import grp
import random
import string
import subprocess
import hashlib
from contextlib import contextmanager
from collections import OrderedDict
from hookenv import log
from fstab import Fstab
import six
from .hookenv import log
from .fstab import Fstab
def service_start(service_name):
@ -52,7 +55,9 @@ def service(action, service_name):
def service_running(service):
"""Determine whether a system service is running"""
try:
output = subprocess.check_output(['service', service, 'status'])
output = subprocess.check_output(
['service', service, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
@ -62,6 +67,18 @@ def service_running(service):
return False
def service_available(service_name):
"""Determine whether a system service is available"""
try:
subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError as e:
return 'unrecognized service' not in e.output
else:
return True
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user to the system"""
try:
@ -84,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
return user_info
def add_group(group_name, system_group=False):
"""Add a group to the system"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
except KeyError:
log('creating group {0}'.format(group_name))
cmd = ['addgroup']
if system_group:
cmd.append('--system')
else:
cmd.extend([
'--group',
])
cmd.append(group_name)
subprocess.check_call(cmd)
group_info = grp.getgrnam(group_name)
return group_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = [
@ -103,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None):
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd).strip()
return subprocess.check_output(cmd).decode('UTF-8').strip()
def symlink(source, destination):
@ -118,23 +155,26 @@ def symlink(source, destination):
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0555, force=False):
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
"""Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
if os.path.exists(realpath):
if force and not os.path.isdir(realpath):
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
else:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chown(realpath, uid, gid)
def write_file(path, content, owner='root', group='root', perms=0444):
def write_file(path, content, owner='root', group='root', perms=0o444):
"""Create or overwrite a file with the contents of a string"""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
@ -165,7 +205,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
@ -179,7 +219,7 @@ def umount(mountpoint, persist=False):
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
@ -197,17 +237,42 @@ def mounts():
return system_mounts
def file_hash(path):
"""Generate a md5 hash of the contents of 'path' or None if not found """
def file_hash(path, hash_type='md5'):
"""
Generate a hash checksum of the contents of 'path' or None if not found.
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
if os.path.exists(path):
h = hashlib.md5()
with open(path, 'r') as source:
h.update(source.read()) # IGNORE:E1101 - it does have update
h = getattr(hashlib, hash_type)()
with open(path, 'rb') as source:
h.update(source.read())
return h.hexdigest()
else:
return None
def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
class ChecksumError(ValueError):
pass
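For example, validating a downloaded file against a known digest (the digest here is a stand-in, not a real checksum):

from charmhelpers.core.host import file_hash, check_hash, ChecksumError

digest = file_hash('/tmp/archive.tgz', hash_type='sha256')  # None if missing

try:
    check_hash('/tmp/archive.tgz', 'deadbeef', hash_type='sha256')
except ChecksumError as e:
    print('validation failed: %s' % e)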
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing
@ -260,7 +325,7 @@ def pwgen(length=None):
if length is None:
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.letters + string.digits)
l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
@ -269,18 +334,24 @@ def pwgen(length=None):
def list_nics(nic_type):
'''Return a list of nics of given type(s)'''
if isinstance(nic_type, basestring):
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).split('\n')
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
interfaces.append(line.split()[1].replace(":", ""))
matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
if matched:
interface = matched.groups()[0]
else:
interface = line.split()[1].replace(":", "")
interfaces.append(interface)
return interfaces
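A quick illustration of the widened matching (interface names here are examples):

from charmhelpers.core.host import list_nics

# 'eth' matches eth0, eth1, ...; the added regex also captures VLAN-tagged
# bonds that 'ip addr show' reports as e.g. 'bond0.100@bond0'.
for nic in list_nics(['eth', 'bond']):
    print(nic)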
@ -292,7 +363,7 @@ def set_nic_mtu(nic, mtu):
def get_nic_mtu(nic):
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).split('\n')
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
for line in ip_output:
words = line.split()
@ -303,7 +374,7 @@ def get_nic_mtu(nic):
def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd)
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
@ -321,11 +392,28 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
'''
import apt_pkg
if not pkgcache:
apt_pkg.init()
# Force Apt to build its cache in memory. That way we avoid race
# conditions with other applications building the cache in the same
# place.
apt_pkg.config.set("Dir::Cache::pkgcache", "")
pkgcache = apt_pkg.Cache()
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
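cmp_pkgrevno() follows apt_pkg.version_compare() semantics, returning a positive, zero or negative value; a sketch for an installed package:

from charmhelpers.core.host import cmp_pkgrevno

# Assumes rabbitmq-server is installed: positive if newer than 3.0.0,
# zero if equal, negative if older.
if cmp_pkgrevno('rabbitmq-server', '3.0.0') >= 0:
    print('newer clustering behaviour available')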
@contextmanager
def chdir(d):
cur = os.getcwd()
try:
yield os.chdir(d)
finally:
os.chdir(cur)
def chownr(path, owner, group):
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
for root, dirs, files in os.walk(path):
for name in dirs + files:
full = os.path.join(root, name)
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
if not broken_symlink:
os.chown(full, uid, gid)

View File

@ -0,0 +1,2 @@
from .base import * # NOQA
from .helpers import * # NOQA

View File

@ -0,0 +1,313 @@
import os
import re
import json
from collections import Iterable
from charmhelpers.core import host
from charmhelpers.core import hookenv
__all__ = ['ServiceManager', 'ManagerCallback',
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
'service_restart', 'service_stop']
class ServiceManager(object):
def __init__(self, services=None):
"""
Register a list of services, given their definitions.
Service definitions are dicts in the following formats (all keys except
'service' are optional)::
{
"service": <service name>,
"required_data": <list of required data contexts>,
"provided_data": <list of provided data contexts>,
"data_ready": <one or more callbacks>,
"data_lost": <one or more callbacks>,
"start": <one or more callbacks>,
"stop": <one or more callbacks>,
"ports": <list of ports to manage>,
}
The 'required_data' list should contain dicts of required data (or
dependency managers that act like dicts and know how to collect the data).
Only when all items in the 'required_data' list are populated are the list
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
information.
The 'provided_data' list should contain relation data providers, most likely
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
that will indicate a set of data to set on a given relation.
The 'data_ready' value should be either a single callback, or a list of
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
Each callback will be called with the service name as the only parameter.
After all of the 'data_ready' callbacks are called, the 'start' callbacks
are fired.
The 'data_lost' value should be either a single callback, or a list of
callbacks, to be called when a 'required_data' item no longer passes
`is_ready()`. Each callback will be called with the service name as the
only parameter. After all of the 'data_lost' callbacks are called,
the 'stop' callbacks are fired.
The 'start' value should be either a single callback, or a list of
callbacks, to be called when starting the service, after the 'data_ready'
callbacks are complete. Each callback will be called with the service
name as the only parameter. This defaults to
`[host.service_start, services.open_ports]`.
The 'stop' value should be either a single callback, or a list of
callbacks, to be called when stopping the service. If the service is
being stopped because it no longer has all of its 'required_data', this
will be called after all of the 'data_lost' callbacks are complete.
Each callback will be called with the service name as the only parameter.
This defaults to `[services.close_ports, host.service_stop]`.
The 'ports' value should be a list of ports to manage. The default
'start' handler will open the ports after the service is started,
and the default 'stop' handler will close the ports prior to stopping
the service.
Examples:
The following registers an Upstart service called bingod that depends on
a mongodb relation and which runs a custom `db_migrate` function prior to
restarting the service, and a Runit service called spadesd::
manager = services.ServiceManager([
{
'service': 'bingod',
'ports': [80, 443],
'required_data': [MongoRelation(), config(), {'my': 'data'}],
'data_ready': [
services.template(source='bingod.conf'),
services.template(source='bingod.ini',
target='/etc/bingod.ini',
owner='bingo', perms=0400),
],
},
{
'service': 'spadesd',
'data_ready': services.template(source='spadesd_run.j2',
target='/etc/sv/spadesd/run',
perms=0555),
'start': runit_start,
'stop': runit_stop,
},
])
manager.manage()
"""
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
self._ready = None
self.services = {}
for service in services or []:
service_name = service['service']
self.services[service_name] = service
def manage(self):
"""
Handle the current hook by doing The Right Thing with the registered services.
"""
hook_name = hookenv.hook_name()
if hook_name == 'stop':
self.stop_services()
else:
self.provide_data()
self.reconfigure_services()
cfg = hookenv.config()
if cfg.implicit_save:
cfg.save()
def provide_data(self):
"""
Set the relation data for each provider in the ``provided_data`` list.
A provider must have a `name` attribute, which indicates which relation
to set data on, and a `provide_data()` method, which returns a dict of
data to set.
"""
hook_name = hookenv.hook_name()
for service in self.services.values():
for provider in service.get('provided_data', []):
if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
data = provider.provide_data()
_ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
if _ready:
hookenv.relation_set(None, data)
def reconfigure_services(self, *service_names):
"""
Update all files for one or more registered services, and,
if ready, optionally restart them.
If no service names are given, reconfigures all registered services.
"""
for service_name in service_names or self.services.keys():
if self.is_ready(service_name):
self.fire_event('data_ready', service_name)
self.fire_event('start', service_name, default=[
service_restart,
manage_ports])
self.save_ready(service_name)
else:
if self.was_ready(service_name):
self.fire_event('data_lost', service_name)
self.fire_event('stop', service_name, default=[
manage_ports,
service_stop])
self.save_lost(service_name)
def stop_services(self, *service_names):
"""
Stop one or more registered services, by name.
If no service names are given, stops all registered services.
"""
for service_name in service_names or self.services.keys():
self.fire_event('stop', service_name, default=[
manage_ports,
service_stop])
def get_service(self, service_name):
"""
Given the name of a registered service, return its service definition.
"""
service = self.services.get(service_name)
if not service:
raise KeyError('Service not registered: %s' % service_name)
return service
def fire_event(self, event_name, service_name, default=None):
"""
Fire a data_ready, data_lost, start, or stop event on a given service.
"""
service = self.get_service(service_name)
callbacks = service.get(event_name, default)
if not callbacks:
return
if not isinstance(callbacks, Iterable):
callbacks = [callbacks]
for callback in callbacks:
if isinstance(callback, ManagerCallback):
callback(self, service_name, event_name)
else:
callback(service_name)
def is_ready(self, service_name):
"""
Determine if a registered service is ready, by checking its 'required_data'.
A 'required_data' item can be any mapping type, and is considered ready
if `bool(item)` evaluates as True.
"""
service = self.get_service(service_name)
reqs = service.get('required_data', [])
return all(bool(req) for req in reqs)
def _load_ready_file(self):
if self._ready is not None:
return
if os.path.exists(self._ready_file):
with open(self._ready_file) as fp:
self._ready = set(json.load(fp))
else:
self._ready = set()
def _save_ready_file(self):
if self._ready is None:
return
with open(self._ready_file, 'w') as fp:
json.dump(list(self._ready), fp)
def save_ready(self, service_name):
"""
Save an indicator that the given service is now data_ready.
"""
self._load_ready_file()
self._ready.add(service_name)
self._save_ready_file()
def save_lost(self, service_name):
"""
Save an indicator that the given service is no longer data_ready.
"""
self._load_ready_file()
self._ready.discard(service_name)
self._save_ready_file()
def was_ready(self, service_name):
"""
Determine if the given service was previously data_ready.
"""
self._load_ready_file()
return service_name in self._ready
class ManagerCallback(object):
"""
Special case of a callback that takes the `ServiceManager` instance
in addition to the service name.
Subclasses should implement `__call__` which should accept three parameters:
* `manager` The `ServiceManager` instance
* `service_name` The name of the service it's being triggered for
* `event_name` The name of the event that this callback is handling
"""
def __call__(self, manager, service_name, event_name):
raise NotImplementedError()
class PortManagerCallback(ManagerCallback):
"""
Callback class that will open or close ports, for use as either
a start or stop action.
"""
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
new_ports = service.get('ports', [])
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file):
with open(port_file) as fp:
old_ports = fp.read().split(',')
for old_port in old_ports:
if bool(old_port):
old_port = int(old_port)
if old_port not in new_ports:
hookenv.close_port(old_port)
with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports))
for port in new_ports:
if event_name == 'start':
hookenv.open_port(port)
elif event_name == 'stop':
hookenv.close_port(port)
def service_stop(service_name):
"""
Wrapper around host.service_stop to prevent spurious "unknown service"
messages in the logs.
"""
if host.service_running(service_name):
host.service_stop(service_name)
def service_restart(service_name):
"""
Wrapper around host.service_restart to prevent spurious "unknown service"
messages in the logs.
"""
if host.service_available(service_name):
if host.service_running(service_name):
host.service_restart(service_name)
else:
host.service_start(service_name)
# Convenience aliases
open_ports = close_ports = manage_ports = PortManagerCallback()

View File

@ -0,0 +1,243 @@
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import templating
from charmhelpers.core.services.base import ManagerCallback
__all__ = ['RelationContext', 'TemplateCallback',
'render_template', 'template']
class RelationContext(dict):
"""
Base class for a context generator that gets relation data from juju.
Subclasses must provide the attributes `name`, which is the name of the
interface of interest, `interface`, which is the type of the interface of
interest, and `required_keys`, which is the set of keys required for the
relation to be considered complete. The data for all interfaces matching
the `name` attribute that are complete will be used to populate the dictionary
values (see `get_data`, below).
The generated context will be namespaced under the relation :attr:`name`,
to prevent potential naming conflicts.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = None
interface = None
required_keys = []
def __init__(self, name=None, additional_required_keys=None):
if name is not None:
self.name = name
if additional_required_keys is not None:
self.required_keys.extend(additional_required_keys)
self.get_data()
def __bool__(self):
"""
Returns True if all of the required_keys are available.
"""
return self.is_ready()
__nonzero__ = __bool__
def __repr__(self):
return super(RelationContext, self).__repr__()
def is_ready(self):
"""
Returns True if all of the `required_keys` are available from any units.
"""
ready = len(self.get(self.name, [])) > 0
if not ready:
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
return ready
def _is_ready(self, unit_data):
"""
Helper method that tests a set of relation data and returns True if
all of the `required_keys` are present.
"""
return set(unit_data.keys()).issuperset(set(self.required_keys))
def get_data(self):
"""
Retrieve the relation data for each unit involved in a relation and,
if complete, store it in a list under `self[self.name]`. This
is automatically called when the RelationContext is instantiated.
The units are sorted lexicographically first by the service ID, then by
the unit ID. Thus, if an interface has two other services, 'db:1'
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
set of data, the relation data for the units will be stored in the
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
If you only care about a single unit on the relation, you can just
access it as `{{ interface[0]['key'] }}`. However, if you can at all
support multiple units on a relation, you should iterate over the list,
like::
{% for unit in interface -%}
{{ unit['key'] }}{% if not loop.last %},{% endif %}
{%- endfor %}
Note that since all sets of relation data from all related services and
units are in a single list, if you need to know which service or unit a
set of data came from, you'll need to extend this class to preserve
that information.
"""
if not hookenv.relation_ids(self.name):
return
ns = self.setdefault(self.name, [])
for rid in sorted(hookenv.relation_ids(self.name)):
for unit in sorted(hookenv.related_units(rid)):
reldata = hookenv.relation_get(rid=rid, unit=unit)
if self._is_ready(reldata):
ns.append(reldata)
def provide_data(self):
"""
Return data to be relation_set for this interface.
"""
return {}
class MysqlRelation(RelationContext):
"""
Relation context for the `mysql` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'db'
interface = 'mysql'
required_keys = ['host', 'user', 'password', 'database']
class HttpRelation(RelationContext):
"""
Relation context for the `http` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'website'
interface = 'http'
required_keys = ['host', 'port']
def provide_data(self):
return {
'host': hookenv.unit_get('private-address'),
'port': 80,
}
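Defining a context for another interface follows the same pattern; this AMQP flavour is purely illustrative (interface and key names assumed):

from charmhelpers.core.services.helpers import RelationContext

class AMQPRelation(RelationContext):
    """Hypothetical context for an 'amqp' relation."""
    name = 'amqp'
    interface = 'rabbitmq'
    required_keys = ['hostname', 'password']

# Inside a hook: evaluates falsy until every required key is present.
amqp = AMQPRelation()
if amqp:
    hostname = amqp['amqp'][0]['hostname']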
class RequiredConfig(dict):
"""
Data context that loads config options with one or more mandatory options.
Once the required options have been changed from their default values, all
config options will be available, namespaced under `config` to prevent
potential naming conflicts (for example, between a config option and a
relation property).
:param list *args: List of options that must be changed from their default values.
"""
def __init__(self, *args):
self.required_options = args
self['config'] = hookenv.config()
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
self.config = yaml.load(fp).get('options', {})
def __bool__(self):
for option in self.required_options:
if option not in self['config']:
return False
current_value = self['config'][option]
default_value = self.config[option].get('default')
if current_value == default_value:
return False
if current_value in (None, '') and default_value in (None, ''):
return False
return True
def __nonzero__(self):
return self.__bool__()
class StoredContext(dict):
"""
A data context that always returns the data that it was first created with.
This is useful for one-time generation of things like passwords, which
will thereafter use the same value that was originally generated, instead
of generating a new value each time it is run.
"""
def __init__(self, file_name, config_data):
"""
If the file exists, populate `self` with the data from the file.
Otherwise, populate with the given data and persist it to the file.
"""
if os.path.exists(file_name):
self.update(self.read_context(file_name))
else:
self.store_context(file_name, config_data)
self.update(config_data)
def store_context(self, file_name, config_data):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'w') as file_stream:
os.fchmod(file_stream.fileno(), 0o600)
yaml.dump(config_data, file_stream)
def read_context(self, file_name):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'r') as file_stream:
data = yaml.load(file_stream)
if not data:
raise OSError("%s is empty" % file_name)
return data
class TemplateCallback(ManagerCallback):
"""
Callback class that will render a Jinja2 template, for use as a ready
action.
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
:param str target: The target to write the rendered template to
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
"""
def __init__(self, source, target,
owner='root', group='root', perms=0o444):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
context = {}
for ctx in service.get('required_data', []):
context.update(ctx)
templating.render(self.source, self.target, context,
self.owner, self.group, self.perms)
# Convenience aliases for templates
render_template = template = TemplateCallback

View File

@ -0,0 +1,34 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import yaml
from subprocess import check_call
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a YAML string of sysctl options, eg "{ 'kernel.max_pid': 1337 }"
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
:returns: None
"""
sysctl_dict = yaml.load(sysctl_dict)
with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict.items():
fd.write("{}={}\n".format(key, value))
log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict),
level=DEBUG)
check_call(["sysctl", "-p", sysctl_file])

View File

@ -0,0 +1,52 @@
import os
from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute.
The context should be a dict containing the values to be replaced in the
template.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
Note: Using this requires python-jinja2; if it is not installed, calling
this will attempt to use charmhelpers.fetch.apt_install to install it.
"""
try:
from jinja2 import FileSystemLoader, Environment, exceptions
except ImportError:
try:
from charmhelpers.fetch import apt_install
except ImportError:
hookenv.log('Could not import jinja2, and could not import '
'charmhelpers.fetch to install it',
level=hookenv.ERROR)
raise
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
loader = Environment(loader=FileSystemLoader(templates_dir))
try:
template = loader.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
host.mkdir(os.path.dirname(target), owner, group)
host.write_file(target, content, owner, group, perms)
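A typical call from a hook, assuming a rabbitmq.config template shipped in the charm's templates directory and an illustrative context key:

from charmhelpers.core.templating import render

# Renders templates/rabbitmq.config with the given context and writes the
# result out, creating the target directory if needed.
render(source='rabbitmq.config',
       target='/etc/rabbitmq/rabbitmq.config',
       context={'cluster_partition_handling': 'ignore'},
       perms=0o644)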

View File

@ -1,13 +1,10 @@
import importlib
from tempfile import NamedTemporaryFile
import time
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
from urlparse import (
urlparse,
urlunparse,
)
import subprocess
from charmhelpers.core.hookenv import (
config,
@ -15,6 +12,12 @@ from charmhelpers.core.hookenv import (
)
import os
import six
if six.PY3:
from urllib.parse import urlparse, urlunparse
else:
from urlparse import urlparse, urlunparse
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@ -71,6 +74,7 @@ CLOUD_ARCHIVE_POCKETS = {
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
@ -116,14 +120,7 @@ class BaseFetchHandler(object):
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
import apt_pkg
apt_pkg.init()
# Tell apt to build an in-memory cache to prevent race conditions (if
# another process is already building the cache).
apt_pkg.config.set("Dir::Cache::pkgcache", "")
cache = apt_pkg.Cache()
cache = apt_cache()
_pkgs = []
for package in packages:
try:
@ -136,6 +133,16 @@ def filter_installed_packages(packages):
return _pkgs
def apt_cache(in_memory=True):
"""Build and return an apt cache"""
import apt_pkg
apt_pkg.init()
if in_memory:
apt_pkg.config.set("Dir::Cache::pkgcache", "")
apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
return apt_pkg.Cache()
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
if options is None:
@ -144,7 +151,7 @@ def apt_install(packages, options=None, fatal=False):
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
@ -177,7 +184,7 @@ def apt_update(fatal=False):
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
@ -188,7 +195,7 @@ def apt_purge(packages, fatal=False):
def apt_hold(packages, fatal=False):
"""Hold one or more packages"""
cmd = ['apt-mark', 'hold']
if isinstance(packages, basestring):
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
@ -201,6 +208,29 @@ def apt_hold(packages, fatal=False):
def add_source(source, key=None):
"""Add a package source to this system.
@param source: a URL or sources.list entry, as supported by
add-apt-repository(1). Examples::
ppa:charmers/example
deb https://stub:key@private.example.com/ubuntu trusty main
In addition:
'proposed:' may be used to enable the standard 'proposed'
pocket for the release.
'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse'
'distro' may be used as a noop
@param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver,
placing your Juju environment at risk. PPA and cloud archive keys
are securely added automatically, so should not be provided.
"""
if source is None:
log('Source is not present. Skipping')
return
@ -225,10 +255,25 @@ def add_source(source, key=None):
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
elif source == 'distro':
pass
else:
log("Unknown source: {!r}".format(source))
if key:
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv',
key])
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile('w+') as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
else:
# Note that hkp: is in no way a secure protocol. Using a
# GPG key id is pointless from a security POV unless you
# absolutely trust your network and DNS.
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv',
key])
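Putting the docstring together, a few hedged invocations (the private archive URL and key are placeholders):

from charmhelpers.fetch import add_source, apt_update

add_source('cloud:icehouse')          # official cloud archive pocket
add_source('ppa:charmers/example')    # PPA; its key is added automatically
# Arbitrary archive plus an explicit ASCII-armoured key:
add_source('deb http://private.example.com/ubuntu trusty main',
           key='-----BEGIN PGP PUBLIC KEY BLOCK-----\n...')
apt_update(fatal=True)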
def configure_sources(update=False,
@ -238,7 +283,8 @@ def configure_sources(update=False,
Configure multiple sources from charm configuration.
The lists are encoded as yaml fragments in the configuration.
The frament needs to be included as a string.
The fragment needs to be included as a string. Sources and their
corresponding keys are of the types supported by add_source().
Example config:
install_sources: |
@ -253,14 +299,14 @@ def configure_sources(update=False,
sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, basestring):
if isinstance(sources, six.string_types):
sources = [sources]
if keys is None:
for source in sources:
add_source(source, None)
else:
if isinstance(keys, basestring):
if isinstance(keys, six.string_types):
keys = [keys]
if len(sources) != len(keys):
@ -272,22 +318,35 @@ def configure_sources(update=False,
apt_update(fatal=True)
def install_remote(source):
def install_remote(source, *args, **kwargs):
"""
Install a file tree from a remote source
The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this modules submodules
Options supported are submodule-specific"""
Schemes supported are based on this module's submodules.
Options supported are submodule-specific.
Additional arguments are passed through to the submodule.
For example::
dest = install_remote('http://example.com/archive.tgz',
checksum='deadbeef',
hash_type='sha1')
This will download `archive.tgz`, validate it using SHA1 and, if
the file is ok, extract it and return the directory in which it
was extracted. If the checksum fails, it will raise
:class:`charmhelpers.core.host.ChecksumError`.
"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source)
installed_to = handler.install(source, *args, **kwargs)
except UnhandledSource:
pass
if not installed_to:
@ -344,7 +403,7 @@ def _run_apt_command(cmd, fatal=False):
while result is None or result == APT_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError, e:
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > APT_NO_LOCK_RETRY_COUNT:
raise

View File

@ -1,6 +1,23 @@
import os
import urllib2
import urlparse
import hashlib
import re
import six
if six.PY3:
from urllib.request import (
build_opener, install_opener, urlopen, urlretrieve,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
)
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError
else:
from urllib import urlretrieve
from urllib2 import (
build_opener, install_opener, urlopen,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
URLError
)
from urlparse import urlparse, urlunparse, parse_qs
from charmhelpers.fetch import (
BaseFetchHandler,
@ -10,11 +27,37 @@ from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir
from charmhelpers.core.host import mkdir, check_hash
def splituser(host):
'''urllib.splituser(), but six's support of this seems broken'''
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match:
return match.group(1, 2)
return None, host
def splitpasswd(user):
'''urllib.splitpasswd(), but six's support of this is missing'''
_passwdprog = re.compile('^([^:]*):(.*)$', re.S)
match = _passwdprog.match(user)
if match:
return match.group(1, 2)
return user, None
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""Handler for archives via generic URLs"""
"""
Handler to download archive files from arbitrary URLs.
Can fetch from http, https, ftp, and file URLs.
Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
Installs the contents of the archive in $CHARM_DIR/fetched/.
"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@ -24,22 +67,28 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
return False
def download(self, source, dest):
"""
Download an archive file.
:param str source: URL pointing to an archive file.
:param str dest: Local path location to download archive file to.
"""
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
proto, netloc, path, params, query, fragment = urlparse(source)
if proto in ('http', 'https'):
auth, barehost = urllib2.splituser(netloc)
auth, barehost = splituser(netloc)
if auth is not None:
source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
username, password = urllib2.splitpasswd(auth)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
source = urlunparse((proto, barehost, path, params, query, fragment))
username, password = splitpasswd(auth)
passman = HTTPPasswordMgrWithDefaultRealm()
# Realm is set to None in add_password to force the username and password
# to be used regardless of the realm
passman.add_password(None, source, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
response = urllib2.urlopen(source)
authhandler = HTTPBasicAuthHandler(passman)
opener = build_opener(authhandler)
install_opener(opener)
response = urlopen(source)
try:
with open(dest, 'w') as dest_file:
dest_file.write(response.read())
@ -48,16 +97,49 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
os.unlink(dest)
raise e
def install(self, source):
# Mandatory file validation via Sha1 or MD5 hashing.
def download_and_validate(self, url, hashsum, validate="sha1"):
tempfile, headers = urlretrieve(url)
check_hash(tempfile, hashsum, validate)
return tempfile
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
"""
Download and install an archive file, with optional checksum validation.
The checksum can also be given on the `source` URL's fragment.
For example::
handler.install('http://example.com/file.tgz#sha1=deadbeef')
:param str source: URL pointing to an archive file.
:param str dest: Local destination path to install to. If not given,
installs to `$CHARM_DIR/archives/archive_file_name`.
:param str checksum: If given, validate the archive file after download.
:param str hash_type: Algorithm used to generate `checksum`.
Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
mkdir(dest_dir, perms=0o755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
try:
self.download(source, dld_file)
except urllib2.URLError as e:
except URLError as e:
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
return extract(dld_file)
options = parse_qs(url_parts.fragment)
for key, value in options.items():
if not six.PY3:
algorithms = hashlib.algorithms
else:
algorithms = hashlib.algorithms_available
if key in algorithms:
check_hash(dld_file, value, key)
if checksum:
check_hash(dld_file, checksum, hash_type)
return extract(dld_file, dest)
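Example of fetching with checksum validation, either via the URL fragment or the new keyword arguments (digests shortened to placeholders):

from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

handler = ArchiveUrlFetchHandler()
# The sha1= fragment is parsed out of the URL and verified via check_hash().
dest = handler.install('http://example.com/file.tgz#sha1=deadbeef')
# Or pass the checksum explicitly:
dest = handler.install('http://example.com/file.tgz',
                       checksum='deadbeef', hash_type='sha1')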

View File

@ -5,6 +5,10 @@ from charmhelpers.fetch import (
)
from charmhelpers.core.host import mkdir
import six
if six.PY3:
raise ImportError('bzrlib does not support Python3')
try:
from bzrlib.branch import Branch
except ImportError:
@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
mkdir(dest_dir, perms=0o755)
try:
self.branch(source, dest_dir)
except OSError as e:

View File

@ -0,0 +1,51 @@
import os
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.core.host import mkdir
import six
if six.PY3:
raise ImportError('GitPython does not support Python 3')
try:
from git import Repo
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-git")
from git import Repo
class GitUrlFetchHandler(BaseFetchHandler):
"""Handler for git branches via generic and github URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
if url_parts.scheme not in ('http', 'https', 'git'):
return False
else:
return True
def clone(self, source, dest, branch):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
repo = Repo.clone_from(source, dest)
repo.git.checkout(branch)
def install(self, source, branch="master", dest=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:
self.clone(source, dest_dir, branch)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir
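And the git equivalent, cloning a branch into $CHARM_DIR/fetched (the URL is a placeholder):

from charmhelpers.fetch.giturl import GitUrlFetchHandler

handler = GitUrlFetchHandler()
if handler.can_handle('https://github.com/example/repo') is True:
    path = handler.install('https://github.com/example/repo',
                           branch='master')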

View File

@ -13,28 +13,11 @@ import grp
import os
import pwd
from charmhelpers.fetch import (
apt_install
)
from charmhelpers.core.hookenv import (
local_unit,
remote_unit,
log
)
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
apt_install('python-jinja2', fatal=True)
import jinja2
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = \
jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = templates.get_template(template_name)
return template.render(context)
def is_newer():


@@ -2,11 +2,11 @@ import os
import pwd
import grp
import re
import socket
import sys
import subprocess
import glob
from lib.utils import render_template
import apt_pkg as apt
import tempfile
from charmhelpers.contrib.openstack.utils import (
get_hostname,
@@ -18,40 +18,79 @@ from charmhelpers.core.hookenv import (
relation_get,
related_units,
log, ERROR,
INFO,
service_name
)
from charmhelpers.core.host import pwgen, mkdir, write_file
from charmhelpers.core.host import (
pwgen,
mkdir,
write_file,
lsb_release,
cmp_pkgrevno
)
from charmhelpers.core.templating import render
from charmhelpers.contrib.peerstorage import (
peer_store,
peer_retrieve
)
from collections import OrderedDict
PACKAGES = ['rabbitmq-server', 'python-amqplib']
RABBITMQ_CTL = '/usr/sbin/rabbitmqctl'
COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
ENV_CONF = '/etc/rabbitmq/rabbitmq-env.conf'
RABBITMQ_CONF = '/etc/rabbitmq/rabbitmq.config'
ENABLED_PLUGINS = '/etc/rabbitmq/enabled_plugins'
RABBIT_USER = 'rabbitmq'
LIB_PATH = '/var/lib/rabbitmq/'
HOSTS_FILE = '/etc/hosts'
_named_passwd = '/var/lib/charm/{}/{}.passwd'
# hook_contexts are used as a convenient mechanism to render templates
# logically, consider building a hook_context for template rendering so
# the charm doesn't concern itself with template specifics etc.
CONFIG_FILES = OrderedDict([
(RABBITMQ_CONF, {
'hook_contexts': None,
'services': ['rabbitmq-server']
}),
(ENV_CONF, {
'hook_contexts': None,
'services': ['rabbitmq-server']
}),
(ENABLED_PLUGINS, {
'hook_contexts': None,
'services': ['rabbitmq-server']
}),
])
class RabbitmqError(Exception):
pass
def list_vhosts():
"""
Returns a list of all the available vhosts
"""
try:
output = subprocess.check_output([RABBITMQ_CTL, 'list_vhosts'])
return output.split('\n')[1:-2]
except Exception as ex:
# if no vhosts, just raises an exception
log(str(ex), level='DEBUG')
return []
def vhost_exists(vhost):
try:
cmd = [RABBITMQ_CTL, 'list_vhosts']
out = subprocess.check_output(cmd)
for line in out.split('\n')[1:]:
if line == vhost:
log('vhost (%s) already exists.' % vhost)
return True
return False
except:
# if no vhosts, just raises an exception
return False
return vhost in list_vhosts()
def create_vhost(vhost):
@@ -86,10 +125,11 @@ def create_user(user, password, admin=False):
if admin:
cmd = [RABBITMQ_CTL, 'set_user_tags', user, 'administrator']
log('Granting user (%s) admin access.')
log('Granting user (%s) admin access.' % user)
else:
cmd = [RABBITMQ_CTL, 'set_user_tags', user]
log('Revoking user (%s) admin access.')
log('Revoking user (%s) admin access.' % user)
subprocess.check_call(cmd)
def grant_permissions(user, vhost):
@@ -98,26 +138,101 @@ def grant_permissions(user, vhost):
subprocess.check_call(cmd)
def set_policy(vhost, policy_name, match, value):
cmd = [RABBITMQ_CTL, 'set_policy', '-p', vhost,
policy_name, match, value]
log("setting policy: %s" % str(cmd), level='DEBUG')
subprocess.check_call(cmd)
def set_ha_mode(vhost, mode, params=None, sync_mode='automatic'):
"""Valid mode values:
* 'all': Queue is mirrored across all nodes in the cluster. When a new
node is added to the cluster, the queue will be mirrored to that node.
* 'exactly': Queue is mirrored to count nodes in the cluster.
* 'nodes': Queue is mirrored to the nodes listed in node names
More details at http://www.rabbitmq.com/ha.html
:param vhost: virtual host name
:param mode: ha mode
:param params: values to pass to the policy, possible values depend on the
mode chosen.
:param sync_mode: when `mode` is 'exactly' this used to indicate how the
sync has to be done
http://www.rabbitmq.com/ha.html#eager-synchronisation
"""
if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
log(("Mirroring queues cannot be enabled, only supported "
"in rabbitmq-server >= 3.0"), level='WARN')
log(("More information at http://www.rabbitmq.com/blog/"
"2012/11/19/breaking-things-with-rabbitmq-3-0"), level='INFO')
return
if mode == 'all':
value = '{"ha-mode": "all"}'
elif mode == 'exactly':
value = '{"ha-mode":"exactly","ha-params":%s,"ha-sync-mode":"%s"}' \
% (params, sync_mode)
elif mode == 'nodes':
value = '{"ha-mode":"nodes","ha-params":[%s]}' % ",".join(params)
else:
raise RabbitmqError(("Unknown mode '%s', known modes: "
"all, exactly, nodes") % mode)
log("Setting HA policy to vhost '%s'" % vhost, level='INFO')
set_policy(vhost, 'HA', '^(?!amq\.).*', value)
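To make the three modes concrete, here is a sketch of calls and the policy JSON they produce (vhost and node names are illustrative):

# 'all': mirror queues across every node in the cluster.
set_ha_mode('openstack', 'all')
# -> set_policy('openstack', 'HA', '^(?!amq\.).*', '{"ha-mode": "all"}')

# 'exactly': mirror to two nodes, with automatic eager synchronisation.
set_ha_mode('openstack', 'exactly', params=2)

# 'nodes': each params element carries its own quotes so the joined value
# is valid JSON: {"ha-mode":"nodes","ha-params":["rabbit@a","rabbit@b"]}
set_ha_mode('openstack', 'nodes', params=['"rabbit@a"', '"rabbit@b"'])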
def clear_ha_mode(vhost, name='HA', force=False):
"""
Clear policy from the `vhost` by `name`
"""
if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
log(("Mirroring queues not supported "
"in rabbitmq-server >= 3.0"), level='WARN')
log(("More information at http://www.rabbitmq.com/blog/"
"2012/11/19/breaking-things-with-rabbitmq-3-0"), level='INFO')
return
log("Clearing '%s' policy from vhost '%s'" % (name, vhost), level='INFO')
try:
subprocess.check_call([RABBITMQ_CTL, 'clear_policy', '-p', vhost,
name])
except subprocess.CalledProcessError as ex:
if not force:
raise ex
def set_all_mirroring_queues(enable):
"""
:param enable: if True then enable mirroring queue for all the vhosts,
otherwise the HA policy is removed
"""
if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
log(("Mirroring queues not supported "
"in rabbitmq-server >= 3.0"), level='WARN')
log(("More information at http://www.rabbitmq.com/blog/"
"2012/11/19/breaking-things-with-rabbitmq-3-0"), level='INFO')
return
for vhost in list_vhosts():
if enable:
set_ha_mode(vhost, 'all')
else:
clear_ha_mode(vhost, force=True)
def service(action):
cmd = ['service', 'rabbitmq-server', action]
subprocess.check_call(cmd)
def compare_version(base_version):
apt.init()
cache = apt.Cache()
pkg = cache['rabbitmq-server']
if pkg.current_ver:
return apt.version_compare(
apt.upstream_version(pkg.current_ver.ver_str),
base_version)
else:
return False
def cluster_with():
log('Clustering with new node')
if compare_version('3.0.1') >= 0:
if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
cluster_cmd = 'join_cluster'
else:
cluster_cmd = 'cluster'
@@ -141,10 +256,24 @@ def cluster_with():
available_nodes = []
for r_id in relation_ids('cluster'):
for unit in related_units(r_id):
address = relation_get('private-address',
rid=r_id, unit=unit)
if config('prefer-ipv6'):
address = relation_get('hostname',
rid=r_id, unit=unit)
else:
address = relation_get('private-address',
rid=r_id, unit=unit)
if address is not None:
node = get_hostname(address, fqdn=False)
try:
node = get_hostname(address, fqdn=False)
except:
log('Cannot resolve hostname for {} '
'using DNS servers'.format(address), level='WARNING')
log('Falling back to use socket.gethostname()',
level='WARNING')
# If the private-address is not resolvable using DNS
# then use the current hostname
node = socket.gethostname()
available_nodes.append(node)
if len(available_nodes) == 0:
@@ -167,10 +296,6 @@ def cluster_with():
cmd = [RABBITMQ_CTL, 'start_app']
subprocess.check_call(cmd)
log('Host clustered with %s.' % node)
if compare_version('3.0.1') >= 0:
cmd = [RABBITMQ_CTL, 'set_policy', 'HA',
'^(?!amq\.).*', '{"ha-mode": "all"}']
subprocess.check_call(cmd)
return True
except:
log('Failed to cluster with %s.' % node)
@@ -198,29 +323,40 @@ def break_cluster():
raise
def set_node_name(name):
# update or append RABBITMQ_NODENAME to environment config.
# rabbitmq.conf.d is not present on all releases, so use or create
# rabbitmq-env.conf instead.
if not os.path.isfile(ENV_CONF):
log('%s does not exist, creating.' % ENV_CONF)
with open(ENV_CONF, 'wb') as out:
out.write('RABBITMQ_NODENAME=%s\n' % name)
return
def update_rmq_env_conf(hostname=None, ipv6=False):
"""Update or append environment config.
rabbitmq.conf.d is not present on all releases, so use or create
rabbitmq-env.conf instead.
"""
keyvals = {}
if ipv6:
keyvals['RABBITMQ_SERVER_START_ARGS'] = "'-proto_dist inet6_tcp'"
if hostname:
keyvals['RABBITMQ_NODENAME'] = hostname
out = []
f = False
for line in open(ENV_CONF).readlines():
if line.strip().startswith('RABBITMQ_NODENAME'):
f = True
line = 'RABBITMQ_NODENAME=%s\n' % name
out.append(line)
if not f:
out.append('RABBITMQ_NODENAME=%s\n' % name)
log('Updating %s, RABBITMQ_NODENAME=%s' %
(ENV_CONF, name))
keys_found = []
if os.path.exists(ENV_CONF):
for line in open(ENV_CONF).readlines():
for key, val in keyvals.items():
if line.strip().startswith(key):
keys_found.append(key)
line = '%s=%s' % (key, val)
out.append(line)
for key, val in keyvals.items():
log('Updating %s, %s=%s' % (ENV_CONF, key, val))
if key not in keys_found:
out.append('%s=%s' % (key, val))
with open(ENV_CONF, 'wb') as conf:
conf.write(''.join(out))
conf.write('\n'.join(out))
# Ensure newline at EOF
conf.write('\n')
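For illustration, assuming rabbitmq-env.conf does not yet contain either key, a call such as the following (node name is made up) leaves the file with both lines; existing lines for the same keys would instead be rewritten in place (ordering of the two keys may vary):

update_rmq_env_conf(hostname='rabbit@juju-machine-0', ipv6=True)
# rabbitmq-env.conf afterwards:
#   RABBITMQ_SERVER_START_ARGS='-proto_dist inet6_tcp'
#   RABBITMQ_NODENAME=rabbit@juju-machine-0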
def get_node_name():
@@ -281,9 +417,7 @@ def enable_ssl(ssl_key, ssl_cert, ssl_port,
if ssl_ca:
data["ssl_ca_file"] = ssl_ca_file
with open(RABBITMQ_CONF, 'w') as rmq_conf:
rmq_conf.write(render_template(
os.path.basename(RABBITMQ_CONF), data))
render(os.path.basename(RABBITMQ_CONF), RABBITMQ_CONF, data, perms=0o644)
def execute(cmd, die=False, echo=False):
@@ -367,3 +501,72 @@ def get_rabbit_password(username, password=None):
# cluster relation is not yet started, use on-disk
_password = get_rabbit_password_on_disk(username, password)
return _password
def bind_ipv6_interface():
out = "RABBITMQ_SERVER_START_ARGS='-proto_dist inet6_tcp'\n"
with open(ENV_CONF, 'wb') as conf:
conf.write(out)
def update_hosts_file(map):
"""Rabbitmq does not currently like ipv6 addresses so we need to use dns
names instead. In order to make them resolvable we ensure they are in
/etc/hosts.
"""
with open(HOSTS_FILE, 'r') as hosts:
lines = hosts.readlines()
log("Updating hosts file with: %s (current: %s)" % (map, lines),
level=INFO)
newlines = []
for ip, hostname in map.items():
if not ip or not hostname:
continue
keepers = []
for line in lines:
_line = line.split()
if len(_line) < 2 or not (_line[0] == ip or hostname in _line[1:]):
keepers.append(line)
else:
log("Removing line '%s' from hosts file" % (line))
lines = keepers
newlines.append("%s %s\n" % (ip, hostname))
lines += newlines
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
with open(tmpfile.name, 'w') as hosts:
for line in lines:
hosts.write(line)
os.rename(tmpfile.name, HOSTS_FILE)
os.chmod(HOSTS_FILE, 0o644)
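A sketch of the intended behaviour (address and name are illustrative): any existing /etc/hosts line matching either the IP or the hostname is dropped before the fresh mapping is appended.

# Replaces a stale '10.0.0.9 rabbit-node-1' line, if present, with:
#   2001:db8::10 rabbit-node-1
update_hosts_file({'2001:db8::10': 'rabbit-node-1'})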
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
def restart_map():
'''Determine the correct resource map to be passed to
charmhelpers.core.restart_on_change() based on the services configured.
:returns: dict: A dictionary mapping config file to lists of services
that should be restarted when file changes.
'''
_map = []
for f, ctxt in CONFIG_FILES.iteritems():
svcs = []
for svc in ctxt['services']:
svcs.append(svc)
if svcs:
_map.append((f, svcs))
return OrderedDict(_map)
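Given the CONFIG_FILES table defined earlier in this file, the returned map works out to roughly the following (paths per the constants above):

>>> restart_map()
OrderedDict([('/etc/rabbitmq/rabbitmq.config', ['rabbitmq-server']),
             ('/etc/rabbitmq/rabbitmq-env.conf', ['rabbitmq-server']),
             ('/etc/rabbitmq/enabled_plugins', ['rabbitmq-server'])])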


@@ -5,6 +5,7 @@ import shutil
import sys
import subprocess
import glob
import socket
import rabbit_utils as rabbit
from lib.utils import (
@@ -13,7 +14,15 @@ from lib.utils import (
)
from charmhelpers.contrib.hahelpers.cluster import (
is_clustered,
eligible_leader
is_elected_leader
)
from charmhelpers.contrib.openstack.utils import (
get_hostname,
get_host_ip
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
import charmhelpers.contrib.storage.linux.ceph as ceph
@@ -22,11 +31,15 @@ from charmhelpers.contrib.openstack.utils import save_script_rc
from charmhelpers.fetch import (
add_source,
apt_update,
apt_install)
apt_install,
)
from charmhelpers.core.hookenv import (
open_port, close_port,
log, ERROR,
open_port,
close_port,
log,
ERROR,
INFO,
relation_get,
relation_set,
relation_ids,
@@ -41,15 +54,21 @@ from charmhelpers.core.hookenv import (
UnregisteredHookError
)
from charmhelpers.core.host import (
rsync, service_stop, service_restart
cmp_pkgrevno,
restart_on_change,
rsync,
service_stop,
service_restart,
)
from charmhelpers.contrib.charmsupport.nrpe import NRPE
from charmhelpers.contrib.ssl.service import ServiceCA
from charmhelpers.contrib.peerstorage import (
peer_echo,
peer_retrieve,
peer_store,
peer_retrieve
peer_store_and_set,
peer_retrieve_by_prefix,
)
from charmhelpers.contrib.network.ip import get_address_in_network
@@ -79,44 +98,78 @@ def configure_amqp(username, vhost, admin=False):
rabbit.create_user(username, password, admin)
rabbit.grant_permissions(username, vhost)
# NOTE(freyes): after rabbitmq-server 3.0 the method to define HA in the
# queues is different
# http://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0
if config('mirroring-queues'):
rabbit.set_ha_mode(vhost, 'all')
return password
@hooks.hook('amqp-relation-changed')
def amqp_changed(relation_id=None, remote_unit=None):
if not is_elected_leader('res_rabbitmq_vip'):
# Each unit needs to set the db information, otherwise if the unit
# with the info dies the settings die with it (Bug #1355848).
for rel_id in relation_ids('amqp'):
peerdb_settings = peer_retrieve_by_prefix(rel_id,
exc_list=['hostname'])
peerdb_settings['hostname'] = unit_get('private-address')
if 'password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('amqp_changed(): Deferring amqp_changed'
' to the elected leader.')
# NOTE: active/active case
if config('prefer-ipv6'):
relation_settings = {'private-address': get_ipv6_addr()[0]}
relation_set(relation_settings=relation_settings)
return
relation_settings = {}
settings = relation_get(rid=relation_id, unit=remote_unit)
if eligible_leader('res_rabbitmq_vip'):
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
singleset = set(['username', 'vhost'])
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
if config('prefer-ipv6'):
relation_settings['private-address'] = get_ipv6_addr()[0]
else:
# NOTE(jamespage)
# override private-address settings if access-network is
# configured and an appropriate network interface is configured.
relation_settings['hostname'] = \
get_address_in_network(config('access-network'),
unit_get('private-address'))
relation_settings['private-address'] = \
get_address_in_network(config('access-network'),
unit_get('private-address'))
relation_settings['hostname'] = \
get_address_in_network(config('access-network'),
unit_get('private-address'))
configure_client_ssl(relation_settings)
@@ -130,34 +183,56 @@ def amqp_changed(relation_id=None, remote_unit=None):
if config('ha-vip-only') is True:
relation_settings['ha-vip-only'] = 'true'
# NOTE(jamespage)
# override private-address settings if access-network is
# configured and an appropriate network interface is configured.
relation_settings['private-address'] = \
get_address_in_network(config('access-network'),
unit_get('private-address'))
# set if need HA queues or not
if rabbit.compare_version('3.0.1') < 0:
if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
relation_settings['ha_queues'] = True
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
peer_store_and_set(relation_id=relation_id,
relation_settings=relation_settings)
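For the multi-user branch of amqp_changed() above, clients publish prefixed relation keys; a sketch of the regrouping (prefixes and values are illustrative):

# Incoming relation settings of the form '<prefix>_<key>':
settings = {'nova_username': 'nova', 'nova_vhost': 'openstack',
            'neutron_username': 'neutron', 'neutron_vhost': 'openstack'}
# The loop above regroups them into:
#   {'nova': {'username': 'nova', 'vhost': 'openstack'},
#    'neutron': {'username': 'neutron', 'vhost': 'openstack'}}
# and each complete group is answered with a '<prefix>_password' key.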
@hooks.hook('cluster-relation-joined')
def cluster_joined():
def cluster_joined(relation_id=None):
if config('prefer-ipv6'):
relation_settings = {'hostname': socket.gethostname(),
'private-address': get_ipv6_addr()[0]}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
# Set RABBITMQ_NODENAME to something that's resolvable by my peers
# get_host_ip() is called to sanitize private-address in case it
# doesn't return an IP address
ip_addr = get_host_ip(unit_get('private-address'))
try:
nodename = get_hostname(ip_addr, fqdn=False)
except:
log('Cannot resolve hostname for %s using DNS servers' % ip_addr,
level='WARNING')
log('Falling back to use socket.gethostname()',
level='WARNING')
# If the private-address is not resolvable using DNS
# then use the current hostname
nodename = socket.gethostname()
if nodename and rabbit.get_node_name() != nodename:
log('forcing nodename=%s' % nodename)
# would like to have used the restart_on_change decorator, but
# need to stop it under current nodename prior to updating env
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='rabbit@%s' % nodename,
ipv6=config('prefer-ipv6'))
service_restart('rabbitmq-server')
if is_newer():
log('cluster_joined: Relation greater.')
return
rabbit.COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
if not os.path.isfile(rabbit.COOKIE_PATH):
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
@@ -170,26 +245,27 @@ def cluster_joined():
def cluster_changed():
rdata = relation_get()
if 'cookie' not in rdata:
log('cluster_joined: cookie not yet set.')
log('cluster_joined: cookie not yet set.', level=INFO)
return
# sync passwords
peer_echo()
# sync cookie
cookie = peer_retrieve('cookie')
if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
log('Cookie already synchronized with peer.')
else:
log('Synchronizing erlang cookie from peer.')
rabbit.service('stop')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
rabbit.service('start')
if config('prefer-ipv6') and rdata.get('hostname'):
private_address = rdata['private-address']
hostname = rdata['hostname']
if hostname:
rabbit.update_hosts_file({private_address: hostname})
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
'rabbitmq cluster config.', level=INFO)
return
# cluster with node
@@ -197,6 +273,29 @@ def cluster_changed():
if rabbit.cluster_with():
# resync nrpe user after clustering
update_nrpe_checks()
# If cluster has changed peer db may have changed so run amqp_changed
# to sync any changes
for rid in relation_ids('amqp'):
for unit in related_units(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
def update_cookie():
# sync cookie
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
log('Synchronizing erlang cookie from peer.', level=INFO)
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
service_restart('rabbitmq-server')
@hooks.hook('cluster-relation-departed')
@@ -241,7 +340,8 @@ def ha_joined():
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
@@ -332,7 +432,7 @@ def ceph_changed():
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
if eligible_leader('res_rabbitmq_vip'):
if is_elected_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
@@ -371,9 +471,11 @@ def update_nrpe_checks():
# Find out if nrpe set nagios_hostname
hostname = None
host_context = None
for rel in relations_of_type('nrpe-external-master'):
if 'nagios_hostname' in rel:
hostname = rel['nagios_hostname']
host_context = rel['nagios_host_context']
break
# create unique user and vhost for each unit
current_unit = local_unit().replace('/', '-')
@@ -381,6 +483,11 @@
vhost = 'nagios-%s' % current_unit
password = rabbit.get_rabbit_password(user)
if host_context:
myunit = "%s:%s" % (host_context, local_unit())
else:
myunit = local_unit()
rabbit.create_vhost(vhost)
rabbit.create_user(user, password)
rabbit.grant_permissions(user, vhost)
@@ -388,7 +495,7 @@
nrpe_compat = NRPE(hostname=hostname)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER,
description='Check RabbitMQ',
description='Check RabbitMQ {%s}' % myunit,
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
''.format(NAGIOS_PLUGINS, user, password, vhost)
)
@@ -519,7 +626,11 @@
@hooks.hook('config-changed')
@restart_on_change(rabbit.restart_map())
def config_changed():
if config('prefer-ipv6'):
rabbit.assert_charm_supports_ipv6()
# Add archive source if provided
add_source(config('source'), config('key'))
apt_update(fatal=True)
@@ -536,6 +647,9 @@ def config_changed():
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
chmod(RABBIT_DIR, 0o775)
if config('prefer-ipv6'):
rabbit.update_rmq_env_conf(ipv6=config('prefer-ipv6'))
if config('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
open_port(55672)
@@ -545,11 +659,21 @@ def config_changed():
configure_rabbit_ssl()
if eligible_leader('res_rabbitmq_vip') or \
config('ha-vip-only') is True:
service_restart('rabbitmq-server')
rabbit.set_all_mirroring_queues(config('mirroring-queues'))
update_nrpe_checks()
if is_relation_made("ha"):
ha_is_active_active = config("ha-vip-only")
if ha_is_active_active:
update_nrpe_checks()
else:
if is_elected_leader('res_rabbitmq_vip'):
update_nrpe_checks()
else:
log("hacluster relation is present but this node is not active"
" skipping update nrpe checks")
else:
update_nrpe_checks()
# NOTE(jamespage)
# trigger amqp_changed to pickup and changes to network

setup.cfg Normal file

@@ -0,0 +1,5 @@
[nosetests]
verbosity=2
with-coverage=1
cover-erase=1
cover-package=hooks

test-requirements.txt Normal file

@@ -0,0 +1,3 @@
nose
testtools
mock

tests/00_setup.sh Executable file

@@ -0,0 +1,16 @@
#!/bin/sh
# Install Juju Amulet and any other applications that are needed for the tests.
set -x
# Check if amulet is installed before adding repository and updating apt-get.
dpkg -s amulet
if [ $? -ne 0 ]; then
sudo add-apt-repository -y ppa:juju/stable
sudo apt-get update
sudo apt-get install -y amulet
fi
# Install any additional python packages, or software here.
sudo apt-get install -y python python-pika python3-requests

tests/10_basic_deploy_test.py Executable file

@@ -0,0 +1,101 @@
#!/usr/bin/python3
# This Amulet test performs a basic deploy and checks if rabbitmq is running.
import amulet
import os
import socket
import ssl
# The number of seconds to wait for the environment to setup.
seconds = 900
# Get the directory in this way to load the files from the tests directory.
path = os.path.abspath(os.path.dirname(__file__))
key_path = os.path.join(path, 'rabbit-server-privkey.pem')
# Read the private key file.
with open(key_path) as f:
privateKey = f.read()
# Read the certificate file.
cert_path = os.path.join(path, 'rabbit-server-cert.pem')
with open(cert_path) as f:
certificate = f.read()
# Create a dictionary for the rabbitmq configuration.
rabbitmq_configuration = {
'ssl_enabled': True,
'ssl_key': privateKey,
'ssl_cert': certificate,
'ssl_port': 5671
}
d = amulet.Deployment(series='trusty')
# Add the rabbitmq-server charm to the deployment.
d.add('rabbitmq-server')
# Configure options on the rabbitmq-server.
d.configure('rabbitmq-server', rabbitmq_configuration)
# Expose the server so we can connect.
d.expose('rabbitmq-server')
try:
# Execute the deployer with the current mapping.
d.setup(timeout=seconds)
except amulet.helpers.TimeoutError:
message = 'The environment did not setup in %d seconds.' % seconds
# The SKIP status allows the test to be skipped or failed based on configuration.
amulet.raise_status(amulet.SKIP, msg=message)
except:
raise
print('The rabbitmq-server has been successfully deployed.')
###############################################################################
## Verify that the rabbit service is running on the deployed server.
###############################################################################
rabbitmq_sentry = d.sentry.unit['rabbitmq-server/0']
# Get the public address for rabbitmq-server instance.
server_address = rabbitmq_sentry.info['public-address']
# Create the command that checks if the rabbitmq-server service is running.
command = 'rabbitmqctl status'
print(command)
# Execute the command on the deployed service.
output, code = rabbitmq_sentry.run(command)
print(output)
# Check the return code for the success and failure of this test.
if (code != 0):
message = 'The ' + command + ' did not return the expected code of 0.'
amulet.raise_status(amulet.FAIL, msg=message)
else:
print('The rabbitmq-server is running on %s' % server_address)
###############################################################################
## Test the ssl certificate.
###############################################################################
# Get the port for ssl_port instance.
server_port = rabbitmq_configuration['ssl_port']
# Get the path to the certificate authority file.
ca_cert_path = os.path.join(path, 'rabbit-server-cacert.pem')
print('Testing ssl connection to rabbitmq-server.')
try:
# Create a normal socket.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Require a certificate from the server; since a self-signed certificate
# was used, the ca_certs must be the server certificate file itself.
ssl_sock = ssl.wrap_socket(s, ca_certs=ca_cert_path,
cert_reqs=ssl.CERT_REQUIRED)
# Connect to the rabbitmq server using ssl.
ssl_sock.connect((server_address, server_port))
# Get the certificate.
certificate = ssl_sock.getpeercert()
# SSL socket connected and got the certificate, this passes the ssl test!
print('Connected to the rabbitmq-server {0}:{1} using ssl!'.format(
server_address, server_port))
except Exception as e:
message = 'Failed to create an ssl connection to {0}:{1}\n{2}'.format(
server_address, server_port, str(e))
amulet.raise_status(amulet.FAIL, msg=message)
finally:
ssl_sock.close()
print('The rabbitmq-server passed the basic deploy test!')

tests/20_deploy_relations_test.py Executable file

@@ -0,0 +1,214 @@
#!/usr/bin/python3
# This Amulet test deploys rabbitmq-server, and the related charms.
import amulet
import os
import subprocess
import time
# The number of seconds to wait for the environment to setup.
seconds = 900
# The number of units to scale rabbitmq-server to.
scale = 2
# The port that amqp traffic is sent on.
amqp_port = '5672'
# The directory to use as a block device for ceph.
devices = '/srv/osd1'
# The default version of ceph does not support directories as devices.
havana = 'cloud:precise-updates/havana'
# Create a dictionary of configuration values for ceph.
ceph_configuration = {
'fsid': 'ecbb8960-0e21-11e2-b495-83a88f44db01',
'monitor-secret': 'AQBomftSyK1LORAAhg71ukxBxN9ml90stexqEw==',
'osd-devices': devices,
'source': havana
}
# Create a dictionary of configuration values for cinder.
cinder_configuration = {
'block-device': 'None'
}
# Create a dictionary of the rabbit configuration values.
rabbit_configuration = {
'vip': '192.168.77.11',
'vip_cidr': 19,
'vip_iface': 'eth0',
'ha-bindiface': 'eth0',
'ha-mcastport': 5406,
'rbd-size': '2G',
'rbd-name': 'testrabbit1'
}
# The AMQP package is only available for python version 2.x.
python2 = '/usr/bin/python'
if not os.path.isfile(python2):
error_message = 'Error, python version 2 is required for this test.'
amulet.raise_status(amulet.FAIL, msg=error_message)
d = amulet.Deployment(series='trusty')
# Add rabbitmq-server to the deployment.
d.add('rabbitmq-server', units=scale)
# Add the ceph charm to the deployment.
d.add('ceph')
# Add cinder to the deployment to test the AMQP relation.
d.add('cinder')
# Add hacluster to the deployment to test the ha relation.
d.add('hacluster')
# The ceph charm requires configuration to deploy successfully.
d.configure('ceph', ceph_configuration)
# Configure the cinder charm.
d.configure('cinder', cinder_configuration)
# Configure the rabbit charm.
d.configure('rabbitmq-server', rabbit_configuration)
# Add relation from rabbitmq-server to ceph testing the ceph relation.
d.relate('rabbitmq-server:ceph', 'ceph:client')
# Add relation from rabbitmq-server to cinder testing the amqp relation.
d.relate('rabbitmq-server:amqp', 'cinder:amqp')
# Add relation from rabbitmq-server to hacluster testing the ha relation.
d.relate('rabbitmq-server:ha', 'hacluster:ha')
# Expose the rabbitmq-server.
d.expose('rabbitmq-server')
try:
# Execute the deployer with the current mapping.
d.setup(timeout=seconds)
# Wait for the relations to finish their transactions.
d.sentry.wait(seconds)
except amulet.helpers.TimeoutError:
message = 'The environment did not setup in %d seconds.' % seconds
# Raise a failure if the environment did not set up within the timeout.
amulet.raise_status(amulet.FAIL, msg=message)
except:
raise
print('The environment successfully deployed.')
# Create a counter to make the messages unique.
counter = 1
# Get the directory in this way to load the files from the tests directory.
path = os.path.abspath(os.path.dirname(__file__))
# Create a path to the python test file to call.
amqp_tester = os.path.join(path, 'amqp_tester.py')
if not os.path.isfile(amqp_tester):
error_message = 'Unable to locate python test file %s' % amqp_tester
amulet.raise_status(amulet.FAIL, msg=error_message)
# Verify the ceph unit was created.
ceph_unit = d.sentry.unit['ceph/0']
# Verify the cinder unit was created.
cinder_unit = d.sentry.unit['cinder/0']
rabbit_units = []
for n in range(scale):
# Get each rabbitmq unit that was deployed.
rabbit_units.append(d.sentry.unit['rabbitmq-server/%d' % n])
# Iterate over every rabbitmq-unit to get the different relations.
for rabbit_unit in rabbit_units:
###########################################################################
## Test Relations
###########################################################################
# Verify the ceph relation was created for the rabbit unit.
rabbit_relation = rabbit_unit.relation('ceph', 'ceph:client')
print('rabbit relation to ceph:')
for key, value in rabbit_relation.items():
print(key, value)
# Verify the amqp relation was created for the rabbit unit.
rabbit_relation = rabbit_unit.relation('amqp', 'cinder:amqp')
print('rabbit relation to amqp:')
for key, value in rabbit_relation.items():
print(key, value)
# The hacluster charm is a subordinate, since the relation-sentry is also
# a subordinate charm no sentry is created for the hacluster relation.
# Verify the rabbit relation was created with the ceph unit.
ceph_relation = ceph_unit.relation('client', 'rabbitmq-server:ceph')
print('ceph relation to rabbitmq-server:')
for key, value in ceph_relation.items():
print(key, value)
# Verify the rabbit relation was created with the cinder unit.
cinder_relation = cinder_unit.relation('amqp', 'rabbitmq-server:amqp')
print('cinder relation to rabbitmq-server:')
for key, value in cinder_relation.items():
print(key, value)
###########################################################################
## Test AMQP
###########################################################################
# The AMQP python library is only available for python2 at this time.
# Call out a command to run the python2 code to test the AMQP protocol.
# Get the public address for rabbitmq-server instance.
server_address = rabbit_unit.info['public-address']
# Create a time stamp to help make the AMQP message unique.
time_stamp = time.strftime('%F %r')
# Create the message to send on the AMQP protocol.
amqp_message = "Message #{0} to send using the AMQP protocol {1}".format(
counter, time_stamp)
# Create the command with arguments that sends the message.
send_command = [python2, amqp_tester, server_address, amqp_port,
amqp_message]
print(send_command)
# Call the python command to send the AMQP message to the server.
output = subprocess.check_output(send_command)
# Create the command with arguments to receive messages.
receive_command = [python2, amqp_tester, server_address, amqp_port]
print(receive_command)
# Call the python command to receive the AMQP message from the same server.
output = subprocess.check_output(receive_command)
# The output is a byte string so convert the message to a byte string.
if output.find(amqp_message.encode()) == -1:
print('The AMQP test to {0}:{1} failed.'.format(server_address,
amqp_port))
amulet.raise_status(amulet.FAIL, msg=output)
else:
print('The AMQP test to {0}:{1} completed successfully.'.format(
server_address, amqp_port))
counter += 1
###########################################################################
## Verify that the rabbitmq cluster status is correct.
###########################################################################
# Create the command that checks if the rabbitmq-server service is running.
command = 'rabbitmqctl cluster_status'
print(command)
# Execute the command on the deployed service.
output, code = rabbit_unit.run(command)
print(output)
# Check the return code for the success and failure of this test.
if (code != 0):
message = 'The ' + command + ' did not return the expected code of 0.'
amulet.raise_status(amulet.FAIL, msg=message)
else:
print('The rabbitmq-server cluster status is OK.')
###############################################################################
## Test the AMQP messages can be sent from and read from another.
###############################################################################
# Get the public address for rabbitmq-server instance 0.
send_address = rabbit_units[0].info['public-address']
# Create a message to send from instance 0 and read it from instance 1.
amqp_message = "Message #{0} sent from {1} using the AMQP protocol.".format(
counter, send_address)
counter += 1
# Create the command that sends the message to instance 0.
send_command = [python2, amqp_tester, send_address, amqp_port, amqp_message]
print(send_command)
output = subprocess.check_output(send_command)
# Get the public address for rabbitmq-server instance 1.
receive_address = rabbit_units[1].info['public-address']
# Create the command that receives the message from instance 1.
receive_command = [python2, amqp_tester, receive_address, amqp_port]
print(receive_command)
output = subprocess.check_output(receive_command)
# The output is a byte string so convert the message to a byte string.
if output.find(amqp_message.encode()) == -1:
print(output)
message = 'Server {0} did not receive the AMQP message "{1}"'.format(
receive_address, amqp_message)
amulet.raise_status(amulet.FAIL, msg=message)
else:
print('Server {0} received the AMQP message sent from {1}'.format(
receive_address, send_address))
print('The rabbitmq-server charm passed this relations test.')

tests/30_configuration_test.py Executable file

@@ -0,0 +1,142 @@
#!/usr/bin/python3
# This Amulet test exercises the configuration options for rabbitmq-server.
import amulet
import os
import requests
import socket
import ssl
# The number of seconds to wait for the environment to setup.
seconds = 900
# Get the directory in this way to load the files from the tests directory.
path = os.path.abspath(os.path.dirname(__file__))
key_path = os.path.join(path, 'rabbit-server-privkey.pem')
# Read the private key file.
with open(key_path) as f:
privateKey = f.read()
cert_path = os.path.join(path, 'rabbit-server-cert.pem')
# Read the certificate file.
with open(cert_path) as f:
certificate = f.read()
# Create a dictionary of all the configuration values.
rabbit_configuration = {
'management_plugin': True,
'ssl_enabled': True,
'ssl_port': 5999,
'ssl_key': privateKey,
'ssl_cert': certificate
}
d = amulet.Deployment(series='trusty')
# Add the rabbitmq-server charm to the deployment.
d.add('rabbitmq-server')
# Configure all the options on rabbitmq-server.
d.configure('rabbitmq-server', rabbit_configuration)
# Expose the rabbitmq-server.
d.expose('rabbitmq-server')
try:
# Execute the deployer with the current mapping.
d.setup(timeout=seconds)
# Wait for the relations to finish their transactions.
#d.sentry.wait(seconds)
except amulet.helpers.TimeoutError:
message = 'The environment did not setup in %d seconds.' % seconds
# The SKIP status allows the test to be skipped or failed based on configuration.
amulet.raise_status(amulet.SKIP, msg=message)
except:
raise
rabbit_unit = d.sentry.unit['rabbitmq-server/0']
###############################################################################
## Verify that the rabbit service is running on the deployed server.
###############################################################################
# Create the command that checks if the rabbitmq-server service is running.
command = 'rabbitmqctl status'
print(command)
# Execute the command on the deployed service.
output, code = rabbit_unit.run(command)
print(output)
# Check the return code for the success and failure of this test.
if (code != 0):
message = 'The ' + command + ' did not return the expected code of 0.'
amulet.raise_status(amulet.FAIL, msg=message)
else:
print('The rabbitmq-server is running.')
###############################################################################
## Verify the configuration values.
###############################################################################
# Get the contents of the private key from the rabbitmq-server
contents = rabbit_unit.file_contents('/etc/rabbitmq/rabbit-server-privkey.pem')
# Verify the private key was saved on the rabbitmq server correctly.
if contents != privateKey:
message = 'The private keys did not match!'
amulet.raise_status(amulet.FAIL, msg=message)
else:
print('The private key was configured properly on the rabbitmq server.')
# Get the contents of the certificate from the rabbitmq-server.
contents = rabbit_unit.file_contents('/etc/rabbitmq/rabbit-server-cert.pem')
# Verify the certificate was saved on the rabbitmq server correctly.
if contents != certificate:
message = 'The certificates did not match!'
amulet.raise_status(amulet.FAIL, msg=message)
else:
print('The certificate was configured properly on the rabbitmq server.')
# Get the public address for rabbitmq-server instance.
rabbit_host = rabbit_unit.info['public-address']
###############################################################################
## Verify the management plugin is running and responding on correct port.
## According to this: http://www.rabbitmq.com/access-control.html
## The guest account can only log in from local host.
## Since this test runs on a different system there is no way to test
## the management plugin.
###############################################################################
# Create a URL for the rabbitmq server's management plugin (uses 55672).
#management_url = 'http://{0}:55672'.format(rabbit_host)
#print(management_url)
# Get the management url with the authentication for guest.
#r = requests.get(management_url, auth=('guest', 'guest'))
# Raise an exception if response is not 200 OK.
#r.raise_for_status()
#print(str(r))
#print('Successfully authenticated to the management console at %s' %
# management_url)
###############################################################################
## Verify that SSL is set up on the non-default port.
###############################################################################
# Get the port for ssl_port instance.
ssl_port = rabbit_configuration['ssl_port']
# Get the path to the certificate authority file.
ca_cert_path = os.path.join(path, 'rabbit-server-cacert.pem')
try:
# Create a normal socket.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Require a certificate from the server; since a self-signed certificate
# was used, the ca_certs must be the server certificate file itself.
ssl_sock = ssl.wrap_socket(s, ca_certs=ca_cert_path,
cert_reqs=ssl.CERT_REQUIRED)
# Connect to the rabbitmq server using ssl.
ssl_sock.connect((rabbit_host, ssl_port))
# Get the certificate.
certificate = ssl_sock.getpeercert()
# SSL socket connected and got the certificate; this passes the ssl test!
print('Connected to the rabbitmq-server {0}:{1} using ssl!'.format(
rabbit_host, ssl_port))
except Exception as e:
message = 'Failed to create an ssl connection to {0}:{1}\n{2}'.format(
rabbit_host, ssl_port, str(e))
amulet.raise_status(amulet.FAIL, msg=message)
finally:
ssl_sock.close()
print('The rabbitmq-server passed the configuration tests.')


@@ -0,0 +1,84 @@
#!/usr/bin/python
#
# This Amulet test deploys rabbitmq-server
#
# Note: We use python2, because pika doesn't support python3
import amulet
import pika
import telnetlib
# The number of seconds to wait for the environment to setup.
seconds = 1200
d = amulet.Deployment(series="trusty")
# Add the rabbitmq-server charm to the deployment.
d.add('rabbitmq-server', units=2)
# Create a configuration.
configuration = {'mirroring-queues': True}
d.configure('rabbitmq-server', configuration)
d.expose('rabbitmq-server')
try:
d.setup(timeout=seconds)
d.sentry.wait(seconds)
except amulet.helpers.TimeoutError:
message = 'The environment did not setup in %d seconds.' % seconds
amulet.raise_status(amulet.SKIP, msg=message)
except:
raise
rabbit_unit = d.sentry.unit['rabbitmq-server/0']
rabbit_unit2 = d.sentry.unit['rabbitmq-server/1']
commands = ['service rabbitmq-server status',
'rabbitmqctl cluster_status']
for cmd in commands:
output, code = rabbit_unit.run(cmd)
message = cmd + ' | exit code: %d.' % code
print(message)
print(output)
if code != 0:
amulet.raise_status(amulet.FAIL, msg=message)
rabbit_addr1 = rabbit_unit.info["public-address"]
rabbit_port = "5672"
rabbit_url = 'amqp://guest:guest@%s:%s/%%2F' % (rabbit_addr1, rabbit_port)
print('Connecting to %s' % rabbit_url)
conn1 = pika.BlockingConnection(pika.connection.URLParameters(rabbit_url))
channel = conn1.channel()
print('Declaring queue')
channel.queue_declare(queue='hello')
orig_msg = 'Hello World!'
print('Publishing message: %s' % orig_msg)
channel.basic_publish(exchange='',
routing_key='hello',
body=orig_msg)
print('stopping rabbit in unit 0')
rabbit_unit.run('service rabbitmq-server stop')
print('Consuming message from second unit')
rabbit_addr2 = rabbit_unit2.info["public-address"]
rabbit_url2 = 'amqp://guest:guest@%s:%s/%%2F' % (rabbit_addr2, rabbit_port)
conn2 = pika.BlockingConnection(pika.connection.URLParameters(rabbit_url2))
channel2 = conn2.channel()
method_frame, header_frame, body = channel2.basic_get('hello')
if method_frame:
print(method_frame, header_frame, body)
assert body == orig_msg, '%s != %s' % (body, orig_msg)
channel2.basic_ack(method_frame.delivery_tag)
else:
raise Exception('No message returned')
# check the management plugin is running
mgmt_port = "15672"
print('Checking management port')
telnetlib.Telnet(rabbit_addr1, mgmt_port)

tests/amqp_tester.py Normal file

@@ -0,0 +1,65 @@
#!/usr/bin/python
# This class uses Python to make AMQP calls to send and receive messages.
# To send an AMQP message call this module with a host, port, and message.
# To receive an AMQP message call this module with a host and port only.
import logging
import pika
import sys
def send(host, port, message, queue='test'):
""" Send an AMQP message to a host and port."""
connection = None
try:
parameters = pika.ConnectionParameters(host, port)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue)
channel.basic_publish(exchange='', routing_key=queue, body=message)
print('Message published to {0}:{1}'.format(host, port))
except Exception as e:
print('Unable to send message to {0}:{1}'.format(host, port))
print(e)
finally:
if connection:
connection.close()
def callback(ch, method, properties, body):
""" Handle the callback when the channel receives a message. """
print(body)
def receive(host, port, queue='test'):
""" Connects to host and port, and consumes AMQP messages. """
connection = None
try:
parameters = pika.ConnectionParameters(host, port)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue)
channel.basic_consume(callback, queue, no_ack=True)
except Exception as e:
print('Unable to receive message from {0}:{1}'.format(host, port))
print(e)
finally:
if connection:
connection.close()
# Needed to disable pika complaining about logging levels not set.
logging.basicConfig(level=logging.ERROR)
if len(sys.argv) == 3:
host = sys.argv[1]
port = int(sys.argv[2])
receive(host, port)
elif len(sys.argv) > 3:
host = sys.argv[1]
port = int(sys.argv[2])
message = ' '.join(sys.argv[3:])
send(host, port, message)
else:
print('Not enough arguments, host and port are required.')


@@ -0,0 +1,17 @@
-----BEGIN CERTIFICATE-----
MIICxjCCAa6gAwIBAgIJAIOIZyP0JpvLMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMTCE15VGVzdENBMB4XDTE0MDIyMzIwMDgxOFoXDTE1MDIyMzIwMDgxOFowEzER
MA8GA1UEAxMITXlUZXN0Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDceXBJOhn9d68M9UiOViWz00Znds1xk9i+GbZFEsqLPkoReS9g/SeXvR5ZFZlU
5gzAcg8z8b+n26wgZLFQJ4wQAIVELIu0S6e4sUPKfKl/fo9NmRVv/sPkfZWUZ5sc
d9DEk8MiNYjjXT+Ff4TV7DFxdDOJLIDrc09JWzIKrmfOXP5wLFCsIllGbellfNvY
FxiHHm3Iz5t3t077+uUXeMD5p1Qd2qQdbJ2p8Dwkg2AyTPNG8RA71tEMIT7FX0nB
sTX5M217ocdEZJI67x+3Z8Ll21m6blcnJI3V3Zk5kvccvYRlDuyGh7tiWcv4YKmv
xuP64L9174nQ3HXnwipfjBkBAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0P
BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAaQWYVUuPKRtAFd6kyugL/UpVhIa/o
zfm/2b23UviPA1TIYzzf4m1eDV7kxJynOeP1RRPp166el+sVx7+vqhBsqcm4b5e+
epShI3jff0gtODxo5rUVrROS3T6xjHA7AVm60/xmGIIybhDonXzA5YJ8YVBpHUdZ
Yc2neOwdDT8k/H95cPXBU3pf3vTVpndjN827fBuyO7KwKDAiKHwtwmSedc1uZtLN
sfwkonXF+gNAHXlk28VeygGQ1jHdloIrNG0zYc4ZX4zqPHd7HDeyYItBBHjrznow
nf1X6fNjP4YnG5EkUN8hRXf3ct+L8iq8puMNhjb8ssW+bsFRBIufVFaC
-----END CERTIFICATE-----


@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC5TCCAc2gAwIBAgIBATANBgkqhkiG9w0BAQUFADATMREwDwYDVQQDEwhNeVRl
c3RDQTAeFw0xNDAyMjMyMDA5MjFaFw0xNTAyMjMyMDA5MjFaMCgxFTATBgNVBAMM
DHJlYWxtcy1zbGljZTEPMA0GA1UECgwGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEApc6WxtgQZL+PcDHL1S/kLqFyGPaKiJQrOPGYq9djM5pr
VGPHUZ3F26VWtlBtPB8PgQFT10Sjr+ec7hC9aqT+THyNq2U8qCizGq4l4e44tfEI
LPuE9IluF/dZuVWFR2nbVYp3FeAjuRQ68AwzpcZOXVup3xsXx7dJrGL4KUx/7NUb
5+6TzboM1nXX7o/DYCE5BvXncM7U3cLg16SV58T6Rs+JYATAFdzveN6X88AgvQpB
rDSD42tSQmQzYu9mO2RwtP48jLvYLHv34dZo2h6G5zNWe/PkUjXxKEGJXHkeXy83
vx4UV62Vo8pMLeSSqL4wUV3KMRHJ+MBskP42lmruwQIDAQABoy8wLTAJBgNVHRME
AjAAMAsGA1UdDwQEAwIFIDATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0B
AQUFAAOCAQEArEFhWRHb1Nn1ACDNXbRueiMSdqwWtiAEU4yAFB+1maKyAG1VAQC2
IegIJ7INbzfwDp6CzNpI3CCyelNO2rcBGwOlXre6XA5K7TxKM6WLJDdodPUt30dY
3+/DF/XwH5S/C4oGbeRVWxzCBAFbStMlJqpXFIQkAYYCOfvWKoZzSslxGe7Zj+hQ
NYqJbc8BDe0UTyM8KoL9OWucEbdQMQgn0FxwAPqSLXgDkpSo6XHKL15MUbES1u5/
iT9gRJU1eN+bIWfrJA3dqh4JxXntTLDZ28pBdFtOV4WEF2O4fmxGiSktCi34tjK6
DsIScb+0mUeKS9b2cyQzLSUCwj8LgJW3rQ==
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEApc6WxtgQZL+PcDHL1S/kLqFyGPaKiJQrOPGYq9djM5prVGPH
UZ3F26VWtlBtPB8PgQFT10Sjr+ec7hC9aqT+THyNq2U8qCizGq4l4e44tfEILPuE
9IluF/dZuVWFR2nbVYp3FeAjuRQ68AwzpcZOXVup3xsXx7dJrGL4KUx/7NUb5+6T
zboM1nXX7o/DYCE5BvXncM7U3cLg16SV58T6Rs+JYATAFdzveN6X88AgvQpBrDSD
42tSQmQzYu9mO2RwtP48jLvYLHv34dZo2h6G5zNWe/PkUjXxKEGJXHkeXy83vx4U
V62Vo8pMLeSSqL4wUV3KMRHJ+MBskP42lmruwQIDAQABAoIBAE6jo/ldQrE19acF
xyIum6/OHJpgXCYY+EMCuyFNf5xa8erNhBxekxfw3CaOELECHk1WPtiLkoL8e/6h
a+UnqgDG1j5jPpiW7ROLYyY74SPR1MnY5R9CCzhMtX5kZFkRiNiSWpbCfs7qHGX7
s4c9fa9jqTbK18V+Ve/v5LlZsha13OQRISQLqZlCM6vKRtHZorQHZVM1KIV4bzdP
75/YTrhUA8GKGA+4Le5vZ1PQY8ubTAXPHeeqILvClsmkZ6k0RC/zesB3DUXzMvjA
ycbarcpZ+muxyp0Icuv9B7pj3iT/dL4udc+BM82Qg3KvLLiteE9aeOPsW3aJxAHa
YYLLCQECgYEA2EUF4QVgJE1U6RHFsRO6xP1KBJO0mVMMlEpgywPV3oN7D5gOVrCK
A/iNkyt2ggaX2o2+QHMe+WjINdMLhY6TdhRiYrUQhLteNSrp0w/HDYyY2Cz1IhH3
Y/0qHm9rGZhetOWoJ5Ejn/9vnL/ZfsGfSNzwS1VjCUHyXShebS9NHRECgYEAxERZ
5HX3zctB2lzzS8KTNAWijUc+hc8c0LV/GcCC8LMsUPNb/V+wQNiSInJa994YI5+9
1BkJkw4Lnexvx8GQPaAkX6DzZsWSmwaNSkLICd75f2ga4dqeohWOAIvS3xb4fanr
szCLZfd4L8MEb6lVI2wzpM5yADK42y/w03t0drECgYBvDAn3v93c5gRKZIDA6uOE
0JXYAcvCypzz67kFpSOEzLg8ipQaOS202kQ/pBqGq0H/y7Y7u6DU6dObp5EL8+iN
weu+yUABF4BJBo7ne/t2XpIAthzscJM5uT2OQSGaE93VPvL31hOXzP4PW4cfCeZy
8FdGJ0Lh9wWuhdLud1I+MQKBgQC7CwzEPmy38mJC8VxoMfnJlSkyDNiX+ybR/HYx
m5buP0MXrqVXVe8KDZtPOr5ZBI7qvFzTmjzWqYcGrQJmU6DcKhcgD6qHofiyx061
m+k6BwatlhAweAHAJFydRqPjOef9Eofu0G+48FvY4LkElVLvHDoncRuR9sTXFtwj
H7+BMQKBgQDNWXHeXi4bYm17KsBHpK+DMT6O9cF1BqYpgbqLyro554ZjYLYpt0li
iiekhVM55NpWlbsOfje0lhD8mr0ZYzW+SmWd/XMj03ZF9EeOBHx8SvWemsrz69Es
wAjiQtQoZlvczaLOHLV89p9EaK3OA1trqjAkqq9GFdLBVmG/zVzSaQ==
-----END RSA PRIVATE KEY-----

unit_tests/__init__.py Normal file


@@ -0,0 +1,71 @@
import mock
import os
import unittest
import tempfile
import sys
sys.modules['MySQLdb'] = mock.Mock()
import rabbit_utils
class UtilsTests(unittest.TestCase):
def setUp(self):
super(UtilsTests, self).setUp()
@mock.patch("rabbit_utils.log")
def test_update_empty_hosts_file(self, mock_log):
map = {'1.2.3.4': 'my-host'}
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
rabbit_utils.HOSTS_FILE = tmpfile.name
rabbit_utils.update_hosts_file(map)
with open(tmpfile.name, 'r') as fd:
lines = fd.readlines()
os.remove(tmpfile.name)
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], "%s %s\n" % (map.items()[0]))
@mock.patch("rabbit_utils.log")
def test_update_hosts_file_w_dup(self, mock_log):
map = {'1.2.3.4': 'my-host'}
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
rabbit_utils.HOSTS_FILE = tmpfile.name
with open(tmpfile.name, 'w') as fd:
fd.write("%s %s\n" % (map.items()[0]))
rabbit_utils.update_hosts_file(map)
with open(tmpfile.name, 'r') as fd:
lines = fd.readlines()
os.remove(tmpfile.name)
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], "%s %s\n" % (map.items()[0]))
@mock.patch("rabbit_utils.log")
def test_update_hosts_file_entry(self, mock_log):
altmap = {'1.1.1.1': 'alt-host'}
map = {'1.1.1.1': 'hostA',
'2.2.2.2': 'hostB',
'3.3.3.3': 'hostC',
'4.4.4.4': 'hostD'}
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
rabbit_utils.HOSTS_FILE = tmpfile.name
with open(tmpfile.name, 'w') as fd:
fd.write("#somedata\n")
fd.write("%s %s\n" % (altmap.items()[0]))
rabbit_utils.update_hosts_file(map)
with open(rabbit_utils.HOSTS_FILE, 'r') as fd:
lines = fd.readlines()
os.remove(tmpfile.name)
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], "#somedata\n")
self.assertEqual(lines[1], "%s %s\n" % (map.items()[0]))
self.assertEqual(lines[4], "%s %s\n" % (map.items()[3]))


@@ -0,0 +1,131 @@
import os
from testtools import TestCase
from mock import patch, MagicMock
os.environ['JUJU_UNIT_NAME'] = 'UNIT_TEST/0'
import rabbitmq_server_relations
class RelationUtil(TestCase):
def setUp(self):
self.fake_repo = {}
super(RelationUtil, self).setUp()
def _apt_cache(self):
"""Used for mocking out apt_pkg.Cache"""
# mocks out the apt cache
def cache_get(package):
pkg = MagicMock()
if package in self.fake_repo \
and 'pkg_vers' in self.fake_repo[package]:
pkg.name = package
pkg.current_ver.ver_str = self.fake_repo[package]['pkg_vers']
elif (package in self.fake_repo and
'pkg_vers' not in self.fake_repo[package]):
pkg.name = package
pkg.current_ver = None
else:
raise KeyError
return pkg
cache = MagicMock()
cache.__getitem__.side_effect = cache_get
return cache
@patch('rabbitmq_server_relations.peer_store_and_set')
@patch('rabbitmq_server_relations.get_ipv6_addr')
@patch('rabbitmq_server_relations.config')
@patch('rabbitmq_server_relations.relation_set')
@patch('apt_pkg.Cache')
@patch('rabbitmq_server_relations.is_clustered')
@patch('rabbitmq_server_relations.configure_client_ssl')
@patch('rabbitmq_server_relations.unit_get')
@patch('rabbitmq_server_relations.relation_get')
@patch('rabbitmq_server_relations.is_elected_leader')
def test_amqp_changed_compare_versions_ha_queues(
self,
is_elected_leader, relation_get, unit_get, configure_client_ssl,
is_clustered, apt_cache, relation_set, mock_config,
mock_get_ipv6_addr, mock_peer_store_and_set):
"""
Compare version above and below 3.0.1.
Make sure ha_queues is set correctly on each side.
"""
def config(key):
if key == 'prefer-ipv6':
return False
return None
mock_config.side_effect = config
host_addr = "10.1.2.3"
unit_get.return_value = host_addr
mock_get_ipv6_addr.return_value = [host_addr]
is_elected_leader.return_value = True
relation_get.return_value = {}
is_clustered.return_value = False
apt_cache.return_value = self._apt_cache()
self.fake_repo = {'rabbitmq-server': {'pkg_vers': '3.0'}}
rabbitmq_server_relations.amqp_changed(None, None)
mock_peer_store_and_set.assert_called_with(
relation_settings={'private-address': '10.1.2.3',
'hostname': host_addr,
'ha_queues': True},
relation_id=None)
self.fake_repo = {'rabbitmq-server': {'pkg_vers': '3.0.2'}}
rabbitmq_server_relations.amqp_changed(None, None)
mock_peer_store_and_set.assert_called_with(
relation_settings={'private-address': '10.1.2.3',
'hostname': host_addr},
relation_id=None)
@patch('rabbitmq_server_relations.peer_store_and_set')
@patch('rabbitmq_server_relations.get_ipv6_addr')
@patch('rabbitmq_server_relations.config')
@patch('rabbitmq_server_relations.relation_set')
@patch('apt_pkg.Cache')
@patch('rabbitmq_server_relations.is_clustered')
@patch('rabbitmq_server_relations.configure_client_ssl')
@patch('rabbitmq_server_relations.unit_get')
@patch('rabbitmq_server_relations.relation_get')
@patch('rabbitmq_server_relations.is_elected_leader')
def test_amqp_changed_compare_versions_ha_queues_prefer_ipv6(
self,
is_elected_leader, relation_get, unit_get, configure_client_ssl,
is_clustered, apt_cache, relation_set, mock_config,
mock_get_ipv6_addr, mock_peer_store_and_set):
"""
Compare version above and below 3.0.1.
Make sure ha_queues is set correctly on each side.
"""
def config(key):
if key == 'prefer-ipv6':
return True
return None
mock_config.side_effect = config
ipv6_addr = "2001:db8:1:0:f816:3eff:fed6:c140"
mock_get_ipv6_addr.return_value = [ipv6_addr]
host_addr = "10.1.2.3"
unit_get.return_value = host_addr
is_elected_leader.return_value = True
relation_get.return_value = {}
is_clustered.return_value = False
apt_cache.return_value = self._apt_cache()
self.fake_repo = {'rabbitmq-server': {'pkg_vers': '3.0'}}
rabbitmq_server_relations.amqp_changed(None, None)
mock_peer_store_and_set.assert_called_with(
relation_settings={'private-address': ipv6_addr,
'ha_queues': True},
relation_id=None)
self.fake_repo = {'rabbitmq-server': {'pkg_vers': '3.0.2'}}
rabbitmq_server_relations.amqp_changed(None, None)
mock_peer_store_and_set.assert_called_with(
relation_settings={'private-address': ipv6_addr},
relation_id=None)