Redux take 1

James Page 2014-03-05 12:57:20 +00:00
parent 313001cad1
commit 852f465581
39 changed files with 430 additions and 1448 deletions

View File

@@ -4,6 +4,5 @@
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/rabbitmq-server/hooks</path>
<path>/rabbitmq-server/lib</path>
</pydev_pathproperty>
</pydev_project>

View File

@@ -2,7 +2,7 @@
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude lib/charmhelpers hooks
@flake8 --exclude hooks/charmhelpers hooks
@charm proof
sync:

View File

@@ -1,4 +1,4 @@
destination: lib/charmhelpers
destination: hooks/charmhelpers
branch: lp:~openstack-charmers/charm-helpers/ssl-everywhere
include:
- fetch
@@ -7,3 +7,4 @@ include:
- contrib.openstack
- contrib.storage
- contrib.ssl
- contrib.hahelpers.cluster

View File

@@ -0,0 +1,183 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
unit_get,
)
class HAIncompleteConfig(Exception):
pass
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
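# Worked example (hypothetical unit names): if the local unit is
# 'rabbitmq-server/1' and peers are ['rabbitmq-server/2',
# 'rabbitmq-server/5'], no remote unit number is lower, so
# oldest_peer() returns True and this unit may act as leader.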
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]
# NOTE: works around (LP: #1203241)
if (None not in rel_state) and ('' not in rel_state):
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
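# Worked example (hypothetical values): determine_api_port(9696) on a
# clustered unit with HTTPS configured decrements twice and returns
# 9676, leaving the intermediate ports free for haproxy and the
# SSL-terminating frontend.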
def determine_apache_port(public_port):
'''
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
    returns: int: the correct listening port for the Apache service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
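# By the same arithmetic (hypothetical value), determine_apache_port(9696)
# returns 9686 on a clustered unit and 9696 on a standalone one.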
def get_hacluster_config():
'''
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
conf = {}
for setting in settings:
conf[setting] = config_get(setting)
    missing = [s for s, v in conf.iteritems() if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
return conf
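# Minimal usage sketch (assumes the charm declares all five options in its
# config.yaml):
#
#   try:
#       ha_conf = get_hacluster_config()
#   except HAIncompleteConfig:
#       log('hacluster settings incomplete, deferring.', level=INFO)
#   else:
#       log('Using VIP %s on %s' % (ha_conf['vip'], ha_conf['vip_iface']))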
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration and hacluster.
    :configs : OSTemplateRenderer: A config templating object to inspect for
a complete https context.
:vip_setting: str: Setting in charm config that specifies
VIP address.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
if is_clustered():
addr = config_get(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
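# Example outputs (hypothetical addresses): with a complete https context on
# a clustered unit whose vip is 10.0.0.100, canonical_url(configs) returns
# 'https://10.0.0.100'; a standalone unit without SSL falls back to
# 'http://<private-address>'.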

View File

View File

@@ -1,320 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import commands
import errno
import json
import subprocess
import os
import shutil
import time
import lib.utils as utils
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'
CEPH_CONF = """[global]
auth supported = %(auth)s
keyring = %(keyring)s
mon host = %(mon_hosts)s
log to syslog = %(use_syslog)s
err to syslog = %(use_syslog)s
clog to syslog = %(use_syslog)s
"""
def execute(cmd):
subprocess.check_call(cmd)
def execute_shell(cmd):
subprocess.check_call(cmd, shell=True)
def install():
ceph_dir = "/etc/ceph"
if not os.path.isdir(ceph_dir):
os.mkdir(ceph_dir)
utils.install('ceph-common')
def rbd_exists(service, pool, rbd_img):
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
(service, pool))
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool]
execute(cmd)
def pool_exists(service, name):
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
return name in out
def ceph_version():
''' Retrieve the local version of ceph '''
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = subprocess.check_output(cmd)
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None
def get_osds(service):
'''
Return a list of all Ceph Object Storage Daemons
currently in the cluster
'''
version = ceph_version()
if version and version >= '0.56':
cmd = ['ceph', '--id', service, 'osd', 'ls', '--format=json']
return json.loads(subprocess.check_output(cmd))
else:
return None
def create_pool(service, name, replicas=2):
''' Create a new RADOS pool '''
if pool_exists(service, name):
utils.juju_log('WARNING',
"Ceph pool {} already exists, "
"skipping creation".format(name))
return
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 / replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'create',
name, str(pgnum)
]
subprocess.check_call(cmd)
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'set', name,
'size', str(replicas)
]
subprocess.check_call(cmd)
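# Worked example: with 6 OSDs reported and the default of 2 replicas,
# pgnum = 6 * 100 / 2 = 300, so the pool is created with 300 placement
# groups and its replica count is then set to 2.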
def keyfile_path(service):
return KEYFILE % service
def keyring_path(service):
return KEYRING % service
def create_keyring(service, key):
keyring = keyring_path(service)
if os.path.exists(keyring):
utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.%s' % service,
'--add-key=%s' % key]
execute(cmd)
utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
def create_key_file(service, key):
# create a file containing the key
keyfile = keyfile_path(service)
if os.path.exists(keyfile):
utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
fd = open(keyfile, 'w')
fd.write(key)
fd.close()
utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
def get_ceph_nodes():
hosts = []
for r_id in utils.relation_ids('ceph'):
for unit in utils.relation_list(r_id):
ceph_public_addr = utils.relation_get(
'ceph_public_addr', unit=unit, rid=r_id) or \
utils.relation_get('private-address', unit=unit, rid=r_id)
hosts.append(ceph_public_addr)
return hosts
def configure(service, key, auth, use_syslog):
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
mon_hosts = ",".join(map(str, hosts))
keyring = keyring_path(service)
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF % locals())
modprobe_kernel_module('rbd')
def image_mapped(image_name):
(rc, out) = commands.getstatusoutput('rbd showmapped')
return image_name in out
def map_block_storage(service, pool, image):
cmd = [
'rbd',
'map',
'%s/%s' % (pool, image),
'--user',
service,
'--secret',
keyfile_path(service)]
execute(cmd)
def filesystem_mounted(fs):
return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
def make_filesystem(blk_device, fstype='ext4'):
count = 0
    e_noent = errno.ENOENT
while not os.path.exists(blk_device):
if count >= 10:
utils.juju_log('ERROR',
'ceph: gave up waiting on block device %s' %
blk_device)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
utils.juju_log('INFO',
'ceph: waiting for block device %s to appear' %
blk_device)
count += 1
time.sleep(1)
else:
utils.juju_log('INFO',
'ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype))
execute(['mkfs', '-t', fstype, blk_device])
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
# mount block device into /mnt
cmd = ['mount', '-t', fstype, blk_device, '/mnt']
execute(cmd)
# copy data to /mnt
try:
copy_files(data_src_dst, '/mnt')
    except:
        # best-effort copy: ignore files that cannot be copied verbatim
        pass
# umount block device
cmd = ['umount', '/mnt']
execute(cmd)
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
execute(cmd)
# ensure original ownership of new mount.
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
execute(cmd)
# TODO: re-use
def modprobe_kernel_module(module):
utils.juju_log('INFO', 'Loading kernel module')
cmd = ['modprobe', module]
execute(cmd)
cmd = 'echo %s >> /etc/modules' % module
execute_shell(cmd)
def copy_files(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[],
rbd_pool_replicas=2):
"""
To be called from the current cluster leader.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being remounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
create_pool(service, pool, replicas=rbd_pool_replicas)
if not rbd_exists(service, pool, rbd_img):
utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if its executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if utils.running(svc):
utils.juju_log('INFO',
'Stopping services %s prior to migrating '
'data' % svc)
utils.stop(svc)
place_data_on_ceph(service, blk_device, mount_point, fstype)
for svc in system_services:
utils.start(svc)
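# Illustrative call, mirroring how the rabbitmq hooks drive this helper
# (values hypothetical):
#
#   ensure_ceph_storage(service='rabbitmq-server', pool='rabbitmq-server',
#                       rbd_img='rabbit1', sizemb=5 * 1024,
#                       mount_point='/var/lib/rabbitmq',
#                       blk_device='/dev/rbd/rabbitmq-server/rabbit1',
#                       fstype='ext4',
#                       system_services=['rabbitmq-server'],
#                       rbd_pool_replicas=2)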

View File

@@ -1,128 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from lib.utils import (
juju_log,
relation_ids,
relation_list,
relation_get,
get_unit_hostname,
config_get)
import subprocess
import os
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
for peer in peers:
remote_unit_no = peer.split('/')[1]
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
juju_log('INFO', 'Deferring action to CRM leader.')
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
juju_log('INFO', 'Deferring action to oldest service unit.')
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if (relation_get('https_keystone', rid=r_id, unit=unit) and
relation_get('ssl_cert', rid=r_id, unit=unit) and
relation_get('ssl_key', rid=r_id, unit=unit) and
relation_get('ca_cert', rid=r_id, unit=unit)):
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_haproxy_port(public_port):
'''
Description: Determine correct proxy listening port based on public IP +
existence of HTTPS reverse proxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
i += 1
return public_port - (i * 10)

View File

@@ -1,230 +0,0 @@
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
import apt_pkg as apt
import subprocess
import os
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
ubuntu_openstack_release = {
'oneiric': 'diablo',
'precise': 'essex',
'quantal': 'folsom',
'raring': 'grizzly',
}
openstack_codenames = {
'2011.2': 'diablo',
'2012.1': 'essex',
'2012.2': 'folsom',
'2013.1': 'grizzly',
'2013.2': 'havana',
}
# The ugly duckling
swift_codenames = {
'1.4.3': 'diablo',
'1.4.8': 'essex',
'1.7.4': 'folsom',
'1.7.6': 'grizzly',
'1.7.7': 'grizzly',
'1.8.0': 'grizzly',
}
def juju_log(msg):
subprocess.check_call(['juju-log', msg])
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg)
exit(1)
def lsb_release():
'''Return /etc/lsb-release in a dict'''
lsb = open('/etc/lsb-release', 'r')
d = {}
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src == 'distro':
try:
rel = ubuntu_openstack_release[ubuntu_rel]
except KeyError:
            e = 'Could not derive OpenStack release for '\
                'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in openstack_codenames.iteritems():
if v in src:
return v
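# Examples: on a precise host, 'distro' maps to 'essex';
# 'cloud:precise-folsom/updates' is split down to 'folsom'; a 'deb' or
# 'ppa:' source is best-guess matched against the known codenames.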
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in openstack_codenames.iteritems():
if v == codename:
return k
    e = 'Could not derive OpenStack version for '\
        'codename: %s' % codename
error_out(e)
def get_os_codename_package(pkg):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
cache = apt.Cache()
try:
pkg = cache[pkg]
except:
e = 'Could not determine version of installed package: %s' % pkg
error_out(e)
vers = apt.UpstreamVersion(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
vers = vers[:5]
return swift_codenames[vers]
else:
vers = vers[:6]
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg)
if 'swift' in pkg:
vers_map = swift_codenames
else:
vers_map = openstack_codenames
for version, cname in vers_map.iteritems():
if cname == codename:
return version
e = "Could not determine OpenStack version for package: %s" % pkg
error_out(e)
def configure_installation_source(rel):
'''Configure apt installation source.'''
def _import_key(keyid):
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
if rel == 'distro':
return
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
_import_key(key)
elif l == 1:
src = rel
else:
error_out("Invalid openstack-release: %s" % rel)
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly'
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
_import_key(CLOUD_ARCHIVE_KEY_ID)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
charm_dir = os.getenv('CHARM_DIR')
juju_rc_path = "%s/%s" % (charm_dir, script_path)
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
        for u, p in env_vars.iteritems():
            if u != "script_path":
                rc_script.write('export %s=%s\n' % (u, p))
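# Usage sketch, mirroring the call in the rabbitmq hooks (values
# hypothetical):
#
#   save_script_rc(OPENSTACK_PORT_EPMD=4369,
#                  OPENSTACK_PORT_MCASTPORT=5404)
#
# which writes $CHARM_DIR/scripts/scriptrc with one 'export' line per
# variable, ready to be sourced by out-of-hook scripts.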

View File

@@ -1,224 +0,0 @@
#!/usr/bin/python
#
# Easy file synchronization among peer units using ssh + unison.
#
# From *both* peer relation -joined and -changed, add a call to
# ssh_authorized_peers() describing the peer relation and the desired
# user + group. After all peer relations have settled, all hosts should
# be able to connect to one another via key auth'd ssh as the specified user.
#
# Other hooks are then free to synchronize files and directories using
# sync_to_peers().
#
# For a peer relation named 'cluster', for example:
#
# cluster-relation-joined:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
#                      ensure_local_user=True)
# ...
#
# cluster-relation-changed:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
#                      ensure_local_user=True)
# ...
#
# Hooks are now free to sync files as easily as:
#
# files = ['/etc/fstab', '/etc/apt.conf.d/']
# sync_to_peers(peer_interface='cluster',
#               user='juju_ssh', paths=files)
#
# It is assumed the charm itself has set up permissions on each unit
# such that 'juju_ssh' has read + write permissions. Also assumed
# that the calling charm takes care of leader delegation.
#
# TODO: Currently depends on the utils.py shipped with the keystone charm.
# Either copy required functionality to this library or depend on
# something more generic.
import os
import sys
import lib.utils as utils
import subprocess
import grp
import pwd
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
        utils.juju_log('INFO',
                       'Could not get homedir for user %s: does the user '
                       'exist?' % user)
sys.exit(1)
def get_keypair(user):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir)
priv_key = os.path.join(ssh_dir, 'id_rsa')
if not os.path.isfile(priv_key):
utils.juju_log('INFO', 'Generating new ssh key for user %s.' % user)
cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
'-f', priv_key]
subprocess.check_call(cmd)
pub_key = '%s.pub' % priv_key
if not os.path.isfile(pub_key):
        utils.juju_log('INFO', 'Generating missing ssh public key @ %s.' %
pub_key)
cmd = ['ssh-keygen', '-y', '-f', priv_key]
p = subprocess.check_output(cmd).strip()
with open(pub_key, 'wb') as out:
out.write(p)
subprocess.check_call(['chown', '-R', user, ssh_dir])
return (open(priv_key, 'r').read().strip(),
open(pub_key, 'r').read().strip())
def write_authorized_keys(user, keys):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
utils.juju_log('INFO', 'Syncing authorized_keys @ %s.' % auth_keys)
with open(auth_keys, 'wb') as out:
for k in keys:
out.write('%s\n' % k)
def write_known_hosts(user, hosts):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
known_hosts = os.path.join(ssh_dir, 'known_hosts')
khosts = []
for host in hosts:
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
remote_key = subprocess.check_output(cmd).strip()
khosts.append(remote_key)
utils.juju_log('INFO', 'Syncing known_hosts @ %s.' % known_hosts)
with open(known_hosts, 'wb') as out:
for host in khosts:
out.write('%s\n' % host)
def ensure_user(user, group=None):
# need to ensure a bash shell'd user exists.
try:
pwd.getpwnam(user)
except KeyError:
utils.juju_log('INFO', 'Creating new user %s.%s.' % (user, group))
cmd = ['adduser', '--system', '--shell', '/bin/bash', user]
if group:
try:
grp.getgrnam(group)
except KeyError:
subprocess.check_call(['addgroup', group])
cmd += ['--ingroup', group]
subprocess.check_call(cmd)
def ssh_authorized_peers(peer_interface, user, group=None,
ensure_local_user=False):
"""
Main setup function, should be called from both peer -changed and -joined
hooks with the same parameters.
"""
if ensure_local_user:
ensure_user(user, group)
priv_key, pub_key = get_keypair(user)
hook = os.path.basename(sys.argv[0])
if hook == '%s-relation-joined' % peer_interface:
utils.relation_set(ssh_pub_key=pub_key)
print 'joined'
elif hook == '%s-relation-changed' % peer_interface:
hosts = []
keys = []
for r_id in utils.relation_ids(peer_interface):
for unit in utils.relation_list(r_id):
settings = utils.relation_get_dict(relation_id=r_id,
remote_unit=unit)
if 'ssh_pub_key' in settings:
keys.append(settings['ssh_pub_key'])
hosts.append(settings['private-address'])
else:
utils.juju_log('INFO',
'ssh_authorized_peers(): ssh_pub_key '
'missing for unit %s, skipping.' % unit)
write_authorized_keys(user, keys)
write_known_hosts(user, hosts)
authed_hosts = ':'.join(hosts)
utils.relation_set(ssh_authorized_hosts=authed_hosts)
def _run_as_user(user):
try:
user = pwd.getpwnam(user)
except KeyError:
utils.juju_log('INFO', 'Invalid user: %s' % user)
sys.exit(1)
uid, gid = user.pw_uid, user.pw_gid
os.environ['HOME'] = user.pw_dir
def _inner():
os.setgid(gid)
os.setuid(uid)
return _inner
def run_as_user(user, cmd):
return subprocess.check_output(cmd, preexec_fn=_run_as_user(user), cwd='/')
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
base_cmd = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
'-fastcheck=true', '-group=false', '-owner=false',
'-prefer=newer', '-times=true']
if not verbose:
base_cmd.append('-silent')
hosts = []
for r_id in (utils.relation_ids(peer_interface) or []):
for unit in utils.relation_list(r_id):
settings = utils.relation_get_dict(relation_id=r_id,
remote_unit=unit)
try:
authed_hosts = settings['ssh_authorized_hosts'].split(':')
except KeyError:
print 'unison sync_to_peers: peer has not authorized *any* '\
'hosts yet.'
return
unit_hostname = utils.unit_get('private-address')
add_host = None
for authed_host in authed_hosts:
if unit_hostname == authed_host:
add_host = settings['private-address']
if add_host:
hosts.append(settings['private-address'])
else:
print ('unison sync_to_peers: peer (%s) has not authorized '
'*this* host yet, skipping.' %
settings['private-address'])
for path in paths:
# removing trailing slash from directory paths, unison
# doesn't like these.
if path.endswith('/'):
path = path[:(len(path) - 1)]
for host in hosts:
try:
cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
utils.juju_log('INFO', 'Syncing local path %s to %s@%s:%s' %
(path, user, host, path))
run_as_user(user, cmd)
except:
# it may fail for permissions on some files
pass

View File

@@ -9,50 +9,26 @@
# Adam Gandelman <adamg@ubuntu.com>
#
import json
import grp
import os
import pwd
import subprocess
import socket
import sys
def do_hooks(hooks):
hook = os.path.basename(sys.argv[0])
try:
hook_func = hooks[hook]
except KeyError:
juju_log('INFO',
"This charm doesn't know how to handle '{}'.".format(hook))
else:
hook_func()
def install(*pkgs):
cmd = [
'apt-get',
'-y',
'install']
for pkg in pkgs:
cmd.append(pkg)
subprocess.check_call(cmd)
from charmhelpers.fetch import (
apt_install
)
from charmhelpers.core.hookenv import (
    local_unit,
    remote_unit,
    log,
    ERROR
)
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
install('python-jinja2')
apt_install('python-jinja2', fatal=True)
import jinja2
try:
import dns.resolver
except ImportError:
    apt_install('python-dnspython', fatal=True)
import dns.resolver
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = \
@@ -60,326 +36,45 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR):
template = templates.get_template(template_name)
return template.render(context)
CLOUD_ARCHIVE = """
# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
CLOUD_ARCHIVE_POCKETS = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
}
def configure_source():
source = str(config_get('openstack-origin'))
if not source:
return
if source.startswith('ppa:'):
cmd = [
'add-apt-repository',
source]
subprocess.check_call(cmd)
if source.startswith('cloud:'):
# CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
# cloud:precise-folsom/updates or cloud:precise-folsom/proposed
install('ubuntu-cloud-keyring')
pocket = source.split(':')[1]
pocket = pocket.split('-')[1]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
if source.startswith('deb'):
l = len(source.split('|'))
if l == 2:
(apt_line, key) = source.split('|')
cmd = [
'apt-key',
                'adv', '--keyserver', 'keyserver.ubuntu.com',
'--recv-keys', key]
subprocess.check_call(cmd)
elif l == 1:
apt_line = source
with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
apt.write(apt_line + "\n")
cmd = [
'apt-get',
'update']
subprocess.check_call(cmd)
# Protocols
TCP = 'TCP'
UDP = 'UDP'
def expose(port, protocol='TCP'):
cmd = [
'open-port',
'{}/{}'.format(port, protocol)]
subprocess.check_call(cmd)
def open_port(port, protocol='TCP'):
expose(port, protocol)
def close_port(port, protocol='TCP'):
cmd = [
'close-port',
'{}/{}'.format(port, protocol)]
subprocess.check_call(cmd)
def juju_log(severity, message):
cmd = [
'juju-log',
'--log-level', severity,
message]
subprocess.check_call(cmd)
cache = {}
def cached(func):
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
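# Example: memoizing a hook-tool lookup so repeated calls within a single
# hook execution only shell out once (hypothetical helper):
#
#   @cached
#   def private_address():
#       return unit_get('private-address')
#
# The cache key is str((func, args, kwargs)), so entries persist only for
# the lifetime of the process.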
@cached
def relation_ids(relation):
cmd = [
'relation-ids',
relation]
result = str(subprocess.check_output(cmd)).split()
    if not result:
        return None
    else:
        return result
@cached
def relation_list(rid):
cmd = [
'relation-list',
'-r', rid]
result = str(subprocess.check_output(cmd)).split()
    if not result:
        return None
    else:
        return result
@cached
def relation_get(attribute, unit=None, rid=None):
cmd = [
'relation-get']
if rid:
cmd.append('-r')
cmd.append(rid)
cmd.append(attribute)
if unit:
cmd.append(unit)
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
"""Obtain all relation data as dict by way of JSON"""
cmd = [
'relation-get', '--format=json']
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
if remote_unit:
cmd.append('-')
cmd.append(remote_unit)
j = subprocess.check_output(cmd)
d = json.loads(j)
settings = {}
# convert unicode to strings
for k, v in d.iteritems():
settings[str(k)] = str(v)
return settings
def relation_set(**kwargs):
cmd = [
'relation-set']
args = []
for k, v in kwargs.items():
if k == 'rid':
if v:
cmd.append('-r')
cmd.append(v)
else:
args.append('{}={}'.format(k, v))
cmd += args
subprocess.check_call(cmd)
@cached
def unit_get(attribute):
cmd = [
'unit-get',
attribute]
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def config_get(scope=None):
"""Juju charm configuration"""
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
return json.loads(subprocess.check_output(config_cmd_line))
except ValueError:
return None
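# e.g. config_get('vip') returns that single option (or None), while
# config_get() with no scope returns the whole charm config as a dict.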
@cached
def get_unit_hostname():
return socket.gethostname()
@cached
def get_host_ip(hostname=unit_get('private-address')):
try:
# Test to see if already an IPv4 address
socket.inet_aton(hostname)
return hostname
except socket.error:
answers = dns.resolver.query(hostname, 'A')
if answers:
return answers[0].address
return None
def _svc_control(service, action):
subprocess.check_call(['service', service, action])
def restart(*services):
for service in services:
_svc_control(service, 'restart')
def stop(*services):
for service in services:
_svc_control(service, 'stop')
def start(*services):
for service in services:
_svc_control(service, 'start')
def reload(*services):
for service in services:
try:
_svc_control(service, 'reload')
except subprocess.CalledProcessError:
# Reload failed - either service does not support reload
# or it was not running - restart will fixup most things
_svc_control(service, 'restart')
def running(service):
try:
output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or
"is running" in output):
return True
else:
return False
def is_relation_made(relation, key='private-address'):
for r_id in (relation_ids(relation) or []):
for unit in (relation_list(r_id) or []):
if relation_get(key, rid=r_id, unit=unit):
return True
return False
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
        log('Could not get homedir for user %s: does the user exist?' % user,
            level=ERROR)
raise Exception
def is_newer():
l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    l_unit_no = int(local_unit().split('/')[1])
    r_unit_no = int(remote_unit().split('/')[1])
return (l_unit_no > r_unit_no)
def chown(path, owner='root', group='root', recursive=False):
"""Changes owner of given path, recursively if needed"""
if os.path.exists(path):
juju_log('INFO', 'Changing ownership of path %s to %s:%s' %
(path, owner, group))
log('Changing ownership of path %s to %s:%s' %
(path, owner, group))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if recursive:
for root, dirs, files in os.walk(path):
for dir in dirs:
os.chown(os.path.join(root, dir), uid, gid)
for file in files:
os.chown(os.path.join(root, file), uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
else:
os.chown(path, uid, gid)
else:
juju_log('ERROR', '%s path does not exist' % path)
log('%s path does not exist' % path)
def chmod(path, perms, recursive=False):
"""Changes perms of given path, recursively if needed"""
if os.path.exists(path):
juju_log('INFO', 'Changing perms of path %s ' % path)
log('Changing perms of path %s ' % path)
if recursive:
for root, dirs, files in os.walk(path):
for dir in dirs:
os.chmod(os.path.join(root, dir), perms)
for file in files:
os.chmod(os.path.join(root, file), perms)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chmod(os.path.join(root, d), perms)
for f in files:
os.chmod(os.path.join(root, f), perms)
else:
os.chmod(path, perms)
else:
juju_log('ERROR', '%s path does not exist' % path)
        log('%s path does not exist' % path, level=ERROR)

View File

@@ -1,22 +1,28 @@
import os
import pwd
import grp
import re
import sys
import subprocess
import glob
import lib.utils as utils
from lib.utils import render_template
import apt_pkg as apt
import _pythonpath
_ = _pythonpath
from charmhelpers.contrib.openstack.utils import (
get_hostname,
error_out
)
from charmhelpers.core.hookenv import config, relation_ids, relation_get, relation_set, local_unit
from charmhelpers.core.hookenv import (
config,
relation_ids,
relation_get,
relation_set,
related_units,
local_unit,
log, ERROR
)
PACKAGES = ['pwgen', 'rabbitmq-server', 'python-amqplib']
PACKAGES = ['rabbitmq-server', 'python-amqplib']
RABBITMQ_CTL = '/usr/sbin/rabbitmqctl'
COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
@@ -32,7 +38,7 @@ def vhost_exists(vhost):
out = subprocess.check_output(cmd)
for line in out.split('\n')[1:]:
if line == vhost:
utils.juju_log('INFO', 'vhost (%s) already exists.' % vhost)
log('vhost (%s) already exists.' % vhost)
return True
return False
except:
@@ -45,7 +51,7 @@ def create_vhost(vhost):
return
cmd = [RABBITMQ_CTL, 'add_vhost', vhost]
subprocess.check_call(cmd)
utils.juju_log('INFO', 'Created new vhost (%s).' % vhost)
log('Created new vhost (%s).' % vhost)
def user_exists(user):
@@ -65,17 +71,17 @@ def create_user(user, password, admin=False):
if not exists:
cmd = [RABBITMQ_CTL, 'add_user', user, password]
subprocess.check_call(cmd)
utils.juju_log('INFO', 'Created new user (%s).' % user)
log('Created new user (%s).' % user)
if admin == is_admin:
return
if admin:
cmd = [RABBITMQ_CTL, 'set_user_tags', user, 'administrator']
utils.juju_log('INFO', 'Granting user (%s) admin access.')
        log('Granting user (%s) admin access.' % user)
else:
cmd = [RABBITMQ_CTL, 'set_user_tags', user]
utils.juju_log('INFO', 'Revoking user (%s) admin access.')
        log('Revoking user (%s) admin access.' % user)
def grant_permissions(user, vhost):
@@ -102,13 +108,13 @@ def compare_version(base_version):
def cluster_with():
utils.juju_log('INFO', 'Clustering with new node')
log('Clustering with new node')
if compare_version('3.0.1') >= 0:
cluster_cmd = 'join_cluster'
else:
cluster_cmd = 'cluster'
out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
utils.juju_log('INFO', 'cluster status is %s' % str(out))
log('cluster status is %s' % str(out))
# check if node is already clustered
total_nodes = 1
@@ -120,32 +126,29 @@ def cluster_with():
total_nodes = len(running_nodes)
if total_nodes > 1:
utils.juju_log('INFO', 'Node is already clustered, skipping')
log('Node is already clustered, skipping')
return False
# check all peers and try to cluster with them
available_nodes = []
for r_id in (utils.relation_ids('cluster') or []):
for unit in (utils.relation_list(r_id) or []):
address = utils.relation_get('private-address',
rid=r_id, unit=unit)
for r_id in relation_ids('cluster'):
for unit in related_units(r_id):
address = relation_get('private-address',
rid=r_id, unit=unit)
if address is not None:
node = get_hostname(address, fqdn=False)
available_nodes.append(node)
if len(available_nodes) == 0:
utils.juju_log('INFO', 'No nodes available to cluster with')
log('No nodes available to cluster with')
return False
# iterate over all the nodes, join to the first available
num_tries = 0
for node in available_nodes:
utils.juju_log('INFO',
'Clustering with remote'
' rabbit host (%s).' % node)
log('Clustering with remote rabbit host (%s).' % node)
if node in running_nodes:
utils.juju_log('INFO',
'Host already clustered with %s.' % node)
log('Host already clustered with %s.' % node)
return False
try:
@@ -155,19 +158,18 @@ def cluster_with():
subprocess.check_call(cmd)
cmd = [RABBITMQ_CTL, 'start_app']
subprocess.check_call(cmd)
utils.juju_log('INFO', 'Host clustered with %s.' % node)
log('Host clustered with %s.' % node)
if compare_version('3.0.1') >= 0:
cmd = [RABBITMQ_CTL, 'set_policy', 'HA',
'^(?!amq\.).*', '{"ha-mode": "all"}']
subprocess.check_call(cmd)
return True
except:
utils.juju_log('INFO', 'Failed to cluster with %s.' % node)
log('Failed to cluster with %s.' % node)
# continue to the next node
num_tries += 1
if num_tries > config('max-cluster-tries'):
utils.juju_log('ERROR',
'Max tries number exhausted, exiting')
log('Max tries number exhausted, exiting', level=ERROR)
raise
return False
@@ -181,11 +183,11 @@ def break_cluster():
subprocess.check_call(cmd)
cmd = [RABBITMQ_CTL, 'start_app']
subprocess.check_call(cmd)
utils.juju_log('INFO', 'Cluster successfully broken.')
log('Cluster successfully broken.')
except:
# error, no nodes available for clustering
utils.juju_log('ERROR', 'Error breaking rabbit cluster')
sys.exit(1)
log('Error breaking rabbit cluster', level=ERROR)
raise
def set_node_name(name):
@@ -193,7 +195,7 @@ def set_node_name(name):
# rabbitmq.conf.d is not present on all releases, so use or create
# rabbitmq-env.conf instead.
if not os.path.isfile(ENV_CONF):
utils.juju_log('INFO', '%s does not exist, creating.' % ENV_CONF)
log('%s does not exist, creating.' % ENV_CONF)
with open(ENV_CONF, 'wb') as out:
out.write('RABBITMQ_NODENAME=%s\n' % name)
return
@@ -207,8 +209,8 @@ def set_node_name(name):
out.append(line)
if not f:
out.append('RABBITMQ_NODENAME=%s\n' % name)
utils.juju_log('INFO', 'Updating %s, RABBITMQ_NODENAME=%s' %
(ENV_CONF, name))
log('Updating %s, RABBITMQ_NODENAME=%s' %
(ENV_CONF, name))
with open(ENV_CONF, 'wb') as conf:
conf.write(''.join(out))
@@ -257,7 +259,7 @@ def enable_ssl(ssl_key, ssl_cert, ssl_port,
continue
with open(path, 'w') as fh:
fh.write(contents)
os.chmod(path, 0640)
os.chmod(path, 0o640)
os.chown(path, uid, gid)
data = {
@@ -272,7 +274,7 @@ def enable_ssl(ssl_key, ssl_cert, ssl_port,
data["ssl_ca_file"] = ssl_ca_file
with open(RABBITMQ_CONF, 'w') as rmq_conf:
rmq_conf.write(utils.render_template(
rmq_conf.write(render_template(
os.path.basename(RABBITMQ_CONF), data))
@@ -307,7 +309,7 @@ def execute(cmd, die=False, echo=False):
rc = p.returncode
if die and rc != 0:
utils.juju_log('INFO', "ERROR: command %s return non-zero.\n" % cmd)
log("command %s return non-zero." % cmd, level=ERROR)
return (stdout, stderr, rc)
@@ -315,13 +317,19 @@ def get_clustered_attribute(attribute_name):
cluster_rels = relation_ids('cluster')
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
password = relation_get(attribute=attribute_name, rid=cluster_rid, unit=local_unit())
password = relation_get(
attribute=attribute_name,
rid=cluster_rid,
unit=local_unit())
return password
else:
return None
def set_clustered_attribute(attribute_name, value):
cluster_rels = relation_ids('cluster')
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid, relation_settings={attribute_name: value})
relation_set(
relation_id=cluster_rid,
relation_settings={attribute_name: value})

View File

@@ -6,24 +6,45 @@ import sys
import subprocess
import glob
import rabbit_utils as rabbit
import lib.utils as utils
import lib.cluster_utils as cluster
import lib.ceph_utils as ceph
import lib.openstack_common as openstack
from lib.utils import (
chown, chmod,
is_newer,
)
from charmhelpers.contrib.hahelpers.cluster import (
is_clustered,
eligible_leader
)
import _pythonpath
_ = _pythonpath
import charmhelpers.contrib.storage.linux.ceph as ceph
from charmhelpers.contrib.openstack.utils import save_script_rc
from charmhelpers.fetch import (
add_source,
apt_update)
from charmhelpers.core import hookenv
from charmhelpers.core.host import rsync, mkdir, pwgen
apt_update,
apt_install)
from charmhelpers.core.hookenv import (
open_port, close_port,
log, ERROR,
relation_get,
relation_set,
relation_ids,
related_units,
local_unit,
config,
unit_get,
is_relation_made,
Hooks, UnregisteredHookError
)
from charmhelpers.core.host import (
rsync, pwgen,
service_stop, service_restart
)
from charmhelpers.contrib.charmsupport.nrpe import NRPE
from charmhelpers.contrib.ssl.service import ServiceCA
hooks = Hooks()
SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
POOL_NAME = SERVICE_NAME
@@ -31,14 +52,15 @@ RABBIT_DIR = '/var/lib/rabbitmq'
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
@hooks.hook('install')
def install():
pre_install_hooks()
add_source(utils.config_get('source'), utils.config_get('key'))
add_source(config('source'), config('key'))
apt_update(fatal=True)
utils.install(*rabbit.PACKAGES)
utils.expose(5672)
utils.chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
utils.chmod(RABBIT_DIR, 0775)
apt_install(rabbit.PACKAGES, fatal=True)
open_port(5672)
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
chmod(RABBIT_DIR, 0o775)
def configure_amqp(username, vhost):
@@ -56,21 +78,22 @@ def configure_amqp(username, vhost):
return password
@hooks.hook('amqp-relation-changed')
def amqp_changed(relation_id=None, remote_unit=None):
if not cluster.eligible_leader('res_rabbitmq_vip'):
utils.juju_log('INFO',
'amqp_changed(): Deferring amqp_changed'
' to eligible_leader.')
if not eligible_leader('res_rabbitmq_vip'):
log('amqp_changed(): Deferring amqp_changed'
' to eligible_leader.')
return
relation_settings = {}
settings = hookenv.relation_get(rid=relation_id, unit=remote_unit)
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
utils.juju_log('INFO', 'amqp_changed(): Relation not ready.')
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
@@ -88,133 +111,132 @@ def amqp_changed(relation_id=None, remote_unit=None):
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = utils.unit_get('private-address')
relation_settings['hostname'] = unit_get('private-address')
configure_client_ssl(relation_settings)
if cluster.is_clustered():
if is_clustered():
relation_settings['clustered'] = 'true'
if utils.is_relation_made('ha'):
if is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = utils.config_get('vip')
relation_settings['ha-vip-only'] = utils.config_get('ha-vip-only')
relation_settings['vip'] = config('vip')
relation_settings['ha-vip-only'] = config('ha-vip-only')
if relation_id:
relation_settings['rid'] = relation_id
# set if need HA queues or not
relation_settings['ha_queues'] = (rabbit.compare_version('3.0.1') >= 0)
utils.relation_set(**relation_settings)
relation_set(relation_settings=relation_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined():
if utils.is_relation_made('ha') and \
utils.config_get('ha-vip-only') is False:
utils.juju_log('INFO',
'hacluster relation is present, skipping native '
'rabbitmq cluster config.')
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
if utils.is_newer():
utils.juju_log('INFO', 'cluster_joined: Relation greater.')
if is_newer():
log('cluster_joined: Relation greater.')
return
rabbit.COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
if not os.path.isfile(rabbit.COOKIE_PATH):
utils.juju_log('ERROR', 'erlang cookie missing from %s' %
rabbit.COOKIE_PATH)
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
rabbit.set_clustered_attribute('cookie', cookie)
@hooks.hook('cluster-relation-changed')
def cluster_changed():
# sync passwords
rdata = hookenv.relation_get()
rdata = relation_get()
echo_data = {}
for attribute, value in rdata.iteritems():
if '.passwd' in attribute or attribute == 'cookie':
echo_data[attribute] = value
if len(echo_data) > 0:
hookenv.relation_set(relation_settings=echo_data)
relation_set(relation_settings=echo_data)
if 'cookie' not in echo_data:
utils.juju_log('INFO',
'cluster_joined: cookie not yet set.')
        log('cluster_changed: cookie not yet set.')
return
# sync cookie
cookie = echo_data['cookie']
if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
utils.juju_log('INFO', 'Cookie already synchronized with peer.')
log('Cookie already synchronized with peer.')
else:
utils.juju_log('INFO', 'Synchronizing erlang cookie from peer.')
log('Synchronizing erlang cookie from peer.')
rabbit.service('stop')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
rabbit.service('start')
if utils.is_relation_made('ha') and \
utils.config_get('ha-vip-only') is False:
utils.juju_log('INFO',
'hacluster relation is present, skipping native '
'rabbitmq cluster config.')
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
# cluster with node
if utils.is_newer():
if is_newer():
if rabbit.cluster_with():
# resync nrpe user after clustering
update_nrpe_checks()
@hooks.hook('cluster-relation-departed')
def cluster_departed():
if utils.is_relation_made('ha') and \
utils.config_get('ha-vip-only') is False:
utils.juju_log('INFO',
'hacluster relation is present, skipping native '
'rabbitmq cluster config.')
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
if not utils.is_newer():
utils.juju_log('INFO', 'cluster_joined: Relation lesser.')
if not is_newer():
        log('cluster_departed: Relation lesser.')
return
rabbit.break_cluster()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = utils.config_get('ha-bindiface')
corosync_mcastport = utils.config_get('ha-mcastport')
vip = utils.config_get('vip')
vip_iface = utils.config_get('vip_iface')
vip_cidr = utils.config_get('vip_cidr')
rbd_name = utils.config_get('rbd-name')
vip_only = utils.config_get('ha-vip-only')
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
utils.juju_log('ERROR', 'Insufficient configuration data to '
'configure hacluster.')
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
utils.juju_log('ERROR', 'Insufficient configuration data to '
'configure VIP-only hacluster.')
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not utils.is_relation_made('ceph', 'auth') and vip_only is False:
utils.juju_log('INFO',
'ha_joined: No ceph relation yet, deferring.')
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
utils.juju_log('INFO', 'Stopping rabbitmq-server.')
utils.stop('rabbitmq-server')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
else:
utils.juju_log('INFO', 'Node name already set to %s.' % name)
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
@@ -240,7 +262,8 @@ def ha_joined():
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
SERVICE_NAME, ceph._keyfile_path(
SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
@@ -251,58 +274,63 @@ def ha_joined():
}
relation_settings['groups'] = {
'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
'grp_rabbitmq':
'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
for rel_id in utils.relation_ids('ha'):
utils.relation_set(rid=rel_id, **relation_settings)
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id, relation_settings=relation_settings)
env_vars = {
'OPENSTACK_PORT_EPMD': 4369,
'OPENSTACK_PORT_MCASTPORT': utils.config_get('ha-mcastport'),
'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
}
openstack.save_script_rc(**env_vars)
save_script_rc(**env_vars)
@hooks.hook('ha-relation-changed')
def ha_changed():
if not cluster.is_clustered():
if not is_clustered():
return
vip = utils.config_get('vip')
utils.juju_log('INFO', 'ha_changed(): We are now HA clustered. '
vip = config('vip')
log('ha_changed(): We are now HA clustered. '
'Advertising our VIP (%s) to all AMQP clients.' %
vip)
# need to re-authenticate all clients since node-name changed.
for rid in utils.relation_ids('amqp'):
for unit in utils.relation_list(rid):
for rid in relation_ids('amqp'):
for unit in related_units(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('ceph-relation-joined')
def ceph_joined():
utils.juju_log('INFO', 'Start Ceph Relation Joined')
utils.configure_source()
log('Start Ceph Relation Joined')
#NOTE fixup
#utils.configure_source()
ceph.install()
utils.juju_log('INFO', 'Finish Ceph Relation Joined')
log('Finish Ceph Relation Joined')
@hooks.hook('ceph-relation-changed')
def ceph_changed():
utils.juju_log('INFO', 'Start Ceph Relation Changed')
auth = utils.relation_get('auth')
key = utils.relation_get('key')
use_syslog = str(utils.config_get('use-syslog')).lower()
log('Start Ceph Relation Changed')
auth = relation_get('auth')
key = relation_get('key')
use_syslog = str(config('use-syslog')).lower()
if None in [auth, key]:
utils.juju_log('INFO', 'Missing key or auth in relation')
log('Missing key or auth in relation')
sys.exit(0)
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
if cluster.eligible_leader('res_rabbitmq_vip'):
rbd_img = utils.config_get('rbd-name')
rbd_size = utils.config_get('rbd-size')
if eligible_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
rbd_pool_rep_count = config('ceph-osd-replication-count')
ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
rbd_img=rbd_img, sizemb=sizemb,
fstype='ext4', mount_point=RABBIT_DIR,
@@ -310,22 +338,22 @@ def ceph_changed():
system_services=['rabbitmq-server'],
rbd_pool_replicas=rbd_pool_rep_count)
else:
utils.juju_log('INFO',
'This is not the peer leader. Not configuring RBD.')
utils.juju_log('INFO', 'Stopping rabbitmq-server.')
utils.stop('rabbitmq-server')
log('This is not the peer leader. Not configuring RBD.')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
# If 'ha' relation has been made before the 'ceph' relation
# it is important to make sure the ha-relation data is being
# sent.
if utils.is_relation_made('ha'):
utils.juju_log('INFO', '*ha* relation exists. Triggering ha_joined()')
if is_relation_made('ha'):
log('*ha* relation exists. Triggering ha_joined()')
ha_joined()
else:
utils.juju_log('INFO', '*ha* relation does not exist.')
utils.juju_log('INFO', 'Finish Ceph Relation Changed')
log('*ha* relation does not exist.')
log('Finish Ceph Relation Changed')
@hooks.hook('nrpe-external-master-relation-changed')
def update_nrpe_checks():
if os.path.isdir(NAGIOS_PLUGINS):
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
@@ -333,12 +361,12 @@ def update_nrpe_checks():
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
# create unique user and vhost for each unit
current_unit = hookenv.local_unit().replace('/', '-')
current_unit = local_unit().replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
password = rabbit.get_clustered_attribute('%s.passwd' % user)
if not password:
utils.juju_log('INFO', 'Setting password for nagios unit: %s' % user)
log('Setting password for nagios unit: %s' % user)
password = pwgen(length=64)
rabbit.set_clustered_attribute('%s.passwd' % user, password)
@@ -356,9 +384,10 @@ def update_nrpe_checks():
nrpe_compat.write()
@hooks.hook('upgrade-charm')
def upgrade_charm():
pre_install_hooks()
add_source(utils.config_get('source'), utils.config_get('key'))
add_source(config('source'), config('key'))
apt_update(fatal=True)
# Ensure older passwd files in /var/lib/juju are moved to
@@ -378,9 +407,8 @@ def upgrade_charm():
if stored_password:
rabbit.set_clustered_attribute(username, stored_password)
utils.juju_log('INFO',
'upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
log('upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
shutil.move(s, d)
# explicitly update buggy file name naigos.passwd
@@ -399,18 +427,18 @@ def configure_client_ssl(relation_data):
ssl_mode, external_ca = _get_ssl_mode()
if ssl_mode == 'off':
return
relation_data['ssl_port'] = utils.config_get('ssl_port')
relation_data['ssl_port'] = config('ssl_port')
if external_ca:
if utils.config_get('ssl_ca'):
if config('ssl_ca'):
relation_data['ssl_ca'] = base64.b64encode(
utils.config_get('ssl_ca'))
config('ssl_ca'))
return
ca = ServiceCA.get_ca()
relation_data['ssl_ca'] = base64.b64encode(ca.get_ca_bundle())
def _get_ssl_mode():
config = utils.config_get()
    conf = config()
    ssl_mode = conf.get('ssl')
external_ca = False
# Legacy config boolean option
@@ -419,8 +447,8 @@ def _get_ssl_mode():
ssl_mode = 'off'
elif ssl_mode == 'off' and ssl_on:
ssl_mode = 'on'
ssl_key = utils.config_get('ssl_key')
ssl_cert = utils.config_get('ssl_cert')
ssl_key = config('ssl_key')
ssl_cert = config('ssl_cert')
if all((ssl_key, ssl_cert)):
external_ca = True
return ssl_mode, external_ca
@@ -441,16 +469,14 @@ def reconfigure_client_ssl(ssl_enabled=False):
def reconfigure_client_ssl(ssl_enabled=False):
ssl_config_keys = set(('ssl_key', 'ssl_cert', 'ssl_ca'))
for rid in hookenv.relation_ids('amqp'):
rdata = hookenv.relation_get(
rid=rid, unit=os.environ['JUJU_UNIT_NAME'])
for rid in relation_ids('amqp'):
rdata = relation_get(rid=rid, unit=os.environ['JUJU_UNIT_NAME'])
if not ssl_enabled and ssl_config_keys.intersection(rdata):
# No clean way to remove entirely, but blank them.
utils.relation_set(
rid=rid, ssl_key='', ssl_cert='', ssl_ca='')
relation_set(relation_id=rid, ssl_key='', ssl_cert='', ssl_ca='')
elif ssl_enabled and not ssl_config_keys.intersection(rdata):
configure_client_ssl(rdata)
utils.relation_set(rid=rid, **rdata)
relation_set(relation_id=rid, **rdata)
def configure_rabbit_ssl():
@@ -465,20 +491,19 @@ def configure_rabbit_ssl():
if ssl_mode == 'off':
if os.path.exists(rabbit.RABBITMQ_CONF):
os.remove(rabbit.RABBITMQ_CONF)
utils.close_port(utils.config_get('ssl_port'))
close_port(config('ssl_port'))
reconfigure_client_ssl()
return
ssl_key = _convert_from_base64(utils.config_get('ssl_key'))
ssl_cert = _convert_from_base64(utils.config_get('ssl_cert'))
ssl_ca = _convert_from_base64(utils.config_get('ssl_ca'))
ssl_port = utils.config_get('ssl_port')
ssl_key = _convert_from_base64(config('ssl_key'))
ssl_cert = _convert_from_base64(config('ssl_cert'))
ssl_ca = _convert_from_base64(config('ssl_ca'))
ssl_port = config('ssl_port')
# If external managed certs then we need all the fields.
if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and
not all((ssl_key, ssl_cert))):
utils.juju_log(
'ERROR',
'If ssl_key or ssl_cert are specified both are required.')
log('If ssl_key or ssl_cert are specified both are required.',
level=ERROR)
sys.exit(1)
if not external_ca:
@@ -488,22 +513,23 @@ def configure_rabbit_ssl():
ssl_key, ssl_cert, ssl_port, ssl_ca,
ssl_only=(ssl_mode == "only"), ssl_client=False)
reconfigure_client_ssl(True)
utils.open_port(ssl_port)
open_port(ssl_port)
@hooks.hook('config-changed')
def config_changed():
if utils.config_get('management_plugin') is True:
if config('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
utils.open_port(55672)
open_port(55672)
else:
rabbit.disable_plugin(MAN_PLUGIN)
utils.close_port(55672)
close_port(55672)
configure_rabbit_ssl()
if cluster.eligible_leader('res_rabbitmq_vip') or \
utils.config_get('ha-vip-only') is True:
utils.restart('rabbitmq-server')
if eligible_leader('res_rabbitmq_vip') or \
config('ha-vip-only') is True:
service_restart('rabbitmq-server')
update_nrpe_checks()
@@ -513,19 +539,9 @@ def pre_install_hooks():
if os.path.isfile(f) and os.access(f, os.X_OK):
subprocess.check_call(['sh', '-c', f])
hooks = {
'install': install,
'amqp-relation-changed': amqp_changed,
'cluster-relation-joined': cluster_joined,
'cluster-relation-changed': cluster_changed,
'cluster-relation-departed': cluster_departed,
'ha-relation-joined': ha_joined,
'ha-relation-changed': ha_changed,
'ceph-relation-joined': ceph_joined,
'ceph-relation-changed': ceph_changed,
'upgrade-charm': upgrade_charm,
'config-changed': config_changed,
'nrpe-external-master-relation-changed': update_nrpe_checks
}
utils.do_hooks(hooks)
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))

View File

@@ -1,18 +0,0 @@
Metadata-Version: 1.0
Name: charmhelpers
Version: 0.1.2
Summary: UNKNOWN
Home-page: https://code.launchpad.net/charm-helpers
Author: Ubuntu Developers
Author-email: ubuntu-devel-discuss@lists.ubuntu.com
License: Affero GNU Public License v3
Description: ============
CharmHelpers
============
CharmHelpers provides an opinionated set of tools for building Juju
charms that work together. In addition to basic tasks like interacting
with the charm environment and the machine it runs on, it also helps
you build hooks and establish relations effortlessly.
Platform: UNKNOWN