Merge: Liberty/Mitaka changes

Junaid Ali 2016-05-19 11:35:31 +05:00
commit 7280316667
50 changed files with 4378 additions and 3414 deletions


@@ -4,7 +4,7 @@ PYTHON := /usr/bin/env python
virtualenv:
virtualenv .venv
.venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \
netaddr jinja2
netaddr jinja2 pyflakes pep8 six pbr funcsigs psutil
lint: virtualenv
.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402

bin/charm_helpers_sync.py (new file, 253 lines)

@@ -0,0 +1,253 @@
#!/usr/bin/python
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Authors:
# Adam Gandelman <adamg@ubuntu.com>
import logging
import optparse
import os
import subprocess
import shutil
import sys
import tempfile
import yaml
from fnmatch import fnmatch
import six
CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
def parse_config(conf_file):
if not os.path.isfile(conf_file):
logging.error('Invalid config file: %s.' % conf_file)
return False
return yaml.load(open(conf_file).read())
def clone_helpers(work_dir, branch):
dest = os.path.join(work_dir, 'charm-helpers')
logging.info('Checking out %s to %s.' % (branch, dest))
cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
subprocess.check_call(cmd)
return dest
def _module_path(module):
return os.path.join(*module.split('.'))
def _src_path(src, module):
return os.path.join(src, 'charmhelpers', _module_path(module))
def _dest_path(dest, module):
return os.path.join(dest, _module_path(module))
def _is_pyfile(path):
return os.path.isfile(path + '.py')
def ensure_init(path):
'''
ensure directories leading up to path are importable, omitting
parent directory, e.g. path='hooks/helpers/foo':
hooks/
hooks/helpers/__init__.py
hooks/helpers/foo/__init__.py
'''
for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
_i = os.path.join(d, '__init__.py')
if not os.path.exists(_i):
logging.info('Adding missing __init__.py: %s' % _i)
open(_i, 'wb').close()
def sync_pyfile(src, dest):
src = src + '.py'
src_dir = os.path.dirname(src)
logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copy(src, dest)
if os.path.isfile(os.path.join(src_dir, '__init__.py')):
shutil.copy(os.path.join(src_dir, '__init__.py'),
dest)
ensure_init(dest)
def get_filter(opts=None):
opts = opts or []
if 'inc=*' in opts:
# do not filter any files, include everything
return None
def _filter(dir, ls):
incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
_filter = []
for f in ls:
_f = os.path.join(dir, f)
if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
if True not in [fnmatch(_f, inc) for inc in incs]:
logging.debug('Not syncing %s, does not match include '
'filters (%s)' % (_f, incs))
_filter.append(f)
else:
logging.debug('Including file, which matches include '
'filters (%s): %s' % (incs, _f))
elif (os.path.isfile(_f) and not _f.endswith('.py')):
logging.debug('Not syncing file: %s' % f)
_filter.append(f)
elif (os.path.isdir(_f) and not
os.path.isfile(os.path.join(_f, '__init__.py'))):
logging.debug('Not syncing directory: %s' % f)
_filter.append(f)
return _filter
return _filter
def sync_directory(src, dest, opts=None):
if os.path.exists(dest):
logging.debug('Removing existing directory: %s' % dest)
shutil.rmtree(dest)
logging.info('Syncing directory: %s -> %s.' % (src, dest))
shutil.copytree(src, dest, ignore=get_filter(opts))
ensure_init(dest)
def sync(src, dest, module, opts=None):
# Sync charmhelpers/__init__.py for bootstrap code.
sync_pyfile(_src_path(src, '__init__'), dest)
# Sync other __init__.py files in the path leading to module.
m = []
steps = module.split('.')[:-1]
while steps:
m.append(steps.pop(0))
init = '.'.join(m + ['__init__'])
sync_pyfile(_src_path(src, init),
os.path.dirname(_dest_path(dest, init)))
# Sync the module, or maybe a .py file.
if os.path.isdir(_src_path(src, module)):
sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
elif _is_pyfile(_src_path(src, module)):
sync_pyfile(_src_path(src, module),
os.path.dirname(_dest_path(dest, module)))
else:
logging.warn('Could not sync: %s. Neither a pyfile nor a directory; '
'does it even exist?' % module)
def parse_sync_options(options):
if not options:
return []
return options.split(',')
def extract_options(inc, global_options=None):
global_options = global_options or []
if global_options and isinstance(global_options, six.string_types):
global_options = [global_options]
if '|' not in inc:
return (inc, global_options)
inc, opts = inc.split('|')
return (inc, parse_sync_options(opts) + global_options)
def sync_helpers(include, src, dest, options=None):
if not os.path.isdir(dest):
os.makedirs(dest)
global_options = parse_sync_options(options)
for inc in include:
if isinstance(inc, str):
inc, opts = extract_options(inc, global_options)
sync(src, dest, inc, opts)
elif isinstance(inc, dict):
# could also do nested dicts here.
for k, v in six.iteritems(inc):
if isinstance(v, list):
for m in v:
inc, opts = extract_options(m, global_options)
sync(src, dest, '%s.%s' % (k, inc), opts)
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-c', '--config', action='store', dest='config',
default=None, help='helper config file')
parser.add_option('-D', '--debug', action='store_true', dest='debug',
default=False, help='debug')
parser.add_option('-b', '--branch', action='store', dest='branch',
help='charm-helpers bzr branch (overrides config)')
parser.add_option('-d', '--destination', action='store', dest='dest_dir',
help='sync destination dir (overrides config)')
(opts, args) = parser.parse_args()
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if opts.config:
logging.info('Loading charm helper config from %s.' % opts.config)
config = parse_config(opts.config)
if not config:
logging.error('Could not parse config from %s.' % opts.config)
sys.exit(1)
else:
config = {}
if 'branch' not in config:
config['branch'] = CHARM_HELPERS_BRANCH
if opts.branch:
config['branch'] = opts.branch
if opts.dest_dir:
config['destination'] = opts.dest_dir
if 'destination' not in config:
logging.error('No destination dir specified as option or config.')
sys.exit(1)
if 'include' not in config:
if not args:
logging.error('No modules to sync specified as option or config.')
sys.exit(1)
config['include'] = list(args)
sync_options = None
if 'options' in config:
sync_options = config['options']
tmpd = tempfile.mkdtemp()
try:
checkout = clone_helpers(tmpd, config['branch'])
sync_helpers(config['include'], checkout, config['destination'],
options=sync_options)
except Exception as e:
logging.error("Could not sync: %s" % e)
raise e
finally:
logging.debug('Cleaning up %s' % tmpd)
shutil.rmtree(tmpd)
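
As a usage sketch (not part of this commit's diff), the script's functions can be driven the same way its __main__ block does; the module list, include filter, and destination below are illustrative assumptions:

import logging
import shutil
import tempfile

logging.basicConfig(level=logging.INFO)
tmpd = tempfile.mkdtemp()
try:
    # Lightweight bzr checkout of lp:charm-helpers into a scratch dir.
    checkout = clone_helpers(tmpd, CHARM_HELPERS_BRANCH)
    # An 'inc=' filter after '|' is parsed by extract_options()/get_filter().
    sync_helpers(['core', 'contrib.openstack|inc=templates/*'],
                 checkout, 'hooks/charmhelpers')
finally:
    shutil.rmtree(tmpd)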


@@ -3,5 +3,10 @@ destination: hooks/charmhelpers
include:
- core
- fetch
- contrib
- contrib.amulet
- contrib.hahelpers
- contrib.network
- contrib.openstack
- contrib.python
- contrib.storage
- payload
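
This include list is what the new bin/charm_helpers_sync.py consumes. A hedged sketch of reading it with the script's own helpers (the file name charm-helpers-hooks.yaml and the checkout variable are assumptions):

# 'checkout' would come from clone_helpers(), as in the sketch above.
config = parse_config('charm-helpers-hooks.yaml')
sync_helpers(config['include'], checkout, config['destination'])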


@@ -51,7 +51,8 @@ class AmuletDeployment(object):
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'])
self.d.add(this_service['name'], units=this_service['units'],
constraints=this_service.get('constraints'))
for svc in other_services:
if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'])
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
constraints=svc.get('constraints'))
def _add_relations(self, relations):
"""Add all of the relations for the services."""


@@ -14,17 +14,25 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import ConfigParser
import distro_info
import io
import json
import logging
import os
import re
import six
import socket
import subprocess
import sys
import time
import urlparse
import uuid
import amulet
import distro_info
import six
from six.moves import configparser
if six.PY3:
from urllib import parse as urlparse
else:
import urlparse
class AmuletUtils(object):
@@ -108,7 +116,7 @@ class AmuletUtils(object):
# /!\ DEPRECATION WARNING (beisner):
# New and existing tests should be rewritten to use
# validate_services_by_name() as it is aware of init systems.
self.log.warn('/!\\ DEPRECATION WARNING: use '
self.log.warn('DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
@@ -142,19 +150,23 @@ class AmuletUtils(object):
for service_name in services_list:
if (self.ubuntu_releases.index(release) >= systemd_switch or
service_name == "rabbitmq-server"):
# init is systemd
service_name in ['rabbitmq-server', 'apache2']):
# init is systemd (or regular sysv)
cmd = 'sudo service {} status'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0 and "start/running" in output
output, code = sentry_unit.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
if not service_running:
return u"command `{}` returned {} {}".format(
cmd, output, str(code))
return None
def _get_config(self, unit, filename):
@@ -164,7 +176,7 @@ class AmuletUtils(object):
# NOTE(beisner): by default, ConfigParser does not handle options
# with no value, such as the flags used in the mysql my.cnf file.
# https://bugs.python.org/issue7005
config = ConfigParser.ConfigParser(allow_no_value=True)
config = configparser.ConfigParser(allow_no_value=True)
config.readfp(io.StringIO(file_contents))
return config
@@ -259,33 +271,52 @@ class AmuletUtils(object):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
"""Get process' start time.
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
"""Get start time of a process based on the last modification time
of the /proc/pid directory.
Determine start time of the process based on the last modification
time of the /proc/pid directory. If pgrep_full is True, the process
name is matched against the full command line.
"""
if pgrep_full:
cmd = 'pgrep -o -f {}'.format(service)
else:
cmd = 'pgrep -o {}'.format(service)
cmd = cmd + ' | grep -v pgrep || exit 0'
cmd_out = sentry_unit.run(cmd)
self.log.debug('CMDout: ' + str(cmd_out))
if cmd_out[0]:
self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
proc_dir = '/proc/{}'.format(cmd_out[0].strip())
return self._get_dir_mtime(sentry_unit, proc_dir)
:sentry_unit: The sentry unit to check for the service on
:service: service name to look for in process table
:pgrep_full: [Deprecated] Use full command line search mode with pgrep
:returns: epoch time of service process start
"""
if pgrep_full is not None:
# /!\ DEPRECATION WARNING (beisner):
# No longer implemented, as pidof is now used instead of pgrep.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
'longer implemented re: lp 1474030.')
pid_list = self.get_process_id_list(sentry_unit, service)
pid = pid_list[0]
proc_dir = '/proc/{}'.format(pid)
self.log.debug('Pid for {} on {}: {}'.format(
service, sentry_unit.info['unit_name'], pid))
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=False, sleep_time=20):
pgrep_full=None, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
# /!\ DEPRECATION WARNING (beisner):
# This method is prone to races in that no before-time is known.
# Use validate_service_config_changed instead.
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
self.log.warn('DEPRECATION WARNING: use '
'validate_service_config_changed instead of '
'service_restarted due to known races.')
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
@@ -294,78 +325,122 @@ class AmuletUtils(object):
return False
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=False, sleep_time=20,
retry_count=2):
pgrep_full=None, sleep_time=20,
retry_count=30, retry_sleep_time=10):
"""Check if service was been started after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if service found and its start time is newer than mtime,
False if service is older than mtime or if service was
not found.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s service restarted since %s on '
'%s' % (service, mtime, unit_name))
time.sleep(sleep_time)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
while retry_count > 0 and not proc_start_time:
self.log.debug('No pid file found for service %s, will retry %i '
'more times' % (service, retry_count))
time.sleep(30)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
retry_count = retry_count - 1
proc_start_time = None
tries = 0
while tries <= retry_count and not proc_start_time:
try:
proc_start_time = self._get_proc_start_time(sentry_unit,
service,
pgrep_full)
self.log.debug('Attempt {} to get {} proc start time on {} '
'OK'.format(tries, service, unit_name))
except IOError as e:
# NOTE(beisner) - race avoidance, proc may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} proc start time on {} '
'failed\n{}'.format(tries, service,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did '
'not start')
return False
if proc_start_time >= mtime:
self.log.debug('proc start time is newer than provided mtime'
'(%s >= %s)' % (proc_start_time, mtime))
self.log.debug('Proc start time is newer than provided mtime '
'(%s >= %s) on %s (OK)' % (proc_start_time,
mtime, unit_name))
return True
else:
self.log.warn('proc start time (%s) is older than provided mtime '
'(%s), service did not restart' % (proc_start_time,
mtime))
self.log.warn('Proc start time (%s) is older than provided mtime '
'(%s) on %s, service did not '
'restart' % (proc_start_time, mtime, unit_name))
return False
def config_updated_since(self, sentry_unit, filename, mtime,
sleep_time=20):
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check if file was modified after a given time.
Args:
sentry_unit (sentry): The sentry unit to check the file mtime on
filename (string): The file to check mtime of
mtime (float): The epoch time to check against
sleep_time (int): Seconds to sleep before looking for process
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if file was modified more recently than mtime, False if
file was modified before mtime,
file was modified before mtime, or if file not found.
"""
self.log.debug('Checking %s updated since %s' % (filename, mtime))
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s updated since %s on '
'%s' % (filename, mtime, unit_name))
time.sleep(sleep_time)
file_mtime = self._get_file_mtime(sentry_unit, filename)
file_mtime = None
tries = 0
while tries <= retry_count and not file_mtime:
try:
file_mtime = self._get_file_mtime(sentry_unit, filename)
self.log.debug('Attempt {} to get {} file mtime on {} '
'OK'.format(tries, filename, unit_name))
except IOError as e:
# NOTE(beisner) - race avoidance, file may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} file mtime on {} '
'failed\n{}'.format(tries, filename,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not file_mtime:
self.log.warn('Could not determine file mtime, assuming '
'file does not exist')
return False
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s)' % (file_mtime, mtime))
'(%s >= %s) on %s (OK)' % (file_mtime,
mtime, unit_name))
return True
else:
self.log.warn('File mtime %s is older than provided mtime %s'
% (file_mtime, mtime))
self.log.warn('File mtime is older than provided mtime '
'(%s < %s) on %s' % (file_mtime,
mtime, unit_name))
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=False,
sleep_time=20, retry_count=2):
filename, pgrep_full=None,
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check service and file were updated after mtime
Args:
@@ -373,9 +448,10 @@ class AmuletUtils(object):
mtime (float): The epoch time to check against
service (string): service name to look for in process table
filename (string): The file to check mtime of
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep in seconds to pass to test helpers
retry_count (int): If service is not found, how many times to retry
retry_sleep_time (int): Time in seconds to wait between retries
Typical Usage:
u = OpenStackAmuletUtils(ERROR)
@@ -392,15 +468,27 @@ class AmuletUtils(object):
mtime, False if service is older than mtime or if service was
not found or if filename was modified before mtime.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
service_restart = self.service_restarted_since(sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=0,
retry_count=retry_count)
config_update = self.config_updated_since(sentry_unit, filename, mtime,
sleep_time=0)
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
service_restart = self.service_restarted_since(
sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
config_update = self.config_updated_since(
sentry_unit,
filename,
mtime,
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
return service_restart and config_update
def get_sentry_time(self, sentry_unit):
@@ -418,7 +506,6 @@
"""Return a list of all Ubuntu releases in order of release."""
_d = distro_info.UbuntuDistroInfo()
_release_list = _d.all
self.log.debug('Ubuntu release list: {}'.format(_release_list))
return _release_list
def file_to_url(self, file_rel_path):
@@ -450,15 +537,20 @@ class AmuletUtils(object):
cmd, code, output))
return None
def get_process_id_list(self, sentry_unit, process_name):
def get_process_id_list(self, sentry_unit, process_name,
expect_success=True):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:param sentry_unit: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
"""
cmd = 'pidof {}'.format(process_name)
cmd = 'pidof -x {}'.format(process_name)
if not expect_success:
cmd += " || exit 0 && exit 1"
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
@@ -467,14 +559,23 @@ class AmuletUtils(object):
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output).split()
def get_unit_process_ids(self, unit_processes):
def get_unit_process_ids(self, unit_processes, expect_success=True):
"""Construct a dict containing unit sentries, process names, and
process IDs."""
process IDs.
:param unit_processes: A dictionary of Amulet sentry instance
to list of process names.
:param expect_success: if False expect the processes to not be
running, raise if they are.
:returns: Dictionary of Amulet sentry instance to dictionary
of process names to PIDs.
"""
pid_dict = {}
for sentry_unit, process_list in unit_processes.iteritems():
for sentry_unit, process_list in six.iteritems(unit_processes):
pid_dict[sentry_unit] = {}
for process in process_list:
pids = self.get_process_id_list(sentry_unit, process)
pids = self.get_process_id_list(
sentry_unit, process, expect_success=expect_success)
pid_dict[sentry_unit].update({process: pids})
return pid_dict
@@ -488,7 +589,7 @@ class AmuletUtils(object):
return ('Unit count mismatch. expected, actual: {}, '
'{} '.format(len(expected), len(actual)))
for (e_sentry, e_proc_names) in expected.iteritems():
for (e_sentry, e_proc_names) in six.iteritems(expected):
e_sentry_name = e_sentry.info['unit_name']
if e_sentry in actual.keys():
a_proc_names = actual[e_sentry]
@@ -500,22 +601,40 @@ class AmuletUtils(object):
return ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(expected), len(actual)))
for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
zip(e_proc_names.items(), a_proc_names.items()):
if e_proc_name != a_proc_name:
return ('Process name mismatch. expected, actual: {}, '
'{}'.format(e_proc_name, a_proc_name))
a_pids_length = len(a_pids)
if e_pids_length != a_pids_length:
return ('PID count mismatch. {} ({}) expected, actual: '
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
e_pids_length, a_pids_length,
e_pids, a_pids_length,
a_pids))
# If expected is a list, ensure at least one PID quantity match
if isinstance(e_pids, list) and \
a_pids_length not in e_pids:
return fail_msg
# If expected is not bool and not list,
# ensure PID quantities match
elif not isinstance(e_pids, bool) and \
not isinstance(e_pids, list) and \
a_pids_length != e_pids:
return fail_msg
# If expected is bool True, ensure 1 or more PIDs exist
elif isinstance(e_pids, bool) and \
e_pids is True and a_pids_length < 1:
return fail_msg
# If expected is bool False, ensure 0 PIDs exist
elif isinstance(e_pids, bool) and \
e_pids is False and a_pids_length != 0:
return fail_msg
else:
self.log.debug('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name, e_proc_name,
e_pids_length, a_pids))
e_pids, a_pids))
return None
def validate_list_of_identical_dicts(self, list_of_dicts):
@@ -531,3 +650,180 @@ class AmuletUtils(object):
return 'Dicts within list are not identical'
return None
def validate_sectionless_conf(self, file_contents, expected):
"""A crude conf parser. Useful to inspect configuration files which
do not have section headers (as would be necessary in order to use
configparser), such as openstack-dashboard or rabbitmq confs."""
for line in file_contents.split('\n'):
if '=' in line:
args = line.split('=')
if len(args) <= 1:
continue
key = args[0].strip()
value = args[1].strip()
if key in expected.keys():
if expected[key] != value:
msg = ('Config mismatch. Expected, actual: {}, '
'{}'.format(expected[key], value))
amulet.raise_status(amulet.FAIL, msg=msg)
def get_unit_hostnames(self, units):
"""Return a dict of juju unit names to hostnames."""
host_names = {}
for unit in units:
host_names[unit.info['unit_name']] = \
str(unit.file_contents('/etc/hostname').strip())
self.log.debug('Unit host names: {}'.format(host_names))
return host_names
def run_cmd_unit(self, sentry_unit, cmd):
"""Run a command on a unit, return the output and exit code."""
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` command returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
msg = ('{} `{}` command returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output), code
def file_exists_on_unit(self, sentry_unit, file_name):
"""Check if a file exists on a unit."""
try:
sentry_unit.file_stat(file_name)
return True
except IOError:
return False
except Exception as e:
msg = 'Error checking file {}: {}'.format(file_name, e)
amulet.raise_status(amulet.FAIL, msg=msg)
def file_contents_safe(self, sentry_unit, file_name,
max_wait=60, fatal=False):
"""Get file contents from a sentry unit. Wrap amulet file_contents
with retry logic to address races where a file checks as existing,
but no longer exists by the time file_contents is called.
Return None if file not found. Optionally raise if fatal is True."""
unit_name = sentry_unit.info['unit_name']
file_contents = False
tries = 0
while not file_contents and tries < (max_wait / 4):
try:
file_contents = sentry_unit.file_contents(file_name)
except IOError:
self.log.debug('Attempt {} to open file {} from {} '
'failed'.format(tries, file_name,
unit_name))
time.sleep(4)
tries += 1
if file_contents:
return file_contents
elif not fatal:
return None
elif fatal:
msg = 'Failed to get file contents from unit.'
amulet.raise_status(amulet.FAIL, msg)
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
"""Open a TCP socket to check for a listening sevice on a host.
:param host: host name or IP address, default to localhost
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:returns: True if successful, False if connect failed
"""
# Resolve host name if possible
try:
connect_host = socket.gethostbyname(host)
host_human = "{} ({})".format(connect_host, host)
except socket.error as e:
self.log.warn('Unable to resolve address: '
'{} ({}) Trying anyway!'.format(host, e))
connect_host = host
host_human = connect_host
# Attempt socket connection
try:
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
knock.settimeout(timeout)
knock.connect((connect_host, port))
knock.close()
self.log.debug('Socket connect OK for host '
'{} on port {}.'.format(host_human, port))
return True
except socket.error as e:
self.log.debug('Socket connect FAIL for'
' {} port {} ({})'.format(host_human, port, e))
return False
def port_knock_units(self, sentry_units, port=22,
timeout=15, expect_success=True):
"""Open a TCP socket to check for a listening sevice on each
listed juju unit.
:param sentry_units: list of sentry unit pointers
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:expect_success: True by default, set False to invert logic
:returns: None if successful, Failure message otherwise
"""
for unit in sentry_units:
host = unit.info['public-address']
connected = self.port_knock_tcp(host, port, timeout)
if not connected and expect_success:
return 'Socket connect failed.'
elif connected and not expect_success:
return 'Socket connected unexpectedly.'
def get_uuid_epoch_stamp(self):
"""Returns a stamp string based on uuid4 and epoch time. Useful in
generating test messages which need to be unique-ish."""
return '[{}-{}]'.format(uuid.uuid4(), time.time())
# amulet juju action helpers:
def run_action(self, unit_sentry, action,
_check_output=subprocess.check_output,
params=None):
"""Run the named action on a given unit sentry.
params a dict of parameters to use
_check_output parameter is used for dependency injection.
@return action_id.
"""
unit_id = unit_sentry.info["unit_name"]
command = ["juju", "action", "do", "--format=json", unit_id, action]
if params is not None:
for key, value in params.iteritems():
command.append("{}={}".format(key, value))
self.log.info("Running command: %s\n" % " ".join(command))
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
action_id = data[u'Action queued with id']
return action_id
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
"""Wait for a given action, returning if it completed or not.
_check_output parameter is used for dependency injection.
"""
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
action_id]
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
return data.get(u"status") == "completed"
def status_get(self, unit):
"""Return the current service status of this unit."""
raw_status, return_code = unit.run(
"status-get --format=json --include-data")
if return_code != 0:
return ("unknown", "")
status = json.loads(raw_status)
return (status["status"], status["message"])


@@ -1,254 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
"""Charm Helpers ansible - declare the state of your machines.
This helper enables you to declare your machine state, rather than
program it procedurally (and have to test each change to your procedures).
Your install hook can be as simple as::
{{{
import charmhelpers.contrib.ansible
def install():
charmhelpers.contrib.ansible.install_ansible_support()
charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
}}}
and won't need to change (nor will its tests) when you change the machine
state.
All of your juju config and relation-data are available as template
variables within your playbooks and templates. An install playbook looks
something like::
{{{
---
- hosts: localhost
user: root
tasks:
- name: Add private repositories.
template:
src: ../templates/private-repositories.list.jinja2
dest: /etc/apt/sources.list.d/private.list
- name: Update the cache.
apt: update_cache=yes
- name: Install dependencies.
apt: pkg={{ item }}
with_items:
- python-mimeparse
- python-webob
- sunburnt
- name: Setup groups.
group: name={{ item.name }} gid={{ item.gid }}
with_items:
- { name: 'deploy_user', gid: 1800 }
- { name: 'service_user', gid: 1500 }
...
}}}
Read more online about `playbooks`_ and standard ansible `modules`_.
.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
.. _modules: http://www.ansibleworks.com/docs/modules.html
A further feature of the ansible hooks is to provide a lightweight "action"
scripting tool. This is a decorator that you apply to a function, and that
function can now receive cli args, and can pass extra args to the playbook.
e.g.
@hooks.action()
def some_action(amount, force="False"):
"Usage: some-action AMOUNT [force=True]" # <-- shown on error
# process the arguments
# do some calls
# return extra-vars to be passed to ansible-playbook
return {
'amount': int(amount),
'type': force,
}
You can now create a symlink to hooks.py that can be invoked like a hook, but
with cli params:
# link actions/some-action to hooks/hooks.py
actions/some-action amount=10 force=true
"""
import os
import stat
import subprocess
import functools
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
import charmhelpers.fetch
charm_dir = os.environ.get('CHARM_DIR', '')
ansible_hosts_path = '/etc/ansible/hosts'
# Ansible will automatically include any vars in the following
# file in its inventory when run locally.
ansible_vars_path = '/etc/ansible/host_vars/localhost'
def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
"""Installs the ansible package.
By default it is installed from the `PPA`_ linked from
the ansible `website`_ or from a PPA specified by charm config.
.. _PPA: https://launchpad.net/~rquillo/+archive/ansible
.. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu
If from_ppa is False, you must ensure that the package is available
from a configured repository.
"""
if from_ppa:
charmhelpers.fetch.add_source(ppa_location)
charmhelpers.fetch.apt_update(fatal=True)
charmhelpers.fetch.apt_install('ansible')
with open(ansible_hosts_path, 'w+') as hosts_file:
hosts_file.write('localhost ansible_connection=local')
def apply_playbook(playbook, tags=None, extra_vars=None):
tags = tags or []
tags = ",".join(tags)
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
ansible_vars_path, namespace_separator='__',
allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))
# we want ansible's log output to be unbuffered
env = os.environ.copy()
env['PYTHONUNBUFFERED'] = "1"
call = [
'ansible-playbook',
'-c',
'local',
playbook,
]
if tags:
call.extend(['--tags', '{}'.format(tags)])
if extra_vars:
extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
call.extend(['--extra-vars', " ".join(extra)])
subprocess.check_call(call, env=env)
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
"""Run a playbook with the hook-name as the tag.
This helper builds on the standard hookenv.Hooks helper,
but additionally runs the playbook with the hook-name specified
using --tags (ie. running all the tasks tagged with the hook-name).
Example::
hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
# All the tasks within my_machine_state.yaml tagged with 'install'
# will be run automatically after do_custom_work()
@hooks.hook()
def install():
do_custom_work()
# For most of your hooks, you won't need to do anything other
# than run the tagged tasks for the hook:
@hooks.hook('config-changed', 'start', 'stop')
def just_use_playbook():
pass
# As a convenience, you can avoid the above noop function by specifying
# the hooks which are handled by ansible-only and they'll be registered
# for you:
# hooks = AnsibleHooks(
# 'playbooks/my_machine_state.yaml',
# default_hooks=['config-changed', 'start', 'stop'])
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self, playbook_path, default_hooks=None):
"""Register any hooks handled by ansible."""
super(AnsibleHooks, self).__init__()
self._actions = {}
self.playbook_path = playbook_path
default_hooks = default_hooks or []
def noop(*args, **kwargs):
pass
for hook in default_hooks:
self.register(hook, noop)
def register_action(self, name, function):
"""Register a hook"""
self._actions[name] = function
def execute(self, args):
"""Execute the hook followed by the playbook using the hook as tag."""
hook_name = os.path.basename(args[0])
extra_vars = None
if hook_name in self._actions:
extra_vars = self._actions[hook_name](args[1:])
else:
super(AnsibleHooks, self).execute(args)
charmhelpers.contrib.ansible.apply_playbook(
self.playbook_path, tags=[hook_name], extra_vars=extra_vars)
def action(self, *action_names):
"""Decorator, registering them as actions"""
def action_wrapper(decorated):
@functools.wraps(decorated)
def wrapper(argv):
kwargs = dict(arg.split('=') for arg in argv)
try:
return decorated(**kwargs)
except TypeError as e:
if decorated.__doc__:
e.args += (decorated.__doc__,)
raise
self.register_action(decorated.__name__, wrapper)
if '_' in decorated.__name__:
self.register_action(
decorated.__name__.replace('_', '-'), wrapper)
return wrapper
return action_wrapper
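
For the record, a charm's hooks.py built on this (now removed) module would have looked roughly like the following; the playbook path and action name are assumptions:

import sys
import charmhelpers.contrib.ansible

hooks = charmhelpers.contrib.ansible.AnsibleHooks(
    'playbooks/site.yaml',
    default_hooks=['config-changed', 'start', 'stop'])

@hooks.hook('install')
def install():
    charmhelpers.contrib.ansible.install_ansible_support(from_ppa=True)

@hooks.action()
def set_quota(amount, force="False"):
    "Usage: set-quota AMOUNT [force=True]"
    return {'amount': int(amount), 'force': force}

if __name__ == "__main__":
    hooks.execute(sys.argv)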


@@ -1,126 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import time
import os
from distutils.spawn import find_executable
from charmhelpers.core.hookenv import (
in_relation_hook,
relation_ids,
relation_set,
relation_get,
)
def action_set(key, val):
if find_executable('action-set'):
action_cmd = ['action-set']
if isinstance(val, dict):
for k, v in iter(val.items()):
action_set('%s.%s' % (key, k), v)
return True
action_cmd.append('%s=%s' % (key, val))
subprocess.check_call(action_cmd)
return True
return False
class Benchmark():
"""
Helper class for the `benchmark` interface.
:param list actions: Define the actions that are also benchmarks
From inside the benchmark-relation-changed hook, you would call:
Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])
Examples:
siege = Benchmark(['siege'])
siege.start()
[... run siege ...]
# The higher the score, the better the benchmark
siege.set_composite_score(16.70, 'trans/sec', 'desc')
siege.finish()
"""
BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing
required_keys = [
'hostname',
'port',
'graphite_port',
'graphite_endpoint',
'api_port'
]
def __init__(self, benchmarks=None):
if in_relation_hook():
if benchmarks is not None:
for rid in sorted(relation_ids('benchmark')):
relation_set(relation_id=rid, relation_settings={
'benchmarks': ",".join(benchmarks)
})
# Check the relation data
config = {}
for key in self.required_keys:
val = relation_get(key)
if val is not None:
config[key] = val
else:
# We don't have all of the required keys
config = {}
break
if len(config):
with open(self.BENCHMARK_CONF, 'w') as f:
for key, val in iter(config.items()):
f.write("%s=%s\n" % (key, val))
@staticmethod
def start():
action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
"""
If the collectd charm is also installed, tell it to send a snapshot
of the current profile data.
"""
COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
if os.path.exists(COLLECT_PROFILE_DATA):
subprocess.check_output([COLLECT_PROFILE_DATA])
@staticmethod
def finish():
action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
@staticmethod
def set_composite_score(value, units, direction='asc'):
"""
Set the composite score for a benchmark run. This is a single number
representative of the benchmark results. This could be the most
important metric, or an amalgamation of metric scores.
"""
return action_set(
"meta.composite",
{'value': value, 'units': units, 'direction': direction}
)
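
A minimal sketch matching the class docstring above; the import path and score values are assumptions, since the file is removed by this commit:

from charmhelpers.contrib.benchmark import Benchmark

siege = Benchmark(['siege'])   # advertise the action on the benchmark relation
siege.start()                  # records meta.start via action_set
# ... run the workload under test ...
siege.set_composite_score(16.70, 'trans/sec', 'desc')
siege.finish()                 # records meta.stop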


@@ -1,208 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import warnings
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
import operator
import tempfile
import time
import yaml
import subprocess
import six
if six.PY3:
from urllib.request import urlopen
from urllib.error import (HTTPError, URLError)
else:
from urllib2 import (urlopen, HTTPError, URLError)
"""Helper functions for writing Juju charms in Python."""
__metaclass__ = type
__all__ = [
# 'get_config', # core.hookenv.config()
# 'log', # core.hookenv.log()
# 'log_entry', # core.hookenv.log()
# 'log_exit', # core.hookenv.log()
# 'relation_get', # core.hookenv.relation_get()
# 'relation_set', # core.hookenv.relation_set()
# 'relation_ids', # core.hookenv.relation_ids()
# 'relation_list', # core.hookenv.relation_units()
# 'config_get', # core.hookenv.config()
# 'unit_get', # core.hookenv.unit_get()
# 'open_port', # core.hookenv.open_port()
# 'close_port', # core.hookenv.close_port()
# 'service_control', # core.host.service()
'unit_info', # client-side, NOT IMPLEMENTED
'wait_for_machine', # client-side, NOT IMPLEMENTED
'wait_for_page_contents', # client-side, NOT IMPLEMENTED
'wait_for_relation', # client-side, NOT IMPLEMENTED
'wait_for_unit', # client-side, NOT IMPLEMENTED
]
SLEEP_AMOUNT = 0.1
# We create a juju_status Command here because it makes testing much,
# much easier.
def juju_status():
return subprocess.check_output(['juju', 'status'])
# re-implemented as charmhelpers.fetch.configure_sources()
# def configure_source(update=False):
# source = config_get('source')
# if ((source.startswith('ppa:') or
# source.startswith('cloud:') or
# source.startswith('http:'))):
# run('add-apt-repository', source)
# if source.startswith("http:"):
# run('apt-key', 'import', config_get('key'))
# if update:
# run('apt-get', 'update')
# DEPRECATED: client-side only
def make_charm_config_file(charm_config):
charm_config_file = tempfile.NamedTemporaryFile(mode='w+')
charm_config_file.write(yaml.dump(charm_config))
charm_config_file.flush()
# The NamedTemporaryFile instance is returned instead of just the name
# because we want to take advantage of garbage collection-triggered
# deletion of the temp file when it goes out of scope in the caller.
return charm_config_file
# DEPRECATED: client-side only
def unit_info(service_name, item_name, data=None, unit=None):
if data is None:
data = yaml.safe_load(juju_status())
service = data['services'].get(service_name)
if service is None:
# XXX 2012-02-08 gmb:
# This allows us to cope with the race condition that we
# have between deploying a service and having it come up in
# `juju status`. We could probably do with cleaning it up so
# that it fails a bit more noisily after a while.
return ''
units = service['units']
if unit is not None:
item = units[unit][item_name]
else:
# It might seem odd to sort the units here, but we do it to
# ensure that when no unit is specified, the first unit for the
# service (or at least the one with the lowest number) is the
# one whose data gets returned.
sorted_unit_names = sorted(units.keys())
item = units[sorted_unit_names[0]][item_name]
return item
# DEPRECATED: client-side only
def get_machine_data():
return yaml.safe_load(juju_status())['machines']
# DEPRECATED: client-side only
def wait_for_machine(num_machines=1, timeout=300):
"""Wait `timeout` seconds for `num_machines` machines to come up.
This wait_for... function can be called by other wait_for functions
whose timeouts might be too short in situations where only a bare
Juju setup has been bootstrapped.
:return: A tuple of (num_machines, time_taken). This is used for
testing.
"""
# You may think this is a hack, and you'd be right. The easiest way
# to tell what environment we're working in (LXC vs EC2) is to check
# the dns-name of the first machine. If it's localhost we're in LXC
# and we can just return here.
if get_machine_data()[0]['dns-name'] == 'localhost':
return 1, 0
start_time = time.time()
while True:
# Drop the first machine, since it's the Zookeeper and that's
# not a machine that we need to wait for. This will only work
# for EC2 environments, which is why we return early above if
# we're in LXC.
machine_data = get_machine_data()
non_zookeeper_machines = [
machine_data[key] for key in list(machine_data.keys())[1:]]
if len(non_zookeeper_machines) >= num_machines:
all_machines_running = True
for machine in non_zookeeper_machines:
if machine.get('instance-state') != 'running':
all_machines_running = False
break
if all_machines_running:
break
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for service to start')
time.sleep(SLEEP_AMOUNT)
return num_machines, time.time() - start_time
# DEPRECATED: client-side only
def wait_for_unit(service_name, timeout=480):
"""Wait `timeout` seconds for a given service name to come up."""
wait_for_machine(num_machines=1)
start_time = time.time()
while True:
state = unit_info(service_name, 'agent-state')
if 'error' in state or state == 'started':
break
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for service to start')
time.sleep(SLEEP_AMOUNT)
if state != 'started':
raise RuntimeError('unit did not start, agent-state: ' + state)
# DEPRECATED: client-side only
def wait_for_relation(service_name, relation_name, timeout=120):
"""Wait `timeout` seconds for a given relation to come up."""
start_time = time.time()
while True:
relation = unit_info(service_name, 'relations').get(relation_name)
if relation is not None and relation['state'] == 'up':
break
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for relation to be up')
time.sleep(SLEEP_AMOUNT)
# DEPRECATED: client-side only
def wait_for_page_contents(url, contents, timeout=120, validate=None):
if validate is None:
validate = operator.contains
start_time = time.time()
while True:
try:
stream = urlopen(url)
except (HTTPError, URLError):
pass
else:
page = stream.read()
if validate(page, contents):
return page
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for contents of ' + url)
time.sleep(SLEEP_AMOUNT)
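
These deprecated helpers ran client-side against `juju status`; a minimal sketch, with the service name and URL as assumptions:

wait_for_unit('wordpress', timeout=480)
page = wait_for_page_contents('http://example.com/', 'It works!',
                              timeout=120)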


@@ -1,15 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -1,360 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
"""Compatibility with the nrpe-external-master charm"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import subprocess
import pwd
import grp
import os
import glob
import shutil
import re
import shlex
import yaml
from charmhelpers.core.hookenv import (
config,
local_unit,
log,
relation_ids,
relation_set,
relations_of_type,
)
from charmhelpers.core.host import service
# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
#
# 1. Update metadata.yaml
#
# provides:
# (...)
# nrpe-external-master:
# interface: nrpe-external-master
# scope: container
#
# and/or
#
# provides:
# (...)
# local-monitors:
# interface: local-monitors
# scope: container
#
# 2. Add the following to config.yaml
#
# nagios_context:
# default: "juju"
# type: string
# description: |
# Used by the nrpe subordinate charms.
# A string that will be prepended to instance name to set the host name
# in nagios. So for instance the hostname would be something like:
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
# nagios_servicegroups:
# default: ""
# type: string
# description: |
# A comma-separated list of nagios servicegroups.
# If left empty, the nagios_context will be used as the servicegroup
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
# 4. Update your hooks.py with something like this:
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE()
# nrpe_compat.add_check(
# shortname = "myservice",
# description = "Check MyService",
# check_cmd = "check_http -w 2 -c 10 http://localhost"
# )
# nrpe_compat.add_check(
# "myservice_other",
# "Check for widget failures",
# check_cmd = "/srv/myapp/scripts/widget_check"
# )
# nrpe_compat.write()
#
# def config_changed():
# (...)
# update_nrpe_config()
#
# def nrpe_external_master_relation_changed():
# update_nrpe_config()
#
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed
class CheckException(Exception):
pass
class Check(object):
shortname_re = '[A-Za-z0-9-_]+$'
service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
use active-service
host_name {nagios_hostname}
service_description {nagios_hostname}[{shortname}] """
"""{description}
check_command check_nrpe!{command}
servicegroups {nagios_servicegroup}
}}
""")
def __init__(self, shortname, description, check_cmd):
super(Check, self).__init__()
# XXX: could be better to calculate this from the service name
if not re.match(self.shortname_re, shortname):
raise CheckException("shortname must match {}".format(
Check.shortname_re))
self.shortname = shortname
self.command = "check_{}".format(shortname)
# Note: a set of invalid characters is defined by the
# Nagios server config
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
self.description = description
self.check_cmd = self._locate_cmd(check_cmd)
def _locate_cmd(self, check_cmd):
search_path = (
'/usr/lib/nagios/plugins',
'/usr/local/lib/nagios/plugins',
)
parts = shlex.split(check_cmd)
for path in search_path:
if os.path.exists(os.path.join(path, parts[0])):
command = os.path.join(path, parts[0])
if len(parts) > 1:
command += " " + " ".join(parts[1:])
return command
log('Check command not found: {}'.format(parts[0]))
return ''
def write(self, nagios_context, hostname, nagios_servicegroups):
nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
self.command)
with open(nrpe_check_file, 'w') as nrpe_check_config:
nrpe_check_config.write("# check {}\n".format(self.shortname))
nrpe_check_config.write("command[{}]={}\n".format(
self.command, self.check_cmd))
if not os.path.exists(NRPE.nagios_exportdir):
log('Not writing service config as {} is not accessible'.format(
NRPE.nagios_exportdir))
else:
self.write_service_config(nagios_context, hostname,
nagios_servicegroups)
def write_service_config(self, nagios_context, hostname,
nagios_servicegroups):
for f in os.listdir(NRPE.nagios_exportdir):
if re.search('.*{}.cfg'.format(self.command), f):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
templ_vars = {
'nagios_hostname': hostname,
'nagios_servicegroup': nagios_servicegroups,
'description': self.description,
'shortname': self.shortname,
'command': self.command,
}
nrpe_service_text = Check.service_template.format(**templ_vars)
nrpe_service_file = '{}/service__{}_{}.cfg'.format(
NRPE.nagios_exportdir, hostname, self.command)
with open(nrpe_service_file, 'w') as nrpe_service_config:
nrpe_service_config.write(str(nrpe_service_text))
def run(self):
subprocess.call(self.check_cmd)
class NRPE(object):
nagios_logdir = '/var/log/nagios'
nagios_exportdir = '/var/lib/nagios/export'
nrpe_confdir = '/etc/nagios/nrpe.d'
def __init__(self, hostname=None):
super(NRPE, self).__init__()
self.config = config()
self.nagios_context = self.config['nagios_context']
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
self.nagios_servicegroups = self.config['nagios_servicegroups']
else:
self.nagios_servicegroups = self.nagios_context
self.unit_name = local_unit().replace('/', '-')
if hostname:
self.hostname = hostname
else:
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
def add_check(self, *args, **kwargs):
self.checks.append(Check(*args, **kwargs))
def write(self):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
except:
log("Nagios user not set up, nrpe checks not updated")
return
if not os.path.exists(NRPE.nagios_logdir):
os.mkdir(NRPE.nagios_logdir)
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
nrpecheck.write(self.nagios_context, self.hostname,
self.nagios_servicegroups)
nrpe_monitors[nrpecheck.shortname] = {
"command": nrpecheck.command,
}
service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
for rid in monitor_ids:
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_host_context
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_hostname' in rel:
return rel['nagios_host_context']
def get_nagios_hostname(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_hostname
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_hostname' in rel:
return rel['nagios_hostname']
def get_nagios_unit_name(relation_name='nrpe-external-master'):
"""
Return the nagios unit name prepended with host_context if needed
:param str relation_name: Name of relation nrpe sub joined to
"""
host_context = get_nagios_hostcontext(relation_name)
if host_context:
unit = "%s:%s" % (host_context, local_unit())
else:
unit = local_unit()
return unit
def add_init_service_checks(nrpe, services, unit_name):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param list services: List of services to check
:param str unit_name: Unit name to use in check description
"""
for svc in services:
upstart_init = '/etc/init/%s.conf' % svc
sysv_init = '/etc/init.d/%s' % svc
if os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)
elif os.path.exists(sysv_init):
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
cron_file = ('*/5 * * * * root '
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
'-s /etc/init.d/%s status > '
'/var/lib/nagios/service-check-%s.txt\n' % (svc,
svc)
)
f = open(cronpath, 'w')
f.write(cron_file)
f.close()
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_status_file.py -f '
'/var/lib/nagios/service-check-%s.txt' % svc,
)
def copy_nrpe_checks():
"""
Copy the nrpe checks into place
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
'charmhelpers', 'contrib', 'openstack',
'files')
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
if os.path.isfile(fname):
shutil.copy2(fname,
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
def add_haproxy_checks(nrpe, unit_name):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param str unit_name: Unit name to use in check description
"""
nrpe.add_check(
shortname='haproxy_servers',
description='Check HAProxy {%s}' % unit_name,
check_cmd='check_haproxy.sh')
nrpe.add_check(
shortname='haproxy_queue',
description='Check HAProxy queue depth {%s}' % unit_name,
check_cmd='check_haproxy_queue_depth.sh')
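# Hypothetical end-to-end example (not part of charm-helpers): wiring the
# helpers in this module together from a charm's nrpe-external-master or
# local-monitors hook. The service name 'my-service' is a placeholder.
#
#     hostname = get_nagios_hostname()
#     current_unit = get_nagios_unit_name()
#     nrpe_setup = NRPE(hostname=hostname)
#     add_init_service_checks(nrpe_setup, ['my-service'], current_unit)
#     add_haproxy_checks(nrpe_setup, current_unit)
#     nrpe_setup.write()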

View File

@@ -1,175 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
'''
Functions for managing volumes in juju units. One volume is supported per unit.
Subordinates may have their own storage, provided it is on its own partition.
Configuration stanzas::
volume-ephemeral:
type: boolean
default: true
description: >
If false, a volume is mounted as specified in "volume-map"
If true, ephemeral storage will be used, meaning that log data
will only exist as long as the machine. YOU HAVE BEEN WARNED.
volume-map:
type: string
default: {}
description: >
YAML map of units to device names, e.g.:
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
Service units will raise a configure-error if volume-ephemeral
is 'true' and no volume-map value is set. Use 'juju set' to set a
value and 'juju resolved' to complete configuration.
Usage::
from charmsupport.volumes import configure_volume, VolumeConfigurationError
from charmsupport.hookenv import log, ERROR
def pre_mount_hook():
stop_service('myservice')
def post_mount_hook():
start_service('myservice')
if __name__ == '__main__':
try:
configure_volume(before_change=pre_mount_hook,
after_change=post_mount_hook)
except VolumeConfigurationError:
log('Storage could not be configured', ERROR)
'''
# XXX: Known limitations
# - fstab is neither consulted nor updated
import os
from charmhelpers.core import hookenv
from charmhelpers.core import host
import yaml
MOUNT_BASE = '/srv/juju/volumes'
class VolumeConfigurationError(Exception):
'''Volume configuration data is missing or invalid'''
pass
def get_config():
'''Gather and sanity-check volume configuration data'''
volume_config = {}
config = hookenv.config()
errors = False
if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
volume_config['ephemeral'] = True
else:
volume_config['ephemeral'] = False
try:
volume_map = yaml.safe_load(config.get('volume-map', '{}'))
except yaml.YAMLError as e:
hookenv.log("Error parsing YAML volume-map: {}".format(e),
hookenv.ERROR)
errors = True
if volume_map is None:
# probably an empty string
volume_map = {}
elif not isinstance(volume_map, dict):
hookenv.log("Volume-map should be a dictionary, not {}".format(
type(volume_map)))
errors = True
volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
if volume_config['device'] and volume_config['ephemeral']:
# asked for ephemeral storage but also defined a volume ID
hookenv.log('A volume is defined for this unit, but ephemeral '
'storage was requested', hookenv.ERROR)
errors = True
elif not volume_config['device'] and not volume_config['ephemeral']:
# asked for permanent storage but did not define volume ID
hookenv.log('Ephemeral storage was requested, but there is no volume '
'defined for this unit.', hookenv.ERROR)
errors = True
unit_mount_name = hookenv.local_unit().replace('/', '-')
volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
if errors:
return None
return volume_config
def mount_volume(config):
if os.path.exists(config['mountpoint']):
if not os.path.isdir(config['mountpoint']):
hookenv.log('Not a directory: {}'.format(config['mountpoint']))
raise VolumeConfigurationError()
else:
host.mkdir(config['mountpoint'])
if os.path.ismount(config['mountpoint']):
unmount_volume(config)
if not host.mount(config['device'], config['mountpoint'], persist=True):
raise VolumeConfigurationError()
def unmount_volume(config):
if os.path.ismount(config['mountpoint']):
if not host.umount(config['mountpoint'], persist=True):
raise VolumeConfigurationError()
def managed_mounts():
'''List of all mounted managed volumes'''
return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
def configure_volume(before_change=lambda: None, after_change=lambda: None):
'''Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes.
'''
config = get_config()
if not config:
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
raise VolumeConfigurationError()
if config['ephemeral']:
if os.path.ismount(config['mountpoint']):
before_change()
unmount_volume(config)
after_change()
return 'ephemeral'
else:
# persistent storage
if os.path.ismount(config['mountpoint']):
mounts = dict(managed_mounts())
if mounts.get(config['mountpoint']) != config['device']:
before_change()
unmount_volume(config)
mount_volume(config)
after_change()
else:
before_change()
mount_volume(config)
after_change()
return config['mountpoint']
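# Hypothetical example (not part of this module): the persistent-volume
# configuration described in the module docstring, set from the CLI for a
# unit named rsyslog/0:
#
#     juju set rsyslog volume-ephemeral=false \
#         volume-map="{ rsyslog/0: /dev/vdb }"
#
# With that config, configure_volume() mounts /dev/vdb under
# /srv/juju/volumes/rsyslog-0 and returns that mount point.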

View File

@@ -1,412 +0,0 @@
"""Helper for working with a MySQL database"""
import json
import re
import sys
import platform
import os
import glob
from charmhelpers.core.host import (
mkdir,
pwgen,
write_file
)
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
related_units,
unit_get,
log,
DEBUG,
INFO,
WARNING,
)
from charmhelpers.fetch import (
apt_install,
apt_update,
filter_installed_packages,
)
from charmhelpers.contrib.peerstorage import (
peer_store,
peer_retrieve,
)
from charmhelpers.contrib.network.ip import get_host_ip
try:
import MySQLdb
except ImportError:
apt_update(fatal=True)
apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
import MySQLdb
class MySQLHelper(object):
def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
migrate_passwd_to_peer_relation=True,
delete_ondisk_passwd_file=True):
self.host = host
# Password file path templates
self.root_passwd_file_template = rpasswdf_template
self.user_passwd_file_template = upasswdf_template
self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
# If we migrate we have the option to delete local copy of root passwd
self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
def connect(self, user='root', password=None):
log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
self.connection = MySQLdb.connect(user=user, host=self.host,
passwd=password)
def database_exists(self, db_name):
cursor = self.connection.cursor()
try:
cursor.execute("SHOW DATABASES")
databases = [i[0] for i in cursor.fetchall()]
finally:
cursor.close()
return db_name in databases
def create_database(self, db_name):
cursor = self.connection.cursor()
try:
cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8"
.format(db_name))
finally:
cursor.close()
def grant_exists(self, db_name, db_user, remote_ip):
cursor = self.connection.cursor()
priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
"TO '{}'@'{}'".format(db_name, db_user, remote_ip)
try:
cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
remote_ip))
grants = [i[0] for i in cursor.fetchall()]
except MySQLdb.OperationalError:
return False
finally:
cursor.close()
# TODO: review for different grants
return priv_string in grants
def create_grant(self, db_name, db_user, remote_ip, password):
cursor = self.connection.cursor()
try:
# TODO: review for different grants
cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "
"IDENTIFIED BY '{}'".format(db_name,
db_user,
remote_ip,
password))
finally:
cursor.close()
def create_admin_grant(self, db_user, remote_ip, password):
cursor = self.connection.cursor()
try:
cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
"IDENTIFIED BY '{}'".format(db_user,
remote_ip,
password))
finally:
cursor.close()
def cleanup_grant(self, db_user, remote_ip):
cursor = self.connection.cursor()
try:
cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
"AND HOST='{}'".format(db_user,
remote_ip))
finally:
cursor.close()
def execute(self, sql):
"""Execute arbitary SQL against the database."""
cursor = self.connection.cursor()
try:
cursor.execute(sql)
finally:
cursor.close()
def migrate_passwords_to_peer_relation(self, excludes=None):
"""Migrate any passwords storage on disk to cluster peer relation."""
dirname = os.path.dirname(self.root_passwd_file_template)
path = os.path.join(dirname, '*.passwd')
for f in glob.glob(path):
if excludes and f in excludes:
log("Excluding %s from peer migration" % (f), level=DEBUG)
continue
key = os.path.basename(f)
with open(f, 'r') as passwd:
_value = passwd.read().strip()
try:
peer_store(key, _value)
if self.delete_ondisk_passwd_file:
os.unlink(f)
except ValueError:
# NOTE cluster relation not yet ready - skip for now
pass
def get_mysql_password_on_disk(self, username=None, password=None):
"""Retrieve, generate or store a mysql password for the provided
username on disk."""
if username:
template = self.user_passwd_file_template
passwd_file = template.format(username)
else:
passwd_file = self.root_passwd_file_template
_password = None
if os.path.exists(passwd_file):
log("Using existing password file '%s'" % passwd_file, level=DEBUG)
with open(passwd_file, 'r') as passwd:
_password = passwd.read().strip()
else:
log("Generating new password file '%s'" % passwd_file, level=DEBUG)
if not os.path.isdir(os.path.dirname(passwd_file)):
# NOTE: need to ensure this is not mysql root dir (which needs
# to be mysql readable)
mkdir(os.path.dirname(passwd_file), owner='root', group='root',
perms=0o770)
# Force permissions - for some reason the chmod in makedirs
# fails
os.chmod(os.path.dirname(passwd_file), 0o770)
_password = password or pwgen(length=32)
write_file(passwd_file, _password, owner='root', group='root',
perms=0o660)
return _password
def passwd_keys(self, username):
"""Generator to return keys used to store passwords in peer store.
NOTE: we support both legacy and new format to support mysql
charm prior to refactor. This is necessary to avoid LP 1451890.
"""
keys = []
if username == 'mysql':
log("Bad username '%s'" % (username), level=WARNING)
if username:
# IMPORTANT: *newer* format must be returned first
keys.append('mysql-%s.passwd' % (username))
keys.append('%s.passwd' % (username))
else:
keys.append('mysql.passwd')
for key in keys:
yield key
def get_mysql_password(self, username=None, password=None):
"""Retrieve, generate or store a mysql password for the provided
username using peer relation cluster."""
excludes = []
# First check peer relation.
try:
for key in self.passwd_keys(username):
_password = peer_retrieve(key)
if _password:
break
# If root password available don't update peer relation from local
if _password and not username:
excludes.append(self.root_passwd_file_template)
except ValueError:
# cluster relation is not yet started; use on-disk
_password = None
# If none available, generate new one
if not _password:
_password = self.get_mysql_password_on_disk(username, password)
# Put on wire if required
if self.migrate_passwd_to_peer_relation:
self.migrate_passwords_to_peer_relation(excludes=excludes)
return _password
def get_mysql_root_password(self, password=None):
"""Retrieve or generate mysql root password for service units."""
return self.get_mysql_password(username=None, password=password)
def normalize_address(self, hostname):
"""Ensure that address returned is an IP address (i.e. not fqdn)"""
if config_get('prefer-ipv6'):
# TODO: add support for ipv6 dns
return hostname
if hostname != unit_get('private-address'):
return get_host_ip(hostname, fallback=hostname)
# Otherwise assume localhost
return '127.0.0.1'
def get_allowed_units(self, database, username, relation_id=None):
"""Get list of units with access grants for database with username.
This is typically used to provide shared-db relations with a list of
which units have been granted access to the given database.
"""
self.connect(password=self.get_mysql_root_password())
allowed_units = set()
for unit in related_units(relation_id):
settings = relation_get(rid=relation_id, unit=unit)
# First check for setting with prefix, then without
for attr in ["%s_hostname" % (database), 'hostname']:
hosts = settings.get(attr, None)
if hosts:
break
if hosts:
# hostname can be json-encoded list of hostnames
try:
hosts = json.loads(hosts)
except ValueError:
hosts = [hosts]
else:
hosts = [settings['private-address']]
if hosts:
for host in hosts:
host = self.normalize_address(host)
if self.grant_exists(database, username, host):
log("Grant exists for host '%s' on db '%s'" %
(host, database), level=DEBUG)
if unit not in allowed_units:
allowed_units.add(unit)
else:
log("Grant does NOT exist for host '%s' on db '%s'" %
(host, database), level=DEBUG)
else:
log("No hosts found for grant check", level=INFO)
return allowed_units
def configure_db(self, hostname, database, username, admin=False):
"""Configure access to database for username from hostname."""
self.connect(password=self.get_mysql_root_password())
if not self.database_exists(database):
self.create_database(database)
remote_ip = self.normalize_address(hostname)
password = self.get_mysql_password(username)
if not self.grant_exists(database, username, remote_ip):
if not admin:
self.create_grant(database, username, remote_ip, password)
else:
self.create_admin_grant(username, remote_ip, password)
return password
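# Hypothetical usage sketch (not part of this module): granting a remote
# unit access to a database from a shared-db hook. Paths and names below
# are placeholders.
#
#     helper = MySQLHelper(rpasswdf_template='/var/lib/charm/mysql.passwd',
#                          upasswdf_template='/var/lib/charm/mysql-{}.passwd')
#     password = helper.configure_db('10.0.0.5', 'nova', 'nova')
#     # 'password' can then be returned on the relation to the remote unit.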
class PerconaClusterHelper(object):
# Round calculated sizes down to a 16MB boundary to avoid wasted bytes.
DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
DEFAULT_INNODB_BUFFER_FACTOR = 0.50
def human_to_bytes(self, human):
"""Convert human readable configuration options to bytes."""
num_re = re.compile('^[0-9]+$')
if num_re.match(human):
return int(human)
factors = {
'K': 1024,
'M': 1048576,
'G': 1073741824,
'T': 1099511627776
}
modifier = human[-1]
if modifier in factors:
return int(human[:-1]) * factors[modifier]
if modifier == '%':
total_ram = self.human_to_bytes(self.get_mem_total())
if self.is_32bit_system() and total_ram > self.sys_mem_limit():
total_ram = self.sys_mem_limit()
factor = int(human[:-1]) * 0.01
pctram = total_ram * factor
return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))
raise ValueError("Can only convert K, M, G, T or % values")
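# e.g. human_to_bytes('512M') == 536870912, while human_to_bytes('75%')
# returns 75% of total RAM rounded down to a DEFAULT_PAGE_SIZE multiple.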
def is_32bit_system(self):
"""Determine whether system is 32 or 64 bit."""
try:
return sys.maxsize < 2 ** 32
except OverflowError:
return False
def sys_mem_limit(self):
"""Determine the default memory limit for the current service unit."""
if platform.machine() in ['armv7l']:
_mem_limit = self.human_to_bytes('2700M') # experimentally determined
else:
# Limit for x86 based 32bit systems
_mem_limit = self.human_to_bytes('4G')
return _mem_limit
def get_mem_total(self):
"""Calculate the total memory in the current service unit."""
with open('/proc/meminfo') as meminfo_file:
for line in meminfo_file:
key, mem = line.split(':', 1)
if key == 'MemTotal':
mtot, modifier = mem.strip().split(' ')
return '%s%s' % (mtot, modifier[0].upper())
def parse_config(self):
"""Parse charm configuration and calculate values for config files."""
config = config_get()
mysql_config = {}
if 'max-connections' in config:
mysql_config['max_connections'] = config['max-connections']
if 'wait-timeout' in config:
mysql_config['wait_timeout'] = config['wait-timeout']
if 'innodb-flush-log-at-trx-commit' in config:
mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']
# Set a sane default key_buffer size
mysql_config['key_buffer'] = self.human_to_bytes('32M')
total_memory = self.human_to_bytes(self.get_mem_total())
dataset_bytes = config.get('dataset-size', None)
innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)
if innodb_buffer_pool_size:
innodb_buffer_pool_size = self.human_to_bytes(
innodb_buffer_pool_size)
elif dataset_bytes:
log("Option 'dataset-size' has been deprecated, please use the "
"'innodb-buffer-pool-size' option instead", level="WARN")
innodb_buffer_pool_size = self.human_to_bytes(
dataset_bytes)
else:
innodb_buffer_pool_size = int(
total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)
if innodb_buffer_pool_size > total_memory:
log("innodb_buffer_pool_size {} is greater than system available memory {}".format(
innodb_buffer_pool_size,
total_memory), level='WARN')
mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
return mysql_config

View File

@@ -23,7 +23,7 @@ import socket
from functools import partial
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import (
log,
WARNING,
@@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import (
try:
import netifaces
except ImportError:
apt_install('python-netifaces')
apt_update(fatal=True)
apt_install('python-netifaces', fatal=True)
import netifaces
try:
import netaddr
except ImportError:
apt_install('python-netaddr')
apt_update(fatal=True)
apt_install('python-netaddr', fatal=True)
import netaddr
@@ -51,7 +53,7 @@ def _validate_cidr(network):
def no_ip_found_error_out(network):
errmsg = ("No IP address found in network: %s" % network)
errmsg = ("No IP address found in network(s): %s" % network)
raise ValueError(errmsg)
@@ -59,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
"""Get an IPv4 or IPv6 address within the network from the host.
:param network (str): CIDR presentation format. For example,
'192.168.1.0/24'.
'192.168.1.0/24'. Supports multiple networks as a space-delimited list.
:param fallback (str): If no address is found, return fallback.
:param fatal (boolean): If no address is found, fallback is not
set and fatal is True then exit(1).
@@ -73,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
else:
return None
_validate_cidr(network)
network = netaddr.IPNetwork(network)
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if network.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask']
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network:
return str(cidr.ip)
networks = network.split() or [network]
for network in networks:
_validate_cidr(network)
network = netaddr.IPNetwork(network)
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if network.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask']
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
if cidr in network:
return str(cidr.ip)
if fallback is not None:
return fallback
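# Example: get_address_in_network('192.168.1.0/24 10.5.0.0/16') scans the
# space-delimited networks in order and returns the first local address
# found in either CIDR, or the fallback when none matches.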
@@ -187,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
def resolve_network_cidr(ip_address):
'''
Resolves the full address cidr of an ip_address based on
configured network interfaces
'''
netmask = get_netmask_for_address(ip_address)
return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
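# Example: with a local interface configured as 10.5.0.7/255.255.0.0,
# resolve_network_cidr('10.5.0.7') returns '10.5.0.0/16'.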
def format_ipv6_addr(address):
"""If address is IPv6, wrap it in '[]' otherwise return None.
@@ -435,8 +448,12 @@ def get_hostname(address, fqdn=True):
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
try:
result = socket.gethostbyaddr(address)[0]
except:
return None
else:
result = address
@@ -448,3 +465,18 @@ def get_hostname(address, fqdn=True):
return result
else:
return result.split('.')[0]
def port_has_listener(address, port):
"""
Returns True if the address:port is open and being listened to,
else False.
@param address: an IP address or hostname
@param port: integer port
Note: calls 'nc' via a subprocess call
"""
cmd = ['nc', '-z', address, str(port)]
result = subprocess.call(cmd)
return not bool(result)
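# Example: port_has_listener('127.0.0.1', 5672) runs 'nc -z 127.0.0.1 5672'
# and returns True only if something is accepting connections on that port.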

View File

@@ -25,10 +25,14 @@ from charmhelpers.core.host import (
)
def add_bridge(name):
def add_bridge(name, datapath_type=None):
''' Add the named bridge to openvswitch '''
log('Creating bridge {}'.format(name))
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
if datapath_type is not None:
cmd += ['--', 'set', 'bridge', name,
'datapath_type={}'.format(datapath_type)]
subprocess.check_call(cmd)
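# Example: add_bridge('br-int', datapath_type='netdev') is equivalent to:
#     ovs-vsctl -- --may-exist add-br br-int -- \
#         set bridge br-int datapath_type=netdev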
def del_bridge(name):

View File

@@ -40,7 +40,9 @@ Examples:
import re
import os
import subprocess
from charmhelpers.core import hookenv
from charmhelpers.core.kernel import modprobe, is_module_loaded
__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
@@ -82,14 +84,11 @@ def is_ipv6_ok(soft_fail=False):
# do we have IPv6 in the machine?
if os.path.isdir('/proc/sys/net/ipv6'):
# is ip6tables kernel module loaded?
lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
if len(matches) == 0:
if not is_module_loaded('ip6_tables'):
# ip6tables support isn't complete, let's try to load it
try:
subprocess.check_output(['modprobe', 'ip6_tables'],
universal_newlines=True)
# great, we could load the module
modprobe('ip6_tables')
# great, we can load the module
return True
except subprocess.CalledProcessError as ex:
hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,

View File

@@ -14,12 +14,18 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
@@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None, stable=True):
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
@@ -38,26 +47,55 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# out.
self.current_next = "trusty"
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb']
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the lp:~openstack-charmers namespace
base_charms = ['mysql', 'mongodb', 'nrpe']
# Force these charms to current series even when using an older series.
# ie. Use trusty/nrpe even when series is precise, as the P charm
# does not possess the necessary external master config and hooks.
force_series_current = ['nrpe']
if self.series in ['precise', 'trusty']:
base_series = self.series
else:
base_series = self.current_next
if self.stable:
for svc in other_services:
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if self.stable:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
for svc in other_services:
else:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
@@ -66,10 +104,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -77,29 +118,105 @@ class OpenStackAmuletDeployment(AmuletDeployment):
services = other_services
services.append(this_service)
# Charms which should use the source config option
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
# Most OpenStack subordinate charms do not expose an origin option
# as that is controlled by the principal.
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
'ceph-osd', 'ceph-radosgw', 'ceph-mon']
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + ignore:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in ignore:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=1800):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjunction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjunction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
self.log.info('Waiting for extended status on units...')
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
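# Hypothetical call from a charm's amulet test (self being a deployment
# subclass): block up to 900s for every service except mysql to report a
# 'ready' workload status.
#
#     self._auto_wait_for_status(exclude_services=['mysql'], timeout=900)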
def _get_openstack_release(self):
"""Get openstack release.
@@ -111,7 +228,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty) = range(12)
self.wily_liberty, self.trusty_mitaka,
self.xenial_mitaka) = range(14)
releases = {
('precise', None): self.precise_essex,
@@ -123,9 +241,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty}
('wily', None): self.wily_liberty,
('xenial', None): self.xenial_mitaka}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
@@ -142,6 +262,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]

View File

@@ -18,6 +18,7 @@ import amulet
import json
import logging
import os
import re
import six
import time
import urllib
@@ -26,7 +27,12 @@ import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from keystoneclient.auth.identity import v3 as keystone_id_v3
from keystoneclient import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3
import novaclient.client as nova_client
import pika
import swiftclient
from charmhelpers.contrib.amulet.utils import (
@@ -36,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG
ERROR = logging.ERROR
NOVA_CLIENT_VERSION = "2"
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@@ -137,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
def validate_user_data(self, expected, actual, api_version=None):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
@@ -148,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
if e['name'] == act.name:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'id': act.id}
if api_version == 3:
a['default_project_id'] = getattr(act,
'default_project_id',
'none')
else:
a['tenantId'] = act.tenantId
found = True
ret = self._validate_dict_data(e, a)
if ret:
@@ -186,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
tenant=None, api_version=None,
keystone_ip=None):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
if not keystone_ip:
keystone_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
if not api_version or api_version == 2:
ep = base_ep + "/v2.0"
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
else:
ep = base_ep + "/v3"
auth = keystone_id_v3.Password(
user_domain_name='admin_domain',
username=user,
password=password,
domain_name='admin_domain',
auth_url=ep,
)
sess = keystone_session.Session(auth=auth)
return keystone_client_v3.Client(session=sess)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
@@ -223,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):
@@ -602,3 +631,382 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Ceph {} samples (OK): '
'{}'.format(sample_type, samples))
return None
# rabbitmq/amqp specific helpers:
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
"""Wait for rmq units extended status to show cluster readiness,
after an optional initial sleep period. Initial sleep is likely
necessary to be effective following a config change, as status
message may not instantly update to non-ready."""
if init_sleep:
time.sleep(init_sleep)
message = re.compile('^Unit is ready and clustered$')
deployment._auto_wait_for_status(message=message,
timeout=timeout,
include_only=['rabbitmq-server'])
def add_rmq_test_user(self, sentry_units,
username="testuser1", password="changeme"):
"""Add a test user via the first rmq juju unit, check connection as
the new user against all sentry units.
:param sentry_units: list of sentry unit pointers
:param username: amqp user name, default to testuser1
:param password: amqp user password
:returns: None if successful. Raise on error.
"""
self.log.debug('Adding rmq user ({})...'.format(username))
# Check that user does not already exist
cmd_user_list = 'rabbitmqctl list_users'
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
if username in output:
self.log.warning('User ({}) already exists, returning '
'gracefully.'.format(username))
return
perms = '".*" ".*" ".*"'
cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
'rabbitmqctl set_permissions {} {}'.format(username, perms)]
# Add user via first unit
for cmd in cmds:
output, _ = self.run_cmd_unit(sentry_units[0], cmd)
# Check connection against the other sentry_units
self.log.debug('Checking user connect against units...')
for sentry_unit in sentry_units:
connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
username=username,
password=password)
connection.close()
def delete_rmq_test_user(self, sentry_units, username="testuser1"):
"""Delete a rabbitmq user via the first rmq juju unit.
:param sentry_units: list of sentry unit pointers
:param username: amqp user name, default to testuser1
:returns: None if successful or no such user.
"""
self.log.debug('Deleting rmq user ({})...'.format(username))
# Check that the user exists
cmd_user_list = 'rabbitmqctl list_users'
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
if username not in output:
self.log.warning('User ({}) does not exist, returning '
'gracefully.'.format(username))
return
# Delete the user
cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
def get_rmq_cluster_status(self, sentry_unit):
"""Execute rabbitmq cluster status command on a unit and return
the full output.
:param unit: sentry unit
:returns: String containing console output of cluster status command
"""
cmd = 'rabbitmqctl cluster_status'
output, _ = self.run_cmd_unit(sentry_unit, cmd)
self.log.debug('{} cluster_status:\n{}'.format(
sentry_unit.info['unit_name'], output))
return str(output)
def get_rmq_cluster_running_nodes(self, sentry_unit):
"""Parse rabbitmqctl cluster_status output string, return list of
running rabbitmq cluster nodes.
:param unit: sentry unit
:returns: List containing node names of running nodes
"""
# NOTE(beisner): rabbitmqctl cluster_status output is not
# json-parsable, do string chop foo, then json.loads that.
str_stat = self.get_rmq_cluster_status(sentry_unit)
if 'running_nodes' in str_stat:
pos_start = str_stat.find("{running_nodes,") + 15
pos_end = str_stat.find("]},", pos_start) + 1
str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
run_nodes = json.loads(str_run_nodes)
return run_nodes
else:
return []
def validate_rmq_cluster_running_nodes(self, sentry_units):
"""Check that all rmq unit hostnames are represented in the
cluster_status output of all units.
:param sentry_units: list of sentry unit pointers (all rmq units)
:returns: None if successful, otherwise return error message
"""
host_names = self.get_unit_hostnames(sentry_units)
errors = []
# Query every unit for cluster_status running nodes
for query_unit in sentry_units:
query_unit_name = query_unit.info['unit_name']
running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
# Confirm that every unit is represented in the queried unit's
# cluster_status running nodes output.
for validate_unit in sentry_units:
val_host_name = host_names[validate_unit.info['unit_name']]
val_node_name = 'rabbit@{}'.format(val_host_name)
if val_node_name not in running_nodes:
errors.append('Cluster member check failed on {}: {} not '
'in {}\n'.format(query_unit_name,
val_node_name,
running_nodes))
if errors:
return ''.join(errors)
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
"""Check a single juju rmq unit for ssl and port in the config file."""
host = sentry_unit.info['public-address']
unit_name = sentry_unit.info['unit_name']
conf_file = '/etc/rabbitmq/rabbitmq.config'
conf_contents = str(self.file_contents_safe(sentry_unit,
conf_file, max_wait=16))
# Checks
conf_ssl = 'ssl' in conf_contents
conf_port = str(port) in conf_contents
# Port explicitly checked in config
if port and conf_port and conf_ssl:
self.log.debug('SSL is enabled @{}:{} '
'({})'.format(host, port, unit_name))
return True
elif port and not conf_port and conf_ssl:
self.log.debug('SSL is enabled @{} but not on port {} '
'({})'.format(host, port, unit_name))
return False
# Port not checked (useful when checking that ssl is disabled)
elif not port and conf_ssl:
self.log.debug('SSL is enabled @{}:{} '
'({})'.format(host, port, unit_name))
return True
elif not conf_ssl:
self.log.debug('SSL not enabled @{}:{} '
'({})'.format(host, port, unit_name))
return False
else:
msg = ('Unknown condition when checking SSL status @{}:{} '
'({})'.format(host, port, unit_name))
amulet.raise_status(amulet.FAIL, msg)
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
"""Check that ssl is enabled on rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:param port: optional ssl port override to validate
:returns: None if successful, otherwise return error message
"""
for sentry_unit in sentry_units:
if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
return ('Unexpected condition: ssl is disabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None
def validate_rmq_ssl_disabled_units(self, sentry_units):
"""Check that ssl is enabled on listed rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:returns: True if successful. Raise on error.
"""
for sentry_unit in sentry_units:
if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
return ('Unexpected condition: ssl is enabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None
def configure_rmq_ssl_on(self, sentry_units, deployment,
port=None, max_wait=60):
"""Turn ssl charm config option on, with optional non-default
ssl port specification. Confirm that it is enabled on every
unit.
:param sentry_units: list of sentry units
:param deployment: amulet deployment object pointer
:param port: amqp port, use defaults if None
:param max_wait: maximum time to wait in seconds to confirm
:returns: None if successful. Raise on error.
"""
self.log.debug('Setting ssl charm config option: on')
# Enable RMQ SSL
config = {'ssl': 'on'}
if port:
config['ssl_port'] = port
deployment.d.configure('rabbitmq-server', config)
# Wait for unit status
self.rmq_wait_for_cluster(deployment)
# Confirm
tries = 0
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
while ret and tries < (max_wait / 4):
time.sleep(4)
self.log.debug('Attempt {}: {}'.format(tries, ret))
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
tries += 1
if ret:
amulet.raise_status(amulet.FAIL, ret)
def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
"""Turn ssl charm config option off, confirm that it is disabled
on every unit.
:param sentry_units: list of sentry units
:param deployment: amulet deployment object pointer
:param max_wait: maximum time to wait in seconds to confirm
:returns: None if successful. Raise on error.
"""
self.log.debug('Setting ssl charm config option: off')
# Disable RMQ SSL
config = {'ssl': 'off'}
deployment.d.configure('rabbitmq-server', config)
# Wait for unit status
self.rmq_wait_for_cluster(deployment)
# Confirm
tries = 0
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
while ret and tries < (max_wait / 4):
time.sleep(4)
self.log.debug('Attempt {}: {}'.format(tries, ret))
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
tries += 1
if ret:
amulet.raise_status(amulet.FAIL, ret)
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
port=None, fatal=True,
username="testuser1", password="changeme"):
"""Establish and return a pika amqp connection to the rabbitmq service
running on a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:param fatal: boolean, default to True (raises on connect error)
:param username: amqp user name, default to testuser1
:param password: amqp user password
:returns: pika amqp connection pointer or None if failed and non-fatal
"""
host = sentry_unit.info['public-address']
unit_name = sentry_unit.info['unit_name']
# Default port logic if port is not specified
if ssl and not port:
port = 5671
elif not ssl and not port:
port = 5672
self.log.debug('Connecting to amqp on {}:{} ({}) as '
'{}...'.format(host, port, unit_name, username))
try:
credentials = pika.PlainCredentials(username, password)
parameters = pika.ConnectionParameters(host=host, port=port,
credentials=credentials,
ssl=ssl,
connection_attempts=3,
retry_delay=5,
socket_timeout=1)
connection = pika.BlockingConnection(parameters)
assert connection.server_properties['product'] == 'RabbitMQ'
self.log.debug('Connect OK')
return connection
except Exception as e:
msg = ('amqp connection failed to {}:{} as '
'{} ({})'.format(host, port, username, str(e)))
if fatal:
amulet.raise_status(amulet.FAIL, msg)
else:
self.log.warn(msg)
return None
def publish_amqp_message_by_unit(self, sentry_unit, message,
queue="test", ssl=False,
username="testuser1",
password="changeme",
port=None):
"""Publish an amqp message to a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param message: amqp message string
:param queue: message queue, default to test
:param username: amqp user name, default to testuser1
:param password: amqp user password
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:returns: None. Raises exception if publish failed.
"""
self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
message))
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
port=port,
username=username,
password=password)
# NOTE(beisner): extra debug here re: pika hang potential:
# https://github.com/pika/pika/issues/297
# https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
self.log.debug('Defining channel...')
channel = connection.channel()
self.log.debug('Declaring queue...')
channel.queue_declare(queue=queue, auto_delete=False, durable=True)
self.log.debug('Publishing message...')
channel.basic_publish(exchange='', routing_key=queue, body=message)
self.log.debug('Closing channel...')
channel.close()
self.log.debug('Closing connection...')
connection.close()
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
username="testuser1",
password="changeme",
ssl=False, port=None):
"""Get an amqp message from a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param queue: message queue, default to test
:param username: amqp user name, default to testuser1
:param password: amqp user password
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:returns: amqp message body as string. Raise if get fails.
"""
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
port=port,
username=username,
password=password)
channel = connection.channel()
method_frame, _, body = channel.basic_get(queue)
if method_frame:
self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
body))
channel.basic_ack(method_frame.delivery_tag)
channel.close()
connection.close()
return body
else:
msg = 'No message retrieved.'
amulet.raise_status(amulet.FAIL, msg)
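# Hypothetical round-trip smoke test combining the two helpers above,
# where 'unit' is a rmq sentry unit pointer:
#
#     self.publish_amqp_message_by_unit(unit, 'test message', queue='test')
#     assert self.get_amqp_message_by_unit(unit, queue='test') == 'test message'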

View File

@@ -14,12 +14,13 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import glob
import json
import os
import re
import time
from base64 import b64decode
from subprocess import check_call
from subprocess import check_call, CalledProcessError
import six
import yaml
@@ -44,16 +45,20 @@ from charmhelpers.core.hookenv import (
INFO,
WARNING,
ERROR,
status_set,
)
from charmhelpers.core.sysctl import create as sysctl_create
from charmhelpers.core.strutils import bool_from_string
from charmhelpers.core.host import (
get_bond_master,
is_phy_iface,
list_nics,
get_nic_hwaddr,
mkdir,
write_file,
pwgen,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
@@ -84,6 +89,14 @@ from charmhelpers.contrib.network.ip import (
is_bridge_member,
)
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.core.unitdata import kv
try:
import psutil
except ImportError:
apt_install('python-psutil', fatal=True)
import psutil
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
@@ -192,10 +205,50 @@ def config_flags_parser(config_flags):
class OSContextGenerator(object):
"""Base class for all context generators."""
interfaces = []
related = False
complete = False
missing_data = []
def __call__(self):
raise NotImplementedError
def context_complete(self, ctxt):
"""Check for missing data for the required context data.
Set self.missing_data if it exists and return False.
Set self.complete if no missing data and return True.
"""
# Fresh start
self.complete = False
self.missing_data = []
for k, v in six.iteritems(ctxt):
if v is None or v == '':
if k not in self.missing_data:
self.missing_data.append(k)
if self.missing_data:
self.complete = False
log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
else:
self.complete = True
return self.complete
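# Example: context_complete({'host': '10.0.0.1', 'port': None}) returns
# False and records 'port' in self.missing_data, while a ctxt with no
# empty values returns True and sets self.complete.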
def get_related(self):
"""Check if any of the context interfaces have relation ids.
Set self.related and return True if one of the interfaces
has relation ids.
"""
# Fresh start
self.related = False
try:
for interface in self.interfaces:
if relation_ids(interface):
self.related = True
return self.related
except AttributeError as e:
log("{}: {}".format(self, e), level=INFO)
return self.related
class SharedDBContext(OSContextGenerator):
interfaces = ['shared-db']
@@ -211,6 +264,7 @@ class SharedDBContext(OSContextGenerator):
self.database = database
self.user = user
self.ssl_dir = ssl_dir
self.rel_name = self.interfaces[0]
def __call__(self):
self.database = self.database or config('database')
@@ -244,6 +298,7 @@ class SharedDBContext(OSContextGenerator):
password_setting = self.relation_prefix + '_password'
for rid in relation_ids(self.interfaces[0]):
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
host = rdata.get('db_host')
@@ -255,7 +310,7 @@ class SharedDBContext(OSContextGenerator):
'database_password': rdata.get(password_setting),
'database_type': 'mysql'
}
if context_complete(ctxt):
if self.context_complete(ctxt):
db_ssl(rdata, ctxt, self.ssl_dir)
return ctxt
return {}
@@ -276,6 +331,7 @@ class PostgresqlDBContext(OSContextGenerator):
ctxt = {}
for rid in relation_ids(self.interfaces[0]):
self.related = True
for unit in related_units(rid):
rel_host = relation_get('host', rid=rid, unit=unit)
rel_user = relation_get('user', rid=rid, unit=unit)
@@ -285,7 +341,7 @@ class PostgresqlDBContext(OSContextGenerator):
'database_user': rel_user,
'database_password': rel_passwd,
'database_type': 'postgresql'}
if context_complete(ctxt):
if self.context_complete(ctxt):
return ctxt
return {}
@@ -346,6 +402,7 @@ class IdentityServiceContext(OSContextGenerator):
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
@@ -354,6 +411,7 @@ class IdentityServiceContext(OSContextGenerator):
auth_host = format_ipv6_addr(auth_host) or auth_host
svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'),
'service_host': serv_host,
'auth_host': auth_host,
@@ -362,9 +420,10 @@ class IdentityServiceContext(OSContextGenerator):
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol})
'auth_protocol': auth_protocol,
'api_version': api_version})
if context_complete(ctxt):
if self.context_complete(ctxt):
# NOTE(jamespage) this is required for >= icehouse
# so a missing value just indicates keystone needs
# upgrading
@@ -403,6 +462,7 @@ class AMQPContext(OSContextGenerator):
ctxt = {}
for rid in relation_ids(self.rel_name):
ha_vip_only = False
self.related = True
for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit):
ctxt['clustered'] = True
@@ -435,7 +495,7 @@ class AMQPContext(OSContextGenerator):
ha_vip_only = relation_get('ha-vip-only',
rid=rid, unit=unit) is not None
if context_complete(ctxt):
if self.context_complete(ctxt):
if 'rabbit_ssl_ca' in ctxt:
if not self.ssl_dir:
log("Charm not setup for ssl support but ssl ca "
@@ -467,7 +527,7 @@ class AMQPContext(OSContextGenerator):
ctxt['oslo_messaging_flags'] = config_flags_parser(
oslo_messaging_flags)
if not context_complete(ctxt):
if not self.complete:
return {}
return ctxt
@@ -483,13 +543,15 @@ class CephContext(OSContextGenerator):
log('Generating template context for ceph', level=DEBUG)
mon_hosts = []
auth = None
key = None
use_syslog = str(config('use-syslog')).lower()
ctxt = {
'use_syslog': str(config('use-syslog')).lower()
}
for rid in relation_ids('ceph'):
for unit in related_units(rid):
auth = relation_get('auth', rid=rid, unit=unit)
key = relation_get('key', rid=rid, unit=unit)
if not ctxt.get('auth'):
ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
if not ctxt.get('key'):
ctxt['key'] = relation_get('key', rid=rid, unit=unit)
ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
unit=unit)
unit_priv_addr = relation_get('private-address', rid=rid,
@@ -498,15 +560,12 @@ class CephContext(OSContextGenerator):
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
mon_hosts.append(ceph_addr)
ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
'auth': auth,
'key': key,
'use_syslog': use_syslog}
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph')
if not context_complete(ctxt):
if not self.context_complete(ctxt):
return {}
ensure_packages(['ceph-common'])
@@ -579,15 +638,28 @@ class HAProxyContext(OSContextGenerator):
if config('haproxy-client-timeout'):
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
if config('haproxy-queue-timeout'):
ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
if config('haproxy-connect-timeout'):
ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
if config('prefer-ipv6'):
ctxt['ipv6'] = True
ctxt['local_host'] = 'ip6-localhost'
ctxt['haproxy_host'] = '::'
ctxt['stat_port'] = ':::8888'
else:
ctxt['local_host'] = '127.0.0.1'
ctxt['haproxy_host'] = '0.0.0.0'
ctxt['stat_port'] = ':8888'
ctxt['stat_port'] = '8888'
db = kv()
ctxt['stat_password'] = db.get('stat-password')
if not ctxt['stat_password']:
ctxt['stat_password'] = db.set('stat-password',
pwgen(32))
db.flush()
for frontend in cluster_hosts:
if (len(cluster_hosts[frontend]['backends']) > 1 or
@@ -878,19 +950,6 @@ class NeutronContext(OSContextGenerator):
return calico_ctxt
def pg_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = {'core_plugin': driver,
'neutron_plugin': 'plumgrid',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config}
return ovs_ctxt
def neutron_ctxt(self):
if https():
proto = 'https'
@@ -906,6 +965,31 @@ class NeutronContext(OSContextGenerator):
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
return ctxt
def pg_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = {'core_plugin': driver,
'neutron_plugin': 'plumgrid',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config}
return ovs_ctxt
def midonet_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
midonet_config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
mido_ctxt = {'core_plugin': driver,
'neutron_plugin': 'midonet',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': midonet_config}
return mido_ctxt
def __call__(self):
if self.network_manager not in ['quantum', 'neutron']:
return {}
@@ -927,6 +1011,8 @@ class NeutronContext(OSContextGenerator):
ctxt.update(self.nuage_ctxt())
elif self.plugin == 'plumgrid':
ctxt.update(self.pg_ctxt())
elif self.plugin == 'midonet':
ctxt.update(self.midonet_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
@@ -938,7 +1024,6 @@ class NeutronContext(OSContextGenerator):
class NeutronPortContext(OSContextGenerator):
NIC_PREFIXES = ['eth', 'bond']
def resolve_ports(self, ports):
"""Resolve NICs not yet bound to bridge(s)
@ -950,7 +1035,18 @@ class NeutronPortContext(OSContextGenerator):
hwaddr_to_nic = {}
hwaddr_to_ip = {}
for nic in list_nics(self.NIC_PREFIXES):
for nic in list_nics():
# Ignore virtual interfaces (bond masters will be identified from
# their slaves)
if not is_phy_iface(nic):
continue
_nic = get_bond_master(nic)
if _nic:
log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
level=DEBUG)
nic = _nic
hwaddr = get_nic_hwaddr(nic)
hwaddr_to_nic[hwaddr] = nic
addresses = get_ipv4_addr(nic, fatal=False)
@ -976,7 +1072,8 @@ class NeutronPortContext(OSContextGenerator):
# trust it to be the real external network).
resolved.append(entry)
return resolved
# Ensure no duplicates
return list(set(resolved))
class OSConfigFlagContext(OSContextGenerator):
@ -1016,6 +1113,20 @@ class OSConfigFlagContext(OSContextGenerator):
config_flags_parser(config_flags)}
class LibvirtConfigFlagsContext(OSContextGenerator):
"""
This context provides support for extending
the libvirt section through user-defined flags.
"""
def __call__(self):
ctxt = {}
libvirt_flags = config('libvirt-flags')
if libvirt_flags:
ctxt['libvirt_flags'] = config_flags_parser(
libvirt_flags)
return ctxt
class SubordinateConfigContext(OSContextGenerator):
"""
@ -1048,7 +1159,7 @@ class SubordinateConfigContext(OSContextGenerator):
ctxt = {
... other context ...
'subordinate_config': {
'subordinate_configuration': {
'DEFAULT': {
'key1': 'value1',
},
@ -1066,13 +1177,22 @@ class SubordinateConfigContext(OSContextGenerator):
:param config_file : Service's config file to query sections
:param interface : Subordinate interface to inspect
"""
self.service = service
self.config_file = config_file
self.interface = interface
if isinstance(service, list):
self.services = service
else:
self.services = [service]
if isinstance(interface, list):
self.interfaces = interface
else:
self.interfaces = [interface]
def __call__(self):
ctxt = {'sections': {}}
for rid in relation_ids(self.interface):
rids = []
for interface in self.interfaces:
rids.extend(relation_ids(interface))
for rid in rids:
for unit in related_units(rid):
sub_config = relation_get('subordinate_configuration',
rid=rid, unit=unit)
@ -1080,33 +1200,37 @@ class SubordinateConfigContext(OSContextGenerator):
try:
sub_config = json.loads(sub_config)
except:
log('Could not parse JSON from subordinate_config '
'setting from %s' % rid, level=ERROR)
log('Could not parse JSON from '
'subordinate_configuration setting from %s'
% rid, level=ERROR)
continue
if self.service not in sub_config:
log('Found subordinate_config on %s but it contained'
'nothing for %s service' % (rid, self.service),
level=INFO)
continue
for service in self.services:
if service not in sub_config:
log('Found subordinate_configuration on %s but it '
'contained nothing for %s service'
% (rid, service), level=INFO)
continue
sub_config = sub_config[self.service]
if self.config_file not in sub_config:
log('Found subordinate_config on %s but it contained'
'nothing for %s' % (rid, self.config_file),
level=INFO)
continue
sub_config = sub_config[self.config_file]
for k, v in six.iteritems(sub_config):
if k == 'sections':
for section, config_dict in six.iteritems(v):
log("adding section '%s'" % (section),
level=DEBUG)
ctxt[k][section] = config_dict
else:
ctxt[k] = v
sub_config = sub_config[service]
if self.config_file not in sub_config:
log('Found subordinate_configuration on %s but it '
'contained nothing for %s'
% (rid, self.config_file), level=INFO)
continue
sub_config = sub_config[self.config_file]
for k, v in six.iteritems(sub_config):
if k == 'sections':
for section, config_list in six.iteritems(v):
log("adding section '%s'" % (section),
level=DEBUG)
if ctxt[k].get(section):
ctxt[k][section].extend(config_list)
else:
ctxt[k][section] = config_list
else:
ctxt[k] = v
log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
return ctxt
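As a rough sketch of the merge behaviour above (the 'nova' service name, config path and keys are invented for the example), a subordinate publishes JSON like this, and section lists from multiple units are now extended rather than overwritten:

import json

# Hypothetical 'subordinate_configuration' relation payload.
payload = json.dumps({
    'nova': {
        '/etc/nova/nova.conf': {
            'sections': {
                'DEFAULT': [('key1', 'value1')],
            }
        }
    }
})

ctxt = {'sections': {}}
sub_config = json.loads(payload)['nova']['/etc/nova/nova.conf']
for section, config_list in sub_config['sections'].items():
    # Mirrors the extend-not-overwrite logic in __call__ above.
    ctxt['sections'].setdefault(section, []).extend(config_list)
print(ctxt)  # {'sections': {'DEFAULT': [['key1', 'value1']]}}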
@ -1143,13 +1267,11 @@ class WorkerConfigContext(OSContextGenerator):
@property
def num_cpus(self):
try:
from psutil import NUM_CPUS
except ImportError:
apt_install('python-psutil', fatal=True)
from psutil import NUM_CPUS
return NUM_CPUS
# NOTE: use cpu_count if present (16.04 support)
if hasattr(psutil, 'cpu_count'):
return psutil.cpu_count()
else:
return psutil.NUM_CPUS
def __call__(self):
multiplier = config('worker-multiplier') or 0
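The psutil change above can be exercised standalone; a minimal sketch of the same compatibility shim (the multiplier of 2 is only an illustration of how a worker count might be derived from it):

import psutil

def num_cpus():
    # psutil 2.0 (shipped on 16.04) replaced NUM_CPUS with cpu_count().
    if hasattr(psutil, 'cpu_count'):
        return psutil.cpu_count()
    return psutil.NUM_CPUS

workers = 2 * num_cpus()   # e.g. a worker-multiplier of 2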
@ -1283,15 +1405,19 @@ class DataPortContext(NeutronPortContext):
def __call__(self):
ports = config('data-port')
if ports:
# Map of {port/mac:bridge}
portmap = parse_data_port_mappings(ports)
ports = portmap.values()
ports = portmap.keys()
# Resolve provided ports or mac addresses and filter out those
# already attached to a bridge.
resolved = self.resolve_ports(ports)
# FIXME: is this necessary?
normalized = {get_nic_hwaddr(port): port for port in resolved
if port not in ports}
normalized.update({port: port for port in resolved
if port in ports})
if resolved:
return {bridge: normalized[port] for bridge, port in
return {normalized[port]: bridge for port, bridge in
six.iteritems(portmap) if port in normalized.keys()}
return None
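To make the direction flip concrete, here is a sketch with invented NIC names: parse_data_port_mappings() now yields {port: bridge}, resolve_ports() matches a configured MAC to a real NIC, and the context keys the result by NIC:

# data-port config: "br-data:eth0 br-ex:aa:bb:cc:dd:ee:ff" (invented)
portmap = {'eth0': 'br-data', 'aa:bb:cc:dd:ee:ff': 'br-ex'}
# Assume resolve_ports()/get_nic_hwaddr() matched the MAC to eth1:
normalized = {'eth0': 'eth0', 'aa:bb:cc:dd:ee:ff': 'eth1'}
ctxt = {normalized[port]: bridge
        for port, bridge in portmap.items() if port in normalized}
print(ctxt)  # {'eth0': 'br-data', 'eth1': 'br-ex'}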
@ -1302,12 +1428,22 @@ class PhyNICMTUContext(DataPortContext):
def __call__(self):
ctxt = {}
mappings = super(PhyNICMTUContext, self).__call__()
if mappings and mappings.values():
ports = mappings.values()
if mappings and mappings.keys():
ports = sorted(mappings.keys())
napi_settings = NeutronAPIContext()()
mtu = napi_settings.get('network_device_mtu')
all_ports = set()
# If any of the ports is a vlan device, its underlying device must
# have the mtu applied first.
for port in ports:
for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
lport = os.path.basename(lport)
all_ports.add(lport.split('_')[1])
all_ports = list(all_ports)
all_ports.extend(ports)
if mtu:
ctxt["devs"] = '\\n'.join(ports)
ctxt["devs"] = '\\n'.join(all_ports)
ctxt['mtu'] = mtu
return ctxt
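A standalone sketch of the sysfs lookup above (device names invented): for a vlan interface such as eth0.100, the kernel exposes a lower_eth0 link, and that underlying device needs the MTU applied first:

import glob
import os

ports = ['eth0.100']
all_ports = set()
for port in ports:
    # e.g. /sys/class/net/eth0.100/lower_eth0 -> underlying dev 'eth0'
    for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
        all_ports.add(os.path.basename(lport).split('_')[1])
devs = sorted(all_ports) + ports   # underlying devices first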
@ -1338,7 +1474,110 @@ class NetworkServiceContext(OSContextGenerator):
rdata.get('service_protocol') or 'http',
'auth_protocol':
rdata.get('auth_protocol') or 'http',
'api_version':
rdata.get('api_version') or '2.0',
}
if context_complete(ctxt):
if self.context_complete(ctxt):
return ctxt
return {}
class InternalEndpointContext(OSContextGenerator):
"""Internal endpoint context.
This context provides the endpoint type used for communication between
services, e.g. between Nova and Cinder internally. OpenStack uses public
endpoints by default, so this allows admins to optionally use internal
endpoints.
"""
def __call__(self):
return {'use_internal_endpoints': config('use-internal-endpoints')}
class AppArmorContext(OSContextGenerator):
"""Base class for apparmor contexts."""
def __init__(self):
self._ctxt = None
self.aa_profile = None
self.aa_utils_packages = ['apparmor-utils']
@property
def ctxt(self):
if self._ctxt is not None:
return self._ctxt
self._ctxt = self._determine_ctxt()
return self._ctxt
def _determine_ctxt(self):
"""
Validate that the aa-profile-mode setting is disable, enforce, or complain.
:return ctxt: Dictionary of the apparmor profile or None
"""
if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
ctxt = {'aa-profile-mode': config('aa-profile-mode')}
else:
ctxt = None
return ctxt
def __call__(self):
return self.ctxt
def install_aa_utils(self):
"""
Install packages required for apparmor configuration.
"""
log("Installing apparmor utils.")
ensure_packages(self.aa_utils_packages)
def manually_disable_aa_profile(self):
"""
Manually disable an apparmor profile.
If aa-profile-mode is set to disable (the default), this is required
because the template has been written but apparmor is not yet aware of
the profile, so aa-disable aa-profile fails. Without this, the profile
would kick into enforce mode on the next service restart.
"""
profile_path = '/etc/apparmor.d'
disable_path = '/etc/apparmor.d/disable'
if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
os.symlink(os.path.join(profile_path, self.aa_profile),
os.path.join(disable_path, self.aa_profile))
def setup_aa_profile(self):
"""
Setup an apparmor profile.
The ctxt dictionary will contain the apparmor profile mode and
the apparmor profile name.
Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
the apparmor profile.
"""
self()
if not self.ctxt:
log("Not enabling apparmor Profile")
return
self.install_aa_utils()
cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
cmd.append(self.ctxt['aa-profile'])
log("Setting up the apparmor profile for {} in {} mode."
"".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
try:
check_call(cmd)
except CalledProcessError as e:
# If aa-profile-mode is set to disabled (default) manual
# disabling is required as the template has been written but
# apparmor is yet unaware of the profile and aa-disable aa-profile
# fails. If aa-disable learns to read profile files first this can
# be removed.
if self.ctxt['aa-profile-mode'] == 'disable':
log("Manually disabling the apparmor profile for {}."
"".format(self.ctxt['aa-profile']))
self.manually_disable_aa_profile()
return
status_set('blocked', "Apparmor profile {} failed to be set to {}."
"".format(self.ctxt['aa-profile'],
self.ctxt['aa-profile-mode']))
raise e
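Note that setup_aa_profile() reads self.ctxt['aa-profile'], which the base _determine_ctxt() does not populate, so a consuming charm is expected to subclass. A hypothetical sketch (the profile name is invented):

class NovaComputeAppArmorContext(AppArmorContext):
    def __init__(self):
        super(NovaComputeAppArmorContext, self).__init__()
        self.aa_profile = 'usr.bin.nova-compute'

    def _determine_ctxt(self):
        # Add the profile name that setup_aa_profile() expects.
        ctxt = super(NovaComputeAppArmorContext, self)._determine_ctxt()
        if ctxt:
            ctxt['aa-profile'] = self.aa_profile
        return ctxt

NovaComputeAppArmorContext().setup_aa_profile()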


@ -14,16 +14,19 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@ -33,16 +36,19 @@ ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured.
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoint type
"""
@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered:
if not net_addr:
# If no net-splits defined, we expect a single vip
resolved_address = vips[0]
else:
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
# If no net-splits are configured and there is no support for
# extra bindings/network spaces, we expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
resolved_address = get_address_in_network(net_addr, fallback_addr)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "


@ -50,7 +50,7 @@ def determine_dkms_package():
if kernel_version() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
return [headers_package(), 'openvswitch-datapath-dkms']
# legacy
@ -70,7 +70,7 @@ def quantum_plugins():
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
'packages': [determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
@ -111,7 +111,7 @@ def neutron_plugins():
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
'packages': [determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
@ -155,7 +155,7 @@ def neutron_plugins():
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [[headers_package()] + determine_dkms_package(),
'packages': [determine_dkms_package(),
['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
@ -174,7 +174,7 @@ def neutron_plugins():
'neutron-dhcp-agent',
'nova-api-metadata',
'etcd'],
'packages': [[headers_package()] + determine_dkms_package(),
'packages': [determine_dkms_package(),
['calico-compute',
'bird',
'neutron-dhcp-agent',
@ -209,6 +209,20 @@ def neutron_plugins():
'server_packages': ['neutron-server',
'neutron-plugin-plumgrid'],
'server_services': ['neutron-server']
},
'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [determine_dkms_package()],
'server_packages': ['neutron-server',
'python-neutron-plugin-midonet'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
@ -219,6 +233,20 @@ def neutron_plugins():
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
if release >= 'kilo':
plugins['midonet']['driver'] = (
'neutron.plugins.midonet.plugin.MidonetPluginV2')
if release >= 'liberty':
plugins['midonet']['driver'] = (
'midonet.neutron.plugin_v1.MidonetPluginV2')
plugins['midonet']['server_packages'].remove(
'python-neutron-plugin-midonet')
plugins['midonet']['server_packages'].append(
'python-networking-midonet')
plugins['plumgrid']['driver'] = (
'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
plugins['plumgrid']['server_packages'].remove(
'neutron-plugin-plumgrid')
return plugins
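For reference, the midonet driver entry above ends up varying by release roughly as follows (values copied from the mapping in this diff):

midonet_driver_by_release = {
    'juno (and earlier supported)': 'midonet.neutron.plugin.MidonetPluginV2',
    'kilo': 'neutron.plugins.midonet.plugin.MidonetPluginV2',
    'liberty onwards': 'midonet.neutron.plugin_v1.MidonetPluginV2',
}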
@ -269,17 +297,30 @@ def network_manager():
return 'neutron'
def parse_mappings(mappings):
def parse_mappings(mappings, key_rvalue=False):
"""By default mappings are lvalue keyed.
If key_rvalue is True, the mapping will be reversed to allow multiple
configs for the same lvalue.
"""
parsed = {}
if mappings:
mappings = mappings.split()
for m in mappings:
p = m.partition(':')
key = p[0].strip()
if p[1]:
parsed[key] = p[2].strip()
if key_rvalue:
key_index = 2
val_index = 0
# if there is no rvalue skip to next
if not p[1]:
continue
else:
parsed[key] = ''
key_index = 0
val_index = 2
key = p[key_index].strip()
parsed[key] = p[val_index].strip()
return parsed
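A compact restatement with worked inputs (bridge and port names invented) shows what key_rvalue buys: several ports can map to one bridge without clobbering a dict key:

def parse_mappings_demo(mappings, key_rvalue=False):
    parsed = {}
    for m in (mappings or '').split():
        lval, sep, rval = m.partition(':')
        if key_rvalue:
            if not sep:
                continue
            parsed[rval.strip()] = lval.strip()
        else:
            parsed[lval.strip()] = rval.strip() if sep else ''
    return parsed

print(parse_mappings_demo('physnet1:br-ex'))
# {'physnet1': 'br-ex'}
print(parse_mappings_demo('br-data:eth0 br-data:eth1', key_rvalue=True))
# {'eth0': 'br-data', 'eth1': 'br-data'}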
@ -297,25 +338,25 @@ def parse_bridge_mappings(mappings):
def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of bridge:port mappings.
Mappings must be a space-delimited list of bridge:port.
Returns dict of the form {bridge:port}.
Returns dict of the form {port:bridge} where ports may be mac addresses or
interface names.
"""
_mappings = parse_mappings(mappings)
# NOTE(dosaboy): we use the rvalue as the key to allow multiple values
# to be proposed for <port>, since it may be a mac address that differs
# across units, thus allowing first-known-good to be chosen.
_mappings = parse_mappings(mappings, key_rvalue=True)
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {default_bridge: mappings.split()[0]}
bridges = _mappings.keys()
ports = _mappings.values()
if len(set(bridges)) != len(bridges):
raise Exception("It is not allowed to have more than one port "
"configured on the same bridge")
_mappings = {mappings.split()[0]: default_bridge}
ports = _mappings.keys()
if len(set(ports)) != len(ports):
raise Exception("It is not allowed to have the same port configured "
"on more than one bridge")


@ -18,7 +18,7 @@ import os
import six
from charmhelpers.fetch import apt_install
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import (
log,
ERROR,
@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
apt_update(fatal=True)
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
@ -112,7 +113,7 @@ class OSConfigTemplate(object):
def complete_contexts(self):
'''
Return a list of interfaces that have atisfied contexts.
Return a list of interfaces that have satisfied contexts.
'''
if self._complete_contexts:
return self._complete_contexts
@ -293,3 +294,30 @@ class OSConfigRenderer(object):
[interfaces.extend(i.complete_contexts())
for i in six.itervalues(self.templates)]
return interfaces
def get_incomplete_context_data(self, interfaces):
'''
Return dictionary of relation status of interfaces and any missing
required context data. Example:
{'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
'zeromq-configuration': {'related': False}}
'''
incomplete_context_data = {}
for i in six.itervalues(self.templates):
for context in i.contexts:
for interface in interfaces:
related = False
if interface in context.interfaces:
related = context.get_related()
missing_data = context.missing_data
if missing_data:
incomplete_context_data[interface] = {'missing_data': missing_data}
if related:
if incomplete_context_data.get(interface):
incomplete_context_data[interface].update({'related': True})
else:
incomplete_context_data[interface] = {'related': True}
else:
incomplete_context_data[interface] = {'related': False}
return incomplete_context_data
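One way a charm might consume this, sketched under the assumption that status_set-style workload reporting is wanted (not part of this diff):

def assess_status(renderer, interfaces):
    data = renderer.get_incomplete_context_data(interfaces)
    unrelated = sorted(i for i, d in data.items() if not d.get('related'))
    incomplete = sorted(i for i, d in data.items() if d.get('missing_data'))
    if unrelated:
        return 'blocked', 'missing relations: ' + ', '.join(unrelated)
    if incomplete:
        return 'waiting', 'incomplete relations: ' + ', '.join(incomplete)
    return 'active', 'Unit is ready'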

File diff suppressed because it is too large


@ -1,268 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import json
import six
from charmhelpers.core.hookenv import relation_id as current_relation_id
from charmhelpers.core.hookenv import (
is_relation_made,
relation_ids,
relation_get as _relation_get,
local_unit,
relation_set as _relation_set,
leader_get as _leader_get,
leader_set,
is_leader,
)
"""
This helper provides functions to support use of a peer relation
for basic key/value storage, with the added benefit that all storage
can be replicated across peer units.
Requirement to use:
To use this, the "peer_echo()" method has to be called form the peer
relation's relation-changed hook:
@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
def cluster_relation_changed():
peer_echo()
Once this is done, you can use peer storage from anywhere:
@hooks.hook("some-hook")
def some_hook():
# You can store and retrieve key/values this way:
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
# There are peers available so we can work with peer storage
peer_store("mykey", "myvalue")
value = peer_retrieve("mykey")
print value
else:
print "No peers joind the relation, cannot share key/values :("
"""
def leader_get(attribute=None):
"""Wrapper to ensure that settings are migrated from the peer relation.
This is to support upgrading an environment that does not support
Juju leadership election to one that does.
If a setting is not extant in the leader-get but is on the relation-get
peer rel, it is migrated and marked as such so that it is not re-migrated.
"""
migration_key = '__leader_get_migrated_settings__'
if not is_leader():
return _leader_get(attribute=attribute)
settings_migrated = False
leader_settings = _leader_get(attribute=attribute)
previously_migrated = _leader_get(attribute=migration_key)
if previously_migrated:
migrated = set(json.loads(previously_migrated))
else:
migrated = set([])
try:
if migration_key in leader_settings:
del leader_settings[migration_key]
except TypeError:
pass
if attribute:
if attribute in migrated:
return leader_settings
# If attribute not present in leader db, check if this unit has set
# the attribute in the peer relation
if not leader_settings:
peer_setting = relation_get(attribute=attribute, unit=local_unit())
if peer_setting:
leader_set(settings={attribute: peer_setting})
leader_settings = peer_setting
if leader_settings:
settings_migrated = True
migrated.add(attribute)
else:
r_settings = relation_get(unit=local_unit())
if r_settings:
for key in set(r_settings.keys()).difference(migrated):
# Leader setting wins
if not leader_settings.get(key):
leader_settings[key] = r_settings[key]
settings_migrated = True
migrated.add(key)
if settings_migrated:
leader_set(**leader_settings)
if migrated and settings_migrated:
migrated = json.dumps(list(migrated))
leader_set(settings={migration_key: migrated})
return leader_settings
def relation_set(relation_id=None, relation_settings=None, **kwargs):
"""Attempt to use leader-set if supported in the current version of Juju,
otherwise falls back on relation-set.
Note that we only attempt to use leader-set if the provided relation_id is
a peer relation id or no relation id is provided (in which case we assume
we are within the peer relation context).
"""
try:
if relation_id in relation_ids('cluster'):
return leader_set(settings=relation_settings, **kwargs)
else:
raise NotImplementedError
except NotImplementedError:
return _relation_set(relation_id=relation_id,
relation_settings=relation_settings, **kwargs)
def relation_get(attribute=None, unit=None, rid=None):
"""Attempt to use leader-get if supported in the current version of Juju,
otherwise falls back on relation-get.
Note that we only attempt to use leader-get if the provided rid is a peer
relation id or no relation id is provided (in which case we assume we are
within the peer relation context).
"""
try:
if rid in relation_ids('cluster'):
return leader_get(attribute)
else:
raise NotImplementedError
except NotImplementedError:
return _relation_get(attribute=attribute, rid=rid, unit=unit)
def peer_retrieve(key, relation_name='cluster'):
"""Retrieve a named key from peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
return relation_get(attribute=key, rid=cluster_rid,
unit=local_unit())
else:
raise ValueError('Unable to detect'
'peer relation {}'.format(relation_name))
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
inc_list=None, exc_list=None):
""" Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
inc_list = inc_list if inc_list else []
exc_list = exc_list if exc_list else []
peerdb_settings = peer_retrieve('-', relation_name=relation_name)
matched = {}
if peerdb_settings is None:
return matched
for k, v in peerdb_settings.items():
full_prefix = prefix + delimiter
if k.startswith(full_prefix):
new_key = k.replace(full_prefix, '')
if new_key in exc_list:
continue
if new_key in inc_list or len(inc_list) == 0:
matched[new_key] = v
return matched
def peer_store(key, value, relation_name='cluster'):
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name))
def peer_echo(includes=None, force=False):
"""Echo filtered attributes back onto the same relation for storage.
This is a requirement to use the peerstorage module - it needs to be called
from the peer relation's changed hook.
If Juju leader support exists this will be a noop unless force is True.
"""
try:
is_leader()
except NotImplementedError:
pass
else:
if not force:
return # NOOP if leader-election is supported
# Use original non-leader calls
relation_get = _relation_get
relation_set = _relation_set
rdata = relation_get()
echo_data = {}
if includes is None:
echo_data = rdata.copy()
for ex in ['private-address', 'public-address']:
if ex in echo_data:
echo_data.pop(ex)
else:
for attribute, value in six.iteritems(rdata):
for include in includes:
if include in attribute:
echo_data[attribute] = value
if len(echo_data) > 0:
relation_set(relation_settings=echo_data)
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
peer_store_fatal=False, relation_settings=None,
delimiter='_', **kwargs):
"""Store passed-in arguments both in argument relation and in peer storage.
It functions like doing relation_set() and peer_store() at the same time,
with the same data.
@param relation_id: the id of the relation to store the data on. Defaults
to the current relation.
@param peer_store_fatal: Set to True, the function will raise an exception
should the peer sotrage not be avialable."""
relation_settings = relation_settings if relation_settings else {}
relation_set(relation_id=relation_id,
relation_settings=relation_settings,
**kwargs)
if is_relation_made(peer_relation_name):
for key, value in six.iteritems(dict(list(kwargs.items()) +
list(relation_settings.items()))):
key_prefix = relation_id or current_relation_id()
peer_store(key_prefix + delimiter + key,
value,
relation_name=peer_relation_name)
else:
if peer_store_fatal:
raise ValueError('Unable to detect '
'peer relation {}'.format(peer_relation_name))


@ -19,20 +19,35 @@
import os
import subprocess
import sys
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import charm_dir, log
try:
from pip import main as pip_execute
except ImportError:
apt_update()
apt_install('python-pip')
from pip import main as pip_execute
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def pip_execute(*args, **kwargs):
"""Overriden pip_execute() to stop sys.path being changed.
The act of importing main from the pip module seems to cause add wheels
from the /usr/share/python-wheels which are installed by various tools.
This function ensures that sys.path remains the same after the call is
executed.
"""
try:
_path = sys.path
try:
from pip import main as _pip_execute
except ImportError:
apt_update()
apt_install('python-pip')
from pip import main as _pip_execute
_pip_execute(*args, **kwargs)
finally:
sys.path = _path
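The invariant the wrapper enforces can be checked directly; note the copy via list(), since holding a bare reference would also see in-place mutations:

import sys

before = list(sys.path)
pip_execute(['--version'])      # any pip invocation
assert sys.path == before, "sys.path leaked wheel entries"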
def parse_options(given, available):
"""Given a set of options, check if available"""
for key, value in sorted(given.items()):
@ -42,8 +57,12 @@ def parse_options(given, available):
yield "--{0}={1}".format(key, value)
def pip_install_requirements(requirements, **options):
"""Install a requirements file """
def pip_install_requirements(requirements, constraints=None, **options):
"""Install a requirements file.
:param constraints: Path to pip constraints file.
http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
"""
command = ["install"]
available_options = ('proxy', 'src', 'log', )
@ -51,8 +70,13 @@ def pip_install_requirements(requirements, **options):
command.append(option)
command.append("-r {0}".format(requirements))
log("Installing from file: {} with options: {}".format(requirements,
command))
if constraints:
command.append("-c {0}".format(constraints))
log("Installing from file: {} with constraints {} "
"and options: {}".format(requirements, constraints, command))
else:
log("Installing from file: {} with options: {}".format(requirements,
command))
pip_execute(command)
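An illustrative call (the file names and proxy are assumptions), which builds pip install --proxy=... -r requirements.txt -c constraints.txt:

pip_install_requirements('requirements.txt',
                         constraints='constraints.txt',
                         proxy='http://squid.internal:3128')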


@ -1,118 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
"""Charm Helpers saltstack - declare the state of your machines.
This helper enables you to declare your machine state, rather than
program it procedurally (and have to test each change to your procedures).
Your install hook can be as simple as::
{{{
from charmhelpers.contrib.saltstack import (
install_salt_support,
update_machine_state,
)
def install():
install_salt_support()
update_machine_state('machine_states/dependencies.yaml')
update_machine_state('machine_states/installed.yaml')
}}}
and won't need to change (nor will its tests) when you change the machine
state.
It's using a python package called salt-minion which allows various formats for
specifying resources, such as::
{{{
/srv/{{ basedir }}:
file.directory:
- group: ubunet
- user: ubunet
- require:
- user: ubunet
- recurse:
- user
- group
ubunet:
group.present:
- gid: 1500
user.present:
- uid: 1500
- gid: 1500
- createhome: False
- require:
- group: ubunet
}}}
The docs for all the different state definitions are at:
http://docs.saltstack.com/ref/states/all/
TODO:
* Add test helpers which will ensure that machine state definitions
are functionally (but not necessarily logically) correct (ie. getting
salt to parse all state defs.
* Add a link to a public bootstrap charm example / blogpost.
* Find a way to obviate the need to use the grains['charm_dir'] syntax
in templates.
"""
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
import subprocess
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
salt_grains_path = '/etc/salt/grains'
def install_salt_support(from_ppa=True):
"""Installs the salt-minion helper for machine state.
By default the salt-minion package is installed from
the saltstack PPA. If from_ppa is False you must ensure
that the salt-minion package is available in the apt cache.
"""
if from_ppa:
subprocess.check_call([
'/usr/bin/add-apt-repository',
'--yes',
'ppa:saltstack/salt',
])
subprocess.check_call(['/usr/bin/apt-get', 'update'])
# We install salt-common as salt-minion would run the salt-minion
# daemon.
charmhelpers.fetch.apt_install('salt-common')
def update_machine_state(state_path):
"""Update the machine state using the provided state declaration."""
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
salt_grains_path)
subprocess.check_call([
'salt-call',
'--local',
'state.template',
state_path,
])


@ -1,94 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from charmhelpers.core import hookenv
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
"""Generate selfsigned SSL keypair
You must provide one of the 3 optional arguments:
config, subject or cn
If more than one is provided the leftmost will be used
Arguments:
keyfile -- (required) full path to the keyfile to be created
certfile -- (required) full path to the certfile to be created
keysize -- (optional) SSL key length
config -- (optional) openssl configuration file
subject -- (optional) dictionary with SSL subject variables
cn -- (optional) cerfificate common name
Required keys in subject dict:
cn -- Common name (eq. FQDN)
Optional keys in subject dict
country -- Country Name (2 letter code)
state -- State or Province Name (full name)
locality -- Locality Name (eg, city)
organization -- Organization Name (eg, company)
organizational_unit -- Organizational Unit Name (eg, section)
email -- Email Address
"""
cmd = []
if config:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-config", config]
elif subject:
ssl_subject = ""
if "country" in subject:
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
if "state" in subject:
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
if "locality" in subject:
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
if "organization" in subject:
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
if "organizational_unit" in subject:
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
if "cn" in subject:
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
else:
hookenv.log("When using \"subject\" argument you must "
"provide \"cn\" field at very least")
return False
if "email" in subject:
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", ssl_subject]
elif cn:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", "/CN={}".format(cn)]
if not cmd:
hookenv.log("No config, subject or cn provided,"
"unable to generate self signed SSL certificates")
return False
try:
subprocess.check_call(cmd)
return True
except Exception as e:
print("Execution of openssl command failed:\n{}".format(e))
return False


@ -1,279 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from os.path import join as path_join
from os.path import exists
import subprocess
from charmhelpers.core.hookenv import log, DEBUG
STD_CERT = "standard"
# Mysql server is fairly picky about cert creation
# and types, spec its creation separately for now.
MYSQL_CERT = "mysql"
class ServiceCA(object):
default_expiry = str(365 * 2)
default_ca_expiry = str(365 * 6)
def __init__(self, name, ca_dir, cert_type=STD_CERT):
self.name = name
self.ca_dir = ca_dir
self.cert_type = cert_type
###############
# Hook Helper API
@staticmethod
def get_ca(type=STD_CERT):
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
ca = ServiceCA(service_name, ca_path, type)
ca.init()
return ca
@classmethod
def get_service_cert(cls, type=STD_CERT):
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
ca = cls.get_ca()
crt, key = ca.get_or_create_cert(service_name)
return crt, key, ca.get_ca_bundle()
###############
def init(self):
log("initializing service ca", level=DEBUG)
if not exists(self.ca_dir):
self._init_ca_dir(self.ca_dir)
self._init_ca()
@property
def ca_key(self):
return path_join(self.ca_dir, 'private', 'cacert.key')
@property
def ca_cert(self):
return path_join(self.ca_dir, 'cacert.pem')
@property
def ca_conf(self):
return path_join(self.ca_dir, 'ca.cnf')
@property
def signing_conf(self):
return path_join(self.ca_dir, 'signing.cnf')
def _init_ca_dir(self, ca_dir):
os.mkdir(ca_dir)
for i in ['certs', 'crl', 'newcerts', 'private']:
sd = path_join(ca_dir, i)
if not exists(sd):
os.mkdir(sd)
if not exists(path_join(ca_dir, 'serial')):
with open(path_join(ca_dir, 'serial'), 'w') as fh:
fh.write('02\n')
if not exists(path_join(ca_dir, 'index.txt')):
with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
fh.write('')
def _init_ca(self):
"""Generate the root ca's cert and key.
"""
if not exists(path_join(self.ca_dir, 'ca.cnf')):
with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
fh.write(
CA_CONF_TEMPLATE % (self.get_conf_variables()))
if not exists(path_join(self.ca_dir, 'signing.cnf')):
with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
fh.write(
SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
if exists(self.ca_cert) or exists(self.ca_key):
raise RuntimeError("Initialized called when CA already exists")
cmd = ['openssl', 'req', '-config', self.ca_conf,
'-x509', '-nodes', '-newkey', 'rsa',
'-days', self.default_ca_expiry,
'-keyout', self.ca_key, '-out', self.ca_cert,
'-outform', 'PEM']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
log("CA Init:\n %s" % output, level=DEBUG)
def get_conf_variables(self):
return dict(
org_name="juju",
org_unit_name="%s service" % self.name,
common_name=self.name,
ca_dir=self.ca_dir)
def get_or_create_cert(self, common_name):
if common_name in self:
return self.get_certificate(common_name)
return self.create_certificate(common_name)
def create_certificate(self, common_name):
if common_name in self:
return self.get_certificate(common_name)
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
self._create_certificate(common_name, key_p, csr_p, crt_p)
return self.get_certificate(common_name)
def get_certificate(self, common_name):
if common_name not in self:
raise ValueError("No certificate for %s" % common_name)
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
with open(crt_p) as fh:
crt = fh.read()
with open(key_p) as fh:
key = fh.read()
return crt, key
def __contains__(self, common_name):
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
return exists(crt_p)
def _create_certificate(self, common_name, key_p, csr_p, crt_p):
template_vars = self.get_conf_variables()
template_vars['common_name'] = common_name
subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
template_vars)
log("CA Create Cert %s" % common_name, level=DEBUG)
cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
'-nodes', '-days', self.default_expiry,
'-keyout', key_p, '-out', csr_p, '-subj', subj]
subprocess.check_call(cmd, stderr=subprocess.PIPE)
cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
subprocess.check_call(cmd, stderr=subprocess.PIPE)
log("CA Sign Cert %s" % common_name, level=DEBUG)
if self.cert_type == MYSQL_CERT:
cmd = ['openssl', 'x509', '-req',
'-in', csr_p, '-days', self.default_expiry,
'-CA', self.ca_cert, '-CAkey', self.ca_key,
'-set_serial', '01', '-out', crt_p]
else:
cmd = ['openssl', 'ca', '-config', self.signing_conf,
'-extensions', 'req_extensions',
'-days', self.default_expiry, '-notext',
'-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
log("running %s" % " ".join(cmd), level=DEBUG)
subprocess.check_call(cmd, stderr=subprocess.PIPE)
def get_ca_bundle(self):
with open(self.ca_cert) as fh:
return fh.read()
CA_CONF_TEMPLATE = """
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = %(ca_dir)s
policy = policy_match
database = $dir/index.txt
serial = $dir/serial
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cacert.key
RANDFILE = $dir/private/.rand
default_md = default
[ req ]
default_bits = 1024
default_md = sha1
prompt = no
distinguished_name = ca_distinguished_name
x509_extensions = ca_extensions
[ ca_distinguished_name ]
organizationName = %(org_name)s
organizationalUnitName = %(org_unit_name)s Certificate Authority
[ policy_match ]
countryName = optional
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied
[ ca_extensions ]
basicConstraints = critical,CA:true
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
keyUsage = cRLSign, keyCertSign
"""
SIGNING_CONF_TEMPLATE = """
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = %(ca_dir)s
policy = policy_match
database = $dir/index.txt
serial = $dir/serial
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cacert.key
RANDFILE = $dir/private/.rand
default_md = default
[ req ]
default_bits = 1024
default_md = sha1
prompt = no
distinguished_name = req_distinguished_name
x509_extensions = req_extensions
[ req_distinguished_name ]
organizationName = %(org_name)s
organizationalUnitName = %(org_unit_name)s machine resources
commonName = %(common_name)s
[ policy_match ]
countryName = optional
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied
[ req_extensions ]
basicConstraints = CA:false
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
keyUsage = digitalSignature, keyEncipherment, keyAgreement
extendedKeyUsage = serverAuth, clientAuth
"""


@ -23,11 +23,16 @@
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import bisect
import errno
import hashlib
import six
import os
import shutil
import json
import time
import uuid
from subprocess import (
check_call,
@ -35,8 +40,10 @@ from subprocess import (
CalledProcessError,
)
from charmhelpers.core.hookenv import (
local_unit,
relation_get,
relation_ids,
relation_set,
related_units,
log,
DEBUG,
@ -56,6 +63,8 @@ from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.core.kernel import modprobe
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
@ -67,6 +76,559 @@ log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
def validator(value, valid_type, valid_range=None):
"""
Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
Example input:
validator(value=1,
valid_type=int,
valid_range=[0, 2])
This says I'm testing value=1. It must be an int inclusive in [0,2]
:param value: The value to validate
:param valid_type: The type that value should be.
:param valid_range: A range of values that value can assume.
:return:
"""
assert isinstance(value, valid_type), "{} is not a {}".format(
value,
valid_type)
if valid_range is not None:
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
if valid_type is six.string_types:
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
else:
if len(valid_range) != 2:
raise ValueError(
"Invalid valid_range list of {} for {}. "
"List must be [min,max]".format(valid_range, value))
assert value >= valid_range[0], \
"{} is less than minimum allowed value of {}".format(
value, valid_range[0])
assert value <= valid_range[1], \
"{} is greater than maximum allowed value of {}".format(
value, valid_range[1])
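validator() in action, assuming six is imported as above:

validator(value=2, valid_type=int, valid_range=[0, 2])            # ok
validator(value='writeback', valid_type=six.string_types,
          valid_range=['readonly', 'writeback'])                  # ok
try:
    validator(value=3, valid_type=int, valid_range=[0, 2])
except AssertionError as e:
    print(e)   # 3 is greater than maximum allowed value of 2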
class PoolCreationError(Exception):
"""
A custom error to inform the caller that a pool creation failed. Provides an error message
"""
def __init__(self, message):
super(PoolCreationError, self).__init__(message)
class Pool(object):
"""
An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
"""
def __init__(self, service, name):
self.service = service
self.name = name
# Create the pool if it doesn't exist already
# To be implemented by subclasses
def create(self):
pass
def add_cache_tier(self, cache_pool, mode):
"""
Adds a new cache tier to an existing pool.
:param cache_pool: six.string_types. The cache tier pool name to add.
:param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
:return: None
"""
# Check the input types and values
validator(value=cache_pool, valid_type=six.string_types)
validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
def remove_cache_tier(self, cache_pool):
"""
Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
:param cache_pool: six.string_types. The cache tier pool name to remove.
:return: None
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(self.service, cache_pool)
version = ceph_version()
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
elif mode == 'writeback':
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
if version >= '10.1':
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
check_call(pool_forward_cmd)
# Flush the cache and wait for it to return
check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
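A hedged usage sketch of the tiering calls above (pool names invented; assumes both pools already exist and a working ceph client id 'admin'):

pool = Pool(service='admin', name='rbd')
pool.add_cache_tier(cache_pool='rbd-cache', mode='writeback')
# ... later, flush and detach:
pool.remove_cache_tier(cache_pool='rbd-cache')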
def get_pgs(self, pool_size):
"""
:param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
erasure coded pools
:return: int. The number of pgs to use.
"""
validator(value=pool_size, valid_type=int)
osd_list = get_osds(self.service)
if not osd_list:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
return 200
osd_list_length = len(osd_list)
# Calculate based on Ceph best practices
if osd_list_length < 5:
return 128
elif 5 < osd_list_length < 10:
return 512
elif 10 < osd_list_length < 50:
return 4096
else:
estimate = (osd_list_length * 100) / pool_size
# Return the next nearest power of 2
index = bisect.bisect_right(powers_of_two, estimate)
return powers_of_two[index]
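A worked example of the sizing above: 300 OSDs in a 3-replica pool targets 300 * 100 / 3 = 10000 PGs, rounded up to the next entry in powers_of_two:

import bisect

powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144,
                 524288, 1048576, 2097152, 4194304, 8388608]
estimate = (300 * 100) / 3                       # 10000
index = bisect.bisect_right(powers_of_two, estimate)
print(powers_of_two[index])                      # 16384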
class ReplicatedPool(Pool):
def __init__(self, service, name, pg_num=None, replicas=2):
super(ReplicatedPool, self).__init__(service=service, name=name)
self.replicas = replicas
if pg_num is None:
self.pg_num = self.get_pgs(self.replicas)
else:
self.pg_num = pg_num
def create(self):
if not pool_exists(self.service, self.name):
# Create it
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(self.pg_num)]
try:
check_call(cmd)
# Set the pool replica size
update_pool(client=self.service,
pool=self.name,
settings={'size': str(self.replicas)})
except CalledProcessError:
raise
# Default jerasure erasure coded pool
class ErasurePool(Pool):
def __init__(self, service, name, erasure_code_profile="default"):
super(ErasurePool, self).__init__(service=service, name=name)
self.erasure_code_profile = erasure_code_profile
def create(self):
if not pool_exists(self.service, self.name):
# Try to find the erasure profile information so we can properly size the pgs
erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
# Check for errors
if erasure_profile is None:
log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
level=ERROR)
raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
if 'k' not in erasure_profile or 'm' not in erasure_profile:
# Error
log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
level=ERROR)
raise PoolCreationError(
message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
# Create it
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
'erasure', self.erasure_code_profile]
try:
check_call(cmd)
except CalledProcessError:
raise
"""Get an existing erasure code profile if it already exists.
Returns json formatted output"""
def get_mon_map(service):
"""
Returns the current monitor map.
:param service: six.string_types. The Ceph user name to run the command under
:return: json string. :raise: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
"""
try:
mon_status = check_output(
['ceph', '--id', service,
'mon_status', '--format=json'])
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
raise
def hash_monitor_names(service):
"""
Uses the get_mon_map() function to get information about the monitor
cluster.
Hash the name of each monitor. Return a sorted list of monitor hashes
in ascending order.
:param service: six.string_types. The Ceph user name to run the command under
:rtype: list. Sorted list of monitor-name hashes. Each mon map entry is a dict, for
example: {
'name': 'ip-172-31-13-165',
'rank': 0,
'addr': '172.31.13.165:6789/0'}
"""
try:
hash_list = []
monitor_list = get_mon_map(service=service)
if monitor_list['monmap']['mons']:
for mon in monitor_list['monmap']['mons']:
hash_list.append(
hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
return sorted(hash_list)
else:
return None
except (ValueError, CalledProcessError):
raise
def monitor_key_delete(service, key):
"""
Delete a key and value pair from the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to delete.
"""
try:
check_output(
['ceph', '--id', service,
'config-key', 'del', str(key)])
except CalledProcessError as e:
log("Monitor config-key put failed with message: {}".format(
e.output))
raise
def monitor_key_set(service, key, value):
"""
Sets a key value pair on the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to set.
:param value: The value to set. This will be converted to a string
before setting
"""
try:
check_output(
['ceph', '--id', service,
'config-key', 'put', str(key), str(value)])
except CalledProcessError as e:
log("Monitor config-key put failed with message: {}".format(
e.output))
raise
def monitor_key_get(service, key):
"""
Gets the value of an existing key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for.
:return: Returns the value of that key or None if not found.
"""
try:
output = check_output(
['ceph', '--id', service,
'config-key', 'get', str(key)])
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
e.output))
return None
def monitor_key_exists(service, key):
"""
Searches for the existence of a key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for
:return: Returns True if the key exists, False if not and raises an
exception if an unknown error occurs. :raise: CalledProcessError if
an unknown error occurs
"""
try:
check_call(
['ceph', '--id', service,
'config-key', 'exists', str(key)])
# I can return true here regardless because Ceph returns
# ENOENT if the key wasn't found
return True
except CalledProcessError as e:
if e.returncode == errno.ENOENT:
return False
else:
log("Unknown error from ceph config-get exists: {} {}".format(
e.returncode, e.output))
raise
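A round-trip sketch with the helpers above (the 'admin' client id and key name are assumptions):

monitor_key_set('admin', 'bootstrap-done', True)
assert monitor_key_exists('admin', 'bootstrap-done')
print(monitor_key_get('admin', 'bootstrap-done'))   # 'True'
monitor_key_delete('admin', 'bootstrap-done')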
def get_erasure_profile(service, name):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param name:
:return:
"""
try:
out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name, '--format=json'])
return json.loads(out)
except (CalledProcessError, OSError, ValueError):
return None
def pool_set(service, pool_name, key, value):
"""
Sets a value for a RADOS pool in ceph.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param key: six.string_types
:param value:
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
try:
check_call(cmd)
except CalledProcessError:
raise
def snapshot_pool(service, pool_name, snapshot_name):
"""
Snapshots a RADOS pool in ceph.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param snapshot_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
try:
check_call(cmd)
except CalledProcessError:
raise
def remove_pool_snapshot(service, pool_name, snapshot_name):
"""
Remove a snapshot from a RADOS pool in ceph.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param snapshot_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
try:
check_call(cmd)
except CalledProcessError:
raise
# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param max_bytes: int or long
:return: None. Can raise CalledProcessError
"""
# Set a byte quota on a RADOS pool in ceph.
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
'max_bytes', str(max_bytes)]
try:
check_call(cmd)
except CalledProcessError:
raise
def remove_pool_quota(service, pool_name):
"""
Remove the byte quota on a RADOS pool in ceph (sets max_bytes back to 0).
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
try:
check_call(cmd)
except CalledProcessError:
raise
def remove_erasure_profile(service, profile_name):
"""
Remove an erasure code profile.
Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
for more details
:param service: six.string_types. The Ceph user name to run the command under
:param profile_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
profile_name]
try:
check_call(cmd)
except CalledProcessError:
raise
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
failure_domain='host',
data_chunks=2, coding_chunks=1,
locality=None, durability_estimator=None):
"""
Create a new erasure code profile, or update the profile if it already
exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
for more details
:param service: six.string_types. The Ceph user name to run the command under
:param profile_name: six.string_types
:param erasure_plugin_name: six.string_types
:param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
'room', 'root', 'row']
:param data_chunks: int
:param coding_chunks: int
:param locality: int
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
'ruleset_failure_domain=' + failure_domain]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
# Add plugin specific information
if locality is not None:
# For local erasure codes
cmd.append('l=' + str(locality))
if durability_estimator is not None:
# For Shec erasure codes
cmd.append('c=' + str(durability_estimator))
if erasure_profile_exists(service, profile_name):
cmd.append('--force')
try:
check_call(cmd)
except CalledProcessError:
raise
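# Usage sketch (hypothetical profile name): a jerasure k=4/m=2 profile with
# host-level failure isolation. Note that locality (l) and
# durability_estimator (c) are mutually exclusive plugin-specific options.
#
#     create_erasure_profile('admin', 'ec-42', failure_domain='host',
#                            data_chunks=4, coding_chunks=2)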
def rename_pool(service, old_name, new_name):
"""
Rename a Ceph pool from old_name to new_name
:param service: six.string_types. The Ceph user name to run the command under
:param old_name: six.string_types
:param new_name: six.string_types
:return: None
"""
validator(value=old_name, valid_type=six.string_types)
validator(value=new_name, valid_type=six.string_types)
cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
check_call(cmd)
def erasure_profile_exists(service, name):
"""
Check to see if an erasure code profile already exists.
:param service: six.string_types. The Ceph user name to run the command under
:param name: six.string_types
:return: bool. True if the profile exists, otherwise False
"""
validator(value=name, valid_type=six.string_types)
try:
check_call(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name])
return True
except CalledProcessError:
return False
def get_cache_mode(service, pool_name):
"""
Find the current caching mode of the pool_name given.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:return: six.string_types or None. The cache mode, or None if the pool is not found
"""
validator(value=service, valid_type=six.string_types)
validator(value=pool_name, valid_type=six.string_types)
out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
try:
osd_json = json.loads(out)
for pool in osd_json['pools']:
if pool['pool_name'] == pool_name:
return pool['cache_mode']
return None
except ValueError:
raise
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
except CalledProcessError:
return False
return name in out.split()
def get_osds(service):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json']).decode('UTF-8'))
return None
def install():
@ -96,53 +658,37 @@ def create_rbd_image(service, pool, image, sizemb):
check_call(cmd)
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
except CalledProcessError:
return False
def update_pool(client, pool, settings):
cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
for k, v in six.iteritems(settings):
cmd.append(k)
cmd.append(v)
return name in out
check_call(cmd)
def get_osds(service):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json']).decode('UTF-8'))
return None
def create_pool(service, name, replicas=3):
def create_pool(service, name, replicas=3, pg_num=None):
"""Create a new RADOS pool."""
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 // replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
if not pg_num:
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pg_num = (len(osds) * 100 // replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pg_num = 200
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
check_call(cmd)
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
str(replicas)]
check_call(cmd)
update_pool(service, name, settings={'size': str(replicas)})
def delete_pool(service, name):
@ -197,10 +743,10 @@ def create_key_file(service, key):
log('Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
"""Query named relation 'ceph' to determine current nodes."""
def get_ceph_nodes(relation='ceph'):
"""Query named relation to determine current nodes."""
hosts = []
for r_id in relation_ids('ceph'):
for r_id in relation_ids(relation):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
@ -288,17 +834,6 @@ def place_data_on_block_device(blk_device, data_src_dst):
os.chown(data_src_dst, uid, gid)
# TODO: re-use
def modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
log('Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
modules.write(module)
def copy_files(src, dst, symlinks=False, ignore=None):
"""Copy files from src to dst."""
for item in os.listdir(src):
@ -363,14 +898,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None):
def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
"""Ensures a ceph keyring is created for a named service and optionally
ensures user and group ownership.
Returns False if no ceph key is available in relation state.
"""
key = None
for rid in relation_ids('ceph'):
for rid in relation_ids(relation):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
@ -411,17 +946,60 @@ class CephBrokerRq(object):
The API is versioned and defaults to version 1.
"""
def __init__(self, api_version=1):
def __init__(self, api_version=1, request_id=None):
self.api_version = api_version
if request_id:
self.request_id = request_id
else:
self.request_id = str(uuid.uuid1())
self.ops = []
def add_op_create_pool(self, name, replica_count=3):
def add_op_create_pool(self, name, replica_count=3, pg_num=None):
"""Adds an operation to create a pool.
@param pg_num: optional setting. If not provided, this value
will be calculated by the broker based on how many OSDs are in the
cluster at the time of creation. Note that, if provided, this value
will be capped at the current available maximum.
"""
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count})
'replicas': replica_count, 'pg_num': pg_num})
def set_ops(self, ops):
"""Set request ops to provided value.
Useful for injecting ops that come from a previous request
to allow comparisons to ensure validity.
"""
self.ops = ops
@property
def request(self):
return json.dumps({'api-version': self.api_version, 'ops': self.ops})
return json.dumps({'api-version': self.api_version, 'ops': self.ops,
'request-id': self.request_id})
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in ['replicas', 'name', 'op', 'pg_num']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:
return False
return True
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.api_version == other.api_version and \
self._ops_equal(other):
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class CephBrokerRsp(object):
@ -431,10 +1009,15 @@ class CephBrokerRsp(object):
The API is versioned and defaults to version 1.
"""
def __init__(self, encoded_rsp):
self.api_version = None
self.rsp = json.loads(encoded_rsp)
@property
def request_id(self):
return self.rsp.get('request-id')
@property
def exit_code(self):
return self.rsp.get('exit-code')
@ -442,3 +1025,182 @@ class CephBrokerRsp(object):
@property
def exit_msg(self):
return self.rsp.get('stderr')
# Ceph Broker Conversation:
# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
# and send that request to ceph via the ceph relation. The CephBrokerRq has a
# unique id so that the client can identify which CephBrokerRsp is associated
# with the request. Ceph will also respond to each client unit individually,
# creating a response key per client unit, e.g. glance/0 will get a
# CephBrokerRsp via the key broker-rsp-glance-0
#
# To use this the charm can just do something like:
#
# from charmhelpers.contrib.storage.linux.ceph import (
# send_request_if_needed,
# is_request_complete,
# CephBrokerRq,
# )
#
# @hooks.hook('ceph-relation-changed')
# def ceph_changed():
# rq = CephBrokerRq()
# rq.add_op_create_pool(name='poolname', replica_count=3)
#
# if is_request_complete(rq):
# <Request complete actions>
# else:
# send_request_if_needed(get_ceph_request())
#
# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
# of glance having sent a request to ceph which ceph has successfully processed
# 'ceph:8': {
# 'ceph/0': {
# 'auth': 'cephx',
# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
# 'ceph-public-address': '10.5.44.103',
# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
# 'private-address': '10.5.44.103',
# },
# 'glance/0': {
# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
# '"ops": [{"replicas": 3, "name": "glance", '
# '"op": "create-pool"}]}'),
# 'private-address': '10.5.44.109',
# },
# }
def get_previous_request(rid):
"""Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request
"""
request = None
broker_req = relation_get(attribute='broker_req', rid=rid,
unit=local_unit())
if broker_req:
request_data = json.loads(broker_req)
request = CephBrokerRq(api_version=request_data['api-version'],
request_id=request_data['request-id'])
request.set_ops(request_data['ops'])
return request
def get_request_states(request, relation='ceph'):
"""Return a dict of requests per relation id with their corresponding
completion state.
This allows a charm with a request for ceph to see whether an equivalent
request is already being processed and, if so, what state that request
is in.
@param request: A CephBrokerRq object
"""
requests = {}
for rid in relation_ids(relation):
complete = False
previous_request = get_previous_request(rid)
if request == previous_request:
sent = True
complete = is_request_complete_for_rid(previous_request, rid)
else:
sent = False
complete = False
requests[rid] = {
'sent': sent,
'complete': complete,
}
return requests
def is_request_sent(request, relation='ceph'):
"""Check to see if a functionally equivalent request has already been sent
Returns True if a similar request has been sent
@param request: A CephBrokerRq object
"""
states = get_request_states(request, relation=relation)
for rid in states.keys():
if not states[rid]['sent']:
return False
return True
def is_request_complete(request, relation='ceph'):
"""Check to see if a functionally equivalent request has already been
completed
Returns True if a similar request has been completed
@param request: A CephBrokerRq object
"""
states = get_request_states(request, relation=relation)
for rid in states.keys():
if not states[rid]['complete']:
return False
return True
def is_request_complete_for_rid(request, rid):
"""Check if a given request has been completed on the given relation
@param request: A CephBrokerRq object
@param rid: Relation ID
"""
broker_key = get_broker_rsp_key()
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
if rdata.get(broker_key):
rsp = CephBrokerRsp(rdata.get(broker_key))
if rsp.request_id == request.request_id:
if not rsp.exit_code:
return True
else:
# The remote unit sent no reply targeted at this unit so either the
# remote ceph cluster does not support unit targeted replies or it
# has not processed our request yet.
if rdata.get('broker_rsp'):
request_data = json.loads(rdata['broker_rsp'])
if request_data.get('request-id'):
log('Ignoring legacy broker_rsp without unit key as remote '
'service supports unit specific replies', level=DEBUG)
else:
log('Using legacy broker_rsp as remote service does not '
'support unit specific replies', level=DEBUG)
rsp = CephBrokerRsp(rdata['broker_rsp'])
if not rsp.exit_code:
return True
return False
def get_broker_rsp_key():
"""Return broker response key for this unit
This is the key that ceph is going to use to pass request status
information back to this unit
"""
return 'broker-rsp-' + local_unit().replace('/', '-')
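# For example, on unit 'glance/0' this returns 'broker-rsp-glance-0',
# matching the per-unit response keys in the conversation example above.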
def send_request_if_needed(request, relation='ceph'):
"""Send broker request if an equivalent request has not already been sent
@param request: A CephBrokerRq object
"""
if is_request_sent(request, relation=relation):
log('Request already sent but not complete, not sending new request',
level=DEBUG)
else:
for rid in relation_ids(relation):
log('Sending request {}'.format(request.request_id), level=DEBUG)
relation_set(relation_id=rid, broker_req=request.request)

View File

@ -76,3 +76,13 @@ def ensure_loopback_device(path, size):
check_call(cmd)
return create_loopback(path)
def is_mapped_loopback_device(device):
"""
Checks if a given device name is an existing/mapped loopback device.
:param device: str: Full path to the device (eg, /dev/loop1).
:returns: str: Path to the backing file if it is a loopback device;
empty string otherwise
"""
return loopback_devices().get(device, "")

View File

@ -43,9 +43,10 @@ def zap_disk(block_device):
:param block_device: str: Full path of block device to clean.
'''
# https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
call(['sgdisk', '--zap-all', '--mbrtogpt',
'--clear', block_device])
call(['sgdisk', '--zap-all', '--', block_device])
call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
dev_end = check_output(['blockdev', '--getsz',
block_device]).decode('UTF-8')
gpt_end = int(dev_end.split()[0]) - 100
@ -63,8 +64,8 @@ def is_device_mounted(device):
:returns: boolean: True if the path represents a mounted device, False if
it doesn't.
'''
is_partition = bool(re.search(r".*[0-9]+\b", device))
out = check_output(['mount']).decode('UTF-8')
if is_partition:
return bool(re.search(device + r"\b", out))
return bool(re.search(device + r"[0-9]*\b", out))
try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
except:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))

View File

@ -1,15 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -1,139 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
"""A helper to create a yaml cache of config with namespaced relation data."""
import os
import yaml
import six
import charmhelpers.core.hookenv
charm_dir = os.environ.get('CHARM_DIR', '')
def dict_keys_without_hyphens(a_dict):
"""Return the a new dict with underscores instead of hyphens in keys."""
return dict(
(key.replace('-', '_'), val) for key, val in a_dict.items())
def update_relations(context, namespace_separator=':'):
"""Update the context with the relation data."""
# Add any relation data prefixed with the relation type.
relation_type = charmhelpers.core.hookenv.relation_type()
relations = []
context['current_relation'] = {}
if relation_type is not None:
relation_data = charmhelpers.core.hookenv.relation_get()
context['current_relation'] = relation_data
# Deprecated: the following use of relation data as keys
# directly in the context will be removed.
relation_data = dict(
("{relation_type}{namespace_separator}{key}".format(
relation_type=relation_type,
key=key,
namespace_separator=namespace_separator), val)
for key, val in relation_data.items())
relation_data = dict_keys_without_hyphens(relation_data)
context.update(relation_data)
relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
relations = [dict_keys_without_hyphens(rel) for rel in relations]
context['relations_full'] = charmhelpers.core.hookenv.relations()
# the hookenv.relations() data structure is effectively unusable in
# templates and other contexts when trying to access relation data other
# than the current relation. So provide a more useful structure that works
# with any hook.
local_unit = charmhelpers.core.hookenv.local_unit()
relations = {}
for rname, rids in context['relations_full'].items():
relations[rname] = []
for rid, rdata in rids.items():
data = rdata.copy()
if local_unit in rdata:
data.pop(local_unit)
for unit_name, rel_data in data.items():
new_data = {'__relid__': rid, '__unit__': unit_name}
new_data.update(rel_data)
relations[rname].append(new_data)
context['relations'] = relations
def juju_state_to_yaml(yaml_path, namespace_separator=':',
allow_hyphens_in_keys=True, mode=None):
"""Update the juju config and state in a yaml file.
This includes any current relation-get data, and the charm
directory.
This function was created for the ansible and saltstack
support, as those libraries can use a yaml file to supply
context to templates, but it may be useful generally to
create and update an on-disk cache of all the config, including
previous relation data.
By default, hyphens are allowed in keys as this is supported
by yaml, but for tools like ansible, hyphens are not valid [1].
[1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
"""
config = charmhelpers.core.hookenv.config()
# Add the charm_dir which we will need to refer to charm
# file resources etc.
config['charm_dir'] = charm_dir
config['local_unit'] = charmhelpers.core.hookenv.local_unit()
config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
'public-address'
)
# Don't use non-standard tags for unicode which will not
# work when salt uses yaml.load_safe.
yaml.add_representer(six.text_type,
lambda dumper, value: dumper.represent_scalar(
six.u('tag:yaml.org,2002:str'), value))
yaml_dir = os.path.dirname(yaml_path)
if not os.path.exists(yaml_dir):
os.makedirs(yaml_dir)
if os.path.exists(yaml_path):
with open(yaml_path, "r") as existing_vars_file:
existing_vars = yaml.load(existing_vars_file.read())
else:
with open(yaml_path, "w+"):
pass
existing_vars = {}
if mode is not None:
os.chmod(yaml_path, mode)
if not allow_hyphens_in_keys:
config = dict_keys_without_hyphens(config)
existing_vars.update(config)
update_relations(existing_vars, namespace_separator)
with open(yaml_path, "w+") as fp:
fp.write(yaml.dump(existing_vars, default_flow_style=False))

View File

@ -1,39 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
"""
Templating using the python-jinja2 package.
"""
import six
from charmhelpers.fetch import apt_install
try:
import jinja2
except ImportError:
if six.PY3:
apt_install(["python3-jinja2"])
else:
apt_install(["python-jinja2"])
import jinja2
DEFAULT_TEMPLATES_DIR = 'templates'
def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir))
template = templates.get_template(template_name)
return template.render(context)

View File

@ -1,29 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
'''
Templating using standard Python str.format() method.
'''
from charmhelpers.core import hookenv
def render(template, extra={}, **kwargs):
"""Return the template rendered using Python's str.format()."""
context = hookenv.execution_environment()
context.update(extra)
context.update(kwargs)
return template.format(**context)

View File

@ -1,313 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Easy file synchronization among peer units using ssh + unison.
#
# For the -joined, -changed, and -departed peer relations, add a call to
# ssh_authorized_peers() describing the peer relation and the desired
# user + group. After all peer relations have settled, all hosts should
# be able to connect to one another via key auth'd ssh as the specified user.
#
# Other hooks are then free to synchronize files and directories using
# sync_to_peers().
#
# For a peer relation named 'cluster', for example:
#
# cluster-relation-joined:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_local_user=True)
# ...
#
# cluster-relation-changed:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_local_user=True)
# ...
#
# cluster-relation-departed:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_local_user=True)
# ...
#
# Hooks are now free to sync files as easily as:
#
# files = ['/etc/fstab', '/etc/apt.conf.d/']
# sync_to_peers(peer_interface='cluster',
# user='juju_ssh, paths=[files])
#
# It is assumed the charm itself has setup permissions on each unit
# such that 'juju_ssh' has read + write permissions. Also assumed
# that the calling charm takes care of leader delegation.
#
# Additionally, files can be synchronized only to a specific unit:
# sync_to_peer(slave_address, user='juju_ssh',
# paths=[files], verbose=False)
import os
import pwd
from copy import copy
from subprocess import check_call, check_output
from charmhelpers.core.host import (
adduser,
add_user_to_group,
pwgen,
)
from charmhelpers.core.hookenv import (
log,
hook_name,
relation_ids,
related_units,
relation_set,
relation_get,
unit_private_ip,
INFO,
ERROR,
)
BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
'-fastcheck=true', '-group=false', '-owner=false',
'-prefer=newer', '-times=true']
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
log('Could not get homedir for user %s: user exists?' % (user), ERROR)
raise Exception
def create_private_key(user, priv_key_path, key_type='rsa'):
types_bits = {
'rsa': '2048',
'ecdsa': '521',
}
if key_type not in types_bits:
log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR)
key_type = 'rsa'
if not os.path.isfile(priv_key_path):
log('Generating new SSH key for user %s.' % user)
cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type,
'-b', types_bits[key_type], '-f', priv_key_path]
check_call(cmd)
else:
log('SSH key already exists at %s.' % priv_key_path)
check_call(['chown', user, priv_key_path])
check_call(['chmod', '0600', priv_key_path])
def create_public_key(user, priv_key_path, pub_key_path):
if not os.path.isfile(pub_key_path):
log('Generating missing ssh public key @ %s.' % pub_key_path)
cmd = ['ssh-keygen', '-y', '-f', priv_key_path]
p = check_output(cmd).strip()
with open(pub_key_path, 'wb') as out:
out.write(p)
check_call(['chown', user, pub_key_path])
def get_keypair(user):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
priv_key = os.path.join(ssh_dir, 'id_rsa')
pub_key = '%s.pub' % priv_key
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir)
check_call(['chown', '-R', user, ssh_dir])
create_private_key(user, priv_key)
create_public_key(user, priv_key, pub_key)
with open(priv_key, 'r') as p:
_priv = p.read().strip()
with open(pub_key, 'r') as p:
_pub = p.read().strip()
return (_priv, _pub)
def write_authorized_keys(user, keys):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
log('Syncing authorized_keys @ %s.' % auth_keys)
with open(auth_keys, 'w') as out:
for k in keys:
out.write('%s\n' % k)
def write_known_hosts(user, hosts):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
known_hosts = os.path.join(ssh_dir, 'known_hosts')
khosts = []
for host in hosts:
cmd = ['ssh-keyscan', host]
remote_key = check_output(cmd, universal_newlines=True).strip()
khosts.append(remote_key)
log('Syncing known_hosts @ %s.' % known_hosts)
with open(known_hosts, 'w') as out:
for host in khosts:
out.write('%s\n' % host)
def ensure_user(user, group=None):
adduser(user, pwgen())
if group:
add_user_to_group(user, group)
def ssh_authorized_peers(peer_interface, user, group=None,
ensure_local_user=False):
"""
Main setup function, should be called from both peer -changed and -joined
hooks with the same parameters.
"""
if ensure_local_user:
ensure_user(user, group)
priv_key, pub_key = get_keypair(user)
hook = hook_name()
if hook == '%s-relation-joined' % peer_interface:
relation_set(ssh_pub_key=pub_key)
elif hook == '%s-relation-changed' % peer_interface or \
hook == '%s-relation-departed' % peer_interface:
hosts = []
keys = []
for r_id in relation_ids(peer_interface):
for unit in related_units(r_id):
ssh_pub_key = relation_get('ssh_pub_key',
rid=r_id,
unit=unit)
priv_addr = relation_get('private-address',
rid=r_id,
unit=unit)
if ssh_pub_key:
keys.append(ssh_pub_key)
hosts.append(priv_addr)
else:
log('ssh_authorized_peers(): ssh_pub_key '
'missing for unit %s, skipping.' % unit)
write_authorized_keys(user, keys)
write_known_hosts(user, hosts)
authed_hosts = ':'.join(hosts)
relation_set(ssh_authorized_hosts=authed_hosts)
def _run_as_user(user, gid=None):
try:
user = pwd.getpwnam(user)
except KeyError:
log('Invalid user: %s' % user)
raise Exception
uid = user.pw_uid
gid = gid or user.pw_gid
os.environ['HOME'] = user.pw_dir
def _inner():
os.setgid(gid)
os.setuid(uid)
return _inner
def run_as_user(user, cmd, gid=None):
return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/')
def collect_authed_hosts(peer_interface):
'''Iterate through the units on the peer interface to find all that
have the calling host in their authorized hosts list'''
hosts = []
for r_id in (relation_ids(peer_interface) or []):
for unit in related_units(r_id):
private_addr = relation_get('private-address',
rid=r_id, unit=unit)
authed_hosts = relation_get('ssh_authorized_hosts',
rid=r_id, unit=unit)
if not authed_hosts:
log('Peer %s has not authorized *any* hosts yet, skipping.' %
(unit), level=INFO)
continue
if unit_private_ip() in authed_hosts.split(':'):
hosts.append(private_addr)
else:
log('Peer %s has not authorized *this* host yet, skipping.' %
(unit), level=INFO)
return hosts
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None,
fatal=False):
"""Sync path to an specific peer host
Propagates exception if operation fails and fatal=True.
"""
cmd = cmd or copy(BASE_CMD)
if not verbose:
cmd.append('-silent')
# removing trailing slash from directory paths, unison
# doesn't like these.
if path.endswith('/'):
path = path[:(len(path) - 1)]
cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
try:
log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
run_as_user(user, cmd, gid)
except:
log('Error syncing remote files')
if fatal:
raise
def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None,
fatal=False):
"""Sync paths to an specific peer host
Propagates exception if any operation fails and fatal=True.
"""
if paths:
for p in paths:
sync_path_to_host(p, host, user, verbose, cmd, gid, fatal)
def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None,
gid=None, fatal=False):
"""Sync all hosts to an specific path
The type of group is integer, it allows user has permissions to
operate a directory have a different group id with the user id.
Propagates exception if any operation fails and fatal=True.
"""
if paths:
for host in collect_authed_hosts(peer_interface):
sync_to_peer(host, user, paths, verbose, cmd, gid, fatal)

View File

@ -74,6 +74,7 @@ def cached(func):
res = func(*args, **kwargs)
cache[key] = res
return res
wrapper._wrapped = func
return wrapper
@ -173,9 +174,19 @@ def relation_type():
return os.environ.get('JUJU_RELATION', None)
def relation_id():
"""The relation ID for the current relation hook"""
return os.environ.get('JUJU_RELATION_ID', None)
@cached
def relation_id(relation_name=None, service_or_unit=None):
"""The relation ID for the current or a specified relation"""
if not relation_name and not service_or_unit:
return os.environ.get('JUJU_RELATION_ID', None)
elif relation_name and service_or_unit:
service_name = service_or_unit.split('/')[0]
for relid in relation_ids(relation_name):
remote_service = remote_service_name(relid)
if remote_service == service_name:
return relid
else:
raise ValueError('Must specify neither or both of relation_name and service_or_unit')
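# Usage sketch (hypothetical relation and unit names): find the relation id
# that joins this charm to the 'mysql' service over its 'shared-db' relation.
#
#     relid = relation_id('shared-db', 'mysql/0')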
def local_unit():
@ -193,9 +204,20 @@ def service_name():
return local_unit().split('/')[0]
@cached
def remote_service_name(relid=None):
"""The remote service name for a given relation-id (or the current relation)"""
if relid is None:
unit = remote_unit()
else:
units = related_units(relid)
unit = units[0] if units else None
return unit.split('/')[0] if unit else None
def hook_name():
"""The name of the currently executing hook"""
return os.path.basename(sys.argv[0])
return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
class Config(dict):
@ -468,6 +490,76 @@ def relation_types():
return rel_types
@cached
def peer_relation_id():
'''Get the peers relation id if a peers relation has been joined, else None.'''
md = metadata()
section = md.get('peers')
if section:
for key in section:
relids = relation_ids(key)
if relids:
return relids[0]
return None
@cached
def relation_to_interface(relation_name):
"""
Given the name of a relation, return the interface that relation uses.
:returns: The interface name, or ``None``.
"""
return relation_to_role_and_interface(relation_name)[1]
@cached
def relation_to_role_and_interface(relation_name):
"""
Given the name of a relation, return the role and the name of the interface
that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
"""
_metadata = metadata()
for role in ('provides', 'requires', 'peers'):
interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
if interface:
return role, interface
return None, None
@cached
def role_and_interface_to_relations(role, interface_name):
"""
Given a role and interface name, return a list of relation names for the
current charm that use that interface under that role (where role is one
of ``provides``, ``requires``, or ``peers``).
:returns: A list of relation names.
"""
_metadata = metadata()
results = []
for relation_name, relation in _metadata.get(role, {}).items():
if relation['interface'] == interface_name:
results.append(relation_name)
return results
@cached
def interface_to_relations(interface_name):
"""
Given an interface, return a list of relation names for the current
charm that use that interface.
:returns: A list of relation names.
"""
results = []
for role in ('provides', 'requires', 'peers'):
results.extend(role_and_interface_to_relations(role, interface_name))
return results
@cached
def charm_name():
"""Get the name of the current charm as is specified on metadata.yaml"""
@ -544,6 +636,38 @@ def unit_private_ip():
return unit_get('private-address')
@cached
def storage_get(attribute=None, storage_id=None):
"""Get storage attributes"""
_args = ['storage-get', '--format=json']
if storage_id:
_args.extend(('-s', storage_id))
if attribute:
_args.append(attribute)
try:
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
@cached
def storage_list(storage_name=None):
"""List the storage IDs for the unit"""
_args = ['storage-list', '--format=json']
if storage_name:
_args.append(storage_name)
try:
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
except OSError as e:
import errno
if e.errno == errno.ENOENT:
# storage-list does not exist
return []
raise
class UnregisteredHookError(Exception):
"""Raised when an undefined hook is called"""
pass
@ -644,6 +768,21 @@ def action_fail(message):
subprocess.check_call(['action-fail', message])
def action_name():
"""Get the name of the currently executing action."""
return os.environ.get('JUJU_ACTION_NAME')
def action_uuid():
"""Get the UUID of the currently executing action."""
return os.environ.get('JUJU_ACTION_UUID')
def action_tag():
"""Get the tag for the currently executing action."""
return os.environ.get('JUJU_ACTION_TAG')
def status_set(workload_state, message):
"""Set the workload state with a message
@ -673,25 +812,28 @@ def status_set(workload_state, message):
def status_get():
"""Retrieve the previously set juju workload state
"""Retrieve the previously set juju workload state and message
If the status-get command is not found then assume this is juju < 1.23 and
return 'unknown', ""
If the status-set command is not found then assume this is juju < 1.23 and
return 'unknown'
"""
cmd = ['status-get']
cmd = ['status-get', "--format=json", "--include-data"]
try:
raw_status = subprocess.check_output(cmd, universal_newlines=True)
status = raw_status.rstrip()
return status
raw_status = subprocess.check_output(cmd)
except OSError as e:
if e.errno == errno.ENOENT:
return 'unknown'
return ('unknown', "")
else:
raise
else:
status = json.loads(raw_status.decode("UTF-8"))
return (status["status"], status["message"])
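# Example of the returned tuple (values illustrative):
#
#     state, message = status_get()  # e.g. ('active', 'Unit is ready')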
def translate_exc(from_exc, to_exc):
def inner_translate_exc1(f):
@wraps(f)
def inner_translate_exc2(*args, **kwargs):
try:
return f(*args, **kwargs)
@ -736,6 +878,58 @@ def leader_set(settings=None, **kwargs):
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_register(ptype, klass, pid):
""" is used while a hook is running to let Juju know that a
payload has been started."""
cmd = ['payload-register']
for x in [ptype, klass, pid]:
cmd.append(x)
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_unregister(klass, pid):
""" is used while a hook is running to let Juju know
that a payload has been manually stopped. The <class> and <id> provided
must match a payload that has been previously registered with juju using
payload-register."""
cmd = ['payload-unregister']
for x in [klass, pid]:
cmd.append(x)
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_status_set(klass, pid, status):
"""is used to update the current status of a registered payload.
The <class> and <id> provided must match a payload that has been previously
registered with juju using payload-register. The <status> must be one of the
follow: starting, started, stopping, stopped"""
cmd = ['payload-status-set']
for x in [klass, pid, status]:
cmd.append(x)
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def resource_get(name):
"""used to fetch the resource path of the given name.
<name> must match a name of defined resource in metadata.yaml
returns either a path or False if resource not available
"""
if not name:
return False
cmd = ['resource-get', name]
try:
return subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError:
return False
@cached
def juju_version():
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
@ -800,3 +994,16 @@ def _run_atexit():
for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs)
del _atexit[:]
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation or extra-binding
:return: string. The primary IP address for the named binding
:raise: NotImplementedError if run on Juju < 2.0
'''
cmd = ['network-get', '--primary-address', binding]
return subprocess.check_output(cmd).strip()
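# Usage sketch (binding name hypothetical; requires Juju >= 2.0):
#
#     addr = network_get_primary_address('internal')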

View File

@ -30,6 +30,8 @@ import random
import string
import subprocess
import hashlib
import functools
import itertools
from contextlib import contextmanager
from collections import OrderedDict
@ -63,54 +65,96 @@ def service_reload(service_name, restart_on_failure=False):
return service_result
def service_pause(service_name, init_dir=None):
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
"""Pause a system service.
Stop it, and prevent it from starting again at boot."""
if init_dir is None:
init_dir = "/etc/init"
stopped = service_stop(service_name)
# XXX: Support systemd too
override_path = os.path.join(
init_dir, '{}.conf.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
stopped = True
if service_running(service_name):
stopped = service_stop(service_name)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('disable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped
def service_resume(service_name, init_dir=None):
def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d"):
"""Resume a system service.
Reenable starting again at boot. Start the service"""
# XXX: Support systemd too
if init_dir is None:
init_dir = "/etc/init"
override_path = os.path.join(
init_dir, '{}.conf.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
started = service_start(service_name)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('enable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_running(service_name)
if not started:
started = service_start(service_name)
return started
def service(action, service_name):
"""Control a system service"""
cmd = ['service', service_name, action]
if init_is_systemd():
cmd = ['systemctl', action, service_name]
else:
cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0
def service_running(service):
def systemv_services_running():
output = subprocess.check_output(
['service', '--status-all'],
stderr=subprocess.STDOUT).decode('UTF-8')
return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
def service_running(service_name):
"""Determine whether a system service is running"""
try:
output = subprocess.check_output(
['service', service, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
if init_is_systemd():
return service('is-active', service_name)
else:
if ("start/running" in output or "is running" in output):
return True
try:
output = subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running 'start/running'
if ("start/running" in output or "is running" in output or
"up and running" in output):
return True
# Check System V scripts init script return codes
if service_name in systemv_services_running():
return True
return False
@ -126,8 +170,29 @@ def service_available(service_name):
return True
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user to the system"""
SYSTEMD_SYSTEM = '/run/systemd/system'
def init_is_systemd():
"""Return True if the host system uses systemd, False otherwise."""
return os.path.isdir(SYSTEMD_SYSTEM)
def adduser(username, password=None, shell='/bin/bash', system_user=False,
primary_group=None, secondary_groups=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
:param str username: Username to create
:param str password: Password for user; if ``None``, create a system user
:param str shell: The default shell for the user
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
@ -142,12 +207,32 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
'--shell', shell,
'--password', password,
])
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username # avoid "group exists" error
except KeyError:
pass
if primary_group:
cmd.extend(['-g', primary_group])
if secondary_groups:
cmd.extend(['-G', ','.join(secondary_groups)])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
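# Usage sketch (hypothetical names): create a system user 'ceph' with
# primary group 'ceph' and secondary membership of 'disk'.
#
#     adduser('ceph', system_user=True, primary_group='ceph',
#             secondary_groups=['disk'])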
def user_exists(username):
"""Check if a user exists"""
try:
pwd.getpwnam(username)
user_exists = True
except KeyError:
user_exists = False
return user_exists
def add_group(group_name, system_group=False):
"""Add a group to the system"""
try:
@ -229,14 +314,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
def fstab_remove(mp):
"""Remove the given mountpoint entry from /etc/fstab
"""
"""Remove the given mountpoint entry from /etc/fstab"""
return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
"""Adds the given device entry to the /etc/fstab file
"""
"""Adds the given device entry to the /etc/fstab file"""
return Fstab.add(dev, mp, fs, options=options)
@ -280,9 +363,19 @@ def mounts():
return system_mounts
def fstab_mount(mountpoint):
"""Mount filesystem using fstab"""
cmd_args = ['mount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error mounting {}\n{}'.format(mountpoint, e.output))
return False
return True
def file_hash(path, hash_type='md5'):
"""
Generate a hash checksum of the contents of 'path' or None if not found.
"""Generate a hash checksum of the contents of 'path' or None if not found.
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
@ -297,10 +390,9 @@ def file_hash(path, hash_type='md5'):
def path_hash(path):
"""
Generate a hash checksum of all files matching 'path'. Standard wildcards
like '*' and '?' are supported, see documentation for the 'glob' module for
more information.
"""Generate a hash checksum of all files matching 'path'. Standard
wildcards like '*' and '?' are supported, see documentation for the 'glob'
module for more information.
:return: dict: A { filename: hash } dictionary for all matched files.
Empty if none found.
@ -312,8 +404,7 @@ def path_hash(path):
def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.
"""Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
@ -328,10 +419,11 @@ def check_hash(path, checksum, hash_type='md5'):
class ChecksumError(ValueError):
"""A class derived from Value error to indicate the checksum failed."""
pass
def restart_on_change(restart_map, stopstart=False):
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
"""Restart services based on configuration files changing
This function is used a decorator, for example::
@ -349,27 +441,58 @@ def restart_on_change(restart_map, stopstart=False):
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
@param restart_map: {path_file_name: [service_name, ...]
@param stopstart: DEFAULT false; whether to stop, start OR restart
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result from decorated function
"""
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
checksums = {path: path_hash(path) for path in restart_map}
f(*args, **kwargs)
restarts = []
for path in restart_map:
if path_hash(path) != checksums[path]:
restarts += restart_map[path]
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
restart_functions=None):
"""Helper function to perform the restart_on_change function.
This is provided for decorators to restart services if files described
in the restart_map have changed after an invocation of lambda_f().
@param lambda_f: function to call.
@param restart_map: {file: [service, ...]}
@param stopstart: whether to stop, start or restart a service
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result of lambda_f()
"""
if restart_functions is None:
restart_functions = {}
checksums = {path: path_hash(path) for path in restart_map}
r = lambda_f()
# create a list of lists of the services to restart
restarts = [restart_map[path]
for path in restart_map
if path_hash(path) != checksums[path]]
# create a flat list of ordered services without duplicates from lists
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
if services_list:
actions = ('stop', 'start') if stopstart else ('restart',)
for service_name in services_list:
if service_name in restart_functions:
restart_functions[service_name](service_name)
else:
for action in actions:
service(action, service_name)
return r
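# Usage sketch (service names, paths and my_apache_restart are hypothetical):
# restart apache2 via a custom function and haproxy via the init system
# whenever their watched files change.
#
#     @restart_on_change({'/etc/apache2/sites-enabled/*': ['apache2'],
#                         '/etc/haproxy/haproxy.cfg': ['haproxy']},
#                        restart_functions={'apache2': my_apache_restart})
#     def config_changed():
#         ...  # render the config files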
def lsb_release():
"""Return /etc/lsb-release in a dict"""
d = {}
@ -396,36 +519,92 @@ def pwgen(length=None):
return(''.join(random_chars))
def list_nics(nic_type):
'''Return a list of nics of given type(s)'''
def is_phy_iface(interface):
"""Returns True if interface is not virtual, otherwise False."""
if interface:
sys_net = '/sys/class/net'
if os.path.isdir(sys_net):
for iface in glob.glob(os.path.join(sys_net, '*')):
if '/virtual/' in os.path.realpath(iface):
continue
if interface == os.path.basename(iface):
return True
return False
def get_bond_master(interface):
"""Returns bond master if interface is bond slave otherwise None.
NOTE: the provided interface is expected to be physical
"""
if interface:
iface_path = '/sys/class/net/%s' % (interface)
if os.path.exists(iface_path):
if '/virtual/' in os.path.realpath(iface_path):
return None
master = os.path.join(iface_path, 'master')
if os.path.exists(master):
master = os.path.realpath(master)
# make sure it is a bond master
if os.path.exists(os.path.join(master, 'bonding')):
return os.path.basename(master)
return None
def list_nics(nic_type=None):
"""Return a list of nics of given type(s)"""
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
if nic_type:
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).decode('UTF-8')
ip_output = ip_output.split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
matched = re.search('.*: (' + int_type +
r'[0-9]+\.[0-9]+)@.*', line)
if matched:
iface = matched.groups()[0]
else:
iface = line.split()[1].replace(":", "")
if iface not in interfaces:
interfaces.append(iface)
else:
cmd = ['ip', 'a']
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line for line in ip_output if line)
ip_output = (line.strip() for line in ip_output if line)
key = re.compile(r'^[0-9]+:\s+(.+):')
for line in ip_output:
if line.split()[1].startswith(int_type):
matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
if matched:
interface = matched.groups()[0]
else:
interface = line.split()[1].replace(":", "")
interfaces.append(interface)
matched = re.search(key, line)
if matched:
iface = matched.group(1)
iface = iface.partition("@")[0]
if iface not in interfaces:
interfaces.append(iface)
return interfaces
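# Usage sketch: list every interface, or only those whose labels match the
# given prefixes.
#
#     all_nics = list_nics()
#     eth_nics = list_nics('eth')
#     eth_or_bond = list_nics(['eth', 'bond'])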
def set_nic_mtu(nic, mtu):
'''Set MTU on a network interface'''
"""Set the Maximum Transmission Unit (MTU) on a network interface."""
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
"""Return the Maximum Transmission Unit (MTU) for a network interface."""
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
@ -437,6 +616,7 @@ def get_nic_mtu(nic):
def get_nic_hwaddr(nic):
"""Return the Media Access Control (MAC) for a network interface."""
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
@ -447,7 +627,7 @@ def get_nic_hwaddr(nic):
def cmp_pkgrevno(package, revno, pkgcache=None):
'''Compare supplied revno with the revno of the installed package
"""Compare supplied revno with the revno of the installed package
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
@ -456,7 +636,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
This function imports apt_cache function from charmhelpers.fetch if
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
'''
"""
import apt_pkg
if not pkgcache:
from charmhelpers.fetch import apt_cache
@ -466,15 +646,30 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
@contextmanager
def chdir(d):
def chdir(directory):
"""Change the current working directory to a different directory for a code
block and return the previous directory after the block exits. Useful to
run commands from a specified directory.
:param str directory: The directory path to change to for this context.
"""
cur = os.getcwd()
try:
yield os.chdir(d)
yield os.chdir(directory)
finally:
os.chdir(cur)
def chownr(path, owner, group, follow_links=True):
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
"""Recursively change user and group ownership of files and directories
in given path. Doesn't chown path itself by default, only its children.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
:param bool follow_links: Also chown links if True
:param bool chowntopdir: Also chown path itself if True
"""
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if follow_links:
@ -482,6 +677,10 @@ def chownr(path, owner, group, follow_links=True):
else:
chown = os.lchown
if chowntopdir:
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
if not broken_symlink:
chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for name in dirs + files:
full = os.path.join(root, name)
@ -491,4 +690,28 @@ def chownr(path, owner, group, follow_links=True):
def lchownr(path, owner, group):
"""Recursively change user and group ownership of files and directories
in a given path, not following symbolic links. See the documentation for
'os.lchown' for more information.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
"""
chownr(path, owner, group, follow_links=False)
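# Usage sketch (hypothetical path and ownership): recursively hand a data
# directory to ceph:ceph, including the top-level directory itself.
#
#     chownr('/var/lib/ceph', 'ceph', 'ceph', chowntopdir=True)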
def get_total_ram():
"""The total amount of system RAM in bytes.
This is what is reported by the OS, and may be overcommitted when
there are multiple containers hosted on the same machine.
"""
with open('/proc/meminfo', 'r') as f:
for line in f.readlines():
if line:
key, value, unit = line.split()
if key == 'MemTotal:':
assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024  # /proc/meminfo's "kB" is really KiB (1024 bytes).
raise NotImplementedError()
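# Illustrative only: the return value is plain bytes, so scale it for
# display.
#
#     from charmhelpers.core.host import get_total_ram
#
#     gib = get_total_ram() / (1024.0 ** 3)
#     print('Total RAM: %.1f GiB' % gib)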

View File

@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import yaml
from charmhelpers.core import fstab
from charmhelpers.core import sysctl
from charmhelpers.core.host import (
add_group,
add_user_to_group,
fstab_mount,
mkdir,
)
from charmhelpers.core.strutils import bytes_from_string
from subprocess import check_output
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
max_map_count=65536, mnt_point='/run/hugepages/kvm',
pagesize='2MB', mount=True, set_shmmax=False):
"""Enable hugepages on system.
Args:
user (str) -- Username to allow access to hugepages to
group (str) -- Group name to own hugepages
nr_hugepages (int) -- Number of pages to reserve
max_map_count (int) -- Number of Virtual Memory Areas a process can own
mnt_point (str) -- Directory to mount hugepages on
pagesize (str) -- Size of hugepages
    mount (bool) -- Whether to mount hugepages
    set_shmmax (bool) -- Whether to raise kernel.shmmax, if needed, to
        cover the hugepage reservation
    """
group_info = add_group(group)
gid = group_info.gr_gid
add_user_to_group(user, group)
if max_map_count < 2 * nr_hugepages:
max_map_count = 2 * nr_hugepages
sysctl_settings = {
'vm.nr_hugepages': nr_hugepages,
'vm.max_map_count': max_map_count,
'vm.hugetlb_shm_group': gid,
}
if set_shmmax:
shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
if shmmax_minsize > shmmax_current:
sysctl_settings['kernel.shmmax'] = shmmax_minsize
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
lfstab = fstab.Fstab()
fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
if fstab_entry:
lfstab.remove_entry(fstab_entry)
entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
lfstab.add_entry(entry)
if mount:
fstab_mount(mnt_point)
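# A hedged example call (the 'nova' user is an assumption, as found on a
# typical compute node): reserve 1024 2MB pages, mount them, and raise
# kernel.shmmax to cover the reservation if required.
#
#     from charmhelpers.core.hugepage import hugepage_support
#
#     hugepage_support('nova', group='hugetlb', nr_hugepages=1024,
#                      mount=True, set_shmmax=True)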

View File

@ -0,0 +1,68 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
from charmhelpers.core.hookenv import (
log,
INFO
)
from subprocess import check_call, check_output
import re
def modprobe(module, persist=True):
"""Load a kernel module and configure for auto-load on reboot."""
cmd = ['modprobe', module]
log('Loading kernel module %s' % module, level=INFO)
check_call(cmd)
    if persist:
        with open('/etc/modules', 'r+') as modules:
            if module not in modules.read():
                modules.write(module + "\n")  # /etc/modules entries are newline-separated
def rmmod(module, force=False):
"""Remove a module from the linux kernel"""
cmd = ['rmmod']
if force:
cmd.append('-f')
cmd.append(module)
log('Removing kernel module %s' % module, level=INFO)
return check_call(cmd)
def lsmod():
"""Shows what kernel modules are currently loaded"""
return check_output(['lsmod'],
universal_newlines=True)
def is_module_loaded(module):
"""Checks if a kernel module is already loaded"""
matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
return len(matches) > 0
def update_initramfs(version='all'):
"""Updates an initramfs image"""
return check_call(["update-initramfs", "-k", version, "-u"])
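# Sketch of typical use of the helpers above ('openvswitch' is only an
# illustrative module name): load a module when it is not already resident
# and persist it across reboots.
#
#     from charmhelpers.core.kernel import is_module_loaded, modprobe
#
#     if not is_module_loaded('openvswitch'):
#         modprobe('openvswitch', persist=True)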

View File

@ -16,7 +16,9 @@
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import templating
from charmhelpers.core.services.base import ManagerCallback
@ -240,27 +242,50 @@ class TemplateCallback(ManagerCallback):
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to (or None)
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
:param partial on_change_action: functools partial to be executed when
rendered file changes
    :param jinja2.BaseLoader template_loader: A jinja2 template loader
:return str: The rendered template
"""
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444,
                 on_change_action=None, template_loader=None):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
self.on_change_action = on_change_action
self.template_loader = template_loader
def __call__(self, manager, service_name, event_name):
pre_checksum = ''
if self.on_change_action and os.path.isfile(self.target):
pre_checksum = host.file_hash(self.target)
service = manager.get_service(service_name)
        context = {'ctx': {}}
for ctx in service.get('required_data', []):
            context['ctx'].update(ctx)
        result = templating.render(self.source, self.target, context,
                                   self.owner, self.group, self.perms,
                                   template_loader=self.template_loader)
if self.on_change_action:
if pre_checksum == host.file_hash(self.target):
hookenv.log(
'No change detected: {}'.format(self.target),
hookenv.DEBUG)
else:
self.on_change_action()
return result
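# Hedged sketch of wiring this callback into a service definition; the
# service name, template, and permissions are placeholders.
#
#     from functools import partial
#     from charmhelpers.core import host
#
#     callback = TemplateCallback(
#         source='myservice.conf.j2',   # resolved under $CHARM_DIR/templates
#         target='/etc/myservice.conf',
#         perms=0o440,
#         on_change_action=partial(host.service_restart, 'myservice'))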
# Convenience aliases for templates

View File

@ -18,6 +18,7 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
import re
def bool_from_string(value):
@ -40,3 +41,32 @@ def bool_from_string(value):
msg = "Unable to interpret string value '%s' as boolean" % (value)
raise ValueError(msg)
def bytes_from_string(value):
"""Interpret human readable string value as bytes.
Returns int
"""
BYTE_POWER = {
'K': 1,
'KB': 1,
'M': 2,
'MB': 2,
'G': 3,
'GB': 3,
'T': 4,
'TB': 4,
'P': 5,
'PB': 5,
}
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
if not matches:
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
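# A few worked examples; every multiplier is binary (1024-based), so 'K'
# and 'KB' are equivalent, as are 'M'/'MB' and so on.
#
#     assert bytes_from_string('512K') == 512 * 1024
#     assert bytes_from_string('2MB') == 2 * 1024 ** 2
#     assert bytes_from_string('1G') == 1024 ** 3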

View File

@ -21,13 +21,14 @@ from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
    The `target` path should be absolute. It can also be `None`, in which
    case no file will be written.
The context should be a dict containing the values to be replaced in the
template.
@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root',
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
The rendered template will be written to the file as well as being returned
as a string.
Note: Using this requires python-jinja2; if it is not installed, calling
this will attempt to use charmhelpers.fetch.apt_install to install it.
"""
@ -52,17 +56,26 @@ def render(source, target, context, owner='root', group='root',
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))
try:
        template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group, perms)
return content
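# Hypothetical call exercising both new paths at once: target=None skips the
# write to disk, and template_loader bypasses the charm's templates
# directory (the template text here is made up).
#
#     from jinja2 import DictLoader
#
#     content = render(
#         'motd.j2', None, {'name': 'demo'},
#         template_loader=DictLoader({'motd.j2': 'Hello {{ name }}!'}))
#     assert content == 'Hello demo!'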

View File

@ -152,6 +152,7 @@ associated to the hookname.
import collections
import contextlib
import datetime
import itertools
import json
import os
import pprint
@ -164,8 +165,7 @@ __author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
class Storage(object):
"""Simple key value database for local unit state within charms.
    Modifications are not persisted unless :meth:`flush` is called.
    To support dicts, lists, integers, floats, and booleans, values are
    automatically json encoded/decoded.
@ -173,8 +173,11 @@ class Storage(object):
def __init__(self, path=None):
self.db_path = path
if path is None:
            if 'UNIT_STATE_DB' in os.environ:
                self.db_path = os.environ['UNIT_STATE_DB']
            else:
                self.db_path = os.path.join(
                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
@ -189,15 +192,8 @@ class Storage(object):
self.conn.close()
self._closed = True
def get(self, key, default=None, record=False):
self.cursor.execute('select data from kv where key=?', [key])
result = self.cursor.fetchone()
if not result:
return default
@ -206,33 +202,81 @@ class Storage(object):
return json.loads(result[0])
def getrange(self, key_prefix, strip=False):
stmt = "select key, data from kv where key like '%s%%'" % key_prefix
self.cursor.execute(*self._scoped_query(stmt))
"""
Get a range of keys starting with a common prefix as a mapping of
keys to values.
:param str key_prefix: Common prefix among all keys
:param bool strip: Optionally strip the common prefix from the key
names in the returned dict
:return dict: A (possibly empty) dict of key-value mappings
"""
self.cursor.execute("select key, data from kv where key like ?",
['%s%%' % key_prefix])
result = self.cursor.fetchall()
if not result:
            return {}
if not strip:
key_prefix = ''
return dict([
(k[len(key_prefix):], json.loads(v)) for k, v in result])
def update(self, mapping, prefix=""):
"""
Set the values of multiple keys at once.
:param dict mapping: Mapping of keys to values
:param str prefix: Optional prefix to apply to all keys in `mapping`
before setting
"""
for k, v in mapping.items():
self.set("%s%s" % (prefix, k), v)
def unset(self, key):
"""
Remove a key from the database entirely.
"""
self.cursor.execute('delete from kv where key=?', [key])
if self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values (?, ?, ?)',
[key, self.revision, json.dumps('DELETED')])
def unsetrange(self, keys=None, prefix=""):
"""
        Remove a range of keys starting with a common prefix from the
        database entirely.
:param list keys: List of keys to remove.
:param str prefix: Optional prefix to apply to all keys in ``keys``
before removing.
"""
if keys is not None:
keys = ['%s%s' % (prefix, key) for key in keys]
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
if self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
else:
self.cursor.execute('delete from kv where key like ?',
['%s%%' % prefix])
if self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values (?, ?, ?)',
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
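# Usage sketch for the key/value API above (':memory:' is sqlite's
# throwaway database, handy for tests); nothing is persisted until flush()
# commits it.
#
#     db = Storage(':memory:')
#     db.update({'host': 'pg1', 'port': 5432}, prefix='db.')
#     assert db.getrange('db.', strip=True) == {'host': 'pg1', 'port': 5432}
#     db.unsetrange(prefix='db.')
#     assert db.getrange('db.') == {}
#     db.flush()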
def set(self, key, value):
"""
Set a value in the database.
:param str key: Key to set the value for
:param value: Any JSON-serializable value to be set
"""
serialized = json.dumps(value)
        self.cursor.execute('select data from kv where key=?', [key])
exists = self.cursor.fetchone()
# Skip mutations to the same value

View File

@ -90,6 +90,22 @@ CLOUD_ARCHIVE_POCKETS = {
'kilo/proposed': 'trusty-proposed/kilo',
'trusty-kilo/proposed': 'trusty-proposed/kilo',
'trusty-proposed/kilo': 'trusty-proposed/kilo',
# Liberty
'liberty': 'trusty-updates/liberty',
'trusty-liberty': 'trusty-updates/liberty',
'trusty-liberty/updates': 'trusty-updates/liberty',
'trusty-updates/liberty': 'trusty-updates/liberty',
'liberty/proposed': 'trusty-proposed/liberty',
'trusty-liberty/proposed': 'trusty-proposed/liberty',
'trusty-proposed/liberty': 'trusty-proposed/liberty',
# Mitaka
'mitaka': 'trusty-updates/mitaka',
'trusty-mitaka': 'trusty-updates/mitaka',
'trusty-mitaka/updates': 'trusty-updates/mitaka',
'trusty-updates/mitaka': 'trusty-updates/mitaka',
'mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
}
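# With the mappings above, any of these spellings selects the same
# cloud-archive pocket; e.g. an illustrative call:
#
#     add_source('cloud:trusty-mitaka')   # enables trusty-updates/mitaka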
# The order of this list is very important. Handlers should be listed in from
@ -217,12 +233,12 @@ def apt_purge(packages, fatal=False):
def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark"""
log("Marking {} as {}".format(packages, mark))
cmd = ['apt-mark', mark]
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd, universal_newlines=True)
@ -403,7 +419,7 @@ def plugins(fetch_handlers=None):
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
        except NotImplementedError:
            # Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(

View File

@ -108,7 +108,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
install_opener(opener)
response = urlopen(source)
try:
        with open(dest, 'wb') as dest_file:
dest_file.write(response.read())
except Exception as e:
if os.path.isfile(dest):

View File

@ -15,60 +15,50 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from subprocess import check_call
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
apt_install,
)
from charmhelpers.core.host import mkdir
if filter_installed_packages(['bzr']) != []:
apt_install(['bzr'])
if filter_installed_packages(['bzr']) != []:
raise NotImplementedError('Unable to install bzr')
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.bzr'))
else:
return True
def branch(self, source, dest):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if url_parts.scheme == "lp":
from bzrlib.plugin import load_plugins
load_plugins()
try:
local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
except errors.AlreadyControlDirError:
local_branch = Branch.open(dest)
try:
remote_branch = Branch.open(source)
remote_branch.push(local_branch)
tree = workingtree.WorkingTree.open(dest)
tree.update()
except Exception as e:
raise e
if os.path.exists(dest):
check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
else:
check_call(['bzr', 'branch', source, dest])
    def install(self, source, dest=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:

View File

@ -15,24 +15,18 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from subprocess import check_call, CalledProcessError
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
apt_install,
)
if filter_installed_packages(['git']) != []:
apt_install(['git'])
if filter_installed_packages(['git']) != []:
raise NotImplementedError('Unable to install git')
class GitUrlFetchHandler(BaseFetchHandler):
@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler):
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.git'))
else:
return True
    def clone(self, source, dest, branch="master", depth=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
        if os.path.exists(dest):
            cmd = ['git', '-C', dest, 'pull', source, branch]
        else:
            cmd = ['git', 'clone', source, dest, '--branch', branch]
            if depth:
                cmd.extend(['--depth', depth])
        check_call(cmd)
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
@ -62,11 +61,9 @@ class GitUrlFetchHandler(BaseFetchHandler):
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
try:
self.clone(source, dest_dir, branch, depth)
        except CalledProcessError as e:
raise UnhandledSource(e)
except OSError as e:
raise UnhandledSource(e.strerror)
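# Hedged end-to-end sketch (the repository URL and destination are
# placeholders): fetch a branch into a named checkout directory.
#
#     handler = GitUrlFetchHandler()
#     if handler.can_handle('https://github.com/example/repo'):
#         checkout = handler.install('https://github.com/example/repo',
#                                    branch='master', dest='/tmp/src')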

View File

@ -156,8 +156,9 @@ def restart_pg():
raise ValueError("plumgrid service couldn't be started")
else:
if service_start('libvirt-bin'):
time.sleep(8)
if not service_running('plumgrid') \
and not service_start('plumgrid'):
raise ValueError("plumgrid service couldn't be started")
else:
raise ValueError("libvirt-bin service couldn't be started")

View File

@ -29,7 +29,8 @@ TO_PATCH = [
'add_lcm_key',
'determine_packages',
'load_iptables',
'director_cluster_ready',
'status_set'
]
NEUTRON_CONF_DIR = "/etc/neutron"