Merged: Liberty/Mitaka changes

Commit eaf245e9b5

Makefile | 2
@@ -4,7 +4,7 @@ PYTHON := /usr/bin/env python
 virtualenv:
 	virtualenv .venv
 	.venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \
-		netaddr jinja2
+		netaddr jinja2 pyflakes pep8 six pbr funcsigs psutil
 
 lint: virtualenv
 	.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+# Authors:
+#   Adam Gandelman <adamg@ubuntu.com>
+
+import logging
+import optparse
+import os
+import subprocess
+import shutil
+import sys
+import tempfile
+import yaml
+from fnmatch import fnmatch
+
+import six
+
+CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
+
+
+def parse_config(conf_file):
+    if not os.path.isfile(conf_file):
+        logging.error('Invalid config file: %s.' % conf_file)
+        return False
+    return yaml.load(open(conf_file).read())
+
+
+def clone_helpers(work_dir, branch):
+    dest = os.path.join(work_dir, 'charm-helpers')
+    logging.info('Checking out %s to %s.' % (branch, dest))
+    cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
+    subprocess.check_call(cmd)
+    return dest
+
+
+def _module_path(module):
+    return os.path.join(*module.split('.'))
+
+
+def _src_path(src, module):
+    return os.path.join(src, 'charmhelpers', _module_path(module))
+
+
+def _dest_path(dest, module):
+    return os.path.join(dest, _module_path(module))
+
+
+def _is_pyfile(path):
+    return os.path.isfile(path + '.py')
+
+
+def ensure_init(path):
+    '''
+    ensure directories leading up to path are importable, omitting
+    parent directory, eg path='/hooks/helpers/foo'/:
+        hooks/
+        hooks/helpers/__init__.py
+        hooks/helpers/foo/__init__.py
+    '''
+    for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
+        _i = os.path.join(d, '__init__.py')
+        if not os.path.exists(_i):
+            logging.info('Adding missing __init__.py: %s' % _i)
+            open(_i, 'wb').close()
+
+
+def sync_pyfile(src, dest):
+    src = src + '.py'
+    src_dir = os.path.dirname(src)
+    logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
+    if not os.path.exists(dest):
+        os.makedirs(dest)
+    shutil.copy(src, dest)
+    if os.path.isfile(os.path.join(src_dir, '__init__.py')):
+        shutil.copy(os.path.join(src_dir, '__init__.py'),
+                    dest)
+    ensure_init(dest)
+
+
+def get_filter(opts=None):
+    opts = opts or []
+    if 'inc=*' in opts:
+        # do not filter any files, include everything
+        return None
+
+    def _filter(dir, ls):
+        incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
+        _filter = []
+        for f in ls:
+            _f = os.path.join(dir, f)
+
+            if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
+                if True not in [fnmatch(_f, inc) for inc in incs]:
+                    logging.debug('Not syncing %s, does not match include '
+                                  'filters (%s)' % (_f, incs))
+                    _filter.append(f)
+                else:
+                    logging.debug('Including file, which matches include '
+                                  'filters (%s): %s' % (incs, _f))
+            elif (os.path.isfile(_f) and not _f.endswith('.py')):
+                logging.debug('Not syncing file: %s' % f)
+                _filter.append(f)
+            elif (os.path.isdir(_f) and not
+                  os.path.isfile(os.path.join(_f, '__init__.py'))):
+                logging.debug('Not syncing directory: %s' % f)
+                _filter.append(f)
+        return _filter
+    return _filter
+
+
+def sync_directory(src, dest, opts=None):
+    if os.path.exists(dest):
+        logging.debug('Removing existing directory: %s' % dest)
+        shutil.rmtree(dest)
+    logging.info('Syncing directory: %s -> %s.' % (src, dest))
+
+    shutil.copytree(src, dest, ignore=get_filter(opts))
+    ensure_init(dest)
+
+
+def sync(src, dest, module, opts=None):
+
+    # Sync charmhelpers/__init__.py for bootstrap code.
+    sync_pyfile(_src_path(src, '__init__'), dest)
+
+    # Sync other __init__.py files in the path leading to module.
+    m = []
+    steps = module.split('.')[:-1]
+    while steps:
+        m.append(steps.pop(0))
+        init = '.'.join(m + ['__init__'])
+        sync_pyfile(_src_path(src, init),
+                    os.path.dirname(_dest_path(dest, init)))
+
+    # Sync the module, or maybe a .py file.
+    if os.path.isdir(_src_path(src, module)):
+        sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
+    elif _is_pyfile(_src_path(src, module)):
+        sync_pyfile(_src_path(src, module),
+                    os.path.dirname(_dest_path(dest, module)))
+    else:
+        logging.warn('Could not sync: %s. Neither a pyfile or directory, '
+                     'does it even exist?' % module)
+
+
+def parse_sync_options(options):
+    if not options:
+        return []
+    return options.split(',')
+
+
+def extract_options(inc, global_options=None):
+    global_options = global_options or []
+    if global_options and isinstance(global_options, six.string_types):
+        global_options = [global_options]
+    if '|' not in inc:
+        return (inc, global_options)
+    inc, opts = inc.split('|')
+    return (inc, parse_sync_options(opts) + global_options)
+
+
+def sync_helpers(include, src, dest, options=None):
+    if not os.path.isdir(dest):
+        os.makedirs(dest)
+
+    global_options = parse_sync_options(options)
+
+    for inc in include:
+        if isinstance(inc, str):
+            inc, opts = extract_options(inc, global_options)
+            sync(src, dest, inc, opts)
+        elif isinstance(inc, dict):
+            # could also do nested dicts here.
+            for k, v in six.iteritems(inc):
+                if isinstance(v, list):
+                    for m in v:
+                        inc, opts = extract_options(m, global_options)
+                        sync(src, dest, '%s.%s' % (k, inc), opts)
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser()
+    parser.add_option('-c', '--config', action='store', dest='config',
+                      default=None, help='helper config file')
+    parser.add_option('-D', '--debug', action='store_true', dest='debug',
+                      default=False, help='debug')
+    parser.add_option('-b', '--branch', action='store', dest='branch',
+                      help='charm-helpers bzr branch (overrides config)')
+    parser.add_option('-d', '--destination', action='store', dest='dest_dir',
+                      help='sync destination dir (overrides config)')
+    (opts, args) = parser.parse_args()
+
+    if opts.debug:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.INFO)
+
+    if opts.config:
+        logging.info('Loading charm helper config from %s.' % opts.config)
+        config = parse_config(opts.config)
+        if not config:
+            logging.error('Could not parse config from %s.' % opts.config)
+            sys.exit(1)
+    else:
+        config = {}
+
+    if 'branch' not in config:
+        config['branch'] = CHARM_HELPERS_BRANCH
+    if opts.branch:
+        config['branch'] = opts.branch
+    if opts.dest_dir:
+        config['destination'] = opts.dest_dir
+
+    if 'destination' not in config:
+        logging.error('No destination dir. specified as option or config.')
+        sys.exit(1)
+
+    if 'include' not in config:
+        if not args:
+            logging.error('No modules to sync specified as option or config.')
+            sys.exit(1)
+        config['include'] = []
+        [config['include'].append(a) for a in args]
+
+    sync_options = None
+    if 'options' in config:
+        sync_options = config['options']
+    tmpd = tempfile.mkdtemp()
+    try:
+        checkout = clone_helpers(tmpd, config['branch'])
+        sync_helpers(config['include'], checkout, config['destination'],
+                     options=sync_options)
+    except Exception as e:
+        logging.error("Could not sync: %s" % e)
+        raise e
+    finally:
+        logging.debug('Cleaning up %s' % tmpd)
+        shutil.rmtree(tmpd)
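Note: a minimal sketch of how the sync script above interprets include entries; the module names, paths, and the filter pattern here are illustrative only, not taken from this commit.

    # extract_options() splits an include entry on '|' into (module, options):
    #   extract_options('contrib.openstack|inc=templates/*', [])
    #   -> ('contrib.openstack', ['inc=templates/*'])
    # sync_helpers() then copies each requested module out of a fresh
    # charm-helpers checkout into the charm tree:
    sync_helpers(['core', 'contrib.openstack|inc=*'],
                 src='/tmp/charm-helpers', dest='hooks/charmhelpers')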
@@ -3,5 +3,10 @@ destination: hooks/charmhelpers
 include:
     - core
     - fetch
-    - contrib
+    - contrib.amulet
+    - contrib.hahelpers
+    - contrib.network
+    - contrib.openstack
+    - contrib.python
+    - contrib.storage
     - payload
@@ -3,6 +3,14 @@ options:
     default: 192.168.100.250
     type: string
     description: IP address of the Director's Management interface. Same IP can be used to access PG Console.
+  plumgrid-username:
+    default: plumgrid
+    type: string
+    description: Username to access PLUMgrid Director
+  plumgrid-password:
+    default: plumgrid
+    type: string
+    description: Password to access PLUMgrid Director
   lcm-ssh-key:
     default: 'null'
     type: string
@@ -51,7 +51,8 @@ class AmuletDeployment(object):
         if 'units' not in this_service:
             this_service['units'] = 1
 
-        self.d.add(this_service['name'], units=this_service['units'])
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'))
 
         for svc in other_services:
             if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
             if 'units' not in svc:
                 svc['units'] = 1
 
-            self.d.add(svc['name'], charm=branch_location, units=svc['units'])
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'))
 
     def _add_relations(self, relations):
         """Add all of the relations for the services."""
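Note: with the change above, each service dict handed to the deployment helper may carry an optional 'constraints' entry that is passed straight to deployment.add(); a hypothetical sketch (service names and constraint values are illustrative only).

    # 'constraints' is optional; dict.get() returns None when it is absent.
    this_service = {'name': 'some-charm', 'units': 1,
                    'constraints': {'mem': '4G'}}
    other_services = [{'name': 'another-charm'}]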
@@ -14,17 +14,25 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
-import amulet
-import ConfigParser
-import distro_info
 import io
+import json
 import logging
 import os
 import re
-import six
+import socket
+import subprocess
 import sys
 import time
-import urlparse
+import uuid
+
+import amulet
+import distro_info
+import six
+from six.moves import configparser
+if six.PY3:
+    from urllib import parse as urlparse
+else:
+    import urlparse
 
 
 class AmuletUtils(object):
@@ -108,7 +116,7 @@ class AmuletUtils(object):
         # /!\ DEPRECATION WARNING (beisner):
         # New and existing tests should be rewritten to use
         # validate_services_by_name() as it is aware of init systems.
-        self.log.warn('/!\\ DEPRECATION WARNING: use '
+        self.log.warn('DEPRECATION WARNING: use '
                       'validate_services_by_name instead of validate_services '
                       'due to init system differences.')
 
@@ -142,19 +150,23 @@ class AmuletUtils(object):
 
         for service_name in services_list:
             if (self.ubuntu_releases.index(release) >= systemd_switch or
-                    service_name == "rabbitmq-server"):
-                # init is systemd
+                    service_name in ['rabbitmq-server', 'apache2']):
+                # init is systemd (or regular sysv)
                 cmd = 'sudo service {} status'.format(service_name)
+                output, code = sentry_unit.run(cmd)
+                service_running = code == 0
             elif self.ubuntu_releases.index(release) < systemd_switch:
                 # init is upstart
                 cmd = 'sudo status {}'.format(service_name)
+                output, code = sentry_unit.run(cmd)
+                service_running = code == 0 and "start/running" in output
 
-            output, code = sentry_unit.run(cmd)
             self.log.debug('{} `{}` returned '
                            '{}'.format(sentry_unit.info['unit_name'],
                                        cmd, code))
-            if code != 0:
-                return "command `{}` returned {}".format(cmd, str(code))
+            if not service_running:
+                return u"command `{}` returned {} {}".format(
+                    cmd, output, str(code))
         return None
 
     def _get_config(self, unit, filename):
@@ -164,7 +176,7 @@ class AmuletUtils(object):
         # NOTE(beisner):  by default, ConfigParser does not handle options
         # with no value, such as the flags used in the mysql my.cnf file.
         # https://bugs.python.org/issue7005
-        config = ConfigParser.ConfigParser(allow_no_value=True)
+        config = configparser.ConfigParser(allow_no_value=True)
         config.readfp(io.StringIO(file_contents))
         return config
 
@@ -259,33 +271,52 @@ class AmuletUtils(object):
         """Get last modification time of directory."""
         return sentry_unit.directory_stat(directory)['mtime']
 
-    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
-        """Get process' start time.
+    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
+        """Get start time of a process based on the last modification time
+           of the /proc/pid directory.
 
-           Determine start time of the process based on the last modification
-           time of the /proc/pid directory. If pgrep_full is True, the process
-           name is matched against the full command line.
+           :sentry_unit:  The sentry unit to check for the service on
+           :service:  service name to look for in process table
+           :pgrep_full: [Deprecated] Use full command line search mode with pgrep
+           :returns:  epoch time of service process start
+           :param commands:  list of bash commands
+           :param sentry_units:  list of sentry unit pointers
+           :returns:  None if successful; Failure message otherwise
         """
-        if pgrep_full:
-            cmd = 'pgrep -o -f {}'.format(service)
-        else:
-            cmd = 'pgrep -o {}'.format(service)
-        cmd = cmd + ' | grep -v pgrep || exit 0'
-        cmd_out = sentry_unit.run(cmd)
-        self.log.debug('CMDout: ' + str(cmd_out))
-        if cmd_out[0]:
-            self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
-            proc_dir = '/proc/{}'.format(cmd_out[0].strip())
-            return self._get_dir_mtime(sentry_unit, proc_dir)
+        if pgrep_full is not None:
+            # /!\ DEPRECATION WARNING (beisner):
+            # No longer implemented, as pidof is now used instead of pgrep.
+            # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+            self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
+                          'longer implemented re: lp 1474030.')
+
+        pid_list = self.get_process_id_list(sentry_unit, service)
+        pid = pid_list[0]
+        proc_dir = '/proc/{}'.format(pid)
+        self.log.debug('Pid for {} on {}: {}'.format(
+            service, sentry_unit.info['unit_name'], pid))
+
+        return self._get_dir_mtime(sentry_unit, proc_dir)
 
     def service_restarted(self, sentry_unit, service, filename,
-                          pgrep_full=False, sleep_time=20):
+                          pgrep_full=None, sleep_time=20):
         """Check if service was restarted.
 
            Compare a service's start time vs a file's last modification time
           (such as a config file for that service) to determine if the service
           has been restarted.
         """
+        # /!\ DEPRECATION WARNING (beisner):
+        # This method is prone to races in that no before-time is known.
+        # Use validate_service_config_changed instead.
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+        self.log.warn('DEPRECATION WARNING: use '
+                      'validate_service_config_changed instead of '
+                      'service_restarted due to known races.')
+
         time.sleep(sleep_time)
         if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                 self._get_file_mtime(sentry_unit, filename)):
@@ -294,78 +325,122 @@ class AmuletUtils(object):
         return False
 
     def service_restarted_since(self, sentry_unit, mtime, service,
-                                pgrep_full=False, sleep_time=20,
-                                retry_count=2):
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
         """Check if service was been started after a given time.
 
         Args:
           sentry_unit (sentry): The sentry unit to check for the service on
           mtime (float): The epoch time to check against
           service (string): service name to look for in process table
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
-          retry_count (int): If service is not found, how many times to retry
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
 
         Returns:
           bool: True if service found and its start time it newer than mtime,
                 False if service is older than mtime or if service was
                 not found.
         """
-        self.log.debug('Checking %s restarted since %s' % (service, mtime))
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
         time.sleep(sleep_time)
-        proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                    pgrep_full)
-        while retry_count > 0 and not proc_start_time:
-            self.log.debug('No pid file found for service %s, will retry %i '
-                           'more times' % (service, retry_count))
-            time.sleep(30)
-            proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                        pgrep_full)
-            retry_count = retry_count - 1
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
 
         if not proc_start_time:
             self.log.warn('No proc start time found, assuming service did '
                           'not start')
             return False
         if proc_start_time >= mtime:
-            self.log.debug('proc start time is newer than provided mtime'
-                           '(%s >= %s)' % (proc_start_time, mtime))
+            self.log.debug('Proc start time is newer than provided mtime'
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('proc start time (%s) is older than provided mtime '
-                          '(%s), service did not restart' % (proc_start_time,
-                                                             mtime))
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
             return False
 
     def config_updated_since(self, sentry_unit, filename, mtime,
-                             sleep_time=20):
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
         """Check if file was modified after a given time.
 
         Args:
           sentry_unit (sentry): The sentry unit to check the file mtime on
           filename (string): The file to check mtime of
           mtime (float): The epoch time to check against
-          sleep_time (int): Seconds to sleep before looking for process
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
 
         Returns:
           bool: True if file was modified more recently than mtime, False if
-                file was modified before mtime,
+                file was modified before mtime, or if file not found.
         """
-        self.log.debug('Checking %s updated since %s' % (filename, mtime))
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
         time.sleep(sleep_time)
-        file_mtime = self._get_file_mtime(sentry_unit, filename)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
         if file_mtime >= mtime:
             self.log.debug('File mtime is newer than provided mtime '
-                           '(%s >= %s)' % (file_mtime, mtime))
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('File mtime %s is older than provided mtime %s'
-                          % (file_mtime, mtime))
+            self.log.warn('File mtime is older than provided mtime'
+                          '(%s < on %s) on %s' % (file_mtime,
+                                                  mtime, unit_name))
             return False
 
     def validate_service_config_changed(self, sentry_unit, mtime, service,
-                                        filename, pgrep_full=False,
-                                        sleep_time=20, retry_count=2):
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
         """Check service and file were updated after mtime
 
         Args:
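Note: a hedged usage sketch for the retry-aware helpers above; the unit, service, and file names are hypothetical.

    # u = AmuletUtils(logging.DEBUG)
    mtime = u.get_sentry_time(sentry_unit)
    # ... change charm config here, then verify the service picked it up ...
    assert u.validate_service_config_changed(
        sentry_unit, mtime, 'some-service', '/etc/some-service/some.conf')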
@@ -373,9 +448,10 @@ class AmuletUtils(object):
           mtime (float): The epoch time to check against
           service (string): service name to look for in process table
           filename (string): The file to check mtime of
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
           retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries
 
         Typical Usage:
             u = OpenStackAmuletUtils(ERROR)
@@ -392,15 +468,27 @@ class AmuletUtils(object):
           mtime, False if service is older than mtime or if service was
           not found or if filename was modified before mtime.
         """
-        self.log.debug('Checking %s restarted since %s' % (service, mtime))
-        time.sleep(sleep_time)
-        service_restart = self.service_restarted_since(sentry_unit, mtime,
-                                                       service,
-                                                       pgrep_full=pgrep_full,
-                                                       sleep_time=0,
-                                                       retry_count=retry_count)
-        config_update = self.config_updated_since(sentry_unit, filename, mtime,
-                                                  sleep_time=0)
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+
+        service_restart = self.service_restarted_since(
+            sentry_unit, mtime,
+            service,
+            pgrep_full=pgrep_full,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        config_update = self.config_updated_since(
+            sentry_unit,
+            filename,
+            mtime,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
         return service_restart and config_update
 
     def get_sentry_time(self, sentry_unit):
@@ -418,7 +506,6 @@ class AmuletUtils(object):
         """Return a list of all Ubuntu releases in order of release."""
         _d = distro_info.UbuntuDistroInfo()
         _release_list = _d.all
-        self.log.debug('Ubuntu release list: {}'.format(_release_list))
         return _release_list
 
     def file_to_url(self, file_rel_path):
@@ -450,15 +537,20 @@ class AmuletUtils(object):
                             cmd, code, output))
         return None
 
-    def get_process_id_list(self, sentry_unit, process_name):
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True):
         """Get a list of process ID(s) from a single sentry juju unit
         for a single process name.
 
-        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param sentry_unit: Amulet sentry instance (juju unit)
         :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
         :returns: List of process IDs
         """
-        cmd = 'pidof {}'.format(process_name)
+        cmd = 'pidof -x {}'.format(process_name)
+        if not expect_success:
+            cmd += " || exit 0 && exit 1"
         output, code = sentry_unit.run(cmd)
         if code != 0:
             msg = ('{} `{}` returned {} '
@@ -467,14 +559,23 @@ class AmuletUtils(object):
             amulet.raise_status(amulet.FAIL, msg=msg)
         return str(output).split()
 
-    def get_unit_process_ids(self, unit_processes):
+    def get_unit_process_ids(self, unit_processes, expect_success=True):
         """Construct a dict containing unit sentries, process names, and
-        process IDs."""
+        process IDs.
+
+        :param unit_processes: A dictionary of Amulet sentry instance
+            to list of process names.
+        :param expect_success: if False expect the processes to not be
+            running, raise if they are.
+        :returns: Dictionary of Amulet sentry instance to dictionary
+            of process names to PIDs.
+        """
         pid_dict = {}
-        for sentry_unit, process_list in unit_processes.iteritems():
+        for sentry_unit, process_list in six.iteritems(unit_processes):
             pid_dict[sentry_unit] = {}
             for process in process_list:
-                pids = self.get_process_id_list(sentry_unit, process)
+                pids = self.get_process_id_list(
+                    sentry_unit, process, expect_success=expect_success)
                 pid_dict[sentry_unit].update({process: pids})
         return pid_dict
 
@@ -488,7 +589,7 @@ class AmuletUtils(object):
             return ('Unit count mismatch.  expected, actual: {}, '
                     '{} '.format(len(expected), len(actual)))
 
-        for (e_sentry, e_proc_names) in expected.iteritems():
+        for (e_sentry, e_proc_names) in six.iteritems(expected):
             e_sentry_name = e_sentry.info['unit_name']
             if e_sentry in actual.keys():
                 a_proc_names = actual[e_sentry]
@@ -500,22 +601,40 @@ class AmuletUtils(object):
                 return ('Process name count mismatch.  expected, actual: {}, '
                         '{}'.format(len(expected), len(actual)))
 
-            for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
+            for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
                     zip(e_proc_names.items(), a_proc_names.items()):
                 if e_proc_name != a_proc_name:
                     return ('Process name mismatch.  expected, actual: {}, '
                             '{}'.format(e_proc_name, a_proc_name))
 
                 a_pids_length = len(a_pids)
-                if e_pids_length != a_pids_length:
-                    return ('PID count mismatch. {} ({}) expected, actual: '
-                            '{}, {} ({})'.format(e_sentry_name, e_proc_name,
-                                                 e_pids_length, a_pids_length,
-                                                 a_pids))
+                fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
+                            '{}, {} ({})'.format(e_sentry_name, e_proc_name,
+                                                 e_pids, a_pids_length,
+                                                 a_pids))
+
+                # If expected is a list, ensure at least one PID quantity match
+                if isinstance(e_pids, list) and \
+                        a_pids_length not in e_pids:
+                    return fail_msg
+                # If expected is not bool and not list,
+                # ensure PID quantities match
+                elif not isinstance(e_pids, bool) and \
+                        not isinstance(e_pids, list) and \
+                        a_pids_length != e_pids:
+                    return fail_msg
+                # If expected is bool True, ensure 1 or more PIDs exist
+                elif isinstance(e_pids, bool) and \
+                        e_pids is True and a_pids_length < 1:
+                    return fail_msg
+                # If expected is bool False, ensure 0 PIDs exist
+                elif isinstance(e_pids, bool) and \
+                        e_pids is False and a_pids_length != 0:
+                    return fail_msg
                 else:
                     self.log.debug('PID check OK: {} {} {}: '
                                    '{}'.format(e_sentry_name, e_proc_name,
-                                               e_pids_length, a_pids))
+                                               e_pids, a_pids))
         return None
 
     def validate_list_of_identical_dicts(self, list_of_dicts):
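Note: the branches above let an expected PID entry be an exact count, a list of acceptable counts, or a bool ("one or more" / "none"); a sketch of the expected mapping follows, where the sentry and process names are hypothetical.

    expected_processes = {
        some_sentry_unit: {
            'some-daemon': 3,        # exactly 3 PIDs
            'some-worker': [1, 2],   # 1 or 2 PIDs are both acceptable
            'some-agent': True,      # at least one PID
            'some-old-proc': False,  # no PIDs expected
        },
    }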
@@ -531,3 +650,180 @@ class AmuletUtils(object):
             return 'Dicts within list are not identical'
 
         return None
+
+    def validate_sectionless_conf(self, file_contents, expected):
+        """A crude conf parser.  Useful to inspect configuration files which
+        do not have section headers (as would be necessary in order to use
+        the configparser).  Such as openstack-dashboard or rabbitmq confs."""
+        for line in file_contents.split('\n'):
+            if '=' in line:
+                args = line.split('=')
+                if len(args) <= 1:
+                    continue
+                key = args[0].strip()
+                value = args[1].strip()
+                if key in expected.keys():
+                    if expected[key] != value:
+                        msg = ('Config mismatch.  Expected, actual:  {}, '
+                               '{}'.format(expected[key], value))
+                        amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def get_unit_hostnames(self, units):
+        """Return a dict of juju unit names to hostnames."""
+        host_names = {}
+        for unit in units:
+            host_names[unit.info['unit_name']] = \
+                str(unit.file_contents('/etc/hostname').strip())
+        self.log.debug('Unit host names: {}'.format(host_names))
+        return host_names
+
+    def run_cmd_unit(self, sentry_unit, cmd):
+        """Run a command on a unit, return the output and exit code."""
+        output, code = sentry_unit.run(cmd)
+        if code == 0:
+            self.log.debug('{} `{}` command returned {} '
+                           '(OK)'.format(sentry_unit.info['unit_name'],
+                                         cmd, code))
+        else:
+            msg = ('{} `{}` command returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return str(output), code
+
+    def file_exists_on_unit(self, sentry_unit, file_name):
+        """Check if a file exists on a unit."""
+        try:
+            sentry_unit.file_stat(file_name)
+            return True
+        except IOError:
+            return False
+        except Exception as e:
+            msg = 'Error checking file {}: {}'.format(file_name, e)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def file_contents_safe(self, sentry_unit, file_name,
+                           max_wait=60, fatal=False):
+        """Get file contents from a sentry unit.  Wrap amulet file_contents
+        with retry logic to address races where a file checks as existing,
+        but no longer exists by the time file_contents is called.
+        Return None if file not found. Optionally raise if fatal is True."""
+        unit_name = sentry_unit.info['unit_name']
+        file_contents = False
+        tries = 0
+        while not file_contents and tries < (max_wait / 4):
+            try:
+                file_contents = sentry_unit.file_contents(file_name)
+            except IOError:
+                self.log.debug('Attempt {} to open file {} from {} '
+                               'failed'.format(tries, file_name,
+                                               unit_name))
+                time.sleep(4)
+                tries += 1
+
+        if file_contents:
+            return file_contents
+        elif not fatal:
+            return None
+        elif fatal:
+            msg = 'Failed to get file contents from unit.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening sevice on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening sevice on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time.  Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+    # amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output,
+                   params=None):
+        """Run the named action on a given unit sentry.
+
+        params a dict of parameters to use
+        _check_output parameter is used for dependency injection.
+
+        @return action_id.
+        """
+        unit_id = unit_sentry.info["unit_name"]
+        command = ["juju", "action", "do", "--format=json", unit_id, action]
+        if params is not None:
+            for key, value in params.iteritems():
+                command.append("{}={}".format(key, value))
+        self.log.info("Running command: %s\n" % " ".join(command))
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        action_id = data[u'Action queued with id']
+        return action_id
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
+
+        _check_output parameter is used for dependency injection.
+        """
+        command = ["juju", "action", "fetch", "--format=json", "--wait=0",
+                   action_id]
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        return data.get(u"status") == "completed"
+
+    def status_get(self, unit):
+        """Return the current service status of this unit."""
+        raw_status, return_code = unit.run(
+            "status-get --format=json --include-data")
+        if return_code != 0:
+            return ("unknown", "")
+        status = json.loads(raw_status)
+        return (status["status"], status["message"])
@@ -1,254 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
-
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-#  Charm Helpers Developers <juju@lists.ubuntu.com>
-"""Charm Helpers ansible - declare the state of your machines.
-
-This helper enables you to declare your machine state, rather than
-program it procedurally (and have to test each change to your procedures).
-Your install hook can be as simple as::
-
-    {{{
-    import charmhelpers.contrib.ansible
-
-
-    def install():
-        charmhelpers.contrib.ansible.install_ansible_support()
-        charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
-    }}}
-
-and won't need to change (nor will its tests) when you change the machine
-state.
-
-All of your juju config and relation-data are available as template
-variables within your playbooks and templates. An install playbook looks
-something like::
-
-    {{{
-    ---
-    - hosts: localhost
-      user: root
-
-      tasks:
-        - name: Add private repositories.
-          template:
-            src: ../templates/private-repositories.list.jinja2
-            dest: /etc/apt/sources.list.d/private.list
-
-        - name: Update the cache.
-          apt: update_cache=yes
-
-        - name: Install dependencies.
-          apt: pkg={{ item }}
-          with_items:
-            - python-mimeparse
-            - python-webob
-            - sunburnt
-
-        - name: Setup groups.
-          group: name={{ item.name }} gid={{ item.gid }}
-          with_items:
-            - { name: 'deploy_user', gid: 1800 }
-            - { name: 'service_user', gid: 1500 }
-
-      ...
-    }}}
-
-Read more online about `playbooks`_ and standard ansible `modules`_.
-
-.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
-.. _modules: http://www.ansibleworks.com/docs/modules.html
-
-A further feature os the ansible hooks is to provide a light weight "action"
-scripting tool. This is a decorator that you apply to a function, and that
-function can now receive cli args, and can pass extra args to the playbook.
-
-e.g.
-
-
-@hooks.action()
-def some_action(amount, force="False"):
-    "Usage: some-action AMOUNT [force=True]"  # <-- shown on error
-    # process the arguments
-    # do some calls
-    # return extra-vars to be passed to ansible-playbook
-    return {
-        'amount': int(amount),
-        'type': force,
-    }
-
-You can now create a symlink to hooks.py that can be invoked like a hook, but
-with cli params:
-
-    # link actions/some-action to hooks/hooks.py
-
-    actions/some-action amount=10 force=true
-
-"""
-import os
-import stat
-import subprocess
-import functools
-
-import charmhelpers.contrib.templating.contexts
-import charmhelpers.core.host
-import charmhelpers.core.hookenv
-import charmhelpers.fetch
-
-
-charm_dir = os.environ.get('CHARM_DIR', '')
-ansible_hosts_path = '/etc/ansible/hosts'
-# Ansible will automatically include any vars in the following
-# file in its inventory when run locally.
-ansible_vars_path = '/etc/ansible/host_vars/localhost'
-
-
-def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
-    """Installs the ansible package.
-
-    By default it is installed from the `PPA`_ linked from
-    the ansible `website`_ or from a ppa specified by a charm config..
-
-    .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
-    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu
-
-    If from_ppa is empty, you must ensure that the package is available
-    from a configured repository.
-    """
-    if from_ppa:
-        charmhelpers.fetch.add_source(ppa_location)
-        charmhelpers.fetch.apt_update(fatal=True)
-    charmhelpers.fetch.apt_install('ansible')
-    with open(ansible_hosts_path, 'w+') as hosts_file:
-        hosts_file.write('localhost ansible_connection=local')
-
-
-def apply_playbook(playbook, tags=None, extra_vars=None):
-    tags = tags or []
-    tags = ",".join(tags)
-    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
-        ansible_vars_path, namespace_separator='__',
-        allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))
-
-    # we want ansible's log output to be unbuffered
-    env = os.environ.copy()
-    env['PYTHONUNBUFFERED'] = "1"
-    call = [
-        'ansible-playbook',
-        '-c',
-        'local',
-        playbook,
-    ]
-    if tags:
-        call.extend(['--tags', '{}'.format(tags)])
-    if extra_vars:
-        extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
-        call.extend(['--extra-vars', " ".join(extra)])
-    subprocess.check_call(call, env=env)
-
-
-class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
-    """Run a playbook with the hook-name as the tag.
-
-    This helper builds on the standard hookenv.Hooks helper,
-    but additionally runs the playbook with the hook-name specified
-    using --tags (ie. running all the tasks tagged with the hook-name).
-
-    Example::
-
-        hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
-
-        # All the tasks within my_machine_state.yaml tagged with 'install'
-        # will be run automatically after do_custom_work()
-        @hooks.hook()
-        def install():
-            do_custom_work()
-
-        # For most of your hooks, you won't need to do anything other
-        # than run the tagged tasks for the hook:
-        @hooks.hook('config-changed', 'start', 'stop')
-        def just_use_playbook():
-            pass
-
-        # As a convenience, you can avoid the above noop function by specifying
-        # the hooks which are handled by ansible-only and they'll be registered
-        # for you:
-        # hooks = AnsibleHooks(
-        #     'playbooks/my_machine_state.yaml',
-        #     default_hooks=['config-changed', 'start', 'stop'])
-
-        if __name__ == "__main__":
-            # execute a hook based on the name the program is called by
-            hooks.execute(sys.argv)
-    """
-
-    def __init__(self, playbook_path, default_hooks=None):
-        """Register any hooks handled by ansible."""
-        super(AnsibleHooks, self).__init__()
-
-        self._actions = {}
-        self.playbook_path = playbook_path
-
-        default_hooks = default_hooks or []
-
-        def noop(*args, **kwargs):
-            pass
-
-        for hook in default_hooks:
-            self.register(hook, noop)
-
-    def register_action(self, name, function):
-        """Register a hook"""
-        self._actions[name] = function
-
-    def execute(self, args):
-        """Execute the hook followed by the playbook using the hook as tag."""
-        hook_name = os.path.basename(args[0])
-        extra_vars = None
-        if hook_name in self._actions:
-            extra_vars = self._actions[hook_name](args[1:])
-        else:
-            super(AnsibleHooks, self).execute(args)
-
-        charmhelpers.contrib.ansible.apply_playbook(
-            self.playbook_path, tags=[hook_name], extra_vars=extra_vars)
-
-    def action(self, *action_names):
-        """Decorator, registering them as actions"""
-        def action_wrapper(decorated):
-
-            @functools.wraps(decorated)
-            def wrapper(argv):
-                kwargs = dict(arg.split('=') for arg in argv)
-                try:
-                    return decorated(**kwargs)
-                except TypeError as e:
-                    if decorated.__doc__:
-                        e.args += (decorated.__doc__,)
-                    raise
-
-            self.register_action(decorated.__name__, wrapper)
-            if '_' in decorated.__name__:
-                self.register_action(
-                    decorated.__name__.replace('_', '-'), wrapper)
-
-            return wrapper
-
-        return action_wrapper
@ -1,126 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
import os
|
|
||||||
from distutils.spawn import find_executable
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
in_relation_hook,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
relation_get,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def action_set(key, val):
|
|
||||||
if find_executable('action-set'):
|
|
||||||
action_cmd = ['action-set']
|
|
||||||
|
|
||||||
if isinstance(val, dict):
|
|
||||||
for k, v in iter(val.items()):
|
|
||||||
action_set('%s.%s' % (key, k), v)
|
|
||||||
return True
|
|
||||||
|
|
||||||
action_cmd.append('%s=%s' % (key, val))
|
|
||||||
subprocess.check_call(action_cmd)
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
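# A minimal sketch (hypothetical helper name) mirroring action_set()'s
# recursion, to show how a nested dict becomes dotted action-set keys.
def _flatten_action_data(key, val):
    if isinstance(val, dict):
        for k, v in val.items():
            for pair in _flatten_action_data('%s.%s' % (key, k), v):
                yield pair
    else:
        yield '%s=%s' % (key, val)

# list(_flatten_action_data('meta.composite',
#                           {'value': 16.7, 'units': 'trans/sec'}))
# -> ['meta.composite.value=16.7', 'meta.composite.units=trans/sec']
# (key order may vary)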
|
|
||||||
|
|
||||||
class Benchmark():
|
|
||||||
"""
|
|
||||||
Helper class for the `benchmark` interface.
|
|
||||||
|
|
||||||
:param list actions: Define the actions that are also benchmarks
|
|
||||||
|
|
||||||
From inside the benchmark-relation-changed hook, you would call:
|
|
||||||
Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
siege = Benchmark(['siege'])
|
|
||||||
siege.start()
|
|
||||||
[... run siege ...]
|
|
||||||
# The higher the score, the better the benchmark
|
|
||||||
siege.set_composite_score(16.70, 'trans/sec', 'desc')
|
|
||||||
siege.finish()
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing
|
|
||||||
|
|
||||||
required_keys = [
|
|
||||||
'hostname',
|
|
||||||
'port',
|
|
||||||
'graphite_port',
|
|
||||||
'graphite_endpoint',
|
|
||||||
'api_port'
|
|
||||||
]
|
|
||||||
|
|
||||||
def __init__(self, benchmarks=None):
|
|
||||||
if in_relation_hook():
|
|
||||||
if benchmarks is not None:
|
|
||||||
for rid in sorted(relation_ids('benchmark')):
|
|
||||||
relation_set(relation_id=rid, relation_settings={
|
|
||||||
'benchmarks': ",".join(benchmarks)
|
|
||||||
})
|
|
||||||
|
|
||||||
# Check the relation data
|
|
||||||
config = {}
|
|
||||||
for key in self.required_keys:
|
|
||||||
val = relation_get(key)
|
|
||||||
if val is not None:
|
|
||||||
config[key] = val
|
|
||||||
else:
|
|
||||||
# We don't have all of the required keys
|
|
||||||
config = {}
|
|
||||||
break
|
|
||||||
|
|
||||||
if len(config):
|
|
||||||
with open(self.BENCHMARK_CONF, 'w') as f:
|
|
||||||
for key, val in iter(config.items()):
|
|
||||||
f.write("%s=%s\n" % (key, val))
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def start():
|
|
||||||
action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
|
|
||||||
|
|
||||||
"""
|
|
||||||
If the collectd charm is also installed, tell it to send a snapshot
|
|
||||||
of the current profile data.
|
|
||||||
"""
|
|
||||||
COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
|
|
||||||
if os.path.exists(COLLECT_PROFILE_DATA):
|
|
||||||
subprocess.check_output([COLLECT_PROFILE_DATA])
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def finish():
|
|
||||||
action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def set_composite_score(value, units, direction='asc'):
|
|
||||||
"""
|
|
||||||
Set the composite score for a benchmark run. This is a single number
|
|
||||||
representative of the benchmark results. This could be the most
|
|
||||||
important metric, or an amalgamation of metric scores.
|
|
||||||
"""
|
|
||||||
return action_set(
|
|
||||||
"meta.composite",
|
|
||||||
{'value': value, 'units': units, 'direction': direction}
|
|
||||||
)
@ -1,208 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Copyright 2012 Canonical Ltd. This software is licensed under the
|
|
||||||
# GNU Affero General Public License version 3 (see the file LICENSE).
|
|
||||||
|
|
||||||
import warnings
|
|
||||||
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
|
|
||||||
|
|
||||||
import operator
|
|
||||||
import tempfile
|
|
||||||
import time
|
|
||||||
import yaml
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
import six
|
|
||||||
if six.PY3:
|
|
||||||
from urllib.request import urlopen
|
|
||||||
from urllib.error import (HTTPError, URLError)
|
|
||||||
else:
|
|
||||||
from urllib2 import (urlopen, HTTPError, URLError)
|
|
||||||
|
|
||||||
"""Helper functions for writing Juju charms in Python."""
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
__all__ = [
|
|
||||||
# 'get_config', # core.hookenv.config()
|
|
||||||
# 'log', # core.hookenv.log()
|
|
||||||
# 'log_entry', # core.hookenv.log()
|
|
||||||
# 'log_exit', # core.hookenv.log()
|
|
||||||
# 'relation_get', # core.hookenv.relation_get()
|
|
||||||
# 'relation_set', # core.hookenv.relation_set()
|
|
||||||
# 'relation_ids', # core.hookenv.relation_ids()
|
|
||||||
# 'relation_list', # core.hookenv.relation_units()
|
|
||||||
# 'config_get', # core.hookenv.config()
|
|
||||||
# 'unit_get', # core.hookenv.unit_get()
|
|
||||||
# 'open_port', # core.hookenv.open_port()
|
|
||||||
# 'close_port', # core.hookenv.close_port()
|
|
||||||
# 'service_control', # core.host.service()
|
|
||||||
'unit_info', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_machine', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_page_contents', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_relation', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_unit', # client-side, NOT IMPLEMENTED
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
SLEEP_AMOUNT = 0.1
|
|
||||||
|
|
||||||
|
|
||||||
# We create a juju_status Command here because it makes testing much,
|
|
||||||
# much easier.
|
|
||||||
def juju_status():
|
|
||||||
subprocess.check_call(['juju', 'status'])
|
|
||||||
|
|
||||||
# re-implemented as charmhelpers.fetch.configure_sources()
|
|
||||||
# def configure_source(update=False):
|
|
||||||
# source = config_get('source')
|
|
||||||
# if ((source.startswith('ppa:') or
|
|
||||||
# source.startswith('cloud:') or
|
|
||||||
# source.startswith('http:'))):
|
|
||||||
# run('add-apt-repository', source)
|
|
||||||
# if source.startswith("http:"):
|
|
||||||
# run('apt-key', 'import', config_get('key'))
|
|
||||||
# if update:
|
|
||||||
# run('apt-get', 'update')
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def make_charm_config_file(charm_config):
|
|
||||||
charm_config_file = tempfile.NamedTemporaryFile(mode='w+')
|
|
||||||
charm_config_file.write(yaml.dump(charm_config))
|
|
||||||
charm_config_file.flush()
|
|
||||||
# The NamedTemporaryFile instance is returned instead of just the name
|
|
||||||
# because we want to take advantage of garbage collection-triggered
|
|
||||||
# deletion of the temp file when it goes out of scope in the caller.
|
|
||||||
return charm_config_file
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def unit_info(service_name, item_name, data=None, unit=None):
|
|
||||||
if data is None:
|
|
||||||
data = yaml.safe_load(juju_status())
|
|
||||||
service = data['services'].get(service_name)
|
|
||||||
if service is None:
|
|
||||||
# XXX 2012-02-08 gmb:
|
|
||||||
# This allows us to cope with the race condition that we
|
|
||||||
# have between deploying a service and having it come up in
|
|
||||||
# `juju status`. We could probably do with cleaning it up so
|
|
||||||
# that it fails a bit more noisily after a while.
|
|
||||||
return ''
|
|
||||||
units = service['units']
|
|
||||||
if unit is not None:
|
|
||||||
item = units[unit][item_name]
|
|
||||||
else:
|
|
||||||
# It might seem odd to sort the units here, but we do it to
|
|
||||||
# ensure that when no unit is specified, the first unit for the
|
|
||||||
# service (or at least the one with the lowest number) is the
|
|
||||||
# one whose data gets returned.
|
|
||||||
sorted_unit_names = sorted(units.keys())
|
|
||||||
item = units[sorted_unit_names[0]][item_name]
|
|
||||||
return item
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def get_machine_data():
|
|
||||||
return yaml.safe_load(juju_status())['machines']
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_machine(num_machines=1, timeout=300):
|
|
||||||
"""Wait `timeout` seconds for `num_machines` machines to come up.
|
|
||||||
|
|
||||||
This wait_for... function can be called by other wait_for functions
|
|
||||||
whose timeouts might be too short in situations where only a bare
|
|
||||||
Juju setup has been bootstrapped.
|
|
||||||
|
|
||||||
:return: A tuple of (num_machines, time_taken). This is used for
|
|
||||||
testing.
|
|
||||||
"""
|
|
||||||
# You may think this is a hack, and you'd be right. The easiest way
|
|
||||||
# to tell what environment we're working in (LXC vs EC2) is to check
|
|
||||||
# the dns-name of the first machine. If it's localhost we're in LXC
|
|
||||||
# and we can just return here.
|
|
||||||
if get_machine_data()[0]['dns-name'] == 'localhost':
|
|
||||||
return 1, 0
|
|
||||||
start_time = time.time()
|
|
||||||
while True:
|
|
||||||
# Drop the first machine, since it's the Zookeeper and that's
|
|
||||||
# not a machine that we need to wait for. This will only work
|
|
||||||
# for EC2 environments, which is why we return early above if
|
|
||||||
# we're in LXC.
|
|
||||||
machine_data = get_machine_data()
|
|
||||||
non_zookeeper_machines = [
|
|
||||||
machine_data[key] for key in list(machine_data.keys())[1:]]
|
|
||||||
if len(non_zookeeper_machines) >= num_machines:
|
|
||||||
all_machines_running = True
|
|
||||||
for machine in non_zookeeper_machines:
|
|
||||||
if machine.get('instance-state') != 'running':
|
|
||||||
all_machines_running = False
|
|
||||||
break
|
|
||||||
if all_machines_running:
|
|
||||||
break
|
|
||||||
if time.time() - start_time >= timeout:
|
|
||||||
raise RuntimeError('timeout waiting for service to start')
|
|
||||||
time.sleep(SLEEP_AMOUNT)
|
|
||||||
return num_machines, time.time() - start_time
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_unit(service_name, timeout=480):
|
|
||||||
"""Wait `timeout` seconds for a given service name to come up."""
|
|
||||||
wait_for_machine(num_machines=1)
|
|
||||||
start_time = time.time()
|
|
||||||
while True:
|
|
||||||
state = unit_info(service_name, 'agent-state')
|
|
||||||
if 'error' in state or state == 'started':
|
|
||||||
break
|
|
||||||
if time.time() - start_time >= timeout:
|
|
||||||
raise RuntimeError('timeout waiting for service to start')
|
|
||||||
time.sleep(SLEEP_AMOUNT)
|
|
||||||
if state != 'started':
|
|
||||||
raise RuntimeError('unit did not start, agent-state: ' + state)
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_relation(service_name, relation_name, timeout=120):
|
|
||||||
"""Wait `timeout` seconds for a given relation to come up."""
|
|
||||||
start_time = time.time()
|
|
||||||
while True:
|
|
||||||
relation = unit_info(service_name, 'relations').get(relation_name)
|
|
||||||
if relation is not None and relation['state'] == 'up':
|
|
||||||
break
|
|
||||||
if time.time() - start_time >= timeout:
|
|
||||||
raise RuntimeError('timeout waiting for relation to be up')
|
|
||||||
time.sleep(SLEEP_AMOUNT)
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_page_contents(url, contents, timeout=120, validate=None):
|
|
||||||
if validate is None:
|
|
||||||
validate = operator.contains
|
|
||||||
start_time = time.time()
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
stream = urlopen(url)
|
|
||||||
except (HTTPError, URLError):
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
page = stream.read()
|
|
||||||
if validate(page, contents):
|
|
||||||
return page
|
|
||||||
if time.time() - start_time >= timeout:
|
|
||||||
raise RuntimeError('timeout waiting for contents of ' + url)
|
|
||||||
time.sleep(SLEEP_AMOUNT)
@ -1,15 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
|
@ -1,360 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""Compatibility with the nrpe-external-master charm"""
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import pwd
|
|
||||||
import grp
|
|
||||||
import os
|
|
||||||
import glob
|
|
||||||
import shutil
|
|
||||||
import re
|
|
||||||
import shlex
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
local_unit,
|
|
||||||
log,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
relations_of_type,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.host import service
|
|
||||||
|
|
||||||
# This module adds compatibility with the nrpe-external-master and plain nrpe
|
|
||||||
# subordinate charms. To use it in your charm:
|
|
||||||
#
|
|
||||||
# 1. Update metadata.yaml
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# nrpe-external-master:
|
|
||||||
# interface: nrpe-external-master
|
|
||||||
# scope: container
|
|
||||||
#
|
|
||||||
# and/or
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# local-monitors:
|
|
||||||
# interface: local-monitors
|
|
||||||
# scope: container
|
|
||||||
|
|
||||||
#
|
|
||||||
# 2. Add the following to config.yaml
|
|
||||||
#
|
|
||||||
# nagios_context:
|
|
||||||
# default: "juju"
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# Used by the nrpe subordinate charms.
|
|
||||||
# A string that will be prepended to instance name to set the host name
|
|
||||||
# in nagios. So for instance the hostname would be something like:
|
|
||||||
# juju-myservice-0
|
|
||||||
# If you're running multiple environments with the same services in them
|
|
||||||
# this allows you to differentiate between them.
|
|
||||||
# nagios_servicegroups:
|
|
||||||
# default: ""
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# A comma-separated list of nagios servicegroups.
|
|
||||||
# If left empty, the nagios_context will be used as the servicegroup
|
|
||||||
#
|
|
||||||
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
|
|
||||||
#
|
|
||||||
# 4. Update your hooks.py with something like this:
|
|
||||||
#
|
|
||||||
# from charmsupport.nrpe import NRPE
|
|
||||||
# (...)
|
|
||||||
# def update_nrpe_config():
|
|
||||||
# nrpe_compat = NRPE()
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# shortname = "myservice",
|
|
||||||
# description = "Check MyService",
|
|
||||||
# check_cmd = "check_http -w 2 -c 10 http://localhost"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# "myservice_other",
|
|
||||||
# "Check for widget failures",
|
|
||||||
# check_cmd = "/srv/myapp/scripts/widget_check"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.write()
|
|
||||||
#
|
|
||||||
# def config_changed():
|
|
||||||
# (...)
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def nrpe_external_master_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def local_monitors_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# 5. ln -s hooks.py nrpe-external-master-relation-changed
|
|
||||||
# ln -s hooks.py local-monitors-relation-changed
|
|
||||||
|
|
||||||
|
|
||||||
class CheckException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class Check(object):
|
|
||||||
shortname_re = '[A-Za-z0-9-_]+$'
|
|
||||||
service_template = ("""
|
|
||||||
#---------------------------------------------------
|
|
||||||
# This file is Juju managed
|
|
||||||
#---------------------------------------------------
|
|
||||||
define service {{
|
|
||||||
use active-service
|
|
||||||
host_name {nagios_hostname}
|
|
||||||
service_description {nagios_hostname}[{shortname}] """
|
|
||||||
"""{description}
|
|
||||||
check_command check_nrpe!{command}
|
|
||||||
servicegroups {nagios_servicegroup}
|
|
||||||
}}
|
|
||||||
""")
|
|
||||||
|
|
||||||
def __init__(self, shortname, description, check_cmd):
|
|
||||||
super(Check, self).__init__()
|
|
||||||
# XXX: could be better to calculate this from the service name
|
|
||||||
if not re.match(self.shortname_re, shortname):
|
|
||||||
raise CheckException("shortname must match {}".format(
|
|
||||||
Check.shortname_re))
|
|
||||||
self.shortname = shortname
|
|
||||||
self.command = "check_{}".format(shortname)
|
|
||||||
# Note: a set of invalid characters is defined by the
|
|
||||||
# Nagios server config
|
|
||||||
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
|
|
||||||
self.description = description
|
|
||||||
self.check_cmd = self._locate_cmd(check_cmd)
|
|
||||||
|
|
||||||
def _locate_cmd(self, check_cmd):
|
|
||||||
search_path = (
|
|
||||||
'/usr/lib/nagios/plugins',
|
|
||||||
'/usr/local/lib/nagios/plugins',
|
|
||||||
)
|
|
||||||
parts = shlex.split(check_cmd)
|
|
||||||
for path in search_path:
|
|
||||||
if os.path.exists(os.path.join(path, parts[0])):
|
|
||||||
command = os.path.join(path, parts[0])
|
|
||||||
if len(parts) > 1:
|
|
||||||
command += " " + " ".join(parts[1:])
|
|
||||||
return command
|
|
||||||
log('Check command not found: {}'.format(parts[0]))
|
|
||||||
return ''
|
|
||||||
|
|
||||||
def write(self, nagios_context, hostname, nagios_servicegroups):
|
|
||||||
nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
|
|
||||||
self.command)
|
|
||||||
with open(nrpe_check_file, 'w') as nrpe_check_config:
|
|
||||||
nrpe_check_config.write("# check {}\n".format(self.shortname))
|
|
||||||
nrpe_check_config.write("command[{}]={}\n".format(
|
|
||||||
self.command, self.check_cmd))
|
|
||||||
|
|
||||||
if not os.path.exists(NRPE.nagios_exportdir):
|
|
||||||
log('Not writing service config as {} is not accessible'.format(
|
|
||||||
NRPE.nagios_exportdir))
|
|
||||||
else:
|
|
||||||
self.write_service_config(nagios_context, hostname,
|
|
||||||
nagios_servicegroups)
|
|
||||||
|
|
||||||
def write_service_config(self, nagios_context, hostname,
|
|
||||||
nagios_servicegroups):
|
|
||||||
for f in os.listdir(NRPE.nagios_exportdir):
|
|
||||||
if re.search('.*{}.cfg'.format(self.command), f):
|
|
||||||
os.remove(os.path.join(NRPE.nagios_exportdir, f))
|
|
||||||
|
|
||||||
templ_vars = {
|
|
||||||
'nagios_hostname': hostname,
|
|
||||||
'nagios_servicegroup': nagios_servicegroups,
|
|
||||||
'description': self.description,
|
|
||||||
'shortname': self.shortname,
|
|
||||||
'command': self.command,
|
|
||||||
}
|
|
||||||
nrpe_service_text = Check.service_template.format(**templ_vars)
|
|
||||||
nrpe_service_file = '{}/service__{}_{}.cfg'.format(
|
|
||||||
NRPE.nagios_exportdir, hostname, self.command)
|
|
||||||
with open(nrpe_service_file, 'w') as nrpe_service_config:
|
|
||||||
nrpe_service_config.write(str(nrpe_service_text))
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
subprocess.call(self.check_cmd)
|
|
||||||
|
|
||||||
|
|
||||||
class NRPE(object):
|
|
||||||
nagios_logdir = '/var/log/nagios'
|
|
||||||
nagios_exportdir = '/var/lib/nagios/export'
|
|
||||||
nrpe_confdir = '/etc/nagios/nrpe.d'
|
|
||||||
|
|
||||||
def __init__(self, hostname=None):
|
|
||||||
super(NRPE, self).__init__()
|
|
||||||
self.config = config()
|
|
||||||
self.nagios_context = self.config['nagios_context']
|
|
||||||
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
|
|
||||||
self.nagios_servicegroups = self.config['nagios_servicegroups']
|
|
||||||
else:
|
|
||||||
self.nagios_servicegroups = self.nagios_context
|
|
||||||
self.unit_name = local_unit().replace('/', '-')
|
|
||||||
if hostname:
|
|
||||||
self.hostname = hostname
|
|
||||||
else:
|
|
||||||
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
|
|
||||||
self.checks = []
|
|
||||||
|
|
||||||
def add_check(self, *args, **kwargs):
|
|
||||||
self.checks.append(Check(*args, **kwargs))
|
|
||||||
|
|
||||||
def write(self):
|
|
||||||
try:
|
|
||||||
nagios_uid = pwd.getpwnam('nagios').pw_uid
|
|
||||||
nagios_gid = grp.getgrnam('nagios').gr_gid
|
|
||||||
except:
|
|
||||||
log("Nagios user not set up, nrpe checks not updated")
|
|
||||||
return
|
|
||||||
|
|
||||||
if not os.path.exists(NRPE.nagios_logdir):
|
|
||||||
os.mkdir(NRPE.nagios_logdir)
|
|
||||||
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
|
|
||||||
|
|
||||||
nrpe_monitors = {}
|
|
||||||
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
|
|
||||||
for nrpecheck in self.checks:
|
|
||||||
nrpecheck.write(self.nagios_context, self.hostname,
|
|
||||||
self.nagios_servicegroups)
|
|
||||||
nrpe_monitors[nrpecheck.shortname] = {
|
|
||||||
"command": nrpecheck.command,
|
|
||||||
}
|
|
||||||
|
|
||||||
service('restart', 'nagios-nrpe-server')
|
|
||||||
|
|
||||||
monitor_ids = relation_ids("local-monitors") + \
|
|
||||||
relation_ids("nrpe-external-master")
|
|
||||||
for rid in monitor_ids:
|
|
||||||
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Query relation with nrpe subordinate, return the nagios_host_context
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
for rel in relations_of_type(relation_name):
|
|
||||||
if 'nagios_host_context' in rel:
|
|
||||||
return rel['nagios_host_context']
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostname(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Query relation with nrpe subordinate, return the nagios_hostname
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
for rel in relations_of_type(relation_name):
|
|
||||||
if 'nagios_hostname' in rel:
|
|
||||||
return rel['nagios_hostname']
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_unit_name(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Return the nagios unit name prepended with host_context if needed
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
host_context = get_nagios_hostcontext(relation_name)
|
|
||||||
if host_context:
|
|
||||||
unit = "%s:%s" % (host_context, local_unit())
|
|
||||||
else:
|
|
||||||
unit = local_unit()
|
|
||||||
return unit
|
|
||||||
|
|
||||||
|
|
||||||
def add_init_service_checks(nrpe, services, unit_name):
|
|
||||||
"""
|
|
||||||
Add checks for each service in list
|
|
||||||
|
|
||||||
:param NRPE nrpe: NRPE object to add check to
|
|
||||||
:param list services: List of services to check
|
|
||||||
:param str unit_name: Unit name to use in check description
|
|
||||||
"""
|
|
||||||
for svc in services:
|
|
||||||
upstart_init = '/etc/init/%s.conf' % svc
|
|
||||||
sysv_init = '/etc/init.d/%s' % svc
|
|
||||||
if os.path.exists(upstart_init):
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname=svc,
|
|
||||||
description='process check {%s}' % unit_name,
|
|
||||||
check_cmd='check_upstart_job %s' % svc
|
|
||||||
)
|
|
||||||
elif os.path.exists(sysv_init):
|
|
||||||
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
|
|
||||||
cron_file = ('*/5 * * * * root '
|
|
||||||
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
|
|
||||||
'-s /etc/init.d/%s status > '
|
|
||||||
'/var/lib/nagios/service-check-%s.txt\n' % (svc,
|
|
||||||
svc)
|
|
||||||
)
|
|
||||||
f = open(cronpath, 'w')
|
|
||||||
f.write(cron_file)
|
|
||||||
f.close()
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname=svc,
|
|
||||||
description='process check {%s}' % unit_name,
|
|
||||||
check_cmd='check_status_file.py -f '
|
|
||||||
'/var/lib/nagios/service-check-%s.txt' % svc,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def copy_nrpe_checks():
|
|
||||||
"""
|
|
||||||
Copy the nrpe checks into place
|
|
||||||
|
|
||||||
"""
|
|
||||||
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
|
|
||||||
nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
|
|
||||||
'charmhelpers', 'contrib', 'openstack',
|
|
||||||
'files')
|
|
||||||
|
|
||||||
if not os.path.exists(NAGIOS_PLUGINS):
|
|
||||||
os.makedirs(NAGIOS_PLUGINS)
|
|
||||||
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
|
|
||||||
if os.path.isfile(fname):
|
|
||||||
shutil.copy2(fname,
|
|
||||||
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
|
|
||||||
|
|
||||||
|
|
||||||
def add_haproxy_checks(nrpe, unit_name):
|
|
||||||
"""
|
|
||||||
Add checks for each service in list
|
|
||||||
|
|
||||||
:param NRPE nrpe: NRPE object to add check to
|
|
||||||
:param str unit_name: Unit name to use in check description
|
|
||||||
"""
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname='haproxy_servers',
|
|
||||||
description='Check HAProxy {%s}' % unit_name,
|
|
||||||
check_cmd='check_haproxy.sh')
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname='haproxy_queue',
|
|
||||||
description='Check HAProxy queue depth {%s}' % unit_name,
|
|
||||||
check_cmd='check_haproxy_queue_depth.sh')
|
|
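# A minimal sketch of a charm-side update_nrpe_config hook built from the
# helpers in this module; 'my-service' is a placeholder service name.
def update_nrpe_config():
    hostname = get_nagios_hostname()
    current_unit = get_nagios_unit_name()
    nrpe_setup = NRPE(hostname=hostname)
    add_init_service_checks(nrpe_setup, ['my-service'], current_unit)
    add_haproxy_checks(nrpe_setup, current_unit)
    nrpe_setup.write()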
|
@ -1,175 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
'''
|
|
||||||
Functions for managing volumes in juju units. One volume is supported per unit.
|
|
||||||
Subordinates may have their own storage, provided it is on its own partition.
|
|
||||||
|
|
||||||
Configuration stanzas::
|
|
||||||
|
|
||||||
volume-ephemeral:
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
description: >
|
|
||||||
If false, a volume is mounted as specified in "volume-map"
|
|
||||||
If true, ephemeral storage will be used, meaning that log data
|
|
||||||
will only exist as long as the machine. YOU HAVE BEEN WARNED.
|
|
||||||
volume-map:
|
|
||||||
type: string
|
|
||||||
default: {}
|
|
||||||
description: >
|
|
||||||
YAML map of units to device names, e.g:
|
|
||||||
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
|
|
||||||
Service units will raise a configure-error if volume-ephemeral
|
|
||||||
is 'false' and no volume-map value is set. Use 'juju set' to set a
|
|
||||||
value and 'juju resolved' to complete configuration.
|
|
||||||
|
|
||||||
Usage::
|
|
||||||
|
|
||||||
from charmsupport.volumes import configure_volume, VolumeConfigurationError
|
|
||||||
from charmsupport.hookenv import log, ERROR
|
|
||||||
def pre_mount_hook():
|
|
||||||
stop_service('myservice')
|
|
||||||
def post_mount_hook():
|
|
||||||
start_service('myservice')
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
configure_volume(before_change=pre_mount_hook,
|
|
||||||
after_change=post_mount_hook)
|
|
||||||
except VolumeConfigurationError:
|
|
||||||
log('Storage could not be configured', ERROR)
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
# XXX: Known limitations
|
|
||||||
# - fstab is neither consulted nor updated
|
|
||||||
|
|
||||||
import os
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core import host
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
|
|
||||||
MOUNT_BASE = '/srv/juju/volumes'
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeConfigurationError(Exception):
|
|
||||||
'''Volume configuration data is missing or invalid'''
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_config():
|
|
||||||
'''Gather and sanity-check volume configuration data'''
|
|
||||||
volume_config = {}
|
|
||||||
config = hookenv.config()
|
|
||||||
|
|
||||||
errors = False
|
|
||||||
|
|
||||||
if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
|
|
||||||
volume_config['ephemeral'] = True
|
|
||||||
else:
|
|
||||||
volume_config['ephemeral'] = False
|
|
||||||
|
|
||||||
try:
|
|
||||||
volume_map = yaml.safe_load(config.get('volume-map', '{}'))
|
|
||||||
except yaml.YAMLError as e:
|
|
||||||
hookenv.log("Error parsing YAML volume-map: {}".format(e),
|
|
||||||
hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
if volume_map is None:
|
|
||||||
# probably an empty string
|
|
||||||
volume_map = {}
|
|
||||||
elif not isinstance(volume_map, dict):
|
|
||||||
hookenv.log("Volume-map should be a dictionary, not {}".format(
|
|
||||||
type(volume_map)))
|
|
||||||
errors = True
|
|
||||||
|
|
||||||
volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
|
|
||||||
if volume_config['device'] and volume_config['ephemeral']:
|
|
||||||
# asked for ephemeral storage but also defined a volume ID
|
|
||||||
hookenv.log('A volume is defined for this unit, but ephemeral '
|
|
||||||
'storage was requested', hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
elif not volume_config['device'] and not volume_config['ephemeral']:
|
|
||||||
# asked for permanent storage but did not define volume ID
|
|
||||||
hookenv.log('Persistent storage was requested, but there is no volume '
|
|
||||||
'defined for this unit.', hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
|
|
||||||
unit_mount_name = hookenv.local_unit().replace('/', '-')
|
|
||||||
volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
|
|
||||||
|
|
||||||
if errors:
|
|
||||||
return None
|
|
||||||
return volume_config
|
|
||||||
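# Rough shape of the result, assuming unit rsyslog/0 with
# volume-ephemeral: false and volume-map: "{ rsyslog/0: /dev/vdb }":
#     {'ephemeral': False,
#      'device': '/dev/vdb',
#      'mountpoint': '/srv/juju/volumes/rsyslog-0'}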
|
|
||||||
|
|
||||||
def mount_volume(config):
|
|
||||||
if os.path.exists(config['mountpoint']):
|
|
||||||
if not os.path.isdir(config['mountpoint']):
|
|
||||||
hookenv.log('Not a directory: {}'.format(config['mountpoint']))
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
else:
|
|
||||||
host.mkdir(config['mountpoint'])
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
unmount_volume(config)
|
|
||||||
if not host.mount(config['device'], config['mountpoint'], persist=True):
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def unmount_volume(config):
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
if not host.umount(config['mountpoint'], persist=True):
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def managed_mounts():
|
|
||||||
'''List of all mounted managed volumes'''
|
|
||||||
return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
|
|
||||||
|
|
||||||
|
|
||||||
def configure_volume(before_change=lambda: None, after_change=lambda: None):
|
|
||||||
'''Set up storage (or don't) according to the charm's volume configuration.
|
|
||||||
Returns the mount point or "ephemeral". before_change and after_change
|
|
||||||
are optional functions to be called if the volume configuration changes.
|
|
||||||
'''
|
|
||||||
|
|
||||||
config = get_config()
|
|
||||||
if not config:
|
|
||||||
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
if config['ephemeral']:
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
before_change()
|
|
||||||
unmount_volume(config)
|
|
||||||
after_change()
|
|
||||||
return 'ephemeral'
|
|
||||||
else:
|
|
||||||
# persistent storage
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
mounts = dict(managed_mounts())
|
|
||||||
if mounts.get(config['mountpoint']) != config['device']:
|
|
||||||
before_change()
|
|
||||||
unmount_volume(config)
|
|
||||||
mount_volume(config)
|
|
||||||
after_change()
|
|
||||||
else:
|
|
||||||
before_change()
|
|
||||||
mount_volume(config)
|
|
||||||
after_change()
|
|
||||||
return config['mountpoint']
@ -1,412 +0,0 @@
|
||||||
"""Helper for working with a MySQL database"""
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
import platform
|
|
||||||
import os
|
|
||||||
import glob
|
|
||||||
|
|
||||||
# from string import upper
|
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
mkdir,
|
|
||||||
pwgen,
|
|
||||||
write_file
|
|
||||||
)
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config as config_get,
|
|
||||||
relation_get,
|
|
||||||
related_units,
|
|
||||||
unit_get,
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_update,
|
|
||||||
filter_installed_packages,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.peerstorage import (
|
|
||||||
peer_store,
|
|
||||||
peer_retrieve,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.network.ip import get_host_ip
|
|
||||||
|
|
||||||
try:
|
|
||||||
import MySQLdb
|
|
||||||
except ImportError:
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
|
|
||||||
import MySQLdb
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLHelper(object):
|
|
||||||
|
|
||||||
def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
|
|
||||||
migrate_passwd_to_peer_relation=True,
|
|
||||||
delete_ondisk_passwd_file=True):
|
|
||||||
self.host = host
|
|
||||||
# Password file path templates
|
|
||||||
self.root_passwd_file_template = rpasswdf_template
|
|
||||||
self.user_passwd_file_template = upasswdf_template
|
|
||||||
|
|
||||||
self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
|
|
||||||
# If we migrate we have the option to delete local copy of root passwd
|
|
||||||
self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
|
|
||||||
|
|
||||||
def connect(self, user='root', password=None):
|
|
||||||
log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
|
|
||||||
self.connection = MySQLdb.connect(user=user, host=self.host,
|
|
||||||
passwd=password)
|
|
||||||
|
|
||||||
def database_exists(self, db_name):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("SHOW DATABASES")
|
|
||||||
databases = [i[0] for i in cursor.fetchall()]
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
return db_name in databases
|
|
||||||
|
|
||||||
def create_database(self, db_name):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8"
|
|
||||||
.format(db_name))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def grant_exists(self, db_name, db_user, remote_ip):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
|
|
||||||
"TO '{}'@'{}'".format(db_name, db_user, remote_ip)
|
|
||||||
try:
|
|
||||||
cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
|
|
||||||
remote_ip))
|
|
||||||
grants = [i[0] for i in cursor.fetchall()]
|
|
||||||
except MySQLdb.OperationalError:
|
|
||||||
return False
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
# TODO: review for different grants
|
|
||||||
return priv_string in grants
|
|
||||||
|
|
||||||
def create_grant(self, db_name, db_user, remote_ip, password):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
# TODO: review for different grants
|
|
||||||
cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "
|
|
||||||
"IDENTIFIED BY '{}'".format(db_name,
|
|
||||||
db_user,
|
|
||||||
remote_ip,
|
|
||||||
password))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def create_admin_grant(self, db_user, remote_ip, password):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
|
|
||||||
"IDENTIFIED BY '{}'".format(db_user,
|
|
||||||
remote_ip,
|
|
||||||
password))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def cleanup_grant(self, db_user, remote_ip):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("DROP FROM mysql.user WHERE user='{}' "
|
|
||||||
"AND HOST='{}'".format(db_user,
|
|
||||||
remote_ip))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def execute(self, sql):
|
|
||||||
"""Execute arbitary SQL against the database."""
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute(sql)
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def migrate_passwords_to_peer_relation(self, excludes=None):
|
|
||||||
"""Migrate any passwords storage on disk to cluster peer relation."""
|
|
||||||
dirname = os.path.dirname(self.root_passwd_file_template)
|
|
||||||
path = os.path.join(dirname, '*.passwd')
|
|
||||||
for f in glob.glob(path):
|
|
||||||
if excludes and f in excludes:
|
|
||||||
log("Excluding %s from peer migration" % (f), level=DEBUG)
|
|
||||||
continue
|
|
||||||
|
|
||||||
key = os.path.basename(f)
|
|
||||||
with open(f, 'r') as passwd:
|
|
||||||
_value = passwd.read().strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
peer_store(key, _value)
|
|
||||||
|
|
||||||
if self.delete_ondisk_passwd_file:
|
|
||||||
os.unlink(f)
|
|
||||||
except ValueError:
|
|
||||||
# NOTE cluster relation not yet ready - skip for now
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get_mysql_password_on_disk(self, username=None, password=None):
|
|
||||||
"""Retrieve, generate or store a mysql password for the provided
|
|
||||||
username on disk."""
|
|
||||||
if username:
|
|
||||||
template = self.user_passwd_file_template
|
|
||||||
passwd_file = template.format(username)
|
|
||||||
else:
|
|
||||||
passwd_file = self.root_passwd_file_template
|
|
||||||
|
|
||||||
_password = None
|
|
||||||
if os.path.exists(passwd_file):
|
|
||||||
log("Using existing password file '%s'" % passwd_file, level=DEBUG)
|
|
||||||
with open(passwd_file, 'r') as passwd:
|
|
||||||
_password = passwd.read().strip()
|
|
||||||
else:
|
|
||||||
log("Generating new password file '%s'" % passwd_file, level=DEBUG)
|
|
||||||
if not os.path.isdir(os.path.dirname(passwd_file)):
|
|
||||||
# NOTE: need to ensure this is not mysql root dir (which needs
|
|
||||||
# to be mysql readable)
|
|
||||||
mkdir(os.path.dirname(passwd_file), owner='root', group='root',
|
|
||||||
perms=0o770)
|
|
||||||
# Force permissions - for some reason the chmod in makedirs
|
|
||||||
# fails
|
|
||||||
os.chmod(os.path.dirname(passwd_file), 0o770)
|
|
||||||
|
|
||||||
_password = password or pwgen(length=32)
|
|
||||||
write_file(passwd_file, _password, owner='root', group='root',
|
|
||||||
perms=0o660)
|
|
||||||
|
|
||||||
return _password
|
|
||||||
|
|
||||||
def passwd_keys(self, username):
|
|
||||||
"""Generator to return keys used to store passwords in peer store.
|
|
||||||
|
|
||||||
NOTE: we support both legacy and new format to support mysql
|
|
||||||
charm prior to refactor. This is necessary to avoid LP 1451890.
|
|
||||||
"""
|
|
||||||
keys = []
|
|
||||||
if username == 'mysql':
|
|
||||||
log("Bad username '%s'" % (username), level=WARNING)
|
|
||||||
|
|
||||||
if username:
|
|
||||||
# IMPORTANT: *newer* format must be returned first
|
|
||||||
keys.append('mysql-%s.passwd' % (username))
|
|
||||||
keys.append('%s.passwd' % (username))
|
|
||||||
else:
|
|
||||||
keys.append('mysql.passwd')
|
|
||||||
|
|
||||||
for key in keys:
|
|
||||||
yield key
|
|
||||||
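# Lookup order produced by passwd_keys(), newer key format first so the
# refactored naming wins over legacy entries:
#     list(self.passwd_keys('nova')) -> ['mysql-nova.passwd', 'nova.passwd']
#     list(self.passwd_keys(None))   -> ['mysql.passwd']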
|
|
||||||
def get_mysql_password(self, username=None, password=None):
|
|
||||||
"""Retrieve, generate or store a mysql password for the provided
|
|
||||||
username using peer relation cluster."""
|
|
||||||
excludes = []
|
|
||||||
|
|
||||||
# First check peer relation.
|
|
||||||
try:
|
|
||||||
for key in self.passwd_keys(username):
|
|
||||||
_password = peer_retrieve(key)
|
|
||||||
if _password:
|
|
||||||
break
|
|
||||||
|
|
||||||
# If root password available don't update peer relation from local
|
|
||||||
if _password and not username:
|
|
||||||
excludes.append(self.root_passwd_file_template)
|
|
||||||
|
|
||||||
except ValueError:
|
|
||||||
# cluster relation is not yet started; use on-disk
|
|
||||||
_password = None
|
|
||||||
|
|
||||||
# If none available, generate new one
|
|
||||||
if not _password:
|
|
||||||
_password = self.get_mysql_password_on_disk(username, password)
|
|
||||||
|
|
||||||
# Put on wire if required
|
|
||||||
if self.migrate_passwd_to_peer_relation:
|
|
||||||
self.migrate_passwords_to_peer_relation(excludes=excludes)
|
|
||||||
|
|
||||||
return _password
|
|
||||||
|
|
||||||
def get_mysql_root_password(self, password=None):
|
|
||||||
"""Retrieve or generate mysql root password for service units."""
|
|
||||||
return self.get_mysql_password(username=None, password=password)
|
|
||||||
|
|
||||||
def normalize_address(self, hostname):
|
|
||||||
"""Ensure that address returned is an IP address (i.e. not fqdn)"""
|
|
||||||
if config_get('prefer-ipv6'):
|
|
||||||
# TODO: add support for ipv6 dns
|
|
||||||
return hostname
|
|
||||||
|
|
||||||
if hostname != unit_get('private-address'):
|
|
||||||
return get_host_ip(hostname, fallback=hostname)
|
|
||||||
|
|
||||||
# Otherwise assume localhost
|
|
||||||
return '127.0.0.1'
|
|
||||||
|
|
||||||
def get_allowed_units(self, database, username, relation_id=None):
|
|
||||||
"""Get list of units with access grants for database with username.
|
|
||||||
|
|
||||||
This is typically used to provide shared-db relations with a list of
|
|
||||||
which units have been granted access to the given database.
|
|
||||||
"""
|
|
||||||
self.connect(password=self.get_mysql_root_password())
|
|
||||||
allowed_units = set()
|
|
||||||
for unit in related_units(relation_id):
|
|
||||||
settings = relation_get(rid=relation_id, unit=unit)
|
|
||||||
# First check for setting with prefix, then without
|
|
||||||
for attr in ["%s_hostname" % (database), 'hostname']:
|
|
||||||
hosts = settings.get(attr, None)
|
|
||||||
if hosts:
|
|
||||||
break
|
|
||||||
|
|
||||||
if hosts:
|
|
||||||
# hostname can be json-encoded list of hostnames
|
|
||||||
try:
|
|
||||||
hosts = json.loads(hosts)
|
|
||||||
except ValueError:
|
|
||||||
hosts = [hosts]
|
|
||||||
else:
|
|
||||||
hosts = [settings['private-address']]
|
|
||||||
|
|
||||||
if hosts:
|
|
||||||
for host in hosts:
|
|
||||||
host = self.normalize_address(host)
|
|
||||||
if self.grant_exists(database, username, host):
|
|
||||||
log("Grant exists for host '%s' on db '%s'" %
|
|
||||||
(host, database), level=DEBUG)
|
|
||||||
if unit not in allowed_units:
|
|
||||||
allowed_units.add(unit)
|
|
||||||
else:
|
|
||||||
log("Grant does NOT exist for host '%s' on db '%s'" %
|
|
||||||
(host, database), level=DEBUG)
|
|
||||||
else:
|
|
||||||
log("No hosts found for grant check", level=INFO)
|
|
||||||
|
|
||||||
return allowed_units
|
|
||||||
|
|
||||||
def configure_db(self, hostname, database, username, admin=False):
|
|
||||||
"""Configure access to database for username from hostname."""
|
|
||||||
self.connect(password=self.get_mysql_root_password())
|
|
||||||
if not self.database_exists(database):
|
|
||||||
self.create_database(database)
|
|
||||||
|
|
||||||
remote_ip = self.normalize_address(hostname)
|
|
||||||
password = self.get_mysql_password(username)
|
|
||||||
if not self.grant_exists(database, username, remote_ip):
|
|
||||||
if not admin:
|
|
||||||
self.create_grant(database, username, remote_ip, password)
|
|
||||||
else:
|
|
||||||
self.create_admin_grant(username, remote_ip, password)
|
|
||||||
|
|
||||||
return password
|
|
||||||
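# A minimal sketch of configure_db() driven from a shared-db hook; the
# database/user names and password file templates are placeholders.
helper = MySQLHelper('/var/lib/charm/mysql.passwd',
                     '/var/lib/charm/mysql-{}.passwd')
client_host = relation_get('private-address')
password = helper.configure_db(client_host, 'nova', 'nova')
# The hook would then publish its own address plus this password back on the
# shared-db relation for the client to consume.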
|
|
||||||
|
|
||||||
class PerconaClusterHelper(object):
|
|
||||||
|
|
||||||
# Going for the biggest page size to avoid wasted bytes.
|
|
||||||
# InnoDB page size is 16MB
|
|
||||||
|
|
||||||
DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
|
|
||||||
DEFAULT_INNODB_BUFFER_FACTOR = 0.50
|
|
||||||
|
|
||||||
def human_to_bytes(self, human):
|
|
||||||
"""Convert human readable configuration options to bytes."""
|
|
||||||
num_re = re.compile('^[0-9]+$')
|
|
||||||
if num_re.match(human):
|
|
||||||
return human
|
|
||||||
|
|
||||||
factors = {
|
|
||||||
'K': 1024,
|
|
||||||
'M': 1048576,
|
|
||||||
'G': 1073741824,
|
|
||||||
'T': 1099511627776
|
|
||||||
}
|
|
||||||
modifier = human[-1]
|
|
||||||
if modifier in factors:
|
|
||||||
return int(human[:-1]) * factors[modifier]
|
|
||||||
|
|
||||||
if modifier == '%':
|
|
||||||
total_ram = self.human_to_bytes(self.get_mem_total())
|
|
||||||
if self.is_32bit_system() and total_ram > self.sys_mem_limit():
|
|
||||||
total_ram = self.sys_mem_limit()
|
|
||||||
factor = int(human[:-1]) * 0.01
|
|
||||||
pctram = total_ram * factor
|
|
||||||
return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))
|
|
||||||
|
|
||||||
raise ValueError("Can only convert K,M,G, or T")
|
|
||||||
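# Expected conversions, as a rough guide:
#     human_to_bytes('32M')  -> 33554432
#     human_to_bytes('4G')   -> 4294967296
#     human_to_bytes('1024') -> '1024'   (bare digits pass through unchanged)
# A '%' value resolves against MemTotal (capped on 32-bit hosts) and is
# rounded down to a whole 16MB InnoDB page, so it depends on the host.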
|
|
||||||
def is_32bit_system(self):
|
|
||||||
"""Determine whether system is 32 or 64 bit."""
|
|
||||||
try:
|
|
||||||
return sys.maxsize < 2 ** 32
|
|
||||||
except OverflowError:
|
|
||||||
return False
|
|
||||||
|
|
||||||
def sys_mem_limit(self):
|
|
||||||
"""Determine the default memory limit for the current service unit."""
|
|
||||||
if platform.machine() in ['armv7l']:
|
|
||||||
_mem_limit = self.human_to_bytes('2700M') # experimentally determined
|
|
||||||
else:
|
|
||||||
# Limit for x86 based 32bit systems
|
|
||||||
_mem_limit = self.human_to_bytes('4G')
|
|
||||||
|
|
||||||
return _mem_limit
|
|
||||||
|
|
||||||
def get_mem_total(self):
|
|
||||||
"""Calculate the total memory in the current service unit."""
|
|
||||||
with open('/proc/meminfo') as meminfo_file:
|
|
||||||
for line in meminfo_file:
|
|
||||||
key, mem = line.split(':', 2)
|
|
||||||
if key == 'MemTotal':
|
|
||||||
mtot, modifier = mem.strip().split(' ')
|
|
||||||
return '%s%s' % (mtot, modifier[0].upper())
|
|
||||||
|
|
||||||
def parse_config(self):
|
|
||||||
"""Parse charm configuration and calculate values for config files."""
|
|
||||||
config = config_get()
|
|
||||||
mysql_config = {}
|
|
||||||
if 'max-connections' in config:
|
|
||||||
mysql_config['max_connections'] = config['max-connections']
|
|
||||||
|
|
||||||
if 'wait-timeout' in config:
|
|
||||||
mysql_config['wait_timeout'] = config['wait-timeout']
|
|
||||||
|
|
||||||
if 'innodb-flush-log-at-trx-commit' in config:
|
|
||||||
mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']
|
|
||||||
|
|
||||||
# Set a sane default key_buffer size
|
|
||||||
mysql_config['key_buffer'] = self.human_to_bytes('32M')
|
|
||||||
total_memory = self.human_to_bytes(self.get_mem_total())
|
|
||||||
|
|
||||||
dataset_bytes = config.get('dataset-size', None)
|
|
||||||
innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)
|
|
||||||
|
|
||||||
if innodb_buffer_pool_size:
|
|
||||||
innodb_buffer_pool_size = self.human_to_bytes(
|
|
||||||
innodb_buffer_pool_size)
|
|
||||||
elif dataset_bytes:
|
|
||||||
log("Option 'dataset-size' has been deprecated, please use"
|
|
||||||
"innodb_buffer_pool_size option instead", level="WARN")
|
|
||||||
innodb_buffer_pool_size = self.human_to_bytes(
|
|
||||||
dataset_bytes)
|
|
||||||
else:
|
|
||||||
innodb_buffer_pool_size = int(
|
|
||||||
total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)
|
|
||||||
|
|
||||||
if innodb_buffer_pool_size > total_memory:
|
|
||||||
log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
|
|
||||||
innodb_buffer_pool_size,
|
|
||||||
total_memory), level='WARN')
|
|
||||||
|
|
||||||
mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
|
|
||||||
return mysql_config
|
|
|
@ -23,7 +23,7 @@ import socket
|
||||||
from functools import partial
|
from functools import partial
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import unit_get
|
from charmhelpers.core.hookenv import unit_get
|
||||||
from charmhelpers.fetch import apt_install
|
from charmhelpers.fetch import apt_install, apt_update
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
log,
|
log,
|
||||||
WARNING,
|
WARNING,
|
||||||
|
@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import (
|
||||||
try:
|
try:
|
||||||
import netifaces
|
import netifaces
|
||||||
except ImportError:
|
except ImportError:
|
||||||
apt_install('python-netifaces')
|
apt_update(fatal=True)
|
||||||
|
apt_install('python-netifaces', fatal=True)
|
||||||
import netifaces
|
import netifaces
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import netaddr
|
import netaddr
|
||||||
except ImportError:
|
except ImportError:
|
||||||
apt_install('python-netaddr')
|
apt_update(fatal=True)
|
||||||
|
apt_install('python-netaddr', fatal=True)
|
||||||
import netaddr
|
import netaddr
|
||||||
|
|
||||||
|
|
||||||
|
@ -51,7 +53,7 @@ def _validate_cidr(network):
|
||||||
|
|
||||||
|
|
||||||
def no_ip_found_error_out(network):
|
def no_ip_found_error_out(network):
|
||||||
errmsg = ("No IP address found in network: %s" % network)
|
errmsg = ("No IP address found in network(s): %s" % network)
|
||||||
raise ValueError(errmsg)
|
raise ValueError(errmsg)
|
||||||
|
|
||||||
|
|
||||||
|
@ -59,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
|
||||||
"""Get an IPv4 or IPv6 address within the network from the host.
|
"""Get an IPv4 or IPv6 address within the network from the host.
|
||||||
|
|
||||||
:param network (str): CIDR presentation format. For example,
|
:param network (str): CIDR presentation format. For example,
|
||||||
'192.168.1.0/24'.
|
'192.168.1.0/24'. Supports multiple networks as a space-delimited list.
|
||||||
:param fallback (str): If no address is found, return fallback.
|
:param fallback (str): If no address is found, return fallback.
|
||||||
:param fatal (boolean): If no address is found, fallback is not
|
:param fatal (boolean): If no address is found, fallback is not
|
||||||
set and fatal is True then exit(1).
|
set and fatal is True then exit(1).
|
||||||
|
@ -73,6 +75,8 @@ def get_address_in_network(network, fallback=None, fatal=False):
|
||||||
else:
|
else:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
networks = network.split() or [network]
|
||||||
|
for network in networks:
|
||||||
_validate_cidr(network)
|
_validate_cidr(network)
|
||||||
network = netaddr.IPNetwork(network)
|
network = netaddr.IPNetwork(network)
|
||||||
for iface in netifaces.interfaces():
|
for iface in netifaces.interfaces():
|
||||||
|
@ -187,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
|
||||||
get_netmask_for_address = partial(_get_for_address, key='netmask')
|
get_netmask_for_address = partial(_get_for_address, key='netmask')
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_network_cidr(ip_address):
|
||||||
|
'''
|
||||||
|
Resolves the full address cidr of an ip_address based on
|
||||||
|
configured network interfaces
|
||||||
|
'''
|
||||||
|
netmask = get_netmask_for_address(ip_address)
|
||||||
|
return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
|
||||||
|
|
||||||
|
|
||||||
def format_ipv6_addr(address):
|
def format_ipv6_addr(address):
|
||||||
"""If address is IPv6, wrap it in '[]' otherwise return None.
|
"""If address is IPv6, wrap it in '[]' otherwise return None.
|
||||||
|
|
||||||
|
@ -435,7 +448,11 @@ def get_hostname(address, fqdn=True):
|
||||||
|
|
||||||
rev = dns.reversename.from_address(address)
|
rev = dns.reversename.from_address(address)
|
||||||
result = ns_query(rev)
|
result = ns_query(rev)
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
|
try:
|
||||||
|
result = socket.gethostbyaddr(address)[0]
|
||||||
|
except:
|
||||||
return None
|
return None
|
||||||
else:
|
else:
|
||||||
result = address
|
result = address
|
||||||
|
@ -448,3 +465,18 @@ def get_hostname(address, fqdn=True):
|
||||||
return result
|
return result
|
||||||
else:
|
else:
|
||||||
return result.split('.')[0]
|
return result.split('.')[0]
|
||||||
|
|
||||||
|
|
||||||
|
def port_has_listener(address, port):
|
||||||
|
"""
|
||||||
|
Returns True if the address:port is open and being listened to,
|
||||||
|
else False.
|
||||||
|
|
||||||
|
@param address: an IP address or hostname
|
||||||
|
@param port: integer port
|
||||||
|
|
||||||
|
Note: calls 'nc' (netcat) via a subprocess call
|
||||||
|
"""
|
||||||
|
cmd = ['nc', '-z', address, str(port)]
|
||||||
|
result = subprocess.call(cmd)
|
||||||
|
return not(bool(result))
|
||||||
|
|
|
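# A minimal usage sketch for the new port_has_listener() helper; the address
# and port are placeholders.
if port_has_listener('127.0.0.1', 3306):
    log('something is already listening on 3306', level=WARNING)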
@ -25,10 +25,14 @@ from charmhelpers.core.host import (
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def add_bridge(name):
|
def add_bridge(name, datapath_type=None):
|
||||||
''' Add the named bridge to openvswitch '''
|
''' Add the named bridge to openvswitch '''
|
||||||
log('Creating bridge {}'.format(name))
|
log('Creating bridge {}'.format(name))
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
|
cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
|
||||||
|
if datapath_type is not None:
|
||||||
|
cmd += ['--', 'set', 'bridge', name,
|
||||||
|
'datapath_type={}'.format(datapath_type)]
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
def del_bridge(name):
|
def del_bridge(name):
|
||||||
|
|
|
@ -40,7 +40,9 @@ Examples:
|
||||||
import re
|
import re
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
from charmhelpers.core import hookenv
|
from charmhelpers.core import hookenv
|
||||||
|
from charmhelpers.core.kernel import modprobe, is_module_loaded
|
||||||
|
|
||||||
__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
|
__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
|
||||||
|
|
||||||
|
@ -82,14 +84,11 @@ def is_ipv6_ok(soft_fail=False):
|
||||||
# do we have IPv6 in the machine?
|
# do we have IPv6 in the machine?
|
||||||
if os.path.isdir('/proc/sys/net/ipv6'):
|
if os.path.isdir('/proc/sys/net/ipv6'):
|
||||||
# is ip6tables kernel module loaded?
|
# is ip6tables kernel module loaded?
|
||||||
lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
|
if not is_module_loaded('ip6_tables'):
|
||||||
matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
|
|
||||||
if len(matches) == 0:
|
|
||||||
# ip6tables support isn't complete, let's try to load it
|
# ip6tables support isn't complete, let's try to load it
|
||||||
try:
|
try:
|
||||||
subprocess.check_output(['modprobe', 'ip6_tables'],
|
modprobe('ip6_tables')
|
||||||
universal_newlines=True)
|
# great, we can load the module
|
||||||
# great, we could load the module
|
|
||||||
return True
|
return True
|
||||||
except subprocess.CalledProcessError as ex:
|
except subprocess.CalledProcessError as ex:
|
||||||
hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
|
hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
|
||||||
|
|
|
@ -14,12 +14,18 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
import six
|
import six
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
from charmhelpers.contrib.amulet.deployment import (
|
from charmhelpers.contrib.amulet.deployment import (
|
||||||
AmuletDeployment
|
AmuletDeployment
|
||||||
)
|
)
|
||||||
|
|
||||||
|
DEBUG = logging.DEBUG
|
||||||
|
ERROR = logging.ERROR
|
||||||
|
|
||||||
|
|
||||||
class OpenStackAmuletDeployment(AmuletDeployment):
|
class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
"""OpenStack amulet deployment.
|
"""OpenStack amulet deployment.
|
||||||
|
@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
that is specifically for use by OpenStack charms.
|
that is specifically for use by OpenStack charms.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, series=None, openstack=None, source=None, stable=True):
|
def __init__(self, series=None, openstack=None, source=None,
|
||||||
|
stable=True, log_level=DEBUG):
|
||||||
"""Initialize the deployment environment."""
|
"""Initialize the deployment environment."""
|
||||||
super(OpenStackAmuletDeployment, self).__init__(series)
|
super(OpenStackAmuletDeployment, self).__init__(series)
|
||||||
|
self.log = self.get_logger(level=log_level)
|
||||||
|
self.log.info('OpenStackAmuletDeployment: init')
|
||||||
self.openstack = openstack
|
self.openstack = openstack
|
||||||
self.source = source
|
self.source = source
|
||||||
self.stable = stable
|
self.stable = stable
|
||||||
|
@ -38,26 +47,55 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
# out.
|
# out.
|
||||||
self.current_next = "trusty"
|
self.current_next = "trusty"
|
||||||
|
|
||||||
|
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
|
||||||
|
"""Get a logger object that will log to stdout."""
|
||||||
|
log = logging
|
||||||
|
logger = log.getLogger(name)
|
||||||
|
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
||||||
|
"%(levelname)s: %(message)s")
|
||||||
|
|
||||||
|
handler = log.StreamHandler(stream=sys.stdout)
|
||||||
|
handler.setLevel(level)
|
||||||
|
handler.setFormatter(fmt)
|
||||||
|
|
||||||
|
logger.addHandler(handler)
|
||||||
|
logger.setLevel(level)
|
||||||
|
|
||||||
|
return logger
|
||||||
|
|
||||||
def _determine_branch_locations(self, other_services):
|
def _determine_branch_locations(self, other_services):
|
||||||
"""Determine the branch locations for the other services.
|
"""Determine the branch locations for the other services.
|
||||||
|
|
||||||
Determine if the local branch being tested is derived from its
|
Determine if the local branch being tested is derived from its
|
||||||
stable or next (dev) branch, and based on this, use the corresonding
|
stable or next (dev) branch, and based on this, use the corresonding
|
||||||
stable or next branches for the other_services."""
|
stable or next branches for the other_services."""
|
||||||
base_charms = ['mysql', 'mongodb']
|
|
||||||
|
self.log.info('OpenStackAmuletDeployment: determine branch locations')
|
||||||
|
|
||||||
|
# Charms outside the lp:~openstack-charmers namespace
|
||||||
|
base_charms = ['mysql', 'mongodb', 'nrpe']
|
||||||
|
|
||||||
|
# Force these charms to current series even when using an older series.
|
||||||
|
# ie. Use trusty/nrpe even when series is precise, as the P charm
|
||||||
|
# does not possess the necessary external master config and hooks.
|
||||||
|
force_series_current = ['nrpe']
|
||||||
|
|
||||||
if self.series in ['precise', 'trusty']:
|
if self.series in ['precise', 'trusty']:
|
||||||
base_series = self.series
|
base_series = self.series
|
||||||
else:
|
else:
|
||||||
base_series = self.current_next
|
base_series = self.current_next
|
||||||
|
|
||||||
if self.stable:
|
|
||||||
for svc in other_services:
|
for svc in other_services:
|
||||||
|
if svc['name'] in force_series_current:
|
||||||
|
base_series = self.current_next
|
||||||
|
# If a location has been explicitly set, use it
|
||||||
|
if svc.get('location'):
|
||||||
|
continue
|
||||||
|
if self.stable:
|
||||||
temp = 'lp:charms/{}/{}'
|
temp = 'lp:charms/{}/{}'
|
||||||
svc['location'] = temp.format(base_series,
|
svc['location'] = temp.format(base_series,
|
||||||
svc['name'])
|
svc['name'])
|
||||||
else:
|
else:
|
||||||
for svc in other_services:
|
|
||||||
if svc['name'] in base_charms:
|
if svc['name'] in base_charms:
|
||||||
temp = 'lp:charms/{}/{}'
|
temp = 'lp:charms/{}/{}'
|
||||||
svc['location'] = temp.format(base_series,
|
svc['location'] = temp.format(base_series,
|
||||||
|
@ -66,10 +104,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
||||||
svc['location'] = temp.format(self.current_next,
|
svc['location'] = temp.format(self.current_next,
|
||||||
svc['name'])
|
svc['name'])
|
||||||
|
|
||||||
return other_services
|
return other_services
|
||||||
|
|
||||||
def _add_services(self, this_service, other_services):
|
def _add_services(self, this_service, other_services):
|
||||||
"""Add services to the deployment and set openstack-origin/source."""
|
"""Add services to the deployment and set openstack-origin/source."""
|
||||||
|
self.log.info('OpenStackAmuletDeployment: adding services')
|
||||||
|
|
||||||
other_services = self._determine_branch_locations(other_services)
|
other_services = self._determine_branch_locations(other_services)
|
||||||
|
|
||||||
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
||||||
|
@ -77,29 +118,105 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
|
|
||||||
services = other_services
|
services = other_services
|
||||||
services.append(this_service)
|
services.append(this_service)
|
||||||
|
|
||||||
|
# Charms which should use the source config option
|
||||||
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||||
'ceph-osd', 'ceph-radosgw']
|
'ceph-osd', 'ceph-radosgw', 'ceph-mon']
|
||||||
# Most OpenStack subordinate charms do not expose an origin option
|
|
||||||
# as that is controlled by the principle.
|
# Charms which can not use openstack-origin, ie. many subordinates
|
||||||
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
|
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
|
||||||
|
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
|
||||||
|
'cinder-backup', 'nexentaedge-data',
|
||||||
|
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
|
||||||
|
'cinder-nexentaedge', 'nexentaedge-mgmt']
|
||||||
|
|
||||||
if self.openstack:
|
if self.openstack:
|
||||||
for svc in services:
|
for svc in services:
|
||||||
if svc['name'] not in use_source + ignore:
|
if svc['name'] not in use_source + no_origin:
|
||||||
config = {'openstack-origin': self.openstack}
|
config = {'openstack-origin': self.openstack}
|
||||||
self.d.configure(svc['name'], config)
|
self.d.configure(svc['name'], config)
|
||||||
|
|
||||||
if self.source:
|
if self.source:
|
||||||
for svc in services:
|
for svc in services:
|
||||||
if svc['name'] in use_source and svc['name'] not in ignore:
|
if svc['name'] in use_source and svc['name'] not in no_origin:
|
||||||
config = {'source': self.source}
|
config = {'source': self.source}
|
||||||
self.d.configure(svc['name'], config)
|
self.d.configure(svc['name'], config)
|
||||||
|
|
||||||
def _configure_services(self, configs):
|
def _configure_services(self, configs):
|
||||||
"""Configure all of the services."""
|
"""Configure all of the services."""
|
||||||
|
self.log.info('OpenStackAmuletDeployment: configure services')
|
||||||
for service, config in six.iteritems(configs):
|
for service, config in six.iteritems(configs):
|
||||||
self.d.configure(service, config)
|
self.d.configure(service, config)
|
||||||
|
|
||||||
|
def _auto_wait_for_status(self, message=None, exclude_services=None,
|
||||||
|
include_only=None, timeout=1800):
|
||||||
|
"""Wait for all units to have a specific extended status, except
|
||||||
|
for any defined as excluded. Unless specified via message, any
|
||||||
|
status containing any case of 'ready' will be considered a match.
|
||||||
|
|
||||||
|
Examples of message usage:
|
||||||
|
|
||||||
|
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
|
||||||
|
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
|
||||||
|
|
||||||
|
Wait for all units to reach this status (exact match):
|
||||||
|
message = re.compile('^Unit is ready and clustered$')
|
||||||
|
|
||||||
|
Wait for all units to reach any one of these (exact match):
|
||||||
|
message = re.compile('Unit is ready|OK|Ready')
|
||||||
|
|
||||||
|
Wait for at least one unit to reach this status (exact match):
|
||||||
|
message = {'ready'}
|
||||||
|
|
||||||
|
See Amulet's sentry.wait_for_messages() for message usage detail.
|
||||||
|
https://github.com/juju/amulet/blob/master/amulet/sentry.py
|
||||||
|
|
||||||
|
:param message: Expected status match
|
||||||
|
:param exclude_services: List of juju service names to ignore,
|
||||||
|
not to be used in conjuction with include_only.
|
||||||
|
:param include_only: List of juju service names to exclusively check,
|
||||||
|
not to be used in conjuction with exclude_services.
|
||||||
|
:param timeout: Maximum time in seconds to wait for status match
|
||||||
|
:returns: None. Raises if timeout is hit.
|
||||||
|
"""
|
||||||
|
self.log.info('Waiting for extended status on units...')
|
||||||
|
|
||||||
|
all_services = self.d.services.keys()
|
||||||
|
|
||||||
|
if exclude_services and include_only:
|
||||||
|
raise ValueError('exclude_services can not be used '
|
||||||
|
'with include_only')
|
||||||
|
|
||||||
|
if message:
|
||||||
|
if isinstance(message, re._pattern_type):
|
||||||
|
match = message.pattern
|
||||||
|
else:
|
||||||
|
match = message
|
||||||
|
|
||||||
|
self.log.debug('Custom extended status wait match: '
|
||||||
|
'{}'.format(match))
|
||||||
|
else:
|
||||||
|
self.log.debug('Default extended status wait match: contains '
|
||||||
|
'READY (case-insensitive)')
|
||||||
|
message = re.compile('.*ready.*', re.IGNORECASE)
|
||||||
|
|
||||||
|
if exclude_services:
|
||||||
|
self.log.debug('Excluding services from extended status match: '
|
||||||
|
'{}'.format(exclude_services))
|
||||||
|
else:
|
||||||
|
exclude_services = []
|
||||||
|
|
||||||
|
if include_only:
|
||||||
|
services = include_only
|
||||||
|
else:
|
||||||
|
services = list(set(all_services) - set(exclude_services))
|
||||||
|
|
||||||
|
self.log.debug('Waiting up to {}s for extended status on services: '
|
||||||
|
'{}'.format(timeout, services))
|
||||||
|
service_messages = {service: message for service in services}
|
||||||
|
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
|
||||||
|
self.log.info('OK')
|
||||||
|
|
||||||
def _get_openstack_release(self):
|
def _get_openstack_release(self):
|
||||||
"""Get openstack release.
|
"""Get openstack release.
|
||||||
|
|
||||||
|
@ -111,7 +228,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
self.precise_havana, self.precise_icehouse,
|
self.precise_havana, self.precise_icehouse,
|
||||||
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
||||||
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
|
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
|
||||||
self.wily_liberty) = range(12)
|
self.wily_liberty, self.trusty_mitaka,
|
||||||
|
self.xenial_mitaka) = range(14)
|
||||||
|
|
||||||
releases = {
|
releases = {
|
||||||
('precise', None): self.precise_essex,
|
('precise', None): self.precise_essex,
|
||||||
|
@ -123,9 +241,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
||||||
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
||||||
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
||||||
|
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
|
||||||
('utopic', None): self.utopic_juno,
|
('utopic', None): self.utopic_juno,
|
||||||
('vivid', None): self.vivid_kilo,
|
('vivid', None): self.vivid_kilo,
|
||||||
('wily', None): self.wily_liberty}
|
('wily', None): self.wily_liberty,
|
||||||
|
('xenial', None): self.xenial_mitaka}
|
||||||
return releases[(self.series, self.openstack)]
|
return releases[(self.series, self.openstack)]
|
||||||
|
|
||||||
def _get_openstack_release_string(self):
|
def _get_openstack_release_string(self):
|
||||||
|
@ -142,6 +262,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
('utopic', 'juno'),
|
('utopic', 'juno'),
|
||||||
('vivid', 'kilo'),
|
('vivid', 'kilo'),
|
||||||
('wily', 'liberty'),
|
('wily', 'liberty'),
|
||||||
|
('xenial', 'mitaka'),
|
||||||
])
|
])
|
||||||
if self.openstack:
|
if self.openstack:
|
||||||
os_origin = self.openstack.split(':')[1]
|
os_origin = self.openstack.split(':')[1]
|
||||||
|
|
|
@ -18,6 +18,7 @@ import amulet
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import six
|
import six
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
|
@ -26,7 +27,12 @@ import cinderclient.v1.client as cinder_client
|
||||||
import glanceclient.v1.client as glance_client
|
import glanceclient.v1.client as glance_client
|
||||||
import heatclient.v1.client as heat_client
|
import heatclient.v1.client as heat_client
|
||||||
import keystoneclient.v2_0 as keystone_client
|
import keystoneclient.v2_0 as keystone_client
|
||||||
import novaclient.v1_1.client as nova_client
|
from keystoneclient.auth.identity import v3 as keystone_id_v3
|
||||||
|
from keystoneclient import session as keystone_session
|
||||||
|
from keystoneclient.v3 import client as keystone_client_v3
|
||||||
|
|
||||||
|
import novaclient.client as nova_client
|
||||||
|
import pika
|
||||||
import swiftclient
|
import swiftclient
|
||||||
|
|
||||||
from charmhelpers.contrib.amulet.utils import (
|
from charmhelpers.contrib.amulet.utils import (
|
||||||
|
@ -36,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
|
||||||
DEBUG = logging.DEBUG
|
DEBUG = logging.DEBUG
|
||||||
ERROR = logging.ERROR
|
ERROR = logging.ERROR
|
||||||
|
|
||||||
|
NOVA_CLIENT_VERSION = "2"
|
||||||
|
|
||||||
|
|
||||||
class OpenStackAmuletUtils(AmuletUtils):
|
class OpenStackAmuletUtils(AmuletUtils):
|
||||||
"""OpenStack amulet utilities.
|
"""OpenStack amulet utilities.
|
||||||
|
@ -137,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
return "role {} does not exist".format(e['name'])
|
return "role {} does not exist".format(e['name'])
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def validate_user_data(self, expected, actual):
|
def validate_user_data(self, expected, actual, api_version=None):
|
||||||
"""Validate user data.
|
"""Validate user data.
|
||||||
|
|
||||||
Validate a list of actual user data vs a list of expected user
|
Validate a list of actual user data vs a list of expected user
|
||||||
|
@ -148,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
for e in expected:
|
for e in expected:
|
||||||
found = False
|
found = False
|
||||||
for act in actual:
|
for act in actual:
|
||||||
|
if e['name'] == act.name:
|
||||||
a = {'enabled': act.enabled, 'name': act.name,
|
a = {'enabled': act.enabled, 'name': act.name,
|
||||||
'email': act.email, 'tenantId': act.tenantId,
|
'email': act.email, 'id': act.id}
|
||||||
'id': act.id}
|
if api_version == 3:
|
||||||
if e['name'] == a['name']:
|
a['default_project_id'] = getattr(act,
|
||||||
|
'default_project_id',
|
||||||
|
'none')
|
||||||
|
else:
|
||||||
|
a['tenantId'] = act.tenantId
|
||||||
found = True
|
found = True
|
||||||
ret = self._validate_dict_data(e, a)
|
ret = self._validate_dict_data(e, a)
|
||||||
if ret:
|
if ret:
|
||||||
|
@ -186,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
return cinder_client.Client(username, password, tenant, ept)
|
return cinder_client.Client(username, password, tenant, ept)
|
||||||
|
|
||||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
||||||
tenant):
|
tenant=None, api_version=None,
|
||||||
|
keystone_ip=None):
|
||||||
"""Authenticates admin user with the keystone admin endpoint."""
|
"""Authenticates admin user with the keystone admin endpoint."""
|
||||||
self.log.debug('Authenticating keystone admin...')
|
self.log.debug('Authenticating keystone admin...')
|
||||||
unit = keystone_sentry
|
unit = keystone_sentry
|
||||||
service_ip = unit.relation('shared-db',
|
if not keystone_ip:
|
||||||
|
keystone_ip = unit.relation('shared-db',
|
||||||
'mysql:shared-db')['private-address']
|
'mysql:shared-db')['private-address']
|
||||||
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
|
base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
|
||||||
|
if not api_version or api_version == 2:
|
||||||
|
ep = base_ep + "/v2.0"
|
||||||
return keystone_client.Client(username=user, password=password,
|
return keystone_client.Client(username=user, password=password,
|
||||||
tenant_name=tenant, auth_url=ep)
|
tenant_name=tenant, auth_url=ep)
|
||||||
|
else:
|
||||||
|
ep = base_ep + "/v3"
|
||||||
|
auth = keystone_id_v3.Password(
|
||||||
|
user_domain_name='admin_domain',
|
||||||
|
username=user,
|
||||||
|
password=password,
|
||||||
|
domain_name='admin_domain',
|
||||||
|
auth_url=ep,
|
||||||
|
)
|
||||||
|
sess = keystone_session.Session(auth=auth)
|
||||||
|
return keystone_client_v3.Client(session=sess)
|
||||||
|
|
||||||
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
||||||
"""Authenticates a regular user with the keystone public endpoint."""
|
"""Authenticates a regular user with the keystone public endpoint."""
|
||||||
|
@ -223,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
self.log.debug('Authenticating nova user ({})...'.format(user))
|
self.log.debug('Authenticating nova user ({})...'.format(user))
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
endpoint_type='publicURL')
|
endpoint_type='publicURL')
|
||||||
return nova_client.Client(username=user, api_key=password,
|
return nova_client.Client(NOVA_CLIENT_VERSION,
|
||||||
|
username=user, api_key=password,
|
||||||
project_id=tenant, auth_url=ep)
|
project_id=tenant, auth_url=ep)
|
||||||
|
|
||||||
def authenticate_swift_user(self, keystone, user, password, tenant):
|
def authenticate_swift_user(self, keystone, user, password, tenant):
|
||||||
|
@ -602,3 +631,382 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
self.log.debug('Ceph {} samples (OK): '
|
self.log.debug('Ceph {} samples (OK): '
|
||||||
'{}'.format(sample_type, samples))
|
'{}'.format(sample_type, samples))
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
# rabbitmq/amqp specific helpers:
|
||||||
|
|
||||||
|
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
|
||||||
|
"""Wait for rmq units extended status to show cluster readiness,
|
||||||
|
after an optional initial sleep period. Initial sleep is likely
|
||||||
|
necessary to be effective following a config change, as status
|
||||||
|
message may not instantly update to non-ready."""
|
||||||
|
|
||||||
|
if init_sleep:
|
||||||
|
time.sleep(init_sleep)
|
||||||
|
|
||||||
|
message = re.compile('^Unit is ready and clustered$')
|
||||||
|
deployment._auto_wait_for_status(message=message,
|
||||||
|
timeout=timeout,
|
||||||
|
include_only=['rabbitmq-server'])
|
||||||
|
|
||||||
|
def add_rmq_test_user(self, sentry_units,
|
||||||
|
username="testuser1", password="changeme"):
|
||||||
|
"""Add a test user via the first rmq juju unit, check connection as
|
||||||
|
the new user against all sentry units.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug('Adding rmq user ({})...'.format(username))
|
||||||
|
|
||||||
|
# Check that user does not already exist
|
||||||
|
cmd_user_list = 'rabbitmqctl list_users'
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
|
||||||
|
if username in output:
|
||||||
|
self.log.warning('User ({}) already exists, returning '
|
||||||
|
'gracefully.'.format(username))
|
||||||
|
return
|
||||||
|
|
||||||
|
perms = '".*" ".*" ".*"'
|
||||||
|
cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
|
||||||
|
'rabbitmqctl set_permissions {} {}'.format(username, perms)]
|
||||||
|
|
||||||
|
# Add user via first unit
|
||||||
|
for cmd in cmds:
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd)
|
||||||
|
|
||||||
|
# Check connection against the other sentry_units
|
||||||
|
self.log.debug('Checking user connect against units...')
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
|
||||||
|
username=username,
|
||||||
|
password=password)
|
||||||
|
connection.close()
|
||||||
|
|
||||||
|
def delete_rmq_test_user(self, sentry_units, username="testuser1"):
|
||||||
|
"""Delete a rabbitmq user via the first rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:returns: None if successful or no such user.
|
||||||
|
"""
|
||||||
|
self.log.debug('Deleting rmq user ({})...'.format(username))
|
||||||
|
|
||||||
|
# Check that the user exists
|
||||||
|
cmd_user_list = 'rabbitmqctl list_users'
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
|
||||||
|
|
||||||
|
if username not in output:
|
||||||
|
self.log.warning('User ({}) does not exist, returning '
|
||||||
|
'gracefully.'.format(username))
|
||||||
|
return
|
||||||
|
|
||||||
|
# Delete the user
|
||||||
|
cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
|
||||||
|
|
||||||
|
def get_rmq_cluster_status(self, sentry_unit):
|
||||||
|
"""Execute rabbitmq cluster status command on a unit and return
|
||||||
|
the full output.
|
||||||
|
|
||||||
|
:param unit: sentry unit
|
||||||
|
:returns: String containing console output of cluster status command
|
||||||
|
"""
|
||||||
|
cmd = 'rabbitmqctl cluster_status'
|
||||||
|
output, _ = self.run_cmd_unit(sentry_unit, cmd)
|
||||||
|
self.log.debug('{} cluster_status:\n{}'.format(
|
||||||
|
sentry_unit.info['unit_name'], output))
|
||||||
|
return str(output)
|
||||||
|
|
||||||
|
def get_rmq_cluster_running_nodes(self, sentry_unit):
|
||||||
|
"""Parse rabbitmqctl cluster_status output string, return list of
|
||||||
|
running rabbitmq cluster nodes.
|
||||||
|
|
||||||
|
:param unit: sentry unit
|
||||||
|
:returns: List containing node names of running nodes
|
||||||
|
"""
|
||||||
|
# NOTE(beisner): rabbitmqctl cluster_status output is not
|
||||||
|
# json-parsable, do string chop foo, then json.loads that.
|
||||||
|
str_stat = self.get_rmq_cluster_status(sentry_unit)
|
||||||
|
if 'running_nodes' in str_stat:
|
||||||
|
pos_start = str_stat.find("{running_nodes,") + 15
|
||||||
|
pos_end = str_stat.find("]},", pos_start) + 1
|
||||||
|
str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
|
||||||
|
run_nodes = json.loads(str_run_nodes)
|
||||||
|
return run_nodes
|
||||||
|
else:
|
||||||
|
return []
|
||||||
|
|
||||||
|
def validate_rmq_cluster_running_nodes(self, sentry_units):
|
||||||
|
"""Check that all rmq unit hostnames are represented in the
|
||||||
|
cluster_status output of all units.
|
||||||
|
|
||||||
|
:param host_names: dict of juju unit names to host names
|
||||||
|
:param units: list of sentry unit pointers (all rmq units)
|
||||||
|
:returns: None if successful, otherwise return error message
|
||||||
|
"""
|
||||||
|
host_names = self.get_unit_hostnames(sentry_units)
|
||||||
|
errors = []
|
||||||
|
|
||||||
|
# Query every unit for cluster_status running nodes
|
||||||
|
for query_unit in sentry_units:
|
||||||
|
query_unit_name = query_unit.info['unit_name']
|
||||||
|
running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
|
||||||
|
|
||||||
|
# Confirm that every unit is represented in the queried unit's
|
||||||
|
# cluster_status running nodes output.
|
||||||
|
for validate_unit in sentry_units:
|
||||||
|
val_host_name = host_names[validate_unit.info['unit_name']]
|
||||||
|
val_node_name = 'rabbit@{}'.format(val_host_name)
|
||||||
|
|
||||||
|
if val_node_name not in running_nodes:
|
||||||
|
errors.append('Cluster member check failed on {}: {} not '
|
||||||
|
'in {}\n'.format(query_unit_name,
|
||||||
|
val_node_name,
|
||||||
|
running_nodes))
|
||||||
|
if errors:
|
||||||
|
return ''.join(errors)
|
||||||
|
|
||||||
|
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
|
||||||
|
"""Check a single juju rmq unit for ssl and port in the config file."""
|
||||||
|
host = sentry_unit.info['public-address']
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
|
||||||
|
conf_file = '/etc/rabbitmq/rabbitmq.config'
|
||||||
|
conf_contents = str(self.file_contents_safe(sentry_unit,
|
||||||
|
conf_file, max_wait=16))
|
||||||
|
# Checks
|
||||||
|
conf_ssl = 'ssl' in conf_contents
|
||||||
|
conf_port = str(port) in conf_contents
|
||||||
|
|
||||||
|
# Port explicitly checked in config
|
||||||
|
if port and conf_port and conf_ssl:
|
||||||
|
self.log.debug('SSL is enabled @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return True
|
||||||
|
elif port and not conf_port and conf_ssl:
|
||||||
|
self.log.debug('SSL is enabled @{} but not on port {} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return False
|
||||||
|
# Port not checked (useful when checking that ssl is disabled)
|
||||||
|
elif not port and conf_ssl:
|
||||||
|
self.log.debug('SSL is enabled @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return True
|
||||||
|
elif not conf_ssl:
|
||||||
|
self.log.debug('SSL not enabled @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
msg = ('Unknown condition when checking SSL status @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
|
||||||
|
"""Check that ssl is enabled on rmq juju sentry units.
|
||||||
|
|
||||||
|
:param sentry_units: list of all rmq sentry units
|
||||||
|
:param port: optional ssl port override to validate
|
||||||
|
:returns: None if successful, otherwise return error message
|
||||||
|
"""
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
|
||||||
|
return ('Unexpected condition: ssl is disabled on unit '
|
||||||
|
'({})'.format(sentry_unit.info['unit_name']))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def validate_rmq_ssl_disabled_units(self, sentry_units):
|
||||||
|
"""Check that ssl is enabled on listed rmq juju sentry units.
|
||||||
|
|
||||||
|
:param sentry_units: list of all rmq sentry units
|
||||||
|
:returns: True if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
|
||||||
|
return ('Unexpected condition: ssl is enabled on unit '
|
||||||
|
'({})'.format(sentry_unit.info['unit_name']))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def configure_rmq_ssl_on(self, sentry_units, deployment,
|
||||||
|
port=None, max_wait=60):
|
||||||
|
"""Turn ssl charm config option on, with optional non-default
|
||||||
|
ssl port specification. Confirm that it is enabled on every
|
||||||
|
unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry units
|
||||||
|
:param deployment: amulet deployment object pointer
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:param max_wait: maximum time to wait in seconds to confirm
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug('Setting ssl charm config option: on')
|
||||||
|
|
||||||
|
# Enable RMQ SSL
|
||||||
|
config = {'ssl': 'on'}
|
||||||
|
if port:
|
||||||
|
config['ssl_port'] = port
|
||||||
|
|
||||||
|
deployment.d.configure('rabbitmq-server', config)
|
||||||
|
|
||||||
|
# Wait for unit status
|
||||||
|
self.rmq_wait_for_cluster(deployment)
|
||||||
|
|
||||||
|
# Confirm
|
||||||
|
tries = 0
|
||||||
|
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
|
||||||
|
while ret and tries < (max_wait / 4):
|
||||||
|
time.sleep(4)
|
||||||
|
self.log.debug('Attempt {}: {}'.format(tries, ret))
|
||||||
|
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, ret)
|
||||||
|
|
||||||
|
def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
|
||||||
|
"""Turn ssl charm config option off, confirm that it is disabled
|
||||||
|
on every unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry units
|
||||||
|
:param deployment: amulet deployment object pointer
|
||||||
|
:param max_wait: maximum time to wait in seconds to confirm
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug('Setting ssl charm config option: off')
|
||||||
|
|
||||||
|
# Disable RMQ SSL
|
||||||
|
config = {'ssl': 'off'}
|
||||||
|
deployment.d.configure('rabbitmq-server', config)
|
||||||
|
|
||||||
|
# Wait for unit status
|
||||||
|
self.rmq_wait_for_cluster(deployment)
|
||||||
|
|
||||||
|
# Confirm
|
||||||
|
tries = 0
|
||||||
|
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
|
||||||
|
while ret and tries < (max_wait / 4):
|
||||||
|
time.sleep(4)
|
||||||
|
self.log.debug('Attempt {}: {}'.format(tries, ret))
|
||||||
|
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, ret)
|
||||||
|
|
||||||
|
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
|
||||||
|
port=None, fatal=True,
|
||||||
|
username="testuser1", password="changeme"):
|
||||||
|
"""Establish and return a pika amqp connection to the rabbitmq service
|
||||||
|
running on a rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit pointer
|
||||||
|
:param ssl: boolean, default to False
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:param fatal: boolean, default to True (raises on connect error)
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:returns: pika amqp connection pointer or None if failed and non-fatal
|
||||||
|
"""
|
||||||
|
host = sentry_unit.info['public-address']
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
|
||||||
|
# Default port logic if port is not specified
|
||||||
|
if ssl and not port:
|
||||||
|
port = 5671
|
||||||
|
elif not ssl and not port:
|
||||||
|
port = 5672
|
||||||
|
|
||||||
|
self.log.debug('Connecting to amqp on {}:{} ({}) as '
|
||||||
|
'{}...'.format(host, port, unit_name, username))
|
||||||
|
|
||||||
|
try:
|
||||||
|
credentials = pika.PlainCredentials(username, password)
|
||||||
|
parameters = pika.ConnectionParameters(host=host, port=port,
|
||||||
|
credentials=credentials,
|
||||||
|
ssl=ssl,
|
||||||
|
connection_attempts=3,
|
||||||
|
retry_delay=5,
|
||||||
|
socket_timeout=1)
|
||||||
|
connection = pika.BlockingConnection(parameters)
|
||||||
|
assert connection.server_properties['product'] == 'RabbitMQ'
|
||||||
|
self.log.debug('Connect OK')
|
||||||
|
return connection
|
||||||
|
except Exception as e:
|
||||||
|
msg = ('amqp connection failed to {}:{} as '
|
||||||
|
'{} ({})'.format(host, port, username, str(e)))
|
||||||
|
if fatal:
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
else:
|
||||||
|
self.log.warn(msg)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def publish_amqp_message_by_unit(self, sentry_unit, message,
|
||||||
|
queue="test", ssl=False,
|
||||||
|
username="testuser1",
|
||||||
|
password="changeme",
|
||||||
|
port=None):
|
||||||
|
"""Publish an amqp message to a rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit pointer
|
||||||
|
:param message: amqp message string
|
||||||
|
:param queue: message queue, default to test
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:param ssl: boolean, default to False
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:returns: None. Raises exception if publish failed.
|
||||||
|
"""
|
||||||
|
self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
|
||||||
|
message))
|
||||||
|
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
|
||||||
|
port=port,
|
||||||
|
username=username,
|
||||||
|
password=password)
|
||||||
|
|
||||||
|
# NOTE(beisner): extra debug here re: pika hang potential:
|
||||||
|
# https://github.com/pika/pika/issues/297
|
||||||
|
# https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
|
||||||
|
self.log.debug('Defining channel...')
|
||||||
|
channel = connection.channel()
|
||||||
|
self.log.debug('Declaring queue...')
|
||||||
|
channel.queue_declare(queue=queue, auto_delete=False, durable=True)
|
||||||
|
self.log.debug('Publishing message...')
|
||||||
|
channel.basic_publish(exchange='', routing_key=queue, body=message)
|
||||||
|
self.log.debug('Closing channel...')
|
||||||
|
channel.close()
|
||||||
|
self.log.debug('Closing connection...')
|
||||||
|
connection.close()
|
||||||
|
|
||||||
|
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
|
||||||
|
username="testuser1",
|
||||||
|
password="changeme",
|
||||||
|
ssl=False, port=None):
|
||||||
|
"""Get an amqp message from a rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit pointer
|
||||||
|
:param queue: message queue, default to test
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:param ssl: boolean, default to False
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:returns: amqp message body as string. Raise if get fails.
|
||||||
|
"""
|
||||||
|
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
|
||||||
|
port=port,
|
||||||
|
username=username,
|
||||||
|
password=password)
|
||||||
|
channel = connection.channel()
|
||||||
|
method_frame, _, body = channel.basic_get(queue)
|
||||||
|
|
||||||
|
if method_frame:
|
||||||
|
self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
|
||||||
|
body))
|
||||||
|
channel.basic_ack(method_frame.delivery_tag)
|
||||||
|
channel.close()
|
||||||
|
connection.close()
|
||||||
|
return body
|
||||||
|
else:
|
||||||
|
msg = 'No message retrieved.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
|
@ -14,12 +14,13 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import glob
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import time
|
import time
|
||||||
from base64 import b64decode
|
from base64 import b64decode
|
||||||
from subprocess import check_call
|
from subprocess import check_call, CalledProcessError
|
||||||
|
|
||||||
import six
|
import six
|
||||||
import yaml
|
import yaml
|
||||||
|
@ -44,16 +45,20 @@ from charmhelpers.core.hookenv import (
|
||||||
INFO,
|
INFO,
|
||||||
WARNING,
|
WARNING,
|
||||||
ERROR,
|
ERROR,
|
||||||
|
status_set,
|
||||||
)
|
)
|
||||||
|
|
||||||
from charmhelpers.core.sysctl import create as sysctl_create
|
from charmhelpers.core.sysctl import create as sysctl_create
|
||||||
from charmhelpers.core.strutils import bool_from_string
|
from charmhelpers.core.strutils import bool_from_string
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
from charmhelpers.core.host import (
|
||||||
|
get_bond_master,
|
||||||
|
is_phy_iface,
|
||||||
list_nics,
|
list_nics,
|
||||||
get_nic_hwaddr,
|
get_nic_hwaddr,
|
||||||
mkdir,
|
mkdir,
|
||||||
write_file,
|
write_file,
|
||||||
|
pwgen,
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.hahelpers.cluster import (
|
from charmhelpers.contrib.hahelpers.cluster import (
|
||||||
determine_apache_port,
|
determine_apache_port,
|
||||||
|
@ -84,6 +89,14 @@ from charmhelpers.contrib.network.ip import (
|
||||||
is_bridge_member,
|
is_bridge_member,
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.openstack.utils import get_host_ip
|
from charmhelpers.contrib.openstack.utils import get_host_ip
|
||||||
|
from charmhelpers.core.unitdata import kv
|
||||||
|
|
||||||
|
try:
|
||||||
|
import psutil
|
||||||
|
except ImportError:
|
||||||
|
apt_install('python-psutil', fatal=True)
|
||||||
|
import psutil
|
||||||
|
|
||||||
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
||||||
ADDRESS_TYPES = ['admin', 'internal', 'public']
|
ADDRESS_TYPES = ['admin', 'internal', 'public']
|
||||||
|
|
||||||
|
@ -192,10 +205,50 @@ def config_flags_parser(config_flags):
|
||||||
class OSContextGenerator(object):
|
class OSContextGenerator(object):
|
||||||
"""Base class for all context generators."""
|
"""Base class for all context generators."""
|
||||||
interfaces = []
|
interfaces = []
|
||||||
|
related = False
|
||||||
|
complete = False
|
||||||
|
missing_data = []
|
||||||
|
|
||||||
def __call__(self):
|
def __call__(self):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def context_complete(self, ctxt):
|
||||||
|
"""Check for missing data for the required context data.
|
||||||
|
Set self.missing_data if it exists and return False.
|
||||||
|
Set self.complete if no missing data and return True.
|
||||||
|
"""
|
||||||
|
# Fresh start
|
||||||
|
self.complete = False
|
||||||
|
self.missing_data = []
|
||||||
|
for k, v in six.iteritems(ctxt):
|
||||||
|
if v is None or v == '':
|
||||||
|
if k not in self.missing_data:
|
||||||
|
self.missing_data.append(k)
|
||||||
|
|
||||||
|
if self.missing_data:
|
||||||
|
self.complete = False
|
||||||
|
log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
|
||||||
|
else:
|
||||||
|
self.complete = True
|
||||||
|
return self.complete
|
||||||
|
|
||||||
|
def get_related(self):
|
||||||
|
"""Check if any of the context interfaces have relation ids.
|
||||||
|
Set self.related and return True if one of the interfaces
|
||||||
|
has relation ids.
|
||||||
|
"""
|
||||||
|
# Fresh start
|
||||||
|
self.related = False
|
||||||
|
try:
|
||||||
|
for interface in self.interfaces:
|
||||||
|
if relation_ids(interface):
|
||||||
|
self.related = True
|
||||||
|
return self.related
|
||||||
|
except AttributeError as e:
|
||||||
|
log("{} {}"
|
||||||
|
"".format(self, e), 'INFO')
|
||||||
|
return self.related
|
||||||
|
|
||||||
|
|
||||||
class SharedDBContext(OSContextGenerator):
|
class SharedDBContext(OSContextGenerator):
|
||||||
interfaces = ['shared-db']
|
interfaces = ['shared-db']
|
||||||
|
@ -211,6 +264,7 @@ class SharedDBContext(OSContextGenerator):
|
||||||
self.database = database
|
self.database = database
|
||||||
self.user = user
|
self.user = user
|
||||||
self.ssl_dir = ssl_dir
|
self.ssl_dir = ssl_dir
|
||||||
|
self.rel_name = self.interfaces[0]
|
||||||
|
|
||||||
def __call__(self):
|
def __call__(self):
|
||||||
self.database = self.database or config('database')
|
self.database = self.database or config('database')
|
||||||
|
@ -244,6 +298,7 @@ class SharedDBContext(OSContextGenerator):
|
||||||
password_setting = self.relation_prefix + '_password'
|
password_setting = self.relation_prefix + '_password'
|
||||||
|
|
||||||
for rid in relation_ids(self.interfaces[0]):
|
for rid in relation_ids(self.interfaces[0]):
|
||||||
|
self.related = True
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
rdata = relation_get(rid=rid, unit=unit)
|
rdata = relation_get(rid=rid, unit=unit)
|
||||||
host = rdata.get('db_host')
|
host = rdata.get('db_host')
|
||||||
|
@ -255,7 +310,7 @@ class SharedDBContext(OSContextGenerator):
|
||||||
'database_password': rdata.get(password_setting),
|
'database_password': rdata.get(password_setting),
|
||||||
'database_type': 'mysql'
|
'database_type': 'mysql'
|
||||||
}
|
}
|
||||||
if context_complete(ctxt):
|
if self.context_complete(ctxt):
|
||||||
db_ssl(rdata, ctxt, self.ssl_dir)
|
db_ssl(rdata, ctxt, self.ssl_dir)
|
||||||
return ctxt
|
return ctxt
|
||||||
return {}
|
return {}
|
||||||
|
@ -276,6 +331,7 @@ class PostgresqlDBContext(OSContextGenerator):
|
||||||
|
|
||||||
ctxt = {}
|
ctxt = {}
|
||||||
for rid in relation_ids(self.interfaces[0]):
|
for rid in relation_ids(self.interfaces[0]):
|
||||||
|
self.related = True
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
rel_host = relation_get('host', rid=rid, unit=unit)
|
rel_host = relation_get('host', rid=rid, unit=unit)
|
||||||
rel_user = relation_get('user', rid=rid, unit=unit)
|
rel_user = relation_get('user', rid=rid, unit=unit)
|
||||||
|
@ -285,7 +341,7 @@ class PostgresqlDBContext(OSContextGenerator):
|
||||||
'database_user': rel_user,
|
'database_user': rel_user,
|
||||||
'database_password': rel_passwd,
|
'database_password': rel_passwd,
|
||||||
'database_type': 'postgresql'}
|
'database_type': 'postgresql'}
|
||||||
if context_complete(ctxt):
|
if self.context_complete(ctxt):
|
||||||
return ctxt
|
return ctxt
|
||||||
|
|
||||||
return {}
|
return {}
|
||||||
|
@ -346,6 +402,7 @@ class IdentityServiceContext(OSContextGenerator):
|
||||||
ctxt['signing_dir'] = cachedir
|
ctxt['signing_dir'] = cachedir
|
||||||
|
|
||||||
for rid in relation_ids(self.rel_name):
|
for rid in relation_ids(self.rel_name):
|
||||||
|
self.related = True
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
rdata = relation_get(rid=rid, unit=unit)
|
rdata = relation_get(rid=rid, unit=unit)
|
||||||
serv_host = rdata.get('service_host')
|
serv_host = rdata.get('service_host')
|
||||||
|
@ -354,6 +411,7 @@ class IdentityServiceContext(OSContextGenerator):
|
||||||
auth_host = format_ipv6_addr(auth_host) or auth_host
|
auth_host = format_ipv6_addr(auth_host) or auth_host
|
||||||
svc_protocol = rdata.get('service_protocol') or 'http'
|
svc_protocol = rdata.get('service_protocol') or 'http'
|
||||||
auth_protocol = rdata.get('auth_protocol') or 'http'
|
auth_protocol = rdata.get('auth_protocol') or 'http'
|
||||||
|
api_version = rdata.get('api_version') or '2.0'
|
||||||
ctxt.update({'service_port': rdata.get('service_port'),
|
ctxt.update({'service_port': rdata.get('service_port'),
|
||||||
'service_host': serv_host,
|
'service_host': serv_host,
|
||||||
'auth_host': auth_host,
|
'auth_host': auth_host,
|
||||||
|
@ -362,9 +420,10 @@ class IdentityServiceContext(OSContextGenerator):
|
||||||
'admin_user': rdata.get('service_username'),
|
'admin_user': rdata.get('service_username'),
|
||||||
'admin_password': rdata.get('service_password'),
|
'admin_password': rdata.get('service_password'),
|
||||||
'service_protocol': svc_protocol,
|
'service_protocol': svc_protocol,
|
||||||
'auth_protocol': auth_protocol})
|
'auth_protocol': auth_protocol,
|
||||||
|
'api_version': api_version})
|
||||||
|
|
||||||
if context_complete(ctxt):
|
if self.context_complete(ctxt):
|
||||||
# NOTE(jamespage) this is required for >= icehouse
|
# NOTE(jamespage) this is required for >= icehouse
|
||||||
# so a missing value just indicates keystone needs
|
# so a missing value just indicates keystone needs
|
||||||
# upgrading
|
# upgrading
|
||||||
|
@ -403,6 +462,7 @@ class AMQPContext(OSContextGenerator):
|
||||||
ctxt = {}
|
ctxt = {}
|
||||||
for rid in relation_ids(self.rel_name):
|
for rid in relation_ids(self.rel_name):
|
||||||
ha_vip_only = False
|
ha_vip_only = False
|
||||||
|
self.related = True
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
if relation_get('clustered', rid=rid, unit=unit):
|
if relation_get('clustered', rid=rid, unit=unit):
|
||||||
ctxt['clustered'] = True
|
ctxt['clustered'] = True
|
||||||
|
@ -435,7 +495,7 @@ class AMQPContext(OSContextGenerator):
|
||||||
ha_vip_only = relation_get('ha-vip-only',
|
ha_vip_only = relation_get('ha-vip-only',
|
||||||
rid=rid, unit=unit) is not None
|
rid=rid, unit=unit) is not None
|
||||||
|
|
||||||
if context_complete(ctxt):
|
if self.context_complete(ctxt):
|
||||||
if 'rabbit_ssl_ca' in ctxt:
|
if 'rabbit_ssl_ca' in ctxt:
|
||||||
if not self.ssl_dir:
|
if not self.ssl_dir:
|
||||||
log("Charm not setup for ssl support but ssl ca "
|
log("Charm not setup for ssl support but ssl ca "
|
||||||
|
@ -467,7 +527,7 @@ class AMQPContext(OSContextGenerator):
|
||||||
ctxt['oslo_messaging_flags'] = config_flags_parser(
|
ctxt['oslo_messaging_flags'] = config_flags_parser(
|
||||||
oslo_messaging_flags)
|
oslo_messaging_flags)
|
||||||
|
|
||||||
if not context_complete(ctxt):
|
if not self.complete:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
return ctxt
|
return ctxt
|
||||||
|
@ -483,13 +543,15 @@ class CephContext(OSContextGenerator):
|
||||||
|
|
||||||
log('Generating template context for ceph', level=DEBUG)
|
log('Generating template context for ceph', level=DEBUG)
|
||||||
mon_hosts = []
|
mon_hosts = []
|
||||||
auth = None
|
ctxt = {
|
||||||
key = None
|
'use_syslog': str(config('use-syslog')).lower()
|
||||||
use_syslog = str(config('use-syslog')).lower()
|
}
|
||||||
for rid in relation_ids('ceph'):
|
for rid in relation_ids('ceph'):
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
auth = relation_get('auth', rid=rid, unit=unit)
|
if not ctxt.get('auth'):
|
||||||
key = relation_get('key', rid=rid, unit=unit)
|
ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
|
||||||
|
if not ctxt.get('key'):
|
||||||
|
ctxt['key'] = relation_get('key', rid=rid, unit=unit)
|
||||||
ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
|
ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
|
||||||
unit=unit)
|
unit=unit)
|
||||||
unit_priv_addr = relation_get('private-address', rid=rid,
|
unit_priv_addr = relation_get('private-address', rid=rid,
|
||||||
|
@ -498,15 +560,12 @@ class CephContext(OSContextGenerator):
|
||||||
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
|
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
|
||||||
mon_hosts.append(ceph_addr)
|
mon_hosts.append(ceph_addr)
|
||||||
|
|
||||||
ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
|
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
|
||||||
'auth': auth,
|
|
||||||
'key': key,
|
|
||||||
'use_syslog': use_syslog}
|
|
||||||
|
|
||||||
if not os.path.isdir('/etc/ceph'):
|
if not os.path.isdir('/etc/ceph'):
|
||||||
os.mkdir('/etc/ceph')
|
os.mkdir('/etc/ceph')
|
||||||
|
|
||||||
if not context_complete(ctxt):
|
if not self.context_complete(ctxt):
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
ensure_packages(['ceph-common'])
|
ensure_packages(['ceph-common'])
|
||||||
|
@ -579,15 +638,28 @@ class HAProxyContext(OSContextGenerator):
|
||||||
if config('haproxy-client-timeout'):
|
if config('haproxy-client-timeout'):
|
||||||
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
|
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
|
||||||
|
|
||||||
|
if config('haproxy-queue-timeout'):
|
||||||
|
ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
|
||||||
|
|
||||||
|
if config('haproxy-connect-timeout'):
|
||||||
|
ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
|
||||||
|
|
||||||
if config('prefer-ipv6'):
|
if config('prefer-ipv6'):
|
||||||
ctxt['ipv6'] = True
|
ctxt['ipv6'] = True
|
||||||
ctxt['local_host'] = 'ip6-localhost'
|
ctxt['local_host'] = 'ip6-localhost'
|
||||||
ctxt['haproxy_host'] = '::'
|
ctxt['haproxy_host'] = '::'
|
||||||
ctxt['stat_port'] = ':::8888'
|
|
||||||
else:
|
else:
|
||||||
ctxt['local_host'] = '127.0.0.1'
|
ctxt['local_host'] = '127.0.0.1'
|
||||||
ctxt['haproxy_host'] = '0.0.0.0'
|
ctxt['haproxy_host'] = '0.0.0.0'
|
||||||
ctxt['stat_port'] = ':8888'
|
|
||||||
|
ctxt['stat_port'] = '8888'
|
||||||
|
|
||||||
|
db = kv()
|
||||||
|
ctxt['stat_password'] = db.get('stat-password')
|
||||||
|
if not ctxt['stat_password']:
|
||||||
|
ctxt['stat_password'] = db.set('stat-password',
|
||||||
|
pwgen(32))
|
||||||
|
db.flush()
|
||||||
|
|
||||||
for frontend in cluster_hosts:
|
for frontend in cluster_hosts:
|
||||||
if (len(cluster_hosts[frontend]['backends']) > 1 or
|
if (len(cluster_hosts[frontend]['backends']) > 1 or
|
||||||
|
@ -878,19 +950,6 @@ class NeutronContext(OSContextGenerator):
|
||||||
|
|
||||||
return calico_ctxt
|
return calico_ctxt
|
||||||
|
|
||||||
def pg_ctxt(self):
|
|
||||||
driver = neutron_plugin_attribute(self.plugin, 'driver',
|
|
||||||
self.network_manager)
|
|
||||||
config = neutron_plugin_attribute(self.plugin, 'config',
|
|
||||||
self.network_manager)
|
|
||||||
pg_ctxt = {'core_plugin': driver,
|
|
||||||
'neutron_plugin': 'plumgrid',
|
|
||||||
'neutron_security_groups': self.neutron_security_groups,
|
|
||||||
'local_ip': unit_private_ip(),
|
|
||||||
'config': config}
|
|
||||||
|
|
||||||
return pg_ctxt
|
|
||||||
|
|
||||||
def neutron_ctxt(self):
|
def neutron_ctxt(self):
|
||||||
if https():
|
if https():
|
||||||
proto = 'https'
|
proto = 'https'
|
||||||
|
@ -906,6 +965,31 @@ class NeutronContext(OSContextGenerator):
|
||||||
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
|
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
|
||||||
return ctxt
|
return ctxt
|
||||||
|
|
||||||
|
def pg_ctxt(self):
|
||||||
|
driver = neutron_plugin_attribute(self.plugin, 'driver',
|
||||||
|
self.network_manager)
|
||||||
|
config = neutron_plugin_attribute(self.plugin, 'config',
|
||||||
|
self.network_manager)
|
||||||
|
ovs_ctxt = {'core_plugin': driver,
|
||||||
|
'neutron_plugin': 'plumgrid',
|
||||||
|
'neutron_security_groups': self.neutron_security_groups,
|
||||||
|
'local_ip': unit_private_ip(),
|
||||||
|
'config': config}
|
||||||
|
return ovs_ctxt
|
||||||
|
|
||||||
|
def midonet_ctxt(self):
|
||||||
|
driver = neutron_plugin_attribute(self.plugin, 'driver',
|
||||||
|
self.network_manager)
|
||||||
|
midonet_config = neutron_plugin_attribute(self.plugin, 'config',
|
||||||
|
self.network_manager)
|
||||||
|
mido_ctxt = {'core_plugin': driver,
|
||||||
|
'neutron_plugin': 'midonet',
|
||||||
|
'neutron_security_groups': self.neutron_security_groups,
|
||||||
|
'local_ip': unit_private_ip(),
|
||||||
|
'config': midonet_config}
|
||||||
|
|
||||||
|
return mido_ctxt
|
||||||
|
|
||||||
def __call__(self):
|
def __call__(self):
|
||||||
if self.network_manager not in ['quantum', 'neutron']:
|
if self.network_manager not in ['quantum', 'neutron']:
|
||||||
return {}
|
return {}
|
||||||
|
@ -927,6 +1011,8 @@ class NeutronContext(OSContextGenerator):
|
||||||
ctxt.update(self.nuage_ctxt())
|
ctxt.update(self.nuage_ctxt())
|
||||||
elif self.plugin == 'plumgrid':
|
elif self.plugin == 'plumgrid':
|
||||||
ctxt.update(self.pg_ctxt())
|
ctxt.update(self.pg_ctxt())
|
||||||
|
elif self.plugin == 'midonet':
|
||||||
|
ctxt.update(self.midonet_ctxt())
|
||||||
|
|
||||||
alchemy_flags = config('neutron-alchemy-flags')
|
alchemy_flags = config('neutron-alchemy-flags')
|
||||||
if alchemy_flags:
|
if alchemy_flags:
|
||||||
|
@ -938,7 +1024,6 @@ class NeutronContext(OSContextGenerator):
|
||||||
|
|
||||||
|
|
||||||
class NeutronPortContext(OSContextGenerator):
|
class NeutronPortContext(OSContextGenerator):
|
||||||
NIC_PREFIXES = ['eth', 'bond']
|
|
||||||
|
|
||||||
def resolve_ports(self, ports):
|
def resolve_ports(self, ports):
|
||||||
"""Resolve NICs not yet bound to bridge(s)
|
"""Resolve NICs not yet bound to bridge(s)
|
||||||
|
@ -950,7 +1035,18 @@ class NeutronPortContext(OSContextGenerator):
|
||||||
|
|
||||||
hwaddr_to_nic = {}
|
hwaddr_to_nic = {}
|
||||||
hwaddr_to_ip = {}
|
hwaddr_to_ip = {}
|
||||||
for nic in list_nics(self.NIC_PREFIXES):
|
for nic in list_nics():
|
||||||
|
# Ignore virtual interfaces (bond masters will be identified from
|
||||||
|
# their slaves)
|
||||||
|
if not is_phy_iface(nic):
|
||||||
|
continue
|
||||||
|
|
||||||
|
_nic = get_bond_master(nic)
|
||||||
|
if _nic:
|
||||||
|
log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
|
||||||
|
level=DEBUG)
|
||||||
|
nic = _nic
|
||||||
|
|
||||||
hwaddr = get_nic_hwaddr(nic)
|
hwaddr = get_nic_hwaddr(nic)
|
||||||
hwaddr_to_nic[hwaddr] = nic
|
hwaddr_to_nic[hwaddr] = nic
|
||||||
addresses = get_ipv4_addr(nic, fatal=False)
|
addresses = get_ipv4_addr(nic, fatal=False)
|
||||||
|
@ -976,7 +1072,8 @@ class NeutronPortContext(OSContextGenerator):
|
||||||
# trust it to be the real external network).
|
# trust it to be the real external network).
|
||||||
resolved.append(entry)
|
resolved.append(entry)
|
||||||
|
|
||||||
return resolved
|
# Ensure no duplicates
|
||||||
|
return list(set(resolved))
|
||||||
|
|
||||||
|
|
||||||
class OSConfigFlagContext(OSContextGenerator):
|
class OSConfigFlagContext(OSContextGenerator):
|
||||||
|
@ -1016,6 +1113,20 @@ class OSConfigFlagContext(OSContextGenerator):
|
||||||
config_flags_parser(config_flags)}
|
config_flags_parser(config_flags)}
|
||||||
|
|
||||||
|
|
||||||
|
class LibvirtConfigFlagsContext(OSContextGenerator):
|
||||||
|
"""
|
||||||
|
This context provides support for extending
|
||||||
|
the libvirt section through user-defined flags.
|
||||||
|
"""
|
||||||
|
def __call__(self):
|
||||||
|
ctxt = {}
|
||||||
|
libvirt_flags = config('libvirt-flags')
|
||||||
|
if libvirt_flags:
|
||||||
|
ctxt['libvirt_flags'] = config_flags_parser(
|
||||||
|
libvirt_flags)
|
||||||
|
return ctxt
|
||||||
|
|
||||||
|
|
||||||
class SubordinateConfigContext(OSContextGenerator):
|
class SubordinateConfigContext(OSContextGenerator):
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
@ -1048,7 +1159,7 @@ class SubordinateConfigContext(OSContextGenerator):
|
||||||
|
|
||||||
ctxt = {
|
ctxt = {
|
||||||
... other context ...
|
... other context ...
|
||||||
'subordinate_config': {
|
'subordinate_configuration': {
|
||||||
'DEFAULT': {
|
'DEFAULT': {
|
||||||
'key1': 'value1',
|
'key1': 'value1',
|
||||||
},
|
},
|
||||||
|
@@ -1066,13 +1177,22 @@ class SubordinateConfigContext(OSContextGenerator):
         :param config_file : Service's config file to query sections
         :param interface : Subordinate interface to inspect
         """
-        self.service = service
         self.config_file = config_file
-        self.interface = interface
+        if isinstance(service, list):
+            self.services = service
+        else:
+            self.services = [service]
+        if isinstance(interface, list):
+            self.interfaces = interface
+        else:
+            self.interfaces = [interface]
 
     def __call__(self):
         ctxt = {'sections': {}}
-        for rid in relation_ids(self.interface):
+        rids = []
+        for interface in self.interfaces:
+            rids.extend(relation_ids(interface))
+        for rid in rids:
             for unit in related_units(rid):
                 sub_config = relation_get('subordinate_configuration',
                                           rid=rid, unit=unit)
@@ -1080,33 +1200,37 @@ class SubordinateConfigContext(OSContextGenerator):
                     try:
                         sub_config = json.loads(sub_config)
                     except:
-                        log('Could not parse JSON from subordinate_config '
-                            'setting from %s' % rid, level=ERROR)
+                        log('Could not parse JSON from '
+                            'subordinate_configuration setting from %s'
+                            % rid, level=ERROR)
                         continue
 
-                    if self.service not in sub_config:
-                        log('Found subordinate_config on %s but it contained'
-                            'nothing for %s service' % (rid, self.service),
-                            level=INFO)
-                        continue
+                    for service in self.services:
+                        if service not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s service'
+                                % (rid, service), level=INFO)
+                            continue
 
-                    sub_config = sub_config[self.service]
-                    if self.config_file not in sub_config:
-                        log('Found subordinate_config on %s but it contained'
-                            'nothing for %s' % (rid, self.config_file),
-                            level=INFO)
-                        continue
+                        sub_config = sub_config[service]
+                        if self.config_file not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s'
+                                % (rid, self.config_file), level=INFO)
+                            continue
 
-                    sub_config = sub_config[self.config_file]
-                    for k, v in six.iteritems(sub_config):
-                        if k == 'sections':
-                            for section, config_dict in six.iteritems(v):
-                                log("adding section '%s'" % (section),
-                                    level=DEBUG)
-                                ctxt[k][section] = config_dict
-                        else:
-                            ctxt[k] = v
+                        sub_config = sub_config[self.config_file]
+                        for k, v in six.iteritems(sub_config):
+                            if k == 'sections':
+                                for section, config_list in six.iteritems(v):
+                                    log("adding section '%s'" % (section),
+                                        level=DEBUG)
+                                    if ctxt[k].get(section):
+                                        ctxt[k][section].extend(config_list)
+                                    else:
+                                        ctxt[k][section] = config_list
+                            else:
+                                ctxt[k] = v
 
         log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
         return ctxt
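For orientation, a rough sketch (service and file names are made up) of the relation data this context now consumes: a subordinate publishes 'subordinate_configuration' as JSON keyed by principal service, and section values are lists so that several subordinates can extend the same section.

    import json

    # Hypothetical payload a subordinate charm might publish on its relation.
    sub_config = {
        'nova-compute': {                          # principal service (assumed)
            '/etc/nova/nova.conf': {
                'sections': {
                    'DEFAULT': [
                        ['debug', 'true'],
                        ['verbose', 'true'],
                    ],
                },
            },
        },
    }
    payload = {'subordinate_configuration': json.dumps(sub_config)}
    # relation_set(relation_settings=payload) on the subordinate side would
    # make this visible to SubordinateConfigContext on the principal.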
@@ -1143,13 +1267,11 @@ class WorkerConfigContext(OSContextGenerator):
 
     @property
     def num_cpus(self):
-        try:
-            from psutil import NUM_CPUS
-        except ImportError:
-            apt_install('python-psutil', fatal=True)
-            from psutil import NUM_CPUS
-
-        return NUM_CPUS
+        # NOTE: use cpu_count if present (16.04 support)
+        if hasattr(psutil, 'cpu_count'):
+            return psutil.cpu_count()
+        else:
+            return psutil.NUM_CPUS
 
     def __call__(self):
         multiplier = config('worker-multiplier') or 0
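A minimal sketch of why the hasattr guard is used: newer psutil (as shipped on 16.04) exposes cpu_count() rather than the old NUM_CPUS constant, so the property probes for the new API first.

    import psutil

    # Prefer the newer psutil API when available, fall back to the constant.
    if hasattr(psutil, 'cpu_count'):
        num_cpus = psutil.cpu_count()
    else:
        num_cpus = psutil.NUM_CPUS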
@@ -1283,15 +1405,19 @@ class DataPortContext(NeutronPortContext):
     def __call__(self):
         ports = config('data-port')
         if ports:
+            # Map of {port/mac:bridge}
             portmap = parse_data_port_mappings(ports)
-            ports = portmap.values()
+            ports = portmap.keys()
+            # Resolve provided ports or mac addresses and filter out those
+            # already attached to a bridge.
             resolved = self.resolve_ports(ports)
+            # FIXME: is this necessary?
             normalized = {get_nic_hwaddr(port): port for port in resolved
                           if port not in ports}
             normalized.update({port: port for port in resolved
                                if port in ports})
             if resolved:
-                return {bridge: normalized[port] for bridge, port in
+                return {normalized[port]: bridge for port, bridge in
                         six.iteritems(portmap) if port in normalized.keys()}
 
         return None
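Note the direction change in the returned mapping: parse_data_port_mappings() is now port-keyed and the context inverts it back to {resolved-NIC: bridge}. A runnable sketch of the final expression, with made-up values:

    portmap = {'eth1': 'br-ex',
               'aa:bb:cc:dd:ee:ff': 'br-data'}   # parse_data_port_mappings() output (illustrative)
    resolved = ['eth1', 'eth2']                  # resolve_ports(): mac resolved to eth2 on this unit
    normalized = {'aa:bb:cc:dd:ee:ff': 'eth2',   # hwaddr/port -> local NIC name
                  'eth1': 'eth1'}

    print({normalized[port]: bridge for port, bridge in portmap.items()
           if port in normalized})
    # {'eth1': 'br-ex', 'eth2': 'br-data'}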
@@ -1302,12 +1428,22 @@ class PhyNICMTUContext(DataPortContext):
     def __call__(self):
         ctxt = {}
         mappings = super(PhyNICMTUContext, self).__call__()
-        if mappings and mappings.values():
-            ports = mappings.values()
+        if mappings and mappings.keys():
+            ports = sorted(mappings.keys())
             napi_settings = NeutronAPIContext()()
             mtu = napi_settings.get('network_device_mtu')
+            all_ports = set()
+            # If any of ports is a vlan device, its underlying device must have
+            # mtu applied first.
+            for port in ports:
+                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
+                    lport = os.path.basename(lport)
+                    all_ports.add(lport.split('_')[1])
+
+            all_ports = list(all_ports)
+            all_ports.extend(ports)
             if mtu:
-                ctxt["devs"] = '\\n'.join(ports)
+                ctxt["devs"] = '\\n'.join(all_ports)
                 ctxt['mtu'] = mtu
 
         return ctxt
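The sysfs lookup used above can be sketched in isolation: for a vlan device such as eth1.100, /sys/class/net/eth1.100/lower_eth1 names the underlying device that must receive the MTU first (device names here are illustrative).

    import glob
    import os

    ports = ['eth1.100']          # hypothetical vlan port from the data-port map
    all_ports = set()
    for port in ports:
        for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
            # e.g. 'lower_eth1' -> 'eth1'
            all_ports.add(os.path.basename(lport).split('_')[1])

    all_ports = list(all_ports) + ports
    print(all_ports)              # ['eth1', 'eth1.100'] - underlying device first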
@@ -1338,7 +1474,110 @@ class NetworkServiceContext(OSContextGenerator):
                         rdata.get('service_protocol') or 'http',
                     'auth_protocol':
                         rdata.get('auth_protocol') or 'http',
+                    'api_version':
+                        rdata.get('api_version') or '2.0',
                 }
-                if context_complete(ctxt):
+                if self.context_complete(ctxt):
                     return ctxt
         return {}
+
+
+class InternalEndpointContext(OSContextGenerator):
+    """Internal endpoint context.
+
+    This context provides the endpoint type used for communication between
+    services e.g. between Nova and Cinder internally. Openstack uses Public
+    endpoints by default so this allows admins to optionally use internal
+    endpoints.
+    """
+    def __call__(self):
+        return {'use_internal_endpoints': config('use-internal-endpoints')}
+
+
+class AppArmorContext(OSContextGenerator):
+    """Base class for apparmor contexts."""
+
+    def __init__(self):
+        self._ctxt = None
+        self.aa_profile = None
+        self.aa_utils_packages = ['apparmor-utils']
+
+    @property
+    def ctxt(self):
+        if self._ctxt is not None:
+            return self._ctxt
+        self._ctxt = self._determine_ctxt()
+        return self._ctxt
+
+    def _determine_ctxt(self):
+        """
+        Validate aa-profile-mode settings is disable, enforce, or complain.
+
+        :return ctxt: Dictionary of the apparmor profile or None
+        """
+        if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
+            ctxt = {'aa-profile-mode': config('aa-profile-mode')}
+        else:
+            ctxt = None
+        return ctxt
+
+    def __call__(self):
+        return self.ctxt
+
+    def install_aa_utils(self):
+        """
+        Install packages required for apparmor configuration.
+        """
+        log("Installing apparmor utils.")
+        ensure_packages(self.aa_utils_packages)
+
+    def manually_disable_aa_profile(self):
+        """
+        Manually disable an apparmor profile.
+
+        If aa-profile-mode is set to disabled (default) this is required as the
+        template has been written but apparmor is yet unaware of the profile
+        and aa-disable aa-profile fails. Without this the profile would kick
+        into enforce mode on the next service restart.
+
+        """
+        profile_path = '/etc/apparmor.d'
+        disable_path = '/etc/apparmor.d/disable'
+        if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
+            os.symlink(os.path.join(profile_path, self.aa_profile),
+                       os.path.join(disable_path, self.aa_profile))
+
+    def setup_aa_profile(self):
+        """
+        Setup an apparmor profile.
+        The ctxt dictionary will contain the apparmor profile mode and
+        the apparmor profile name.
+        Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
+        the apparmor profile.
+        """
+        self()
+        if not self.ctxt:
+            log("Not enabling apparmor Profile")
+            return
+        self.install_aa_utils()
+        cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
+        cmd.append(self.ctxt['aa-profile'])
+        log("Setting up the apparmor profile for {} in {} mode."
+            "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
+        try:
+            check_call(cmd)
+        except CalledProcessError as e:
+            # If aa-profile-mode is set to disabled (default) manual
+            # disabling is required as the template has been written but
+            # apparmor is yet unaware of the profile and aa-disable aa-profile
+            # fails. If aa-disable learns to read profile files first this can
+            # be removed.
+            if self.ctxt['aa-profile-mode'] == 'disable':
+                log("Manually disabling the apparmor profile for {}."
+                    "".format(self.ctxt['aa-profile']))
+                self.manually_disable_aa_profile()
+                return
+            status_set('blocked', "Apparmor profile {} failed to be set to {}."
+                       "".format(self.ctxt['aa-profile'],
+                                 self.ctxt['aa-profile-mode']))
+            raise e
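A charm would typically subclass AppArmorContext and supply its own profile name; a minimal hypothetical example (class and profile names are not part of this change):

    class MyServiceAppArmorContext(AppArmorContext):
        """Hypothetical subclass for a charm shipping usr.bin.my-service."""

        def __init__(self):
            super(MyServiceAppArmorContext, self).__init__()
            self.aa_profile = 'usr.bin.my-service'

        def __call__(self):
            super(MyServiceAppArmorContext, self).__call__()
            if not self.ctxt:
                return self.ctxt
            # setup_aa_profile() expects 'aa-profile' alongside 'aa-profile-mode'.
            self._ctxt.update({'aa-profile': self.aa_profile})
            return self.ctxt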
@@ -14,16 +14,19 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
 
 from charmhelpers.core.hookenv import (
     config,
     unit_get,
     service_name,
+    network_get_primary_address,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
+    resolve_network_cidr,
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
@@ -33,16 +36,19 @@ ADMIN = 'admin'
 
 ADDRESS_MAP = {
     PUBLIC: {
+        'binding': 'public',
         'config': 'os-public-network',
         'fallback': 'public-address',
         'override': 'os-public-hostname',
     },
     INTERNAL: {
+        'binding': 'internal',
         'config': 'os-internal-network',
         'fallback': 'private-address',
         'override': 'os-internal-hostname',
     },
     ADMIN: {
+        'binding': 'admin',
         'config': 'os-admin-network',
         'fallback': 'private-address',
         'override': 'os-admin-hostname',
@@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
     correct network. If clustered with no nets defined, return primary vip.
 
     If not clustered, return unit address ensuring address is on configured net
-    split if one is configured.
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoing type
     """
@@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
     net_type = ADDRESS_MAP[endpoint_type]['config']
     net_addr = config(net_type)
     net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
     clustered = is_clustered()
-    if clustered:
-        if not net_addr:
-            # If no net-splits defined, we expect a single vip
-            resolved_address = vips[0]
-        else:
+
+    if clustered and vips:
+        if net_addr:
             for vip in vips:
                 if is_address_in_network(net_addr, vip):
                     resolved_address = vip
                     break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits configured and no support for extra
+                # bindings/network spaces so we expect a single vip
+                resolved_address = vips[0]
     else:
         if config('prefer-ipv6'):
             fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
             fallback_addr = unit_get(net_fallback)
 
-        resolved_address = get_address_in_network(net_addr, fallback_addr)
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
 
     if resolved_address is None:
         raise ValueError("Unable to resolve a suitable IP address based on "
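The Juju 2.0 path can be exercised on its own; network-get returns the address bound to an endpoint ('internal' below is an assumed binding name) and resolve_network_cidr() maps it to its CIDR so the vips can be matched against it:

    from charmhelpers.core.hookenv import network_get_primary_address
    from charmhelpers.contrib.network.ip import (
        is_address_in_network,
        resolve_network_cidr,
    )

    vips = ['10.20.0.100', '10.30.0.100']     # hypothetical configured vips
    try:
        bound_cidr = resolve_network_cidr(network_get_primary_address('internal'))
        vip = next((v for v in vips if is_address_in_network(bound_cidr, v)), None)
    except NotImplementedError:
        # Pre-2.0 Juju: no extra bindings, so expect a single vip.
        vip = vips[0]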
@@ -50,7 +50,7 @@ def determine_dkms_package():
     if kernel_version() >= (3, 13):
         return []
     else:
-        return ['openvswitch-datapath-dkms']
+        return [headers_package(), 'openvswitch-datapath-dkms']
 
 
 # legacy
@@ -70,7 +70,7 @@ def quantum_plugins():
                                           relation_prefix='neutron',
                                           ssl_dir=QUANTUM_CONF_DIR)],
             'services': ['quantum-plugin-openvswitch-agent'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['quantum-plugin-openvswitch-agent']],
             'server_packages': ['quantum-server',
                                 'quantum-plugin-openvswitch'],
@@ -111,7 +111,7 @@ def neutron_plugins():
                                           relation_prefix='neutron',
                                           ssl_dir=NEUTRON_CONF_DIR)],
             'services': ['neutron-plugin-openvswitch-agent'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['neutron-plugin-openvswitch-agent']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-openvswitch'],
@@ -155,7 +155,7 @@ def neutron_plugins():
                                           relation_prefix='neutron',
                                           ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['neutron-plugin-cisco']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-cisco'],
@@ -174,7 +174,7 @@ def neutron_plugins():
                          'neutron-dhcp-agent',
                          'nova-api-metadata',
                          'etcd'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['calico-compute',
                           'bird',
                           'neutron-dhcp-agent',
@@ -209,6 +209,20 @@ def neutron_plugins():
             'server_packages': ['neutron-server',
                                 'neutron-plugin-plumgrid'],
             'server_services': ['neutron-server']
+        },
+        'midonet': {
+            'config': '/etc/neutron/plugins/midonet/midonet.ini',
+            'driver': 'midonet.neutron.plugin.MidonetPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [determine_dkms_package()],
+            'server_packages': ['neutron-server',
+                                'python-neutron-plugin-midonet'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
@@ -219,6 +233,20 @@ def neutron_plugins():
                                              'neutron-plugin-ml2']
         # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
         plugins['nvp'] = plugins['nsx']
+    if release >= 'kilo':
+        plugins['midonet']['driver'] = (
+            'neutron.plugins.midonet.plugin.MidonetPluginV2')
+    if release >= 'liberty':
+        plugins['midonet']['driver'] = (
+            'midonet.neutron.plugin_v1.MidonetPluginV2')
+        plugins['midonet']['server_packages'].remove(
+            'python-neutron-plugin-midonet')
+        plugins['midonet']['server_packages'].append(
+            'python-networking-midonet')
+        plugins['plumgrid']['driver'] = (
+            'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
+        plugins['plumgrid']['server_packages'].remove(
+            'neutron-plugin-plumgrid')
     return plugins
 
 
@@ -269,17 +297,30 @@ def network_manager():
         return 'neutron'
 
 
-def parse_mappings(mappings):
+def parse_mappings(mappings, key_rvalue=False):
+    """By default mappings are lvalue keyed.
+
+    If key_rvalue is True, the mapping will be reversed to allow multiple
+    configs for the same lvalue.
+    """
     parsed = {}
     if mappings:
         mappings = mappings.split()
         for m in mappings:
             p = m.partition(':')
-            key = p[0].strip()
-            if p[1]:
-                parsed[key] = p[2].strip()
+
+            if key_rvalue:
+                key_index = 2
+                val_index = 0
+                # if there is no rvalue skip to next
+                if not p[1]:
+                    continue
             else:
-                parsed[key] = ''
+                key_index = 0
+                val_index = 2
+
+            key = p[key_index].strip()
+            parsed[key] = p[val_index].strip()
 
     return parsed
 
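The effect of key_rvalue is easiest to see side by side (values are illustrative):

    from charmhelpers.contrib.openstack.neutron import parse_mappings

    mappings = 'br-ex:eth1 br-data:eth2'   # example config value

    by_bridge = parse_mappings(mappings)
    # {'br-ex': 'eth1', 'br-data': 'eth2'}   (lvalue keyed, the default)

    by_port = parse_mappings(mappings, key_rvalue=True)
    # {'eth1': 'br-ex', 'eth2': 'br-data'}   (rvalue keyed, so different units
    # can propose different ports/macs for a bridge without colliding)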
@@ -297,25 +338,25 @@ def parse_bridge_mappings(mappings):
 def parse_data_port_mappings(mappings, default_bridge='br-data'):
     """Parse data port mappings.
 
-    Mappings must be a space-delimited list of bridge:port mappings.
+    Mappings must be a space-delimited list of bridge:port.
 
-    Returns dict of the form {bridge:port}.
+    Returns dict of the form {port:bridge} where ports may be mac addresses or
+    interface names.
     """
-    _mappings = parse_mappings(mappings)
+
+    # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
+    # proposed for <port> since it may be a mac address which will differ
+    # across units this allowing first-known-good to be chosen.
+    _mappings = parse_mappings(mappings, key_rvalue=True)
     if not _mappings or list(_mappings.values()) == ['']:
         if not mappings:
             return {}
 
         # For backwards-compatibility we need to support port-only provided in
         # config.
-        _mappings = {default_bridge: mappings.split()[0]}
+        _mappings = {mappings.split()[0]: default_bridge}
 
-    bridges = _mappings.keys()
-    ports = _mappings.values()
-    if len(set(bridges)) != len(bridges):
-        raise Exception("It is not allowed to have more than one port "
-                        "configured on the same bridge")
-
+    ports = _mappings.keys()
     if len(set(ports)) != len(ports):
         raise Exception("It is not allowed to have the same port configured "
                         "on more than one bridge")
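Both forms the parser now accepts, with example values:

    from charmhelpers.contrib.openstack.neutron import parse_data_port_mappings

    # bridge:port pairs, where the port side may be a NIC name or a mac address
    parse_data_port_mappings('br-data:eth1 br-ex:aa:bb:cc:dd:ee:ff')
    # -> {'eth1': 'br-data', 'aa:bb:cc:dd:ee:ff': 'br-ex'}

    # legacy port-only config still works and maps to the default bridge
    parse_data_port_mappings('eth1')
    # -> {'eth1': 'br-data'}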
@@ -18,7 +18,7 @@ import os
 
 import six
 
-from charmhelpers.fetch import apt_install
+from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import (
     log,
     ERROR,
@@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
 try:
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 except ImportError:
+    apt_update(fatal=True)
     apt_install('python-jinja2', fatal=True)
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 
@@ -112,7 +113,7 @@ class OSConfigTemplate(object):
 
     def complete_contexts(self):
         '''
-        Return a list of interfaces that have atisfied contexts.
+        Return a list of interfaces that have satisfied contexts.
         '''
         if self._complete_contexts:
             return self._complete_contexts
@@ -293,3 +294,30 @@ class OSConfigRenderer(object):
         [interfaces.extend(i.complete_contexts())
          for i in six.itervalues(self.templates)]
         return interfaces
+
+    def get_incomplete_context_data(self, interfaces):
+        '''
+        Return dictionary of relation status of interfaces and any missing
+        required context data. Example:
+          {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+           'zeromq-configuration': {'related': False}}
+        '''
+        incomplete_context_data = {}
+
+        for i in six.itervalues(self.templates):
+            for context in i.contexts:
+                for interface in interfaces:
+                    related = False
+                    if interface in context.interfaces:
+                        related = context.get_related()
+                        missing_data = context.missing_data
+                        if missing_data:
+                            incomplete_context_data[interface] = {'missing_data': missing_data}
+                        if related:
+                            if incomplete_context_data.get(interface):
+                                incomplete_context_data[interface].update({'related': True})
+                            else:
+                                incomplete_context_data[interface] = {'related': True}
+                    else:
+                        incomplete_context_data[interface] = {'related': False}
+        return incomplete_context_data
File diff suppressed because it is too large
@@ -1,268 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import json
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import relation_id as current_relation_id
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
is_relation_made,
|
|
||||||
relation_ids,
|
|
||||||
relation_get as _relation_get,
|
|
||||||
local_unit,
|
|
||||||
relation_set as _relation_set,
|
|
||||||
leader_get as _leader_get,
|
|
||||||
leader_set,
|
|
||||||
is_leader,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
This helper provides functions to support use of a peer relation
|
|
||||||
for basic key/value storage, with the added benefit that all storage
|
|
||||||
can be replicated across peer units.
|
|
||||||
|
|
||||||
Requirement to use:
|
|
||||||
|
|
||||||
To use this, the "peer_echo()" method has to be called form the peer
|
|
||||||
relation's relation-changed hook:
|
|
||||||
|
|
||||||
@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
|
|
||||||
def cluster_relation_changed():
|
|
||||||
peer_echo()
|
|
||||||
|
|
||||||
Once this is done, you can use peer storage from anywhere:
|
|
||||||
|
|
||||||
@hooks.hook("some-hook")
|
|
||||||
def some_hook():
|
|
||||||
# You can store and retrieve key/values this way:
|
|
||||||
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
|
|
||||||
# There are peers available so we can work with peer storage
|
|
||||||
peer_store("mykey", "myvalue")
|
|
||||||
value = peer_retrieve("mykey")
|
|
||||||
print value
|
|
||||||
else:
|
|
||||||
print "No peers joind the relation, cannot share key/values :("
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def leader_get(attribute=None):
|
|
||||||
"""Wrapper to ensure that settings are migrated from the peer relation.
|
|
||||||
|
|
||||||
This is to support upgrading an environment that does not support
|
|
||||||
Juju leadership election to one that does.
|
|
||||||
|
|
||||||
If a setting is not extant in the leader-get but is on the relation-get
|
|
||||||
peer rel, it is migrated and marked as such so that it is not re-migrated.
|
|
||||||
"""
|
|
||||||
migration_key = '__leader_get_migrated_settings__'
|
|
||||||
if not is_leader():
|
|
||||||
return _leader_get(attribute=attribute)
|
|
||||||
|
|
||||||
settings_migrated = False
|
|
||||||
leader_settings = _leader_get(attribute=attribute)
|
|
||||||
previously_migrated = _leader_get(attribute=migration_key)
|
|
||||||
|
|
||||||
if previously_migrated:
|
|
||||||
migrated = set(json.loads(previously_migrated))
|
|
||||||
else:
|
|
||||||
migrated = set([])
|
|
||||||
|
|
||||||
try:
|
|
||||||
if migration_key in leader_settings:
|
|
||||||
del leader_settings[migration_key]
|
|
||||||
except TypeError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if attribute:
|
|
||||||
if attribute in migrated:
|
|
||||||
return leader_settings
|
|
||||||
|
|
||||||
# If attribute not present in leader db, check if this unit has set
|
|
||||||
# the attribute in the peer relation
|
|
||||||
if not leader_settings:
|
|
||||||
peer_setting = relation_get(attribute=attribute, unit=local_unit())
|
|
||||||
if peer_setting:
|
|
||||||
leader_set(settings={attribute: peer_setting})
|
|
||||||
leader_settings = peer_setting
|
|
||||||
|
|
||||||
if leader_settings:
|
|
||||||
settings_migrated = True
|
|
||||||
migrated.add(attribute)
|
|
||||||
else:
|
|
||||||
r_settings = relation_get(unit=local_unit())
|
|
||||||
if r_settings:
|
|
||||||
for key in set(r_settings.keys()).difference(migrated):
|
|
||||||
# Leader setting wins
|
|
||||||
if not leader_settings.get(key):
|
|
||||||
leader_settings[key] = r_settings[key]
|
|
||||||
|
|
||||||
settings_migrated = True
|
|
||||||
migrated.add(key)
|
|
||||||
|
|
||||||
if settings_migrated:
|
|
||||||
leader_set(**leader_settings)
|
|
||||||
|
|
||||||
if migrated and settings_migrated:
|
|
||||||
migrated = json.dumps(list(migrated))
|
|
||||||
leader_set(settings={migration_key: migrated})
|
|
||||||
|
|
||||||
return leader_settings
|
|
||||||
|
|
||||||
|
|
||||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
|
|
||||||
"""Attempt to use leader-set if supported in the current version of Juju,
|
|
||||||
otherwise falls back on relation-set.
|
|
||||||
|
|
||||||
Note that we only attempt to use leader-set if the provided relation_id is
|
|
||||||
a peer relation id or no relation id is provided (in which case we assume
|
|
||||||
we are within the peer relation context).
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if relation_id in relation_ids('cluster'):
|
|
||||||
return leader_set(settings=relation_settings, **kwargs)
|
|
||||||
else:
|
|
||||||
raise NotImplementedError
|
|
||||||
except NotImplementedError:
|
|
||||||
return _relation_set(relation_id=relation_id,
|
|
||||||
relation_settings=relation_settings, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def relation_get(attribute=None, unit=None, rid=None):
|
|
||||||
"""Attempt to use leader-get if supported in the current version of Juju,
|
|
||||||
otherwise falls back on relation-get.
|
|
||||||
|
|
||||||
Note that we only attempt to use leader-get if the provided rid is a peer
|
|
||||||
relation id or no relation id is provided (in which case we assume we are
|
|
||||||
within the peer relation context).
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if rid in relation_ids('cluster'):
|
|
||||||
return leader_get(attribute)
|
|
||||||
else:
|
|
||||||
raise NotImplementedError
|
|
||||||
except NotImplementedError:
|
|
||||||
return _relation_get(attribute=attribute, rid=rid, unit=unit)
|
|
||||||
|
|
||||||
|
|
||||||
def peer_retrieve(key, relation_name='cluster'):
|
|
||||||
"""Retrieve a named key from peer relation `relation_name`."""
|
|
||||||
cluster_rels = relation_ids(relation_name)
|
|
||||||
if len(cluster_rels) > 0:
|
|
||||||
cluster_rid = cluster_rels[0]
|
|
||||||
return relation_get(attribute=key, rid=cluster_rid,
|
|
||||||
unit=local_unit())
|
|
||||||
else:
|
|
||||||
raise ValueError('Unable to detect'
|
|
||||||
'peer relation {}'.format(relation_name))
|
|
||||||
|
|
||||||
|
|
||||||
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
|
|
||||||
inc_list=None, exc_list=None):
|
|
||||||
""" Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
|
|
||||||
inc_list = inc_list if inc_list else []
|
|
||||||
exc_list = exc_list if exc_list else []
|
|
||||||
peerdb_settings = peer_retrieve('-', relation_name=relation_name)
|
|
||||||
matched = {}
|
|
||||||
if peerdb_settings is None:
|
|
||||||
return matched
|
|
||||||
for k, v in peerdb_settings.items():
|
|
||||||
full_prefix = prefix + delimiter
|
|
||||||
if k.startswith(full_prefix):
|
|
||||||
new_key = k.replace(full_prefix, '')
|
|
||||||
if new_key in exc_list:
|
|
||||||
continue
|
|
||||||
if new_key in inc_list or len(inc_list) == 0:
|
|
||||||
matched[new_key] = v
|
|
||||||
return matched
|
|
||||||
|
|
||||||
|
|
||||||
def peer_store(key, value, relation_name='cluster'):
|
|
||||||
"""Store the key/value pair on the named peer relation `relation_name`."""
|
|
||||||
cluster_rels = relation_ids(relation_name)
|
|
||||||
if len(cluster_rels) > 0:
|
|
||||||
cluster_rid = cluster_rels[0]
|
|
||||||
relation_set(relation_id=cluster_rid,
|
|
||||||
relation_settings={key: value})
|
|
||||||
else:
|
|
||||||
raise ValueError('Unable to detect '
|
|
||||||
'peer relation {}'.format(relation_name))
|
|
||||||
|
|
||||||
|
|
||||||
def peer_echo(includes=None, force=False):
|
|
||||||
"""Echo filtered attributes back onto the same relation for storage.
|
|
||||||
|
|
||||||
This is a requirement to use the peerstorage module - it needs to be called
|
|
||||||
from the peer relation's changed hook.
|
|
||||||
|
|
||||||
If Juju leader support exists this will be a noop unless force is True.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
is_leader()
|
|
||||||
except NotImplementedError:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
if not force:
|
|
||||||
return # NOOP if leader-election is supported
|
|
||||||
|
|
||||||
# Use original non-leader calls
|
|
||||||
relation_get = _relation_get
|
|
||||||
relation_set = _relation_set
|
|
||||||
|
|
||||||
rdata = relation_get()
|
|
||||||
echo_data = {}
|
|
||||||
if includes is None:
|
|
||||||
echo_data = rdata.copy()
|
|
||||||
for ex in ['private-address', 'public-address']:
|
|
||||||
if ex in echo_data:
|
|
||||||
echo_data.pop(ex)
|
|
||||||
else:
|
|
||||||
for attribute, value in six.iteritems(rdata):
|
|
||||||
for include in includes:
|
|
||||||
if include in attribute:
|
|
||||||
echo_data[attribute] = value
|
|
||||||
if len(echo_data) > 0:
|
|
||||||
relation_set(relation_settings=echo_data)
|
|
||||||
|
|
||||||
|
|
||||||
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
|
|
||||||
peer_store_fatal=False, relation_settings=None,
|
|
||||||
delimiter='_', **kwargs):
|
|
||||||
"""Store passed-in arguments both in argument relation and in peer storage.
|
|
||||||
|
|
||||||
It functions like doing relation_set() and peer_store() at the same time,
|
|
||||||
with the same data.
|
|
||||||
|
|
||||||
@param relation_id: the id of the relation to store the data on. Defaults
|
|
||||||
to the current relation.
|
|
||||||
@param peer_store_fatal: Set to True, the function will raise an exception
|
|
||||||
should the peer sotrage not be avialable."""
|
|
||||||
|
|
||||||
relation_settings = relation_settings if relation_settings else {}
|
|
||||||
relation_set(relation_id=relation_id,
|
|
||||||
relation_settings=relation_settings,
|
|
||||||
**kwargs)
|
|
||||||
if is_relation_made(peer_relation_name):
|
|
||||||
for key, value in six.iteritems(dict(list(kwargs.items()) +
|
|
||||||
list(relation_settings.items()))):
|
|
||||||
key_prefix = relation_id or current_relation_id()
|
|
||||||
peer_store(key_prefix + delimiter + key,
|
|
||||||
value,
|
|
||||||
relation_name=peer_relation_name)
|
|
||||||
else:
|
|
||||||
if peer_store_fatal:
|
|
||||||
raise ValueError('Unable to detect '
|
|
||||||
'peer relation {}'.format(peer_relation_name))
|
|
|
@@ -19,18 +19,33 @@
 
 import os
 import subprocess
+import sys
 
 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import charm_dir, log
 
-try:
-    from pip import main as pip_execute
-except ImportError:
-    apt_update()
-    apt_install('python-pip')
-    from pip import main as pip_execute
-
 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
 
 
+def pip_execute(*args, **kwargs):
+    """Overriden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to cause add wheels
+    from the /usr/share/python-wheels which are installed by various tools.
+    This function ensures that sys.path remains the same after the call is
+    executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
 def parse_options(given, available):
@@ -42,8 +57,12 @@ def parse_options(given, available):
         yield "--{0}={1}".format(key, value)
 
 
-def pip_install_requirements(requirements, **options):
-    """Install a requirements file """
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
     command = ["install"]
 
     available_options = ('proxy', 'src', 'log', )
@@ -51,6 +70,11 @@ def pip_install_requirements(requirements, **options):
         command.append(option)
 
     command.append("-r {0}".format(requirements))
-    log("Installing from file: {} with options: {}".format(requirements,
-                                                            command))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                                command))
     pip_execute(command)
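Typical use from a charm hook, with assumed file paths shipped in the charm (the constraints file pins transitive dependencies, per the pip user guide linked in the docstring):

    from charmhelpers.contrib.python.packages import pip_install_requirements

    # Paths and proxy value are illustrative.
    pip_install_requirements('files/requirements.txt',
                             constraints='files/constraints.txt',
                             proxy='http://squid.internal:3128')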
@@ -1,118 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""Charm Helpers saltstack - declare the state of your machines.
|
|
||||||
|
|
||||||
This helper enables you to declare your machine state, rather than
|
|
||||||
program it procedurally (and have to test each change to your procedures).
|
|
||||||
Your install hook can be as simple as::
|
|
||||||
|
|
||||||
{{{
|
|
||||||
from charmhelpers.contrib.saltstack import (
|
|
||||||
install_salt_support,
|
|
||||||
update_machine_state,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def install():
|
|
||||||
install_salt_support()
|
|
||||||
update_machine_state('machine_states/dependencies.yaml')
|
|
||||||
update_machine_state('machine_states/installed.yaml')
|
|
||||||
}}}
|
|
||||||
|
|
||||||
and won't need to change (nor will its tests) when you change the machine
|
|
||||||
state.
|
|
||||||
|
|
||||||
It's using a python package called salt-minion which allows various formats for
|
|
||||||
specifying resources, such as::
|
|
||||||
|
|
||||||
{{{
|
|
||||||
/srv/{{ basedir }}:
|
|
||||||
file.directory:
|
|
||||||
- group: ubunet
|
|
||||||
- user: ubunet
|
|
||||||
- require:
|
|
||||||
- user: ubunet
|
|
||||||
- recurse:
|
|
||||||
- user
|
|
||||||
- group
|
|
||||||
|
|
||||||
ubunet:
|
|
||||||
group.present:
|
|
||||||
- gid: 1500
|
|
||||||
user.present:
|
|
||||||
- uid: 1500
|
|
||||||
- gid: 1500
|
|
||||||
- createhome: False
|
|
||||||
- require:
|
|
||||||
- group: ubunet
|
|
||||||
}}}
|
|
||||||
|
|
||||||
The docs for all the different state definitions are at:
|
|
||||||
http://docs.saltstack.com/ref/states/all/
|
|
||||||
|
|
||||||
|
|
||||||
TODO:
|
|
||||||
* Add test helpers which will ensure that machine state definitions
|
|
||||||
are functionally (but not necessarily logically) correct (ie. getting
|
|
||||||
salt to parse all state defs.
|
|
||||||
* Add a link to a public bootstrap charm example / blogpost.
|
|
||||||
* Find a way to obviate the need to use the grains['charm_dir'] syntax
|
|
||||||
in templates.
|
|
||||||
"""
|
|
||||||
# Copyright 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
import charmhelpers.contrib.templating.contexts
|
|
||||||
import charmhelpers.core.host
|
|
||||||
import charmhelpers.core.hookenv
|
|
||||||
|
|
||||||
|
|
||||||
salt_grains_path = '/etc/salt/grains'
|
|
||||||
|
|
||||||
|
|
||||||
def install_salt_support(from_ppa=True):
|
|
||||||
"""Installs the salt-minion helper for machine state.
|
|
||||||
|
|
||||||
By default the salt-minion package is installed from
|
|
||||||
the saltstack PPA. If from_ppa is False you must ensure
|
|
||||||
that the salt-minion package is available in the apt cache.
|
|
||||||
"""
|
|
||||||
if from_ppa:
|
|
||||||
subprocess.check_call([
|
|
||||||
'/usr/bin/add-apt-repository',
|
|
||||||
'--yes',
|
|
||||||
'ppa:saltstack/salt',
|
|
||||||
])
|
|
||||||
subprocess.check_call(['/usr/bin/apt-get', 'update'])
|
|
||||||
# We install salt-common as salt-minion would run the salt-minion
|
|
||||||
# daemon.
|
|
||||||
charmhelpers.fetch.apt_install('salt-common')
|
|
||||||
|
|
||||||
|
|
||||||
def update_machine_state(state_path):
|
|
||||||
"""Update the machine state using the provided state declaration."""
|
|
||||||
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
|
|
||||||
salt_grains_path)
|
|
||||||
subprocess.check_call([
|
|
||||||
'salt-call',
|
|
||||||
'--local',
|
|
||||||
'state.template',
|
|
||||||
state_path,
|
|
||||||
])
|
|
|
@@ -1,94 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
|
|
||||||
"""Generate selfsigned SSL keypair
|
|
||||||
|
|
||||||
You must provide one of the 3 optional arguments:
|
|
||||||
config, subject or cn
|
|
||||||
If more than one is provided the leftmost will be used
|
|
||||||
|
|
||||||
Arguments:
|
|
||||||
keyfile -- (required) full path to the keyfile to be created
|
|
||||||
certfile -- (required) full path to the certfile to be created
|
|
||||||
keysize -- (optional) SSL key length
|
|
||||||
config -- (optional) openssl configuration file
|
|
||||||
subject -- (optional) dictionary with SSL subject variables
|
|
||||||
cn -- (optional) cerfificate common name
|
|
||||||
|
|
||||||
Required keys in subject dict:
|
|
||||||
cn -- Common name (eq. FQDN)
|
|
||||||
|
|
||||||
Optional keys in subject dict
|
|
||||||
country -- Country Name (2 letter code)
|
|
||||||
state -- State or Province Name (full name)
|
|
||||||
locality -- Locality Name (eg, city)
|
|
||||||
organization -- Organization Name (eg, company)
|
|
||||||
organizational_unit -- Organizational Unit Name (eg, section)
|
|
||||||
email -- Email Address
|
|
||||||
"""
|
|
||||||
|
|
||||||
cmd = []
|
|
||||||
if config:
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-config", config]
|
|
||||||
elif subject:
|
|
||||||
ssl_subject = ""
|
|
||||||
if "country" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
|
|
||||||
if "state" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
|
|
||||||
if "locality" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
|
|
||||||
if "organization" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
|
|
||||||
if "organizational_unit" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
|
|
||||||
if "cn" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
|
|
||||||
else:
|
|
||||||
hookenv.log("When using \"subject\" argument you must "
|
|
||||||
"provide \"cn\" field at very least")
|
|
||||||
return False
|
|
||||||
if "email" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
|
|
||||||
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-subj", ssl_subject]
|
|
||||||
elif cn:
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-subj", "/CN={}".format(cn)]
|
|
||||||
|
|
||||||
if not cmd:
|
|
||||||
hookenv.log("No config, subject or cn provided,"
|
|
||||||
"unable to generate self signed SSL certificates")
|
|
||||||
return False
|
|
||||||
try:
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
return True
|
|
||||||
except Exception as e:
|
|
||||||
print("Execution of openssl command failed:\n{}".format(e))
|
|
||||||
return False
|
|
|
@@ -1,279 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
from os.path import join as path_join
|
|
||||||
from os.path import exists
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import log, DEBUG
|
|
||||||
|
|
||||||
STD_CERT = "standard"
|
|
||||||
|
|
||||||
# Mysql server is fairly picky about cert creation
|
|
||||||
# and types, spec its creation separately for now.
|
|
||||||
MYSQL_CERT = "mysql"
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceCA(object):
|
|
||||||
|
|
||||||
default_expiry = str(365 * 2)
|
|
||||||
default_ca_expiry = str(365 * 6)
|
|
||||||
|
|
||||||
def __init__(self, name, ca_dir, cert_type=STD_CERT):
|
|
||||||
self.name = name
|
|
||||||
self.ca_dir = ca_dir
|
|
||||||
self.cert_type = cert_type
|
|
||||||
|
|
||||||
###############
|
|
||||||
# Hook Helper API
|
|
||||||
@staticmethod
|
|
||||||
def get_ca(type=STD_CERT):
|
|
||||||
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
|
||||||
ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
|
|
||||||
ca = ServiceCA(service_name, ca_path, type)
|
|
||||||
ca.init()
|
|
||||||
return ca
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_service_cert(cls, type=STD_CERT):
|
|
||||||
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
|
||||||
ca = cls.get_ca()
|
|
||||||
crt, key = ca.get_or_create_cert(service_name)
|
|
||||||
return crt, key, ca.get_ca_bundle()
|
|
||||||
|
|
||||||
###############
|
|
||||||
|
|
||||||
def init(self):
|
|
||||||
log("initializing service ca", level=DEBUG)
|
|
||||||
if not exists(self.ca_dir):
|
|
||||||
self._init_ca_dir(self.ca_dir)
|
|
||||||
self._init_ca()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_key(self):
|
|
||||||
return path_join(self.ca_dir, 'private', 'cacert.key')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_cert(self):
|
|
||||||
return path_join(self.ca_dir, 'cacert.pem')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_conf(self):
|
|
||||||
return path_join(self.ca_dir, 'ca.cnf')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def signing_conf(self):
|
|
||||||
return path_join(self.ca_dir, 'signing.cnf')
|
|
||||||
|
|
||||||
def _init_ca_dir(self, ca_dir):
|
|
||||||
os.mkdir(ca_dir)
|
|
||||||
for i in ['certs', 'crl', 'newcerts', 'private']:
|
|
||||||
sd = path_join(ca_dir, i)
|
|
||||||
if not exists(sd):
|
|
||||||
os.mkdir(sd)
|
|
||||||
|
|
||||||
if not exists(path_join(ca_dir, 'serial')):
|
|
||||||
with open(path_join(ca_dir, 'serial'), 'w') as fh:
|
|
||||||
fh.write('02\n')
|
|
||||||
|
|
||||||
        if not exists(path_join(ca_dir, 'index.txt')):
            with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
                fh.write('')

    def _init_ca(self):
        """Generate the root ca's cert and key.
        """
        if not exists(path_join(self.ca_dir, 'ca.cnf')):
            with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
                fh.write(
                    CA_CONF_TEMPLATE % (self.get_conf_variables()))

        if not exists(path_join(self.ca_dir, 'signing.cnf')):
            with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
                fh.write(
                    SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))

        if exists(self.ca_cert) or exists(self.ca_key):
            raise RuntimeError("Initialized called when CA already exists")
        cmd = ['openssl', 'req', '-config', self.ca_conf,
               '-x509', '-nodes', '-newkey', 'rsa',
               '-days', self.default_ca_expiry,
               '-keyout', self.ca_key, '-out', self.ca_cert,
               '-outform', 'PEM']
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        log("CA Init:\n %s" % output, level=DEBUG)

    def get_conf_variables(self):
        return dict(
            org_name="juju",
            org_unit_name="%s service" % self.name,
            common_name=self.name,
            ca_dir=self.ca_dir)

    def get_or_create_cert(self, common_name):
        if common_name in self:
            return self.get_certificate(common_name)
        return self.create_certificate(common_name)

    def create_certificate(self, common_name):
        if common_name in self:
            return self.get_certificate(common_name)
        key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
        crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
        csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
        self._create_certificate(common_name, key_p, csr_p, crt_p)
        return self.get_certificate(common_name)

    def get_certificate(self, common_name):
        if common_name not in self:
            raise ValueError("No certificate for %s" % common_name)
        key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
        crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
        with open(crt_p) as fh:
            crt = fh.read()
        with open(key_p) as fh:
            key = fh.read()
        return crt, key

    def __contains__(self, common_name):
        crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
        return exists(crt_p)

    def _create_certificate(self, common_name, key_p, csr_p, crt_p):
        template_vars = self.get_conf_variables()
        template_vars['common_name'] = common_name
        subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
            template_vars)

        log("CA Create Cert %s" % common_name, level=DEBUG)
        cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
               '-nodes', '-days', self.default_expiry,
               '-keyout', key_p, '-out', csr_p, '-subj', subj]
        subprocess.check_call(cmd, stderr=subprocess.PIPE)
        cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
        subprocess.check_call(cmd, stderr=subprocess.PIPE)

        log("CA Sign Cert %s" % common_name, level=DEBUG)
        if self.cert_type == MYSQL_CERT:
            cmd = ['openssl', 'x509', '-req',
                   '-in', csr_p, '-days', self.default_expiry,
                   '-CA', self.ca_cert, '-CAkey', self.ca_key,
                   '-set_serial', '01', '-out', crt_p]
        else:
            cmd = ['openssl', 'ca', '-config', self.signing_conf,
                   '-extensions', 'req_extensions',
                   '-days', self.default_expiry, '-notext',
                   '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
        log("running %s" % " ".join(cmd), level=DEBUG)
        subprocess.check_call(cmd, stderr=subprocess.PIPE)

    def get_ca_bundle(self):
        with open(self.ca_cert) as fh:
            return fh.read()


CA_CONF_TEMPLATE = """
[ ca ]
default_ca = CA_default

[ CA_default ]
dir = %(ca_dir)s
policy = policy_match
database = $dir/index.txt
serial = $dir/serial
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cacert.key
RANDFILE = $dir/private/.rand
default_md = default

[ req ]
default_bits = 1024
default_md = sha1

prompt = no
distinguished_name = ca_distinguished_name

x509_extensions = ca_extensions

[ ca_distinguished_name ]
organizationName = %(org_name)s
organizationalUnitName = %(org_unit_name)s Certificate Authority

[ policy_match ]
countryName = optional
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied

[ ca_extensions ]
basicConstraints = critical,CA:true
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
keyUsage = cRLSign, keyCertSign
"""


SIGNING_CONF_TEMPLATE = """
[ ca ]
default_ca = CA_default

[ CA_default ]
dir = %(ca_dir)s
policy = policy_match
database = $dir/index.txt
serial = $dir/serial
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cacert.key
RANDFILE = $dir/private/.rand
default_md = default

[ req ]
default_bits = 1024
default_md = sha1

prompt = no
distinguished_name = req_distinguished_name

x509_extensions = req_extensions

[ req_distinguished_name ]
organizationName = %(org_name)s
organizationalUnitName = %(org_unit_name)s machine resources
commonName = %(common_name)s

[ policy_match ]
countryName = optional
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied

[ req_extensions ]
basicConstraints = CA:false
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
keyUsage = digitalSignature, keyEncipherment, keyAgreement
extendedKeyUsage = serverAuth, clientAuth
"""
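As a quick orientation, the flow above is: initialise the CA material once, then ask the helper for per-service certificates, which are created and signed on first use and simply read back afterwards. The sketch below is not part of this commit; it assumes the enclosing class is charm-helpers' ServiceCA-style helper and that an instance has already been constructed elsewhere (the constructor is not shown in this excerpt).

# Hedged usage sketch only -- 'ca' is an instance of the CA class whose
# methods appear above; its construction is outside this excerpt.
common_name = 'keystone.example.com'          # illustrative hostname
if common_name not in ca:                     # __contains__ checks for an existing cert
    crt, key = ca.create_certificate(common_name)
else:
    crt, key = ca.get_certificate(common_name)
# Equivalent one-shot call:
crt, key = ca.get_or_create_cert(common_name)
ca_bundle = ca.get_ca_bundle()                # PEM text of the root certificate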
@@ -23,11 +23,16 @@
 # James Page <james.page@ubuntu.com>
 # Adam Gandelman <adamg@ubuntu.com>
 #
+import bisect
+import errno
+import hashlib
+import six
 
 import os
 import shutil
 import json
 import time
+import uuid
 
 from subprocess import (
     check_call,
@@ -35,8 +40,10 @@ from subprocess import (
     CalledProcessError,
 )
 from charmhelpers.core.hookenv import (
+    local_unit,
     relation_get,
     relation_ids,
+    relation_set,
     related_units,
     log,
     DEBUG,
@@ -56,6 +63,8 @@ from charmhelpers.fetch import (
     apt_install,
 )
 
+from charmhelpers.core.kernel import modprobe
+
 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
 KEYFILE = '/etc/ceph/ceph.client.{}.key'
 
@@ -67,6 +76,559 @@ log to syslog = {use_syslog}
 err to syslog = {use_syslog}
 clog to syslog = {use_syslog}
 """
+# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
+
+
+def validator(value, valid_type, valid_range=None):
+    """
+    Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
+    Example input:
+        validator(value=1,
+                  valid_type=int,
+                  valid_range=[0, 2])
+    This says I'm testing value=1.  It must be an int inclusive in [0,2]
+
+    :param value: The value to validate
+    :param valid_type: The type that value should be.
+    :param valid_range: A range of values that value can assume.
+    :return:
+    """
+    assert isinstance(value, valid_type), "{} is not a {}".format(
+        value,
+        valid_type)
+    if valid_range is not None:
+        assert isinstance(valid_range, list), \
+            "valid_range must be a list, was given {}".format(valid_range)
+        # If we're dealing with strings
+        if valid_type is six.string_types:
+            assert value in valid_range, \
+                "{} is not in the list {}".format(value, valid_range)
+        # Integer, float should have a min and max
+        else:
+            if len(valid_range) != 2:
+                raise ValueError(
+                    "Invalid valid_range list of {} for {}.  "
+                    "List must be [min,max]".format(valid_range, value))
+            assert value >= valid_range[0], \
+                "{} is less than minimum allowed value of {}".format(
+                    value, valid_range[0])
+            assert value <= valid_range[1], \
+                "{} is greater than maximum allowed value of {}".format(
+                    value, valid_range[1])
+
+
+class PoolCreationError(Exception):
+    """
+    A custom error to inform the caller that a pool creation failed.  Provides an error message
+    """
+
+    def __init__(self, message):
+        super(PoolCreationError, self).__init__(message)
+
+
+class Pool(object):
+    """
+    An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
+    Do not call create() on this base class as it will not do anything.  Instantiate a child class and call create().
+    """
+
+    def __init__(self, service, name):
+        self.service = service
+        self.name = name
+
+    # Create the pool if it doesn't exist already
+    # To be implemented by subclasses
+    def create(self):
+        pass
+
+    def add_cache_tier(self, cache_pool, mode):
+        """
+        Adds a new cache tier to an existing pool.
+        :param cache_pool: six.string_types. The cache tier pool name to add.
+        :param mode: six.string_types. The caching mode to use for this pool.  valid range = ["readonly", "writeback"]
+        :return: None
+        """
+        # Check the input types and values
+        validator(value=cache_pool, valid_type=six.string_types)
+        validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
+
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
+
+    def remove_cache_tier(self, cache_pool):
+        """
+        Removes a cache tier from Ceph.  Flushes all dirty objects from writeback pools and waits for that to complete.
+        :param cache_pool: six.string_types. The cache tier pool name to remove.
+        :return: None
+        """
+        # read-only is easy, writeback is much harder
+        mode = get_cache_mode(self.service, cache_pool)
+        version = ceph_version()
+        if mode == 'readonly':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+        elif mode == 'writeback':
+            pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
+                                'cache-mode', cache_pool, 'forward']
+            if version >= '10.1':
+                # Jewel added a mandatory flag
+                pool_forward_cmd.append('--yes-i-really-mean-it')
+
+            check_call(pool_forward_cmd)
+            # Flush the cache and wait for it to return
+            check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+    def get_pgs(self, pool_size):
+        """
+        :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
+            erasure coded pools
+        :return: int.  The number of pgs to use.
+        """
+        validator(value=pool_size, valid_type=int)
+        osd_list = get_osds(self.service)
+        if not osd_list:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            return 200
+
+        osd_list_length = len(osd_list)
+        # Calculate based on Ceph best practices
+        if osd_list_length < 5:
+            return 128
+        elif 5 < osd_list_length < 10:
+            return 512
+        elif 10 < osd_list_length < 50:
+            return 4096
+        else:
+            estimate = (osd_list_length * 100) / pool_size
+            # Return the next nearest power of 2
+            index = bisect.bisect_right(powers_of_two, estimate)
+            return powers_of_two[index]
+
+
+class ReplicatedPool(Pool):
+    def __init__(self, service, name, pg_num=None, replicas=2):
+        super(ReplicatedPool, self).__init__(service=service, name=name)
+        self.replicas = replicas
+        if pg_num is None:
+            self.pg_num = self.get_pgs(self.replicas)
+        else:
+            self.pg_num = pg_num
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Create it
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
+                   self.name, str(self.pg_num)]
+            try:
+                check_call(cmd)
+                # Set the pool replica size
+                update_pool(client=self.service,
+                            pool=self.name,
+                            settings={'size': str(self.replicas)})
+            except CalledProcessError:
+                raise
+
+
+# Default jerasure erasure coded pool
+class ErasurePool(Pool):
+    def __init__(self, service, name, erasure_code_profile="default"):
+        super(ErasurePool, self).__init__(service=service, name=name)
+        self.erasure_code_profile = erasure_code_profile
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Try to find the erasure profile information so we can properly size the pgs
+            erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
+
+            # Check for errors
+            if erasure_profile is None:
+                log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
+                    level=ERROR)
+                raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
+            if 'k' not in erasure_profile or 'm' not in erasure_profile:
+                # Error
+                log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
+                    level=ERROR)
+                raise PoolCreationError(
+                    message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
+
+            pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
+            # Create it
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
+                   'erasure', self.erasure_code_profile]
+            try:
+                check_call(cmd)
+            except CalledProcessError:
+                raise
+
+    """Get an existing erasure code profile if it already exists.
+       Returns json formatted output"""
+
+
+def get_mon_map(service):
+    """
+    Returns the current monitor map.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :return: json string. :raise: ValueError if the monmap fails to parse.
+      Also raises CalledProcessError if our ceph command fails
+    """
+    try:
+        mon_status = check_output(
+            ['ceph', '--id', service,
+             'mon_status', '--format=json'])
+        try:
+            return json.loads(mon_status)
+        except ValueError as v:
+            log("Unable to parse mon_status json: {}. Error: {}".format(
+                mon_status, v.message))
+            raise
+    except CalledProcessError as e:
+        log("mon_status command failed with message: {}".format(
+            e.message))
+        raise
+
+
+def hash_monitor_names(service):
+    """
+    Uses the get_mon_map() function to get information about the monitor
+    cluster.
+    Hash the name of each monitor.  Return a sorted list of monitor hashes
+    in an ascending order.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :rtype : dict.  json dict of monitor name, ip address and rank
+    example: {
+        'name': 'ip-172-31-13-165',
+        'rank': 0,
+        'addr': '172.31.13.165:6789/0'}
+    """
+    try:
+        hash_list = []
+        monitor_list = get_mon_map(service=service)
+        if monitor_list['monmap']['mons']:
+            for mon in monitor_list['monmap']['mons']:
+                hash_list.append(
+                    hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
+            return sorted(hash_list)
+        else:
+            return None
+    except (ValueError, CalledProcessError):
+        raise
+
+
+def monitor_key_delete(service, key):
+    """
+    Delete a key and value pair from the monitor cluster
+    :param service: six.string_types. The Ceph user name to run the command under
+    Deletes a key value pair on the monitor cluster.
+    :param key: six.string_types.  The key to delete.
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'del', str(key)])
+    except CalledProcessError as e:
+        log("Monitor config-key put failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_set(service, key, value):
+    """
+    Sets a key value pair on the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types.  The key to set.
+    :param value: The value to set.  This will be converted to a string
+        before setting
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'put', str(key), str(value)])
+    except CalledProcessError as e:
+        log("Monitor config-key put failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_get(service, key):
+    """
+    Gets the value of an existing key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types.  The key to search for.
+    :return: Returns the value of that key or None if not found.
+    """
+    try:
+        output = check_output(
+            ['ceph', '--id', service,
+             'config-key', 'get', str(key)])
+        return output
+    except CalledProcessError as e:
+        log("Monitor config-key get failed with message: {}".format(
+            e.output))
+        return None
+
+
+def monitor_key_exists(service, key):
+    """
+    Searches for the existence of a key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types.  The key to search for
+    :return: Returns True if the key exists, False if not and raises an
+        exception if an unknown error occurs. :raise: CalledProcessError if
+        an unknown error occurs
+    """
+    try:
+        check_call(
+            ['ceph', '--id', service,
+             'config-key', 'exists', str(key)])
+        # I can return true here regardless because Ceph returns
+        # ENOENT if the key wasn't found
+        return True
+    except CalledProcessError as e:
+        if e.returncode == errno.ENOENT:
+            return False
+        else:
+            log("Unknown error from ceph config-get exists: {} {}".format(
+                e.returncode, e.output))
+            raise
+
+
+def get_erasure_profile(service, name):
+    """
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name:
+    :return:
+    """
+    try:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'erasure-code-profile', 'get',
+                            name, '--format=json'])
+        return json.loads(out)
+    except (CalledProcessError, OSError, ValueError):
+        return None
+
+
+def pool_set(service, pool_name, key, value):
+    """
+    Sets a value for a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param key: six.string_types
+    :param value:
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+    """
+    Snapshots a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+    """
+    Remove a snapshot from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+# max_bytes should be an int or long
+def set_pool_quota(service, pool_name, max_bytes):
+    """
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param max_bytes: int or long
+    :return: None.  Can raise CalledProcessError
+    """
+    # Set a byte quota on a RADOS pool in ceph.
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
+           'max_bytes', str(max_bytes)]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_quota(service, pool_name):
+    """
+    Set a byte quota on a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_erasure_profile(service, profile_name):
+    """
+    Create a new erasure code profile if one does not already exist for it.  Updates
+    the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
+           profile_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
+                           failure_domain='host',
+                           data_chunks=2, coding_chunks=1,
+                           locality=None, durability_estimator=None):
+    """
+    Create a new erasure code profile if one does not already exist for it.  Updates
+    the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :param erasure_plugin_name: six.string_types
+    :param failure_domain: six.string_types.  One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
+        'room', 'root', 'row'])
+    :param data_chunks: int
+    :param coding_chunks: int
+    :param locality: int
+    :param durability_estimator: int
+    :return: None.  Can raise CalledProcessError
+    """
+    # Ensure this failure_domain is allowed by Ceph
+    validator(failure_domain, six.string_types,
+              ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+
+    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
+           'ruleset_failure_domain=' + failure_domain]
+    if locality is not None and durability_estimator is not None:
+        raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+
+    # Add plugin specific information
+    if locality is not None:
+        # For local erasure codes
+        cmd.append('l=' + str(locality))
+    if durability_estimator is not None:
+        # For Shec erasure codes
+        cmd.append('c=' + str(durability_estimator))
+
+    if erasure_profile_exists(service, profile_name):
+        cmd.append('--force')
+
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def rename_pool(service, old_name, new_name):
+    """
+    Rename a Ceph pool from old_name to new_name
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param old_name: six.string_types
+    :param new_name: six.string_types
+    :return: None
+    """
+    validator(value=old_name, valid_type=six.string_types)
+    validator(value=new_name, valid_type=six.string_types)
+
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
+    check_call(cmd)
+
+
+def erasure_profile_exists(service, name):
+    """
+    Check to see if an Erasure code profile already exists.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types
+    :return: int or None
+    """
+    validator(value=name, valid_type=six.string_types)
+    try:
+        check_call(['ceph', '--id', service,
+                    'osd', 'erasure-code-profile', 'get',
+                    name])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def get_cache_mode(service, pool_name):
+    """
+    Find the current caching mode of the pool_name given.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: int or None
+    """
+    validator(value=service, valid_type=six.string_types)
+    validator(value=pool_name, valid_type=six.string_types)
+    out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
+    try:
+        osd_json = json.loads(out)
+        for pool in osd_json['pools']:
+            if pool['pool_name'] == pool_name:
+                return pool['cache_mode']
+        return None
+    except ValueError:
+        raise
+
+
+def pool_exists(service, name):
+    """Check to see if a RADOS pool already exists."""
+    try:
+        out = check_output(['rados', '--id', service,
+                            'lspools']).decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return name in out.split()
+
+
+def get_osds(service):
+    """Return a list of all Ceph Object Storage Daemons currently in the
+    cluster.
+    """
+    version = ceph_version()
+    if version and version >= '0.56':
+        return json.loads(check_output(['ceph', '--id', service,
+                                        'osd', 'ls',
+                                        '--format=json']).decode('UTF-8'))
+
+    return None
 
 
 def install():
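The new pool classes are easiest to follow with a small driver. The sketch below is not part of the commit; the pool and profile names are examples only, and it relies only on the classes and functions added in the hunk above (importable from charmhelpers.contrib.storage.linux.ceph).

# Hedged usage sketch of the Pool helpers added above (names are illustrative).
from charmhelpers.contrib.storage.linux.ceph import (
    ReplicatedPool,
    ErasurePool,
    validator,
)
import six

# A replicated pool: pg count is derived from the replica count unless given.
rep = ReplicatedPool(service='admin', name='glance', replicas=3)
rep.create()          # creates the pool, then sets size=3 via update_pool()

# An erasure coded pool: pg count is derived from the profile's k + m.
ec = ErasurePool(service='admin', name='backups',
                 erasure_code_profile='myprofile')
ec.create()

# validator() raises AssertionError/ValueError on bad input.
validator(value='writeback', valid_type=six.string_types,
          valid_range=['readonly', 'writeback'])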
@@ -96,53 +658,37 @@ def create_rbd_image(service, pool, image, sizemb):
     check_call(cmd)
 
 
-def pool_exists(service, name):
-    """Check to see if a RADOS pool already exists."""
-    try:
-        out = check_output(['rados', '--id', service,
-                            'lspools']).decode('UTF-8')
-    except CalledProcessError:
-        return False
+def update_pool(client, pool, settings):
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
+    for k, v in six.iteritems(settings):
+        cmd.append(k)
+        cmd.append(v)
 
-    return name in out
+    check_call(cmd)
 
 
-def get_osds(service):
-    """Return a list of all Ceph Object Storage Daemons currently in the
-    cluster.
-    """
-    version = ceph_version()
-    if version and version >= '0.56':
-        return json.loads(check_output(['ceph', '--id', service,
-                                        'osd', 'ls',
-                                        '--format=json']).decode('UTF-8'))
-
-    return None
-
-
-def create_pool(service, name, replicas=3):
+def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
         log("Ceph pool {} already exists, skipping creation".format(name),
             level=WARNING)
         return
 
-    # Calculate the number of placement groups based
-    # on upstream recommended best practices.
-    osds = get_osds(service)
-    if osds:
-        pgnum = (len(osds) * 100 // replicas)
-    else:
-        # NOTE(james-page): Default to 200 for older ceph versions
-        # which don't support OSD query from cli
-        pgnum = 200
+    if not pg_num:
+        # Calculate the number of placement groups based
+        # on upstream recommended best practices.
+        osds = get_osds(service)
+        if osds:
+            pg_num = (len(osds) * 100 // replicas)
+        else:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            pg_num = 200
 
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
     check_call(cmd)
 
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
-           str(replicas)]
-    check_call(cmd)
+    update_pool(service, name, settings={'size': str(replicas)})
 
 
 def delete_pool(service, name):
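In short, create_pool() now accepts an explicit placement-group count and delegates per-pool settings to the new update_pool() helper. A hedged sketch, not part of the commit, with made-up pool names:

# Example only: explicit pg_num plus a follow-up setting change.
from charmhelpers.contrib.storage.linux.ceph import create_pool, update_pool

create_pool(service='admin', name='cinder-ceph', replicas=3, pg_num=256)
update_pool(client='admin', pool='cinder-ceph', settings={'size': '3'})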
@@ -197,10 +743,10 @@ def create_key_file(service, key):
     log('Created new keyfile at %s.' % keyfile, level=INFO)
 
 
-def get_ceph_nodes():
-    """Query named relation 'ceph' to determine current nodes."""
+def get_ceph_nodes(relation='ceph'):
+    """Query named relation to determine current nodes."""
     hosts = []
-    for r_id in relation_ids('ceph'):
+    for r_id in relation_ids(relation):
         for unit in related_units(r_id):
             hosts.append(relation_get('private-address', unit=unit, rid=r_id))
 
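A minimal sketch of the new keyword argument, not part of the commit; the alternative relation name below is hypothetical:

mon_hosts = get_ceph_nodes()                          # behaves as before
client_hosts = get_ceph_nodes(relation='ceph-client')  # example relation name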
@@ -288,17 +834,6 @@ def place_data_on_block_device(blk_device, data_src_dst):
     os.chown(data_src_dst, uid, gid)
 
 
-# TODO: re-use
-def modprobe(module):
-    """Load a kernel module and configure for auto-load on reboot."""
-    log('Loading kernel module', level=INFO)
-    cmd = ['modprobe', module]
-    check_call(cmd)
-    with open('/etc/modules', 'r+') as modules:
-        if module not in modules.read():
-            modules.write(module)
-
-
 def copy_files(src, dst, symlinks=False, ignore=None):
     """Copy files from src to dst."""
     for item in os.listdir(src):
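The local modprobe() copy removed here is replaced by the import of charmhelpers.core.kernel.modprobe added in the import hunk above. A sketch, not part of the commit:

# Example only: callers now use the shared kernel helper instead of the
# removed local copy; it is expected to behave like the removed function.
from charmhelpers.core.kernel import modprobe

modprobe('rbd')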
@@ -363,14 +898,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
         service_start(svc)
 
 
-def ensure_ceph_keyring(service, user=None, group=None):
+def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
     """Ensures a ceph keyring is created for a named service and optionally
     ensures user and group ownership.
 
     Returns False if no ceph key is available in relation state.
     """
     key = None
-    for rid in relation_ids('ceph'):
+    for rid in relation_ids(relation):
         for unit in related_units(rid):
             key = relation_get('key', rid=rid, unit=unit)
             if key:
@@ -411,17 +946,60 @@ class CephBrokerRq(object):
 
     The API is versioned and defaults to version 1.
     """
-    def __init__(self, api_version=1):
+
+    def __init__(self, api_version=1, request_id=None):
         self.api_version = api_version
+        if request_id:
+            self.request_id = request_id
+        else:
+            self.request_id = str(uuid.uuid1())
         self.ops = []
 
-    def add_op_create_pool(self, name, replica_count=3):
+    def add_op_create_pool(self, name, replica_count=3, pg_num=None):
+        """Adds an operation to create a pool.
+
+        @param pg_num setting: optional setting. If not provided, this value
+        will be calculated by the broker based on how many OSDs are in the
+        cluster at the time of creation. Note that, if provided, this value
+        will be capped at the current available maximum.
+        """
         self.ops.append({'op': 'create-pool', 'name': name,
-                         'replicas': replica_count})
+                         'replicas': replica_count, 'pg_num': pg_num})
+
+    def set_ops(self, ops):
+        """Set request ops to provided value.
+
+        Useful for injecting ops that come from a previous request
+        to allow comparisons to ensure validity.
+        """
+        self.ops = ops
 
     @property
     def request(self):
-        return json.dumps({'api-version': self.api_version, 'ops': self.ops})
+        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
+                           'request-id': self.request_id})
+
+    def _ops_equal(self, other):
+        if len(self.ops) == len(other.ops):
+            for req_no in range(0, len(self.ops)):
+                for key in ['replicas', 'name', 'op', 'pg_num']:
+                    if self.ops[req_no].get(key) != other.ops[req_no].get(key):
+                        return False
+        else:
+            return False
+        return True
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        if self.api_version == other.api_version and \
+                self._ops_equal(other):
+            return True
+        else:
+            return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
 
 
 class CephBrokerRsp(object):
@@ -431,10 +1009,15 @@ class CephBrokerRsp(object):
 
     The API is versioned and defaults to version 1.
     """
+
     def __init__(self, encoded_rsp):
         self.api_version = None
         self.rsp = json.loads(encoded_rsp)
 
+    @property
+    def request_id(self):
+        return self.rsp.get('request-id')
+
     @property
     def exit_code(self):
         return self.rsp.get('exit-code')
@@ -442,3 +1025,182 @@ class CephBrokerRsp(object):
     @property
     def exit_msg(self):
         return self.rsp.get('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
+# unique id so that the client can identity which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+#     send_request_if_needed,
+#     is_request_complete,
+#     CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+#     rq = CephBrokerRq()
+#     rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+#     if is_request_complete(rq):
+#         <Request complete actions>
+#     else:
+#         send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
+# of glance having sent a request to ceph which ceph has successfully processed
+#  'ceph:8': {
+#      'ceph/0': {
+#          'auth': 'cephx',
+#          'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+#          'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+#          'ceph-public-address': '10.5.44.103',
+#          'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+#          'private-address': '10.5.44.103',
+#      },
+#      'glance/0': {
+#          'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+#                         '"ops": [{"replicas": 3, "name": "glance", '
+#                         '"op": "create-pool"}]}'),
+#          'private-address': '10.5.44.109',
+#      },
+#  }
+
+def get_previous_request(rid):
+    """Return the last ceph broker request sent on a given relation
+
+    @param rid: Relation id to query for request
+    """
+    request = None
+    broker_req = relation_get(attribute='broker_req', rid=rid,
+                              unit=local_unit())
+    if broker_req:
+        request_data = json.loads(broker_req)
+        request = CephBrokerRq(api_version=request_data['api-version'],
+                               request_id=request_data['request-id'])
+        request.set_ops(request_data['ops'])
+
+    return request
+
+
+def get_request_states(request, relation='ceph'):
+    """Return a dict of requests per relation id with their corresponding
+       completion state.
+
+    This allows a charm, which has a request for ceph, to see whether there is
+    an equivalent request already being processed and if so what state that
+    request is in.
+
+    @param request: A CephBrokerRq object
+    """
+    complete = []
+    requests = {}
+    for rid in relation_ids(relation):
+        complete = False
+        previous_request = get_previous_request(rid)
+        if request == previous_request:
+            sent = True
+            complete = is_request_complete_for_rid(previous_request, rid)
+        else:
+            sent = False
+            complete = False
+
+        requests[rid] = {
+            'sent': sent,
+            'complete': complete,
+        }
+
+    return requests
+
+
+def is_request_sent(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been sent
+
+    Returns True if a similair request has been sent
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['sent']:
+            return False
+
+    return True
+
+
+def is_request_complete(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been
+    completed
+
+    Returns True if a similair request has been completed
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['complete']:
+            return False
+
+    return True
+
+
+def is_request_complete_for_rid(request, rid):
+    """Check if a given request has been completed on the given relation
+
+    @param request: A CephBrokerRq object
+    @param rid: Relation ID
+    """
+    broker_key = get_broker_rsp_key()
+    for unit in related_units(rid):
+        rdata = relation_get(rid=rid, unit=unit)
+        if rdata.get(broker_key):
+            rsp = CephBrokerRsp(rdata.get(broker_key))
+            if rsp.request_id == request.request_id:
+                if not rsp.exit_code:
+                    return True
+        else:
+            # The remote unit sent no reply targeted at this unit so either the
+            # remote ceph cluster does not support unit targeted replies or it
+            # has not processed our request yet.
+            if rdata.get('broker_rsp'):
+                request_data = json.loads(rdata['broker_rsp'])
+                if request_data.get('request-id'):
+                    log('Ignoring legacy broker_rsp without unit key as remote '
+                        'service supports unit specific replies', level=DEBUG)
+                else:
+                    log('Using legacy broker_rsp as remote service does not '
+                        'supports unit specific replies', level=DEBUG)
+                    rsp = CephBrokerRsp(rdata['broker_rsp'])
+                    if not rsp.exit_code:
+                        return True
+
+    return False
+
+
+def get_broker_rsp_key():
+    """Return broker response key for this unit
+
+    This is the key that ceph is going to use to pass request status
+    information back to this unit
+    """
+    return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request, relation='ceph'):
+    """Send broker request if an equivalent request has not already been sent
+
+    @param request: A CephBrokerRq object
+    """
+    if is_request_sent(request, relation=relation):
+        log('Request already sent but not complete, not sending new request',
+            level=DEBUG)
+    else:
+        for rid in relation_ids(relation):
+            log('Sending request {}'.format(request.request_id), level=DEBUG)
+            relation_set(relation_id=rid, broker_req=request.request)
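Restating the conversation above from the client side as a compact, hedged sketch (not part of the commit; it mirrors the comment-block example and uses only names added in this hunk):

# Example only: an idempotent broker request from a client charm hook.
from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    is_request_complete,
    send_request_if_needed,
)

rq = CephBrokerRq()
rq.add_op_create_pool(name='glance', replica_count=3)
if is_request_complete(rq):
    pass  # request with an equivalent op set already processed; configure storage
else:
    send_request_if_needed(rq)  # only resends if no equivalent request was sent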
@@ -76,3 +76,13 @@ def ensure_loopback_device(path, size):
     check_call(cmd)
 
     return create_loopback(path)
+
+
+def is_mapped_loopback_device(device):
+    """
+    Checks if a given device name is an existing/mapped loopback device.
+    :param device: str: Full path to the device (eg, /dev/loop1).
+    :returns: str: Path to the backing file if is a loopback device
+              empty string otherwise
+    """
+    return loopback_devices().get(device, "")
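A one-line usage sketch, not part of the commit; the device path is illustrative and the import path is assumed to be the loopback module this hunk belongs to:

backing_file = is_mapped_loopback_device('/dev/loop1')  # '' if not a mapped loop device
if backing_file:
    log('loop1 is backed by {}'.format(backing_file), level=DEBUG)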
@@ -43,9 +43,10 @@ def zap_disk(block_device):
 
     :param block_device: str: Full path of block device to clean.
     '''
+    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
     # sometimes sgdisk exits non-zero; this is OK, dd will clean up
-    call(['sgdisk', '--zap-all', '--mbrtogpt',
-          '--clear', block_device])
+    call(['sgdisk', '--zap-all', '--', block_device])
+    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
     dev_end = check_output(['blockdev', '--getsz',
                             block_device]).decode('UTF-8')
     gpt_end = int(dev_end.split()[0]) - 100
@@ -63,8 +64,8 @@ def is_device_mounted(device):
     :returns: boolean: True if the path represents a mounted device, False if
         it doesn't.
     '''
-    is_partition = bool(re.search(r".*[0-9]+\b", device))
-    out = check_output(['mount']).decode('UTF-8')
-    if is_partition:
-        return bool(re.search(device + r"\b", out))
-    return bool(re.search(device + r"[0-9]*\b", out))
+    try:
+        out = check_output(['lsblk', '-P', device]).decode('UTF-8')
+    except:
+        return False
+    return bool(re.search(r'MOUNTPOINT=".+"', out))
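is_device_mounted() now asks lsblk for key="value" pairs and looks for a non-empty MOUNTPOINT instead of grepping the output of mount. A short sketch, not part of the commit, pairing it with zap_disk() from the previous hunk (device path illustrative):

# Example only: guard a destructive zap with the lsblk-based mount check.
if not is_device_mounted('/dev/sdb'):
    zap_disk('/dev/sdb')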
@ -1,15 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
|
@ -1,139 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Copyright 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
|
||||||
"""A helper to create a yaml cache of config with namespaced relation data."""
|
|
||||||
import os
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
import charmhelpers.core.hookenv
|
|
||||||
|
|
||||||
|
|
||||||
charm_dir = os.environ.get('CHARM_DIR', '')
|
|
||||||
|
|
||||||
|
|
||||||
def dict_keys_without_hyphens(a_dict):
|
|
||||||
"""Return the a new dict with underscores instead of hyphens in keys."""
|
|
||||||
return dict(
|
|
||||||
(key.replace('-', '_'), val) for key, val in a_dict.items())
|
|
||||||
|
|
||||||
|
|
||||||
def update_relations(context, namespace_separator=':'):
|
|
||||||
"""Update the context with the relation data."""
|
|
||||||
# Add any relation data prefixed with the relation type.
|
|
||||||
relation_type = charmhelpers.core.hookenv.relation_type()
|
|
||||||
relations = []
|
|
||||||
context['current_relation'] = {}
|
|
||||||
if relation_type is not None:
|
|
||||||
relation_data = charmhelpers.core.hookenv.relation_get()
|
|
||||||
context['current_relation'] = relation_data
|
|
||||||
# Deprecated: the following use of relation data as keys
|
|
||||||
# directly in the context will be removed.
|
|
||||||
relation_data = dict(
|
|
||||||
("{relation_type}{namespace_separator}{key}".format(
|
|
||||||
relation_type=relation_type,
|
|
||||||
key=key,
|
|
||||||
namespace_separator=namespace_separator), val)
|
|
||||||
for key, val in relation_data.items())
|
|
||||||
relation_data = dict_keys_without_hyphens(relation_data)
|
|
||||||
context.update(relation_data)
|
|
||||||
relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
|
|
||||||
relations = [dict_keys_without_hyphens(rel) for rel in relations]
|
|
||||||
|
|
||||||
context['relations_full'] = charmhelpers.core.hookenv.relations()
|
|
||||||
|
|
||||||
# the hookenv.relations() data structure is effectively unusable in
|
|
||||||
# templates and other contexts when trying to access relation data other
|
|
||||||
# than the current relation. So provide a more useful structure that works
|
|
||||||
# with any hook.
|
|
||||||
local_unit = charmhelpers.core.hookenv.local_unit()
|
|
||||||
relations = {}
|
|
||||||
for rname, rids in context['relations_full'].items():
|
|
||||||
relations[rname] = []
|
|
||||||
for rid, rdata in rids.items():
|
|
||||||
data = rdata.copy()
|
|
||||||
if local_unit in rdata:
|
|
||||||
data.pop(local_unit)
|
|
||||||
for unit_name, rel_data in data.items():
|
|
||||||
new_data = {'__relid__': rid, '__unit__': unit_name}
|
|
||||||
new_data.update(rel_data)
|
|
||||||
relations[rname].append(new_data)
|
|
||||||
context['relations'] = relations
|
|
||||||
|
|
||||||
|
|
||||||
def juju_state_to_yaml(yaml_path, namespace_separator=':',
|
|
||||||
allow_hyphens_in_keys=True, mode=None):
|
|
||||||
"""Update the juju config and state in a yaml file.
|
|
||||||
|
|
||||||
This includes any current relation-get data, and the charm
|
|
||||||
directory.
|
|
||||||
|
|
||||||
This function was created for the ansible and saltstack
|
|
||||||
support, as those libraries can use a yaml file to supply
|
|
||||||
context to templates, but it may be useful generally to
|
|
||||||
create and update an on-disk cache of all the config, including
|
|
||||||
previous relation data.
|
|
||||||
|
|
||||||
By default, hyphens are allowed in keys as this is supported
|
|
||||||
by yaml, but for tools like ansible, hyphens are not valid [1].
|
|
||||||
|
|
||||||
[1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
|
|
||||||
"""
|
|
||||||
config = charmhelpers.core.hookenv.config()
|
|
||||||
|
|
||||||
# Add the charm_dir which we will need to refer to charm
|
|
||||||
# file resources etc.
|
|
||||||
config['charm_dir'] = charm_dir
|
|
||||||
config['local_unit'] = charmhelpers.core.hookenv.local_unit()
|
|
||||||
config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
|
|
||||||
config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
|
|
||||||
'public-address'
|
|
||||||
)
|
|
||||||
|
|
||||||
# Don't use non-standard tags for unicode which will not
|
|
||||||
# work when salt uses yaml.load_safe.
|
|
||||||
yaml.add_representer(six.text_type,
|
|
||||||
lambda dumper, value: dumper.represent_scalar(
|
|
||||||
six.u('tag:yaml.org,2002:str'), value))
|
|
||||||
|
|
||||||
yaml_dir = os.path.dirname(yaml_path)
|
|
||||||
if not os.path.exists(yaml_dir):
|
|
||||||
os.makedirs(yaml_dir)
|
|
||||||
|
|
||||||
if os.path.exists(yaml_path):
|
|
||||||
with open(yaml_path, "r") as existing_vars_file:
|
|
||||||
existing_vars = yaml.load(existing_vars_file.read())
|
|
||||||
else:
|
|
||||||
with open(yaml_path, "w+"):
|
|
||||||
pass
|
|
||||||
existing_vars = {}
|
|
||||||
|
|
||||||
if mode is not None:
|
|
||||||
os.chmod(yaml_path, mode)
|
|
||||||
|
|
||||||
if not allow_hyphens_in_keys:
|
|
||||||
config = dict_keys_without_hyphens(config)
|
|
||||||
existing_vars.update(config)
|
|
||||||
|
|
||||||
update_relations(existing_vars, namespace_separator)
|
|
||||||
|
|
||||||
with open(yaml_path, "w+") as fp:
|
|
||||||
fp.write(yaml.dump(existing_vars, default_flow_style=False))
|
|
|
@ -1,39 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Templating using the python-jinja2 package.
|
|
||||||
"""
|
|
||||||
import six
|
|
||||||
from charmhelpers.fetch import apt_install
|
|
||||||
try:
|
|
||||||
import jinja2
|
|
||||||
except ImportError:
|
|
||||||
if six.PY3:
|
|
||||||
apt_install(["python3-jinja2"])
|
|
||||||
else:
|
|
||||||
apt_install(["python-jinja2"])
|
|
||||||
import jinja2
|
|
||||||
|
|
||||||
|
|
||||||
DEFAULT_TEMPLATES_DIR = 'templates'
|
|
||||||
|
|
||||||
|
|
||||||
def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR):
|
|
||||||
templates = jinja2.Environment(
|
|
||||||
loader=jinja2.FileSystemLoader(template_dir))
|
|
||||||
template = templates.get_template(template_name)
|
|
||||||
return template.render(context)
|
|
|
@ -1,29 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

'''
Templating using standard Python str.format() method.
'''

from charmhelpers.core import hookenv


def render(template, extra={}, **kwargs):
    """Return the template rendered using Python's str.format()."""
    context = hookenv.execution_environment()
    context.update(extra)
    context.update(kwargs)
    return template.format(**context)
@ -1,313 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Easy file synchronization among peer units using ssh + unison.
#
# For the -joined, -changed, and -departed peer relations, add a call to
# ssh_authorized_peers() describing the peer relation and the desired
# user + group. After all peer relations have settled, all hosts should
# be able to connect to one another via key-authenticated ssh as the
# specified user.
#
# Other hooks are then free to synchronize files and directories using
# sync_to_peers().
#
# For a peer relation named 'cluster', for example:
#
# cluster-relation-joined:
#   ...
#   ssh_authorized_peers(peer_interface='cluster',
#                        user='juju_ssh', group='juju_ssh',
#                        ensure_local_user=True)
#   ...
#
# cluster-relation-changed:
#   ...
#   ssh_authorized_peers(peer_interface='cluster',
#                        user='juju_ssh', group='juju_ssh',
#                        ensure_local_user=True)
#   ...
#
# cluster-relation-departed:
#   ...
#   ssh_authorized_peers(peer_interface='cluster',
#                        user='juju_ssh', group='juju_ssh',
#                        ensure_local_user=True)
#   ...
#
# Hooks are now free to sync files as easily as:
#
# files = ['/etc/fstab', '/etc/apt.conf.d/']
# sync_to_peers(peer_interface='cluster',
#               user='juju_ssh', paths=files)
#
# It is assumed the charm itself has set up permissions on each unit
# such that 'juju_ssh' has read + write permissions. It is also assumed
# that the calling charm takes care of leader delegation.
#
# Additionally, files can be synchronized to one specific unit:
# sync_to_peer(slave_address, user='juju_ssh',
#              paths=files, verbose=False)

import os
|
|
||||||
import pwd
|
|
||||||
|
|
||||||
from copy import copy
|
|
||||||
from subprocess import check_call, check_output
|
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
adduser,
|
|
||||||
add_user_to_group,
|
|
||||||
pwgen,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
hook_name,
|
|
||||||
relation_ids,
|
|
||||||
related_units,
|
|
||||||
relation_set,
|
|
||||||
relation_get,
|
|
||||||
unit_private_ip,
|
|
||||||
INFO,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
|
|
||||||
'-fastcheck=true', '-group=false', '-owner=false',
|
|
||||||
'-prefer=newer', '-times=true']
|
|
||||||
|
|
||||||
|
|
||||||
def get_homedir(user):
|
|
||||||
try:
|
|
||||||
user = pwd.getpwnam(user)
|
|
||||||
return user.pw_dir
|
|
||||||
except KeyError:
|
|
||||||
log('Could not get homedir for user %s: does the user exist?' % (user), ERROR)
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
|
|
||||||
def create_private_key(user, priv_key_path, key_type='rsa'):
|
|
||||||
types_bits = {
|
|
||||||
'rsa': '2048',
|
|
||||||
'ecdsa': '521',
|
|
||||||
}
|
|
||||||
if key_type not in types_bits:
|
|
||||||
log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR)
|
|
||||||
key_type = 'rsa'
|
|
||||||
if not os.path.isfile(priv_key_path):
|
|
||||||
log('Generating new SSH key for user %s.' % user)
|
|
||||||
cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type,
|
|
||||||
'-b', types_bits[key_type], '-f', priv_key_path]
|
|
||||||
check_call(cmd)
|
|
||||||
else:
|
|
||||||
log('SSH key already exists at %s.' % priv_key_path)
|
|
||||||
check_call(['chown', user, priv_key_path])
|
|
||||||
check_call(['chmod', '0600', priv_key_path])
|
|
||||||
|
|
||||||
|
|
||||||
def create_public_key(user, priv_key_path, pub_key_path):
|
|
||||||
if not os.path.isfile(pub_key_path):
|
|
||||||
log('Generating missing ssh public key @ %s.' % pub_key_path)
|
|
||||||
cmd = ['ssh-keygen', '-y', '-f', priv_key_path]
|
|
||||||
p = check_output(cmd).strip()
|
|
||||||
with open(pub_key_path, 'wb') as out:
|
|
||||||
out.write(p)
|
|
||||||
check_call(['chown', user, pub_key_path])
|
|
||||||
|
|
||||||
|
|
||||||
def get_keypair(user):
|
|
||||||
home_dir = get_homedir(user)
|
|
||||||
ssh_dir = os.path.join(home_dir, '.ssh')
|
|
||||||
priv_key = os.path.join(ssh_dir, 'id_rsa')
|
|
||||||
pub_key = '%s.pub' % priv_key
|
|
||||||
|
|
||||||
if not os.path.isdir(ssh_dir):
|
|
||||||
os.mkdir(ssh_dir)
|
|
||||||
check_call(['chown', '-R', user, ssh_dir])
|
|
||||||
|
|
||||||
create_private_key(user, priv_key)
|
|
||||||
create_public_key(user, priv_key, pub_key)
|
|
||||||
|
|
||||||
with open(priv_key, 'r') as p:
|
|
||||||
_priv = p.read().strip()
|
|
||||||
|
|
||||||
with open(pub_key, 'r') as p:
|
|
||||||
_pub = p.read().strip()
|
|
||||||
|
|
||||||
return (_priv, _pub)
|
|
||||||
|
|
||||||
|
|
||||||
def write_authorized_keys(user, keys):
|
|
||||||
home_dir = get_homedir(user)
|
|
||||||
ssh_dir = os.path.join(home_dir, '.ssh')
|
|
||||||
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
|
|
||||||
log('Syncing authorized_keys @ %s.' % auth_keys)
|
|
||||||
with open(auth_keys, 'w') as out:
|
|
||||||
for k in keys:
|
|
||||||
out.write('%s\n' % k)
|
|
||||||
|
|
||||||
|
|
||||||
def write_known_hosts(user, hosts):
|
|
||||||
home_dir = get_homedir(user)
|
|
||||||
ssh_dir = os.path.join(home_dir, '.ssh')
|
|
||||||
known_hosts = os.path.join(ssh_dir, 'known_hosts')
|
|
||||||
khosts = []
|
|
||||||
for host in hosts:
|
|
||||||
cmd = ['ssh-keyscan', host]
|
|
||||||
remote_key = check_output(cmd, universal_newlines=True).strip()
|
|
||||||
khosts.append(remote_key)
|
|
||||||
log('Syncing known_hosts @ %s.' % known_hosts)
|
|
||||||
with open(known_hosts, 'w') as out:
|
|
||||||
for host in khosts:
|
|
||||||
out.write('%s\n' % host)
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_user(user, group=None):
|
|
||||||
adduser(user, pwgen())
|
|
||||||
if group:
|
|
||||||
add_user_to_group(user, group)
|
|
||||||
|
|
||||||
|
|
||||||
def ssh_authorized_peers(peer_interface, user, group=None,
|
|
||||||
ensure_local_user=False):
|
|
||||||
"""
|
|
||||||
Main setup function, should be called from both peer -changed and -joined
|
|
||||||
hooks with the same parameters.
|
|
||||||
"""
|
|
||||||
if ensure_local_user:
|
|
||||||
ensure_user(user, group)
|
|
||||||
priv_key, pub_key = get_keypair(user)
|
|
||||||
hook = hook_name()
|
|
||||||
if hook == '%s-relation-joined' % peer_interface:
|
|
||||||
relation_set(ssh_pub_key=pub_key)
|
|
||||||
elif hook == '%s-relation-changed' % peer_interface or \
|
|
||||||
hook == '%s-relation-departed' % peer_interface:
|
|
||||||
hosts = []
|
|
||||||
keys = []
|
|
||||||
|
|
||||||
for r_id in relation_ids(peer_interface):
|
|
||||||
for unit in related_units(r_id):
|
|
||||||
ssh_pub_key = relation_get('ssh_pub_key',
|
|
||||||
rid=r_id,
|
|
||||||
unit=unit)
|
|
||||||
priv_addr = relation_get('private-address',
|
|
||||||
rid=r_id,
|
|
||||||
unit=unit)
|
|
||||||
if ssh_pub_key:
|
|
||||||
keys.append(ssh_pub_key)
|
|
||||||
hosts.append(priv_addr)
|
|
||||||
else:
|
|
||||||
log('ssh_authorized_peers(): ssh_pub_key '
|
|
||||||
'missing for unit %s, skipping.' % unit)
|
|
||||||
write_authorized_keys(user, keys)
|
|
||||||
write_known_hosts(user, hosts)
|
|
||||||
authed_hosts = ':'.join(hosts)
|
|
||||||
relation_set(ssh_authorized_hosts=authed_hosts)
|
|
||||||
|
|
||||||
|
|
||||||
def _run_as_user(user, gid=None):
|
|
||||||
try:
|
|
||||||
user = pwd.getpwnam(user)
|
|
||||||
except KeyError:
|
|
||||||
log('Invalid user: %s' % user)
|
|
||||||
raise Exception
|
|
||||||
uid = user.pw_uid
|
|
||||||
gid = gid or user.pw_gid
|
|
||||||
os.environ['HOME'] = user.pw_dir
|
|
||||||
|
|
||||||
def _inner():
|
|
||||||
os.setgid(gid)
|
|
||||||
os.setuid(uid)
|
|
||||||
return _inner
|
|
||||||
|
|
||||||
|
|
||||||
def run_as_user(user, cmd, gid=None):
|
|
||||||
return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/')
|
|
||||||
|
|
||||||
|
|
||||||
def collect_authed_hosts(peer_interface):
|
|
||||||
'''Iterate through the units on peer interface to find all that
|
|
||||||
have the calling host in its authorized hosts list'''
|
|
||||||
hosts = []
|
|
||||||
for r_id in (relation_ids(peer_interface) or []):
|
|
||||||
for unit in related_units(r_id):
|
|
||||||
private_addr = relation_get('private-address',
|
|
||||||
rid=r_id, unit=unit)
|
|
||||||
authed_hosts = relation_get('ssh_authorized_hosts',
|
|
||||||
rid=r_id, unit=unit)
|
|
||||||
|
|
||||||
if not authed_hosts:
|
|
||||||
log('Peer %s has not authorized *any* hosts yet, skipping.' %
|
|
||||||
(unit), level=INFO)
|
|
||||||
continue
|
|
||||||
|
|
||||||
if unit_private_ip() in authed_hosts.split(':'):
|
|
||||||
hosts.append(private_addr)
|
|
||||||
else:
|
|
||||||
log('Peer %s has not authorized *this* host yet, skipping.' %
|
|
||||||
(unit), level=INFO)
|
|
||||||
return hosts
|
|
||||||
|
|
||||||
|
|
||||||
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None,
|
|
||||||
fatal=False):
|
|
||||||
"""Sync path to an specific peer host
|
|
||||||
|
|
||||||
Propagates exception if operation fails and fatal=True.
|
|
||||||
"""
|
|
||||||
cmd = cmd or copy(BASE_CMD)
|
|
||||||
if not verbose:
|
|
||||||
cmd.append('-silent')
|
|
||||||
|
|
||||||
# Remove the trailing slash from directory paths; unison
# doesn't like them.
|
|
||||||
if path.endswith('/'):
|
|
||||||
path = path[:(len(path) - 1)]
|
|
||||||
|
|
||||||
cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
|
|
||||||
|
|
||||||
try:
|
|
||||||
log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
|
|
||||||
run_as_user(user, cmd, gid)
|
|
||||||
except:
|
|
||||||
log('Error syncing remote files')
|
|
||||||
if fatal:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None,
|
|
||||||
fatal=False):
|
|
||||||
"""Sync paths to an specific peer host
|
|
||||||
|
|
||||||
Propagates exception if any operation fails and fatal=True.
|
|
||||||
"""
|
|
||||||
if paths:
|
|
||||||
for p in paths:
|
|
||||||
sync_path_to_host(p, host, user, verbose, cmd, gid, fatal)
|
|
||||||
|
|
||||||
|
|
||||||
def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None,
|
|
||||||
gid=None, fatal=False):
|
|
||||||
"""Sync all hosts to an specific path
|
|
||||||
|
|
||||||
The type of group is integer, it allows user has permissions to
|
|
||||||
operate a directory have a different group id with the user id.
|
|
||||||
|
|
||||||
Propagates exception if any operation fails and fatal=True.
|
|
||||||
"""
|
|
||||||
if paths:
|
|
||||||
for host in collect_authed_hosts(peer_interface):
|
|
||||||
sync_to_peer(host, user, paths, verbose, cmd, gid, fatal)
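
As a rough usage sketch of the helpers above (the module is removed from this charm's tree in this change, but has historically lived at charmhelpers.contrib.unison), a peer hook might wire key exchange and file sync together like this; the relation name, user and paths are illustrative assumptions:

    from charmhelpers.contrib.unison import ssh_authorized_peers, sync_to_peers

    def cluster_relation_changed():
        # Exchange ssh keys and known_hosts over the 'cluster' peer relation.
        ssh_authorized_peers(peer_interface='cluster',
                             user='juju_ssh', group='juju_ssh',
                             ensure_local_user=True)
        # Push shared files to every peer that has authorized this host.
        sync_to_peers(peer_interface='cluster', user='juju_ssh',
                      paths=['/etc/myservice/shared.conf'])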
|
|
|
@ -74,6 +74,7 @@ def cached(func):
|
||||||
res = func(*args, **kwargs)
|
res = func(*args, **kwargs)
|
||||||
cache[key] = res
|
cache[key] = res
|
||||||
return res
|
return res
|
||||||
|
wrapper._wrapped = func
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
@ -173,9 +174,19 @@ def relation_type():
|
||||||
return os.environ.get('JUJU_RELATION', None)
|
return os.environ.get('JUJU_RELATION', None)
|
||||||
|
|
||||||
|
|
||||||
def relation_id():
|
@cached
|
||||||
"""The relation ID for the current relation hook"""
|
def relation_id(relation_name=None, service_or_unit=None):
|
||||||
|
"""The relation ID for the current or a specified relation"""
|
||||||
|
if not relation_name and not service_or_unit:
|
||||||
return os.environ.get('JUJU_RELATION_ID', None)
|
return os.environ.get('JUJU_RELATION_ID', None)
|
||||||
|
elif relation_name and service_or_unit:
|
||||||
|
service_name = service_or_unit.split('/')[0]
|
||||||
|
for relid in relation_ids(relation_name):
|
||||||
|
remote_service = remote_service_name(relid)
|
||||||
|
if remote_service == service_name:
|
||||||
|
return relid
|
||||||
|
else:
|
||||||
|
raise ValueError('Must specify neither or both of relation_name and service_or_unit')
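
A short sketch of the two calling modes the new signature supports; the relation and unit names are placeholders:

    from charmhelpers.core.hookenv import relation_id

    # Inside a relation hook: no arguments, reads JUJU_RELATION_ID.
    current_rid = relation_id()

    # Outside a relation hook: resolve the id from a relation name plus a
    # remote service or unit (both must be supplied together).
    rid = relation_id(relation_name='shared-db', service_or_unit='mysql/0')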
|
||||||
|
|
||||||
|
|
||||||
def local_unit():
|
def local_unit():
|
||||||
|
@ -193,9 +204,20 @@ def service_name():
|
||||||
return local_unit().split('/')[0]
|
return local_unit().split('/')[0]
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def remote_service_name(relid=None):
|
||||||
|
"""The remote service name for a given relation-id (or the current relation)"""
|
||||||
|
if relid is None:
|
||||||
|
unit = remote_unit()
|
||||||
|
else:
|
||||||
|
units = related_units(relid)
|
||||||
|
unit = units[0] if units else None
|
||||||
|
return unit.split('/')[0] if unit else None
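
For example, assuming a remote unit 'mysql/0' on the current relation, the helper reduces it to the service name; a relation id can be passed to inspect a different relation:

    from charmhelpers.core.hookenv import remote_service_name

    app = remote_service_name()                 # 'mysql' in the current hook
    other = remote_service_name('shared-db:1')  # assumed relation id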
|
||||||
|
|
||||||
|
|
||||||
def hook_name():
|
def hook_name():
|
||||||
"""The name of the currently executing hook"""
|
"""The name of the currently executing hook"""
|
||||||
return os.path.basename(sys.argv[0])
|
return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
|
||||||
|
|
||||||
|
|
||||||
class Config(dict):
|
class Config(dict):
|
||||||
|
@ -468,6 +490,76 @@ def relation_types():
|
||||||
return rel_types
|
return rel_types
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def peer_relation_id():
|
||||||
|
'''Get the peers relation id if a peers relation has been joined, else None.'''
|
||||||
|
md = metadata()
|
||||||
|
section = md.get('peers')
|
||||||
|
if section:
|
||||||
|
for key in section:
|
||||||
|
relids = relation_ids(key)
|
||||||
|
if relids:
|
||||||
|
return relids[0]
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_to_interface(relation_name):
|
||||||
|
"""
|
||||||
|
Given the name of a relation, return the interface that relation uses.
|
||||||
|
|
||||||
|
:returns: The interface name, or ``None``.
|
||||||
|
"""
|
||||||
|
return relation_to_role_and_interface(relation_name)[1]
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_to_role_and_interface(relation_name):
|
||||||
|
"""
|
||||||
|
Given the name of a relation, return the role and the name of the interface
|
||||||
|
that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
|
||||||
|
|
||||||
|
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
|
||||||
|
"""
|
||||||
|
_metadata = metadata()
|
||||||
|
for role in ('provides', 'requires', 'peers'):
|
||||||
|
interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
|
||||||
|
if interface:
|
||||||
|
return role, interface
|
||||||
|
return None, None
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def role_and_interface_to_relations(role, interface_name):
|
||||||
|
"""
|
||||||
|
Given a role and interface name, return a list of relation names for the
|
||||||
|
current charm that use that interface under that role (where role is one
|
||||||
|
of ``provides``, ``requires``, or ``peers``).
|
||||||
|
|
||||||
|
:returns: A list of relation names.
|
||||||
|
"""
|
||||||
|
_metadata = metadata()
|
||||||
|
results = []
|
||||||
|
for relation_name, relation in _metadata.get(role, {}).items():
|
||||||
|
if relation['interface'] == interface_name:
|
||||||
|
results.append(relation_name)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def interface_to_relations(interface_name):
|
||||||
|
"""
|
||||||
|
Given an interface, return a list of relation names for the current
|
||||||
|
charm that use that interface.
|
||||||
|
|
||||||
|
:returns: A list of relation names.
|
||||||
|
"""
|
||||||
|
results = []
|
||||||
|
for role in ('provides', 'requires', 'peers'):
|
||||||
|
results.extend(role_and_interface_to_relations(role, interface_name))
|
||||||
|
return results
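
As an illustration of these metadata helpers, assume a metadata.yaml that requires a 'shared-db' relation on the 'mysql-shared' interface; the lookups then work in both directions:

    from charmhelpers.core.hookenv import (
        interface_to_relations,
        relation_to_role_and_interface,
    )

    role, iface = relation_to_role_and_interface('shared-db')
    # ('requires', 'mysql-shared')
    names = interface_to_relations('mysql-shared')
    # ['shared-db']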
|
||||||
|
|
||||||
|
|
||||||
@cached
|
@cached
|
||||||
def charm_name():
|
def charm_name():
|
||||||
"""Get the name of the current charm as is specified on metadata.yaml"""
|
"""Get the name of the current charm as is specified on metadata.yaml"""
|
||||||
|
@ -544,6 +636,38 @@ def unit_private_ip():
|
||||||
return unit_get('private-address')
|
return unit_get('private-address')
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def storage_get(attribute=None, storage_id=None):
|
||||||
|
"""Get storage attributes"""
|
||||||
|
_args = ['storage-get', '--format=json']
|
||||||
|
if storage_id:
|
||||||
|
_args.extend(('-s', storage_id))
|
||||||
|
if attribute:
|
||||||
|
_args.append(attribute)
|
||||||
|
try:
|
||||||
|
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def storage_list(storage_name=None):
|
||||||
|
"""List the storage IDs for the unit"""
|
||||||
|
_args = ['storage-list', '--format=json']
|
||||||
|
if storage_name:
|
||||||
|
_args.append(storage_name)
|
||||||
|
try:
|
||||||
|
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
except OSError as e:
|
||||||
|
import errno
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
# storage-list does not exist
|
||||||
|
return []
|
||||||
|
raise
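
A hedged example of pairing the two storage helpers, assuming a storage entry named 'data' in metadata.yaml and Juju's standard 'location' attribute:

    from charmhelpers.core.hookenv import storage_get, storage_list

    for sid in storage_list('data') or []:
        location = storage_get('location', sid)
        if location:
            # e.g. '/srv/data/0', wherever Juju mounted the store
            break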
|
||||||
|
|
||||||
|
|
||||||
class UnregisteredHookError(Exception):
|
class UnregisteredHookError(Exception):
|
||||||
"""Raised when an undefined hook is called"""
|
"""Raised when an undefined hook is called"""
|
||||||
pass
|
pass
|
||||||
|
@ -644,6 +768,21 @@ def action_fail(message):
|
||||||
subprocess.check_call(['action-fail', message])
|
subprocess.check_call(['action-fail', message])
|
||||||
|
|
||||||
|
|
||||||
|
def action_name():
|
||||||
|
"""Get the name of the currently executing action."""
|
||||||
|
return os.environ.get('JUJU_ACTION_NAME')
|
||||||
|
|
||||||
|
|
||||||
|
def action_uuid():
|
||||||
|
"""Get the UUID of the currently executing action."""
|
||||||
|
return os.environ.get('JUJU_ACTION_UUID')
|
||||||
|
|
||||||
|
|
||||||
|
def action_tag():
|
||||||
|
"""Get the tag for the currently executing action."""
|
||||||
|
return os.environ.get('JUJU_ACTION_TAG')
|
||||||
|
|
||||||
|
|
||||||
def status_set(workload_state, message):
|
def status_set(workload_state, message):
|
||||||
"""Set the workload state with a message
|
"""Set the workload state with a message
|
||||||
|
|
||||||
|
@ -673,25 +812,28 @@ def status_set(workload_state, message):
|
||||||
|
|
||||||
|
|
||||||
def status_get():
|
def status_get():
|
||||||
"""Retrieve the previously set juju workload state
|
"""Retrieve the previously set juju workload state and message
|
||||||
|
|
||||||
|
If the status-get command is not found then assume this is juju < 1.23 and
|
||||||
|
return 'unknown', ""
|
||||||
|
|
||||||
If the status-set command is not found then assume this is juju < 1.23 and
|
|
||||||
return 'unknown'
|
|
||||||
"""
|
"""
|
||||||
cmd = ['status-get']
|
cmd = ['status-get', "--format=json", "--include-data"]
|
||||||
try:
|
try:
|
||||||
raw_status = subprocess.check_output(cmd, universal_newlines=True)
|
raw_status = subprocess.check_output(cmd)
|
||||||
status = raw_status.rstrip()
|
|
||||||
return status
|
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
if e.errno == errno.ENOENT:
|
if e.errno == errno.ENOENT:
|
||||||
return 'unknown'
|
return ('unknown', "")
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
else:
|
||||||
|
status = json.loads(raw_status.decode("UTF-8"))
|
||||||
|
return (status["status"], status["message"])
|
||||||
|
|
||||||
|
|
||||||
def translate_exc(from_exc, to_exc):
|
def translate_exc(from_exc, to_exc):
|
||||||
def inner_translate_exc1(f):
|
def inner_translate_exc1(f):
|
||||||
|
@wraps(f)
|
||||||
def inner_translate_exc2(*args, **kwargs):
|
def inner_translate_exc2(*args, **kwargs):
|
||||||
try:
|
try:
|
||||||
return f(*args, **kwargs)
|
return f(*args, **kwargs)
|
||||||
|
@ -736,6 +878,58 @@ def leader_set(settings=None, **kwargs):
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def payload_register(ptype, klass, pid):
|
||||||
|
""" is used while a hook is running to let Juju know that a
|
||||||
|
payload has been started."""
|
||||||
|
cmd = ['payload-register']
|
||||||
|
for x in [ptype, klass, pid]:
|
||||||
|
cmd.append(x)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def payload_unregister(klass, pid):
|
||||||
|
""" is used while a hook is running to let Juju know
|
||||||
|
that a payload has been manually stopped. The <class> and <id> provided
|
||||||
|
must match a payload that has been previously registered with juju using
|
||||||
|
payload-register."""
|
||||||
|
cmd = ['payload-unregister']
|
||||||
|
for x in [klass, pid]:
|
||||||
|
cmd.append(x)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def payload_status_set(klass, pid, status):
|
||||||
|
"""is used to update the current status of a registered payload.
|
||||||
|
The <class> and <id> provided must match a payload that has been previously
|
||||||
|
registered with juju using payload-register. The <status> must be one of the
|
||||||
|
follow: starting, started, stopping, stopped"""
|
||||||
|
cmd = ['payload-status-set']
|
||||||
|
for x in [klass, pid, status]:
|
||||||
|
cmd.append(x)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def resource_get(name):
|
||||||
|
"""used to fetch the resource path of the given name.
|
||||||
|
|
||||||
|
<name> must match a name of defined resource in metadata.yaml
|
||||||
|
|
||||||
|
returns either a path or False if resource not available
|
||||||
|
"""
|
||||||
|
if not name:
|
||||||
|
return False
|
||||||
|
|
||||||
|
cmd = ['resource-get', name]
|
||||||
|
try:
|
||||||
|
return subprocess.check_output(cmd).decode('UTF-8')
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
return False
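
A small sketch of consuming a charm resource; the resource name 'docs' is an assumption, and the returned path may carry a trailing newline, hence the strip:

    from charmhelpers.core.hookenv import resource_get

    path = resource_get('docs')
    if path:
        with open(path.strip()) as f:
            payload = f.read()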
|
||||||
|
|
||||||
|
|
||||||
@cached
|
@cached
|
||||||
def juju_version():
|
def juju_version():
|
||||||
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
|
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
|
||||||
|
@ -800,3 +994,16 @@ def _run_atexit():
|
||||||
for callback, args, kwargs in reversed(_atexit):
|
for callback, args, kwargs in reversed(_atexit):
|
||||||
callback(*args, **kwargs)
|
callback(*args, **kwargs)
|
||||||
del _atexit[:]
|
del _atexit[:]
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def network_get_primary_address(binding):
|
||||||
|
'''
|
||||||
|
Retrieve the primary network address for a named binding
|
||||||
|
|
||||||
|
:param binding: string. The name of a relation or extra-binding
|
||||||
|
:return: string. The primary IP address for the named binding
|
||||||
|
:raise: NotImplementedError if run on Juju < 2.0
|
||||||
|
'''
|
||||||
|
cmd = ['network-get', '--primary-address', binding]
|
||||||
|
return subprocess.check_output(cmd).strip()
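
Since network-get only exists on Juju 2.0 and later, callers typically fall back to the private address; the binding name here is illustrative:

    from charmhelpers.core.hookenv import (
        network_get_primary_address,
        unit_private_ip,
    )

    try:
        addr = network_get_primary_address('public')
    except NotImplementedError:
        # Juju < 2.0: no network-get, use the unit's private address
        addr = unit_private_ip()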
|
||||||
|
|
|
@ -30,6 +30,8 @@ import random
|
||||||
import string
|
import string
|
||||||
import subprocess
|
import subprocess
|
||||||
import hashlib
|
import hashlib
|
||||||
|
import functools
|
||||||
|
import itertools
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
|
|
||||||
|
@ -63,54 +65,96 @@ def service_reload(service_name, restart_on_failure=False):
|
||||||
return service_result
|
return service_result
|
||||||
|
|
||||||
|
|
||||||
def service_pause(service_name, init_dir=None):
|
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
|
||||||
"""Pause a system service.
|
"""Pause a system service.
|
||||||
|
|
||||||
Stop it, and prevent it from starting again at boot."""
|
Stop it, and prevent it from starting again at boot."""
|
||||||
if init_dir is None:
|
stopped = True
|
||||||
init_dir = "/etc/init"
|
if service_running(service_name):
|
||||||
stopped = service_stop(service_name)
|
stopped = service_stop(service_name)
|
||||||
# XXX: Support systemd too
|
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
||||||
|
sysv_file = os.path.join(initd_dir, service_name)
|
||||||
|
if init_is_systemd():
|
||||||
|
service('disable', service_name)
|
||||||
|
elif os.path.exists(upstart_file):
|
||||||
override_path = os.path.join(
|
override_path = os.path.join(
|
||||||
init_dir, '{}.conf.override'.format(service_name))
|
init_dir, '{}.override'.format(service_name))
|
||||||
with open(override_path, 'w') as fh:
|
with open(override_path, 'w') as fh:
|
||||||
fh.write("manual\n")
|
fh.write("manual\n")
|
||||||
|
elif os.path.exists(sysv_file):
|
||||||
|
subprocess.check_call(["update-rc.d", service_name, "disable"])
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
"Unable to detect {0} as SystemD, Upstart {1} or"
|
||||||
|
" SysV {2}".format(
|
||||||
|
service_name, upstart_file, sysv_file))
|
||||||
return stopped
|
return stopped
|
||||||
|
|
||||||
|
|
||||||
def service_resume(service_name, init_dir=None):
|
def service_resume(service_name, init_dir="/etc/init",
|
||||||
|
initd_dir="/etc/init.d"):
|
||||||
"""Resume a system service.
|
"""Resume a system service.
|
||||||
|
|
||||||
Reenable starting again at boot. Start the service"""
|
Reenable starting again at boot. Start the service"""
|
||||||
# XXX: Support systemd too
|
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
||||||
if init_dir is None:
|
sysv_file = os.path.join(initd_dir, service_name)
|
||||||
init_dir = "/etc/init"
|
if init_is_systemd():
|
||||||
|
service('enable', service_name)
|
||||||
|
elif os.path.exists(upstart_file):
|
||||||
override_path = os.path.join(
|
override_path = os.path.join(
|
||||||
init_dir, '{}.conf.override'.format(service_name))
|
init_dir, '{}.override'.format(service_name))
|
||||||
if os.path.exists(override_path):
|
if os.path.exists(override_path):
|
||||||
os.unlink(override_path)
|
os.unlink(override_path)
|
||||||
|
elif os.path.exists(sysv_file):
|
||||||
|
subprocess.check_call(["update-rc.d", service_name, "enable"])
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
"Unable to detect {0} as SystemD, Upstart {1} or"
|
||||||
|
" SysV {2}".format(
|
||||||
|
service_name, upstart_file, sysv_file))
|
||||||
|
|
||||||
|
started = service_running(service_name)
|
||||||
|
if not started:
|
||||||
started = service_start(service_name)
|
started = service_start(service_name)
|
||||||
return started
|
return started
|
||||||
|
|
||||||
|
|
||||||
def service(action, service_name):
|
def service(action, service_name):
|
||||||
"""Control a system service"""
|
"""Control a system service"""
|
||||||
|
if init_is_systemd():
|
||||||
|
cmd = ['systemctl', action, service_name]
|
||||||
|
else:
|
||||||
cmd = ['service', service_name, action]
|
cmd = ['service', service_name, action]
|
||||||
return subprocess.call(cmd) == 0
|
return subprocess.call(cmd) == 0
|
||||||
|
|
||||||
|
|
||||||
def service_running(service):
|
def systemv_services_running():
|
||||||
|
output = subprocess.check_output(
|
||||||
|
['service', '--status-all'],
|
||||||
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
|
return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
|
||||||
|
|
||||||
|
|
||||||
|
def service_running(service_name):
|
||||||
"""Determine whether a system service is running"""
|
"""Determine whether a system service is running"""
|
||||||
|
if init_is_systemd():
|
||||||
|
return service('is-active', service_name)
|
||||||
|
else:
|
||||||
try:
|
try:
|
||||||
output = subprocess.check_output(
|
output = subprocess.check_output(
|
||||||
['service', service, 'status'],
|
['service', service_name, 'status'],
|
||||||
stderr=subprocess.STDOUT).decode('UTF-8')
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
except subprocess.CalledProcessError:
|
except subprocess.CalledProcessError:
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
if ("start/running" in output or "is running" in output):
|
# This works for upstart scripts where the 'service' command
|
||||||
|
# returns a consistent string to represent running 'start/running'
|
||||||
|
if ("start/running" in output or "is running" in output or
|
||||||
|
"up and running" in output):
|
||||||
|
return True
|
||||||
|
# Check System V scripts init script return codes
|
||||||
|
if service_name in systemv_services_running():
|
||||||
return True
|
return True
|
||||||
else:
|
|
||||||
return False
|
return False
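
A minimal sketch of the pause/resume pair from a charm's perspective, assuming an 'apache2' service; init-system detection (systemd, Upstart, SysV) is handled inside the helpers:

    from charmhelpers.core.host import (
        service_pause,
        service_resume,
        service_running,
    )

    service_pause('apache2')       # stop now and disable at boot
    assert not service_running('apache2')
    service_resume('apache2')      # re-enable at boot and start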
|
||||||
|
|
||||||
|
|
||||||
|
@ -126,8 +170,29 @@ def service_available(service_name):
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
SYSTEMD_SYSTEM = '/run/systemd/system'
|
||||||
"""Add a user to the system"""
|
|
||||||
|
|
||||||
|
def init_is_systemd():
|
||||||
|
"""Return True if the host system uses systemd, False otherwise."""
|
||||||
|
return os.path.isdir(SYSTEMD_SYSTEM)
|
||||||
|
|
||||||
|
|
||||||
|
def adduser(username, password=None, shell='/bin/bash', system_user=False,
|
||||||
|
primary_group=None, secondary_groups=None):
|
||||||
|
"""Add a user to the system.
|
||||||
|
|
||||||
|
Will log but otherwise succeed if the user already exists.
|
||||||
|
|
||||||
|
:param str username: Username to create
|
||||||
|
:param str password: Password for user; if ``None``, create a system user
|
||||||
|
:param str shell: The default shell for the user
|
||||||
|
:param bool system_user: Whether to create a login or system user
|
||||||
|
:param str primary_group: Primary group for user; defaults to username
|
||||||
|
:param list secondary_groups: Optional list of additional groups
|
||||||
|
|
||||||
|
:returns: The password database entry struct, as returned by `pwd.getpwnam`
|
||||||
|
"""
|
||||||
try:
|
try:
|
||||||
user_info = pwd.getpwnam(username)
|
user_info = pwd.getpwnam(username)
|
||||||
log('user {0} already exists!'.format(username))
|
log('user {0} already exists!'.format(username))
|
||||||
|
@ -142,12 +207,32 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
||||||
'--shell', shell,
|
'--shell', shell,
|
||||||
'--password', password,
|
'--password', password,
|
||||||
])
|
])
|
||||||
|
if not primary_group:
|
||||||
|
try:
|
||||||
|
grp.getgrnam(username)
|
||||||
|
primary_group = username # avoid "group exists" error
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
if primary_group:
|
||||||
|
cmd.extend(['-g', primary_group])
|
||||||
|
if secondary_groups:
|
||||||
|
cmd.extend(['-G', ','.join(secondary_groups)])
|
||||||
cmd.append(username)
|
cmd.append(username)
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
user_info = pwd.getpwnam(username)
|
user_info = pwd.getpwnam(username)
|
||||||
return user_info
|
return user_info
|
||||||
|
|
||||||
|
|
||||||
|
def user_exists(username):
|
||||||
|
"""Check if a user exists"""
|
||||||
|
try:
|
||||||
|
pwd.getpwnam(username)
|
||||||
|
user_exists = True
|
||||||
|
except KeyError:
|
||||||
|
user_exists = False
|
||||||
|
return user_exists
|
||||||
|
|
||||||
|
|
||||||
def add_group(group_name, system_group=False):
|
def add_group(group_name, system_group=False):
|
||||||
"""Add a group to the system"""
|
"""Add a group to the system"""
|
||||||
try:
|
try:
|
||||||
|
@ -229,14 +314,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
|
||||||
|
|
||||||
|
|
||||||
def fstab_remove(mp):
|
def fstab_remove(mp):
|
||||||
"""Remove the given mountpoint entry from /etc/fstab
|
"""Remove the given mountpoint entry from /etc/fstab"""
|
||||||
"""
|
|
||||||
return Fstab.remove_by_mountpoint(mp)
|
return Fstab.remove_by_mountpoint(mp)
|
||||||
|
|
||||||
|
|
||||||
def fstab_add(dev, mp, fs, options=None):
|
def fstab_add(dev, mp, fs, options=None):
|
||||||
"""Adds the given device entry to the /etc/fstab file
|
"""Adds the given device entry to the /etc/fstab file"""
|
||||||
"""
|
|
||||||
return Fstab.add(dev, mp, fs, options=options)
|
return Fstab.add(dev, mp, fs, options=options)
|
||||||
|
|
||||||
|
|
||||||
|
@ -280,9 +363,19 @@ def mounts():
|
||||||
return system_mounts
|
return system_mounts
|
||||||
|
|
||||||
|
|
||||||
|
def fstab_mount(mountpoint):
|
||||||
|
"""Mount filesystem using fstab"""
|
||||||
|
cmd_args = ['mount', mountpoint]
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
log('Error mounting {}\n{}'.format(mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
def file_hash(path, hash_type='md5'):
|
def file_hash(path, hash_type='md5'):
|
||||||
"""
|
"""Generate a hash checksum of the contents of 'path' or None if not found.
|
||||||
Generate a hash checksum of the contents of 'path' or None if not found.
|
|
||||||
|
|
||||||
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
|
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
|
||||||
such as md5, sha1, sha256, sha512, etc.
|
such as md5, sha1, sha256, sha512, etc.
|
||||||
|
@ -297,10 +390,9 @@ def file_hash(path, hash_type='md5'):
|
||||||
|
|
||||||
|
|
||||||
def path_hash(path):
|
def path_hash(path):
|
||||||
"""
|
"""Generate a hash checksum of all files matching 'path'. Standard
|
||||||
Generate a hash checksum of all files matching 'path'. Standard wildcards
|
wildcards like '*' and '?' are supported, see documentation for the 'glob'
|
||||||
like '*' and '?' are supported, see documentation for the 'glob' module for
|
module for more information.
|
||||||
more information.
|
|
||||||
|
|
||||||
:return: dict: A { filename: hash } dictionary for all matched files.
|
:return: dict: A { filename: hash } dictionary for all matched files.
|
||||||
Empty if none found.
|
Empty if none found.
|
||||||
|
@ -312,8 +404,7 @@ def path_hash(path):
|
||||||
|
|
||||||
|
|
||||||
def check_hash(path, checksum, hash_type='md5'):
|
def check_hash(path, checksum, hash_type='md5'):
|
||||||
"""
|
"""Validate a file using a cryptographic checksum.
|
||||||
Validate a file using a cryptographic checksum.
|
|
||||||
|
|
||||||
:param str checksum: Value of the checksum used to validate the file.
|
:param str checksum: Value of the checksum used to validate the file.
|
||||||
:param str hash_type: Hash algorithm used to generate `checksum`.
|
:param str hash_type: Hash algorithm used to generate `checksum`.
|
||||||
|
@ -328,10 +419,11 @@ def check_hash(path, checksum, hash_type='md5'):
|
||||||
|
|
||||||
|
|
||||||
class ChecksumError(ValueError):
|
class ChecksumError(ValueError):
|
||||||
|
"""A class derived from Value error to indicate the checksum failed."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def restart_on_change(restart_map, stopstart=False):
|
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
|
||||||
"""Restart services based on configuration files changing
|
"""Restart services based on configuration files changing
|
||||||
|
|
||||||
This function is used a decorator, for example::
|
This function is used a decorator, for example::
|
||||||
|
@ -349,27 +441,58 @@ def restart_on_change(restart_map, stopstart=False):
|
||||||
restarted if any file matching the pattern got changed, created
|
restarted if any file matching the pattern got changed, created
|
||||||
or removed. Standard wildcards are supported, see documentation
|
or removed. Standard wildcards are supported, see documentation
|
||||||
for the 'glob' module for more information.
|
for the 'glob' module for more information.
|
||||||
|
|
||||||
|
@param restart_map: {path_file_name: [service_name, ...]
|
||||||
|
@param stopstart: DEFAULT false; whether to stop, start OR restart
|
||||||
|
@param restart_functions: nonstandard functions to use to restart services
|
||||||
|
{svc: func, ...}
|
||||||
|
@returns result from decorated function
|
||||||
"""
|
"""
|
||||||
def wrap(f):
|
def wrap(f):
|
||||||
|
@functools.wraps(f)
|
||||||
def wrapped_f(*args, **kwargs):
|
def wrapped_f(*args, **kwargs):
|
||||||
checksums = {path: path_hash(path) for path in restart_map}
|
return restart_on_change_helper(
|
||||||
f(*args, **kwargs)
|
(lambda: f(*args, **kwargs)), restart_map, stopstart,
|
||||||
restarts = []
|
restart_functions)
|
||||||
for path in restart_map:
|
|
||||||
if path_hash(path) != checksums[path]:
|
|
||||||
restarts += restart_map[path]
|
|
||||||
services_list = list(OrderedDict.fromkeys(restarts))
|
|
||||||
if not stopstart:
|
|
||||||
for service_name in services_list:
|
|
||||||
service('restart', service_name)
|
|
||||||
else:
|
|
||||||
for action in ['stop', 'start']:
|
|
||||||
for service_name in services_list:
|
|
||||||
service(action, service_name)
|
|
||||||
return wrapped_f
|
return wrapped_f
|
||||||
return wrap
|
return wrap
|
||||||
|
|
||||||
|
|
||||||
|
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
|
||||||
|
restart_functions=None):
|
||||||
|
"""Helper function to perform the restart_on_change function.
|
||||||
|
|
||||||
|
This is provided for decorators to restart services if files described
|
||||||
|
in the restart_map have changed after an invocation of lambda_f().
|
||||||
|
|
||||||
|
@param lambda_f: function to call.
|
||||||
|
@param restart_map: {file: [service, ...]}
|
||||||
|
@param stopstart: whether to stop, start or restart a service
|
||||||
|
@param restart_functions: nonstandard functions to use to restart services
|
||||||
|
{svc: func, ...}
|
||||||
|
@returns result of lambda_f()
|
||||||
|
"""
|
||||||
|
if restart_functions is None:
|
||||||
|
restart_functions = {}
|
||||||
|
checksums = {path: path_hash(path) for path in restart_map}
|
||||||
|
r = lambda_f()
|
||||||
|
# create a list of lists of the services to restart
|
||||||
|
restarts = [restart_map[path]
|
||||||
|
for path in restart_map
|
||||||
|
if path_hash(path) != checksums[path]]
|
||||||
|
# create a flat list of ordered services without duplicates from lists
|
||||||
|
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
|
||||||
|
if services_list:
|
||||||
|
actions = ('stop', 'start') if stopstart else ('restart',)
|
||||||
|
for service_name in services_list:
|
||||||
|
if service_name in restart_functions:
|
||||||
|
restart_functions[service_name](service_name)
|
||||||
|
else:
|
||||||
|
for action in actions:
|
||||||
|
service(action, service_name)
|
||||||
|
return r
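
For instance, a config-writing hook might combine the decorator with restart_functions so one service is reloaded instead of restarted; the service names and paths are illustrative:

    from charmhelpers.core.host import restart_on_change, service_reload

    @restart_on_change(
        {'/etc/haproxy/haproxy.cfg': ['haproxy'],
         '/etc/myapp/app.conf': ['myapp']},
        restart_functions={'haproxy': lambda svc: service_reload(svc)})
    def write_config():
        pass  # render the configuration files here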
|
||||||
|
|
||||||
|
|
||||||
def lsb_release():
|
def lsb_release():
|
||||||
"""Return /etc/lsb-release in a dict"""
|
"""Return /etc/lsb-release in a dict"""
|
||||||
d = {}
|
d = {}
|
||||||
|
@ -396,36 +519,92 @@ def pwgen(length=None):
|
||||||
return(''.join(random_chars))
|
return(''.join(random_chars))
|
||||||
|
|
||||||
|
|
||||||
def list_nics(nic_type):
|
def is_phy_iface(interface):
|
||||||
'''Return a list of nics of given type(s)'''
|
"""Returns True if interface is not virtual, otherwise False."""
|
||||||
|
if interface:
|
||||||
|
sys_net = '/sys/class/net'
|
||||||
|
if os.path.isdir(sys_net):
|
||||||
|
for iface in glob.glob(os.path.join(sys_net, '*')):
|
||||||
|
if '/virtual/' in os.path.realpath(iface):
|
||||||
|
continue
|
||||||
|
|
||||||
|
if interface == os.path.basename(iface):
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def get_bond_master(interface):
|
||||||
|
"""Returns bond master if interface is bond slave otherwise None.
|
||||||
|
|
||||||
|
NOTE: the provided interface is expected to be physical
|
||||||
|
"""
|
||||||
|
if interface:
|
||||||
|
iface_path = '/sys/class/net/%s' % (interface)
|
||||||
|
if os.path.exists(iface_path):
|
||||||
|
if '/virtual/' in os.path.realpath(iface_path):
|
||||||
|
return None
|
||||||
|
|
||||||
|
master = os.path.join(iface_path, 'master')
|
||||||
|
if os.path.exists(master):
|
||||||
|
master = os.path.realpath(master)
|
||||||
|
# make sure it is a bond master
|
||||||
|
if os.path.exists(os.path.join(master, 'bonding')):
|
||||||
|
return os.path.basename(master)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def list_nics(nic_type=None):
|
||||||
|
"""Return a list of nics of given type(s)"""
|
||||||
if isinstance(nic_type, six.string_types):
|
if isinstance(nic_type, six.string_types):
|
||||||
int_types = [nic_type]
|
int_types = [nic_type]
|
||||||
else:
|
else:
|
||||||
int_types = nic_type
|
int_types = nic_type
|
||||||
|
|
||||||
interfaces = []
|
interfaces = []
|
||||||
|
if nic_type:
|
||||||
for int_type in int_types:
|
for int_type in int_types:
|
||||||
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
|
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
|
||||||
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
ip_output = subprocess.check_output(cmd).decode('UTF-8')
|
||||||
|
ip_output = ip_output.split('\n')
|
||||||
ip_output = (line for line in ip_output if line)
|
ip_output = (line for line in ip_output if line)
|
||||||
for line in ip_output:
|
for line in ip_output:
|
||||||
if line.split()[1].startswith(int_type):
|
if line.split()[1].startswith(int_type):
|
||||||
matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
|
matched = re.search('.*: (' + int_type +
|
||||||
|
r'[0-9]+\.[0-9]+)@.*', line)
|
||||||
if matched:
|
if matched:
|
||||||
interface = matched.groups()[0]
|
iface = matched.groups()[0]
|
||||||
else:
|
else:
|
||||||
interface = line.split()[1].replace(":", "")
|
iface = line.split()[1].replace(":", "")
|
||||||
interfaces.append(interface)
|
|
||||||
|
if iface not in interfaces:
|
||||||
|
interfaces.append(iface)
|
||||||
|
else:
|
||||||
|
cmd = ['ip', 'a']
|
||||||
|
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
||||||
|
ip_output = (line.strip() for line in ip_output if line)
|
||||||
|
|
||||||
|
key = re.compile('^[0-9]+:\s+(.+):')
|
||||||
|
for line in ip_output:
|
||||||
|
matched = re.search(key, line)
|
||||||
|
if matched:
|
||||||
|
iface = matched.group(1)
|
||||||
|
iface = iface.partition("@")[0]
|
||||||
|
if iface not in interfaces:
|
||||||
|
interfaces.append(iface)
|
||||||
|
|
||||||
return interfaces
|
return interfaces
|
||||||
|
|
||||||
|
|
||||||
def set_nic_mtu(nic, mtu):
|
def set_nic_mtu(nic, mtu):
|
||||||
'''Set MTU on a network interface'''
|
"""Set the Maximum Transmission Unit (MTU) on a network interface."""
|
||||||
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
|
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
def get_nic_mtu(nic):
|
def get_nic_mtu(nic):
|
||||||
|
"""Return the Maximum Transmission Unit (MTU) for a network interface."""
|
||||||
cmd = ['ip', 'addr', 'show', nic]
|
cmd = ['ip', 'addr', 'show', nic]
|
||||||
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
||||||
mtu = ""
|
mtu = ""
|
||||||
|
@ -437,6 +616,7 @@ def get_nic_mtu(nic):
|
||||||
|
|
||||||
|
|
||||||
def get_nic_hwaddr(nic):
|
def get_nic_hwaddr(nic):
|
||||||
|
"""Return the Media Access Control (MAC) for a network interface."""
|
||||||
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
|
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
|
||||||
ip_output = subprocess.check_output(cmd).decode('UTF-8')
|
ip_output = subprocess.check_output(cmd).decode('UTF-8')
|
||||||
hwaddr = ""
|
hwaddr = ""
|
||||||
|
@ -447,7 +627,7 @@ def get_nic_hwaddr(nic):
|
||||||
|
|
||||||
|
|
||||||
def cmp_pkgrevno(package, revno, pkgcache=None):
|
def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||||
'''Compare supplied revno with the revno of the installed package
|
"""Compare supplied revno with the revno of the installed package
|
||||||
|
|
||||||
* 1 => Installed revno is greater than supplied arg
|
* 1 => Installed revno is greater than supplied arg
|
||||||
* 0 => Installed revno is the same as supplied arg
|
* 0 => Installed revno is the same as supplied arg
|
||||||
|
@ -456,7 +636,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||||
This function imports apt_cache function from charmhelpers.fetch if
|
This function imports apt_cache function from charmhelpers.fetch if
|
||||||
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
|
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
|
||||||
you call this function, or pass an apt_pkg.Cache() instance.
|
you call this function, or pass an apt_pkg.Cache() instance.
|
||||||
'''
|
"""
|
||||||
import apt_pkg
|
import apt_pkg
|
||||||
if not pkgcache:
|
if not pkgcache:
|
||||||
from charmhelpers.fetch import apt_cache
|
from charmhelpers.fetch import apt_cache
|
||||||
|
@ -466,15 +646,30 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
|
@contextmanager
|
||||||
def chdir(d):
|
def chdir(directory):
|
||||||
|
"""Change the current working directory to a different directory for a code
|
||||||
|
block and return the previous directory after the block exits. Useful to
|
||||||
|
run commands from a specified directory.
|
||||||
|
|
||||||
|
:param str directory: The directory path to change to for this context.
|
||||||
|
"""
|
||||||
cur = os.getcwd()
|
cur = os.getcwd()
|
||||||
try:
|
try:
|
||||||
yield os.chdir(d)
|
yield os.chdir(directory)
|
||||||
finally:
|
finally:
|
||||||
os.chdir(cur)
|
os.chdir(cur)
|
||||||
|
|
||||||
|
|
||||||
def chownr(path, owner, group, follow_links=True):
|
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
|
||||||
|
"""Recursively change user and group ownership of files and directories
|
||||||
|
in given path. Doesn't chown path itself by default, only its children.
|
||||||
|
|
||||||
|
:param str path: The string path to start changing ownership.
|
||||||
|
:param str owner: The owner string to use when looking up the uid.
|
||||||
|
:param str group: The group string to use when looking up the gid.
|
||||||
|
:param bool follow_links: Also Chown links if True
|
||||||
|
:param bool chowntopdir: Also chown path itself if True
|
||||||
|
"""
|
||||||
uid = pwd.getpwnam(owner).pw_uid
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
gid = grp.getgrnam(group).gr_gid
|
gid = grp.getgrnam(group).gr_gid
|
||||||
if follow_links:
|
if follow_links:
|
||||||
|
@ -482,6 +677,10 @@ def chownr(path, owner, group, follow_links=True):
|
||||||
else:
|
else:
|
||||||
chown = os.lchown
|
chown = os.lchown
|
||||||
|
|
||||||
|
if chowntopdir:
|
||||||
|
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
|
||||||
|
if not broken_symlink:
|
||||||
|
chown(path, uid, gid)
|
||||||
for root, dirs, files in os.walk(path):
|
for root, dirs, files in os.walk(path):
|
||||||
for name in dirs + files:
|
for name in dirs + files:
|
||||||
full = os.path.join(root, name)
|
full = os.path.join(root, name)
|
||||||
|
@ -491,4 +690,28 @@ def chownr(path, owner, group, follow_links=True):
|
||||||
|
|
||||||
|
|
||||||
def lchownr(path, owner, group):
|
def lchownr(path, owner, group):
|
||||||
|
"""Recursively change user and group ownership of files and directories
|
||||||
|
in a given path, not following symbolic links. See the documentation for
|
||||||
|
'os.lchown' for more information.
|
||||||
|
|
||||||
|
:param str path: The string path to start changing ownership.
|
||||||
|
:param str owner: The owner string to use when looking up the uid.
|
||||||
|
:param str group: The group string to use when looking up the gid.
|
||||||
|
"""
|
||||||
chownr(path, owner, group, follow_links=False)
|
chownr(path, owner, group, follow_links=False)
|
||||||
|
|
||||||
|
|
||||||
|
def get_total_ram():
|
||||||
|
"""The total amount of system RAM in bytes.
|
||||||
|
|
||||||
|
This is what is reported by the OS, and may be overcommitted when
|
||||||
|
there are multiple containers hosted on the same machine.
|
||||||
|
"""
|
||||||
|
with open('/proc/meminfo', 'r') as f:
|
||||||
|
for line in f.readlines():
|
||||||
|
if line:
|
||||||
|
key, value, unit = line.split()
|
||||||
|
if key == 'MemTotal:':
|
||||||
|
assert unit == 'kB', 'Unknown unit'
|
||||||
|
return int(value) * 1024 # Classic, not KiB.
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
|
@ -0,0 +1,71 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
from charmhelpers.core import fstab
|
||||||
|
from charmhelpers.core import sysctl
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
add_group,
|
||||||
|
add_user_to_group,
|
||||||
|
fstab_mount,
|
||||||
|
mkdir,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.strutils import bytes_from_string
|
||||||
|
from subprocess import check_output
|
||||||
|
|
||||||
|
|
||||||
|
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
|
||||||
|
max_map_count=65536, mnt_point='/run/hugepages/kvm',
|
||||||
|
pagesize='2MB', mount=True, set_shmmax=False):
|
||||||
|
"""Enable hugepages on system.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user (str) -- Username to allow access to hugepages to
|
||||||
|
group (str) -- Group name to own hugepages
|
||||||
|
nr_hugepages (int) -- Number of pages to reserve
|
||||||
|
max_map_count (int) -- Number of Virtual Memory Areas a process can own
|
||||||
|
mnt_point (str) -- Directory to mount hugepages on
|
||||||
|
pagesize (str) -- Size of hugepages
|
||||||
|
mount (bool) -- Whether to Mount hugepages
|
||||||
|
"""
|
||||||
|
group_info = add_group(group)
|
||||||
|
gid = group_info.gr_gid
|
||||||
|
add_user_to_group(user, group)
|
||||||
|
if max_map_count < 2 * nr_hugepages:
|
||||||
|
max_map_count = 2 * nr_hugepages
|
||||||
|
sysctl_settings = {
|
||||||
|
'vm.nr_hugepages': nr_hugepages,
|
||||||
|
'vm.max_map_count': max_map_count,
|
||||||
|
'vm.hugetlb_shm_group': gid,
|
||||||
|
}
|
||||||
|
if set_shmmax:
|
||||||
|
shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
|
||||||
|
shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
|
||||||
|
if shmmax_minsize > shmmax_current:
|
||||||
|
sysctl_settings['kernel.shmmax'] = shmmax_minsize
|
||||||
|
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
|
||||||
|
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
|
||||||
|
lfstab = fstab.Fstab()
|
||||||
|
fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
|
||||||
|
if fstab_entry:
|
||||||
|
lfstab.remove_entry(fstab_entry)
|
||||||
|
entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
|
||||||
|
'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
|
||||||
|
lfstab.add_entry(entry)
|
||||||
|
if mount:
|
||||||
|
fstab_mount(mnt_point)
|
|
@ -0,0 +1,68 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
INFO
|
||||||
|
)
|
||||||
|
|
||||||
|
from subprocess import check_call, check_output
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
def modprobe(module, persist=True):
|
||||||
|
"""Load a kernel module and configure for auto-load on reboot."""
|
||||||
|
cmd = ['modprobe', module]
|
||||||
|
|
||||||
|
log('Loading kernel module %s' % module, level=INFO)
|
||||||
|
|
||||||
|
check_call(cmd)
|
||||||
|
if persist:
|
||||||
|
with open('/etc/modules', 'r+') as modules:
|
||||||
|
if module not in modules.read():
|
||||||
|
modules.write(module)
|
||||||
|
|
||||||
|
|
||||||
|
def rmmod(module, force=False):
|
||||||
|
"""Remove a module from the linux kernel"""
|
||||||
|
cmd = ['rmmod']
|
||||||
|
if force:
|
||||||
|
cmd.append('-f')
|
||||||
|
cmd.append(module)
|
||||||
|
log('Removing kernel module %s' % module, level=INFO)
|
||||||
|
return check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def lsmod():
|
||||||
|
"""Shows what kernel modules are currently loaded"""
|
||||||
|
return check_output(['lsmod'],
|
||||||
|
universal_newlines=True)
|
||||||
|
|
||||||
|
|
||||||
|
def is_module_loaded(module):
|
||||||
|
"""Checks if a kernel module is already loaded"""
|
||||||
|
matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
|
||||||
|
return len(matches) > 0
|
||||||
|
|
||||||
|
|
||||||
|
def update_initramfs(version='all'):
|
||||||
|
"""Updates an initramfs image"""
|
||||||
|
return check_call(["update-initramfs", "-k", version, "-u"])
|
|
@@ -16,7 +16,9 @@
 import os
 import yaml

 from charmhelpers.core import hookenv
+from charmhelpers.core import host
 from charmhelpers.core import templating

 from charmhelpers.core.services.base import ManagerCallback

@@ -240,27 +242,50 @@ class TemplateCallback(ManagerCallback):

     :param str source: The template source file, relative to
         `$CHARM_DIR/templates`
-    :param str target: The target to write the rendered template to
+    :param str target: The target to write the rendered template to (or None)
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
+    :param partial on_change_action: functools partial to be executed when
+                                     rendered file changes
+    :param jinja2 loader template_loader: A jinja2 template loader
+
+    :return str: The rendered template
     """
     def __init__(self, source, target,
-                 owner='root', group='root', perms=0o444):
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None, template_loader=None):
         self.source = source
         self.target = target
         self.owner = owner
         self.group = group
         self.perms = perms
+        self.on_change_action = on_change_action
+        self.template_loader = template_loader

     def __call__(self, manager, service_name, event_name):
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
         service = manager.get_service(service_name)
-        context = {}
+        context = {'ctx': {}}
         for ctx in service.get('required_data', []):
             context.update(ctx)
-        templating.render(self.source, self.target, context,
-                          self.owner, self.group, self.perms)
+            context['ctx'].update(ctx)
+
+        result = templating.render(self.source, self.target, context,
+                                   self.owner, self.group, self.perms,
+                                   template_loader=self.template_loader)
+        if self.on_change_action:
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
+
+        return result


 # Convenience aliases for templates
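A hedged sketch of how a charm using the services framework might pick up the new arguments; the service name, file paths and restart callable here are illustrative, and the example assumes the usual ServiceManager entry point from charmhelpers.core.services.base:

    from functools import partial

    from charmhelpers.core import host
    from charmhelpers.core.services.base import ServiceManager
    from charmhelpers.core.services.helpers import TemplateCallback

    # Re-render nginx.conf on data_ready and only restart nginx when the
    # rendered file actually changed (the pre/post checksum check above).
    manager = ServiceManager([{
        'service': 'nginx',
        'data_ready': [
            TemplateCallback(
                source='nginx.conf',
                target='/etc/nginx/nginx.conf',
                on_change_action=partial(host.service_restart, 'nginx')),
        ],
    }])
    manager.manage()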
@@ -18,6 +18,7 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

 import six
+import re


 def bool_from_string(value):
@@ -40,3 +41,32 @@ def bool_from_string(value):

     msg = "Unable to interpret string value '%s' as boolean" % (value)
     raise ValueError(msg)
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
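For illustration, the suffix selects a power of 1024, so for example:

    from charmhelpers.core.strutils import bytes_from_string

    bytes_from_string('10KB')   # 10 * 1024      == 10240
    bytes_from_string('2G')     # 2 * 1024 ** 3  == 2147483648
    bytes_from_string('512')    # no suffix -> ValueError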
@@ -21,13 +21,14 @@ from charmhelpers.core import hookenv


 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8'):
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
     """
     Render a template.

     The `source` path, if not absolute, is relative to the `templates_dir`.

-    The `target` path should be absolute.
+    The `target` path should be absolute.  It can also be `None`, in which
+    case no file will be written.

     The context should be a dict containing the values to be replaced in the
     template.
@@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root',

     If omitted, `templates_dir` defaults to the `templates` folder in the charm.

+    The rendered template will be written to the file as well as being returned
+    as a string.
+
     Note: Using this requires python-jinja2; if it is not installed, calling
     this will attempt to use charmhelpers.fetch.apt_install to install it.
     """
@@ -52,17 +56,26 @@ def render(source, target, context, owner='root', group='root',
         apt_install('python-jinja2', fatal=True)
         from jinja2 import FileSystemLoader, Environment, exceptions

-    if templates_dir is None:
-        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
-    loader = Environment(loader=FileSystemLoader(templates_dir))
+    if template_loader:
+        template_env = Environment(loader=template_loader)
+    else:
+        if templates_dir is None:
+            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+        template_env = Environment(loader=FileSystemLoader(templates_dir))
     try:
         source = source
-        template = loader.get_template(source)
+        template = template_env.get_template(source)
     except exceptions.TemplateNotFound as e:
         hookenv.log('Could not load template %s from %s.' %
                     (source, templates_dir),
                     level=hookenv.ERROR)
         raise e
     content = template.render(context)
-    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
-    host.write_file(target, content.encode(encoding), owner, group, perms)
+    if target is not None:
+        target_dir = os.path.dirname(target)
+        if not os.path.exists(target_dir):
+            # This is a terrible default directory permission, as the file
+            # or its siblings will often contain secrets.
+            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+        host.write_file(target, content.encode(encoding), owner, group, perms)
+    return content
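A hedged example of the two new behaviours (the template name, context and loader paths are illustrative): a caller can now supply its own jinja2 loader instead of the charm's templates directory, and can pass target=None to get the rendered text back without writing a file.

    from jinja2 import ChoiceLoader, FileSystemLoader
    from charmhelpers.core.templating import render

    # Search a charm-local directory first, then a shared one (made-up paths).
    loader = ChoiceLoader([
        FileSystemLoader('templates'),
        FileSystemLoader('/usr/share/my-charm/templates'),
    ])

    # Rendered to a string only; nothing is written because target is None.
    content = render('haproxy.cfg', None, {'port': 8080},
                     template_loader=loader)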
@@ -152,6 +152,7 @@ associated to the hookname.
 import collections
 import contextlib
 import datetime
+import itertools
 import json
 import os
 import pprint
@@ -164,8 +165,7 @@ __author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
 class Storage(object):
     """Simple key value database for local unit state within charms.

-    Modifications are automatically committed at hook exit. That's
-    currently regardless of exit code.
+    Modifications are not persisted unless :meth:`flush` is called.

     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
@@ -173,6 +173,9 @@ class Storage(object):
     def __init__(self, path=None):
         self.db_path = path
         if path is None:
+            if 'UNIT_STATE_DB' in os.environ:
+                self.db_path = os.environ['UNIT_STATE_DB']
+            else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
         self.conn = sqlite3.connect('%s' % self.db_path)
@@ -189,15 +192,8 @@ class Storage(object):
         self.conn.close()
         self._closed = True

-    def _scoped_query(self, stmt, params=None):
-        if params is None:
-            params = []
-        return stmt, params
-
     def get(self, key, default=None, record=False):
-        self.cursor.execute(
-            *self._scoped_query(
-                'select data from kv where key=?', [key]))
+        self.cursor.execute('select data from kv where key=?', [key])
         result = self.cursor.fetchone()
         if not result:
             return default
@@ -206,33 +202,81 @@ class Storage(object):
         return json.loads(result[0])

     def getrange(self, key_prefix, strip=False):
-        stmt = "select key, data from kv where key like '%s%%'" % key_prefix
-        self.cursor.execute(*self._scoped_query(stmt))
+        """
+        Get a range of keys starting with a common prefix as a mapping of
+        keys to values.
+
+        :param str key_prefix: Common prefix among all keys
+        :param bool strip: Optionally strip the common prefix from the key
+            names in the returned dict
+        :return dict: A (possibly empty) dict of key-value mappings
+        """
+        self.cursor.execute("select key, data from kv where key like ?",
+                            ['%s%%' % key_prefix])
         result = self.cursor.fetchall()

         if not result:
-            return None
+            return {}
         if not strip:
             key_prefix = ''
         return dict([
             (k[len(key_prefix):], json.loads(v)) for k, v in result])

     def update(self, mapping, prefix=""):
+        """
+        Set the values of multiple keys at once.
+
+        :param dict mapping: Mapping of keys to values
+        :param str prefix: Optional prefix to apply to all keys in `mapping`
+            before setting
+        """
         for k, v in mapping.items():
             self.set("%s%s" % (prefix, k), v)

     def unset(self, key):
+        """
+        Remove a key from the database entirely.
+        """
         self.cursor.execute('delete from kv where key=?', [key])
         if self.revision and self.cursor.rowcount:
             self.cursor.execute(
                 'insert into kv_revisions values (?, ?, ?)',
                 [key, self.revision, json.dumps('DELETED')])

+    def unsetrange(self, keys=None, prefix=""):
+        """
+        Remove a range of keys starting with a common prefix, from the database
+        entirely.
+
+        :param list keys: List of keys to remove.
+        :param str prefix: Optional prefix to apply to all keys in ``keys``
+            before removing.
+        """
+        if keys is not None:
+            keys = ['%s%s' % (prefix, key) for key in keys]
+            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
+            if self.revision and self.cursor.rowcount:
+                self.cursor.execute(
+                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
+                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
+        else:
+            self.cursor.execute('delete from kv where key like ?',
+                                ['%s%%' % prefix])
+            if self.revision and self.cursor.rowcount:
+                self.cursor.execute(
+                    'insert into kv_revisions values (?, ?, ?)',
+                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
+
     def set(self, key, value):
+        """
+        Set a value in the database.
+
+        :param str key: Key to set the value for
+        :param value: Any JSON-serializable value to be set
+        """
         serialized = json.dumps(value)

-        self.cursor.execute(
-            'select data from kv where key=?', [key])
+        self.cursor.execute('select data from kv where key=?', [key])
         exists = self.cursor.fetchone()

         # Skip mutations to the same value
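A small sketch of the prefix-oriented API touched above (the key names are illustrative). Per the reworded docstring, nothing is persisted until flush() is called.

    from charmhelpers.core.unitdata import kv

    db = kv()
    db.update({'eth0': '10.0.0.1', 'eth1': '10.0.0.2'}, prefix='nic.')

    # Returns {'eth0': ..., 'eth1': ...}; an empty dict (not None) when
    # nothing matches, per the change to getrange() above.
    nics = db.getrange('nic.', strip=True)

    db.unsetrange(['eth1'], prefix='nic.')  # remove selected keys
    db.flush()                              # persist the changes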
@@ -90,6 +90,22 @@ CLOUD_ARCHIVE_POCKETS = {
     'kilo/proposed': 'trusty-proposed/kilo',
     'trusty-kilo/proposed': 'trusty-proposed/kilo',
     'trusty-proposed/kilo': 'trusty-proposed/kilo',
+    # Liberty
+    'liberty': 'trusty-updates/liberty',
+    'trusty-liberty': 'trusty-updates/liberty',
+    'trusty-liberty/updates': 'trusty-updates/liberty',
+    'trusty-updates/liberty': 'trusty-updates/liberty',
+    'liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-proposed/liberty': 'trusty-proposed/liberty',
+    # Mitaka
+    'mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-updates/mitaka': 'trusty-updates/mitaka',
+    'mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
 }

 # The order of this list is very important. Handlers should be listed in from
@@ -217,12 +233,12 @@ def apt_purge(packages, fatal=False):

 def apt_mark(packages, mark, fatal=False):
     """Flag one or more packages using apt-mark"""
+    log("Marking {} as {}".format(packages, mark))
     cmd = ['apt-mark', mark]
     if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
-    log("Holding {}".format(packages))

     if fatal:
         subprocess.check_call(cmd, universal_newlines=True)
@@ -403,7 +419,7 @@ def plugins(fetch_handlers=None):
                 importlib.import_module(package),
                 classname)
             plugin_list.append(handler_class())
-        except (ImportError, AttributeError):
+        except NotImplementedError:
             # Skip missing plugins so that they can be ommitted from
             # installation if desired
             log("FetchHandler {} not found, skipping plugin".format(
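For context, these pocket aliases mean an install source such as cloud:trusty-mitaka resolves to the trusty-updates/mitaka Ubuntu Cloud Archive. A hedged sketch of the surrounding fetch helpers (the package name is illustrative):

    from charmhelpers.fetch import (
        add_source,
        apt_update,
        apt_install,
        apt_mark,
    )

    add_source('cloud:trusty-mitaka')   # looked up in CLOUD_ARCHIVE_POCKETS
    apt_update(fatal=True)
    apt_install(['neutron-common'], fatal=True)

    # apt_mark now logs "Marking ... as hold" before invoking apt-mark.
    apt_mark('neutron-common', 'hold', fatal=True)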
@@ -108,7 +108,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         install_opener(opener)
         response = urlopen(source)
         try:
-            with open(dest, 'w') as dest_file:
+            with open(dest, 'wb') as dest_file:
                 dest_file.write(response.read())
         except Exception as e:
             if os.path.isfile(dest):
@@ -15,60 +15,50 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

 import os
+from subprocess import check_call
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    apt_install,
 )
 from charmhelpers.core.host import mkdir

-import six
-if six.PY3:
-    raise ImportError('bzrlib does not support Python3')
-
-try:
-    from bzrlib.branch import Branch
-    from bzrlib import bzrdir, workingtree, errors
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-bzrlib")
-    from bzrlib.branch import Branch
-    from bzrlib import bzrdir, workingtree, errors
+if filter_installed_packages(['bzr']) != []:
+    apt_install(['bzr'])
+    if filter_installed_packages(['bzr']) != []:
+        raise NotImplementedError('Unable to install bzr')


 class BzrUrlFetchHandler(BaseFetchHandler):
     """Handler for bazaar branches via generic and lp URLs"""
     def can_handle(self, source):
         url_parts = self.parse_url(source)
-        if url_parts.scheme not in ('bzr+ssh', 'lp'):
+        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.bzr'))
         else:
             return True

     def branch(self, source, dest):
-        url_parts = self.parse_url(source)
-        # If we use lp:branchname scheme we need to load plugins
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))
-        if url_parts.scheme == "lp":
-            from bzrlib.plugin import load_plugins
-            load_plugins()
-        try:
-            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
-        except errors.AlreadyControlDirError:
-            local_branch = Branch.open(dest)
-        try:
-            remote_branch = Branch.open(source)
-            remote_branch.push(local_branch)
-            tree = workingtree.WorkingTree.open(dest)
-            tree.update()
-        except Exception as e:
-            raise e
+        if os.path.exists(dest):
+            check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
+        else:
+            check_call(['bzr', 'branch', source, dest])

-    def install(self, source):
+    def install(self, source, dest=None):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
             dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                     branch_name)
+
         if not os.path.exists(dest_dir):
             mkdir(dest_dir, perms=0o755)
         try:
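A hedged sketch of the reworked handler (the branch URL and destination are illustrative): it now drives the bzr command line, branching on first use and pulling with --overwrite when the destination already exists.

    from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

    handler = BzrUrlFetchHandler()
    if handler.can_handle('lp:charm-helpers'):
        # First call runs 'bzr branch'; a later call with the same dest
        # runs 'bzr pull --overwrite' instead of failing.
        handler.branch('lp:charm-helpers', '/tmp/charm-helpers')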
@@ -15,24 +15,18 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

 import os
+from subprocess import check_call, CalledProcessError
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    apt_install,
 )
-from charmhelpers.core.host import mkdir

-import six
-if six.PY3:
-    raise ImportError('GitPython does not support Python 3')
-
-try:
-    from git import Repo
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-git")
-    from git import Repo
-
-from git.exc import GitCommandError  # noqa E402
+if filter_installed_packages(['git']) != []:
+    apt_install(['git'])
+    if filter_installed_packages(['git']) != []:
+        raise NotImplementedError('Unable to install git')


 class GitUrlFetchHandler(BaseFetchHandler):
@@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler):
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         # TODO (mattyw) no support for ssh git@ yet
-        if url_parts.scheme not in ('http', 'https', 'git'):
+        if url_parts.scheme not in ('http', 'https', 'git', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.git'))
         else:
             return True

-    def clone(self, source, dest, branch, depth=None):
+    def clone(self, source, dest, branch="master", depth=None):
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))

-        if depth:
-            Repo.clone_from(source, dest, branch=branch, depth=depth)
+        if os.path.exists(dest):
+            cmd = ['git', '-C', dest, 'pull', source, branch]
         else:
-            Repo.clone_from(source, dest, branch=branch)
+            cmd = ['git', 'clone', source, dest, '--branch', branch]
+            if depth:
+                cmd.extend(['--depth', depth])
+        check_call(cmd)

     def install(self, source, branch="master", dest=None, depth=None):
         url_parts = self.parse_url(source)
@@ -62,11 +61,9 @@ class GitUrlFetchHandler(BaseFetchHandler):
         else:
             dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                     branch_name)
-        if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0o755)
         try:
             self.clone(source, dest_dir, branch, depth)
-        except GitCommandError as e:
+        except CalledProcessError as e:
             raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
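Similarly for git, a hedged sketch (the repository URL and path are made up): the handler now builds a git command line instead of using GitPython, so a second call against an existing checkout turns into a git pull, and failures surface as CalledProcessError wrapped in UnhandledSource.

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    # Clones on first use; re-running with the same destination pulls instead.
    handler.clone('https://example.com/some/repo.git',
                  '/tmp/repo', branch='master')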
@@ -87,6 +87,21 @@ def plumgrid_joined(relation_id=None):
     relation_set(relation_id=relation_id, opsvm_ip=opsvm_ip)


+@hooks.hook('plumgrid-configs-relation-joined')
+def plumgrid_configs_joined(relation_id=None):
+    '''
+    This hook is run when relation with neutron-api-plumgrid is created.
+    '''
+    relation_settings = {
+        'plumgrid_virtual_ip': config('plumgrid-virtual-ip'),
+        'plumgrid_username': config('plumgrid-username'),
+        'plumgrid_password': config('plumgrid-password'),
+    }
+    if is_leader():
+        relation_set(relation_id=relation_id,
+                     relation_settings=relation_settings)
+
+
 @hooks.hook('config-changed')
 def config_changed():
     '''
@@ -108,6 +123,12 @@ def config_changed():
     if charm_config.changed('plumgrid-virtual-ip'):
         CONFIGS.write_all()
         stop_pg()
+        for rid in relation_ids('plumgrid-configs'):
+            plumgrid_configs_joined(rid)
+    if (charm_config.changed('plumgrid-username') or
+            charm_config.changed('plumgrid-password')):
+        for rid in relation_ids('plumgrid-configs'):
+            plumgrid_configs_joined(rid)
     if (charm_config.changed('install_sources') or
             charm_config.changed('plumgrid-build') or
             charm_config.changed('install_keys') or
@@ -156,8 +156,9 @@ def restart_pg():
             raise ValueError("plumgrid service couldn't be started")
     else:
         if service_start('libvirt-bin'):
-            time.sleep(3)
-            if not service_running('plumgrid'):
+            time.sleep(8)
+            if not service_running('plumgrid') \
+                    and not service_start('plumgrid'):
                 raise ValueError("plumgrid service couldn't be started")
         else:
             raise ValueError("libvirt-bin service couldn't be started")
@@ -0,0 +1 @@
+pg_dir_hooks.py
@@ -15,6 +15,8 @@ requires:
 provides:
   plumgrid:
     interface: plumgrid
+  plumgrid-configs:
+    interface: plumgrid-configs
 peers:
   director:
     interface: director
@@ -71,10 +71,14 @@ server {
         proxy_set_header Host $host;
     }

-    location /cloudApex/ {
+    location ~ /cloudApex {
         index index.html;
     }

+    location ~* /cloudapex {
+        rewrite (?i)/cloudapex(.*)$ /cloudApex$1 last;
+    }
+
     location /vtap/ {
         alias /opt/pg/vtap;
     }
@@ -31,7 +31,8 @@ TO_PATCH = [
    'determine_packages',
    'post_pg_license',
    'config',
-    'load_iptables'
+    'load_iptables',
+    'status_set'
 ]
 NEUTRON_CONF_DIR = "/etc/neutron"
