make sync

Junaid Ali 2016-04-22 04:15:33 -04:00
parent 1dee6f9daa
commit c0d2d9c303
43 changed files with 4130 additions and 159 deletions

bin/charm_helpers_sync.py Normal file

@@ -0,0 +1,253 @@
#!/usr/bin/python
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Authors:
# Adam Gandelman <adamg@ubuntu.com>
import logging
import optparse
import os
import subprocess
import shutil
import sys
import tempfile
import yaml
from fnmatch import fnmatch
import six
CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
def parse_config(conf_file):
if not os.path.isfile(conf_file):
logging.error('Invalid config file: %s.' % conf_file)
return False
return yaml.load(open(conf_file).read())
def clone_helpers(work_dir, branch):
dest = os.path.join(work_dir, 'charm-helpers')
logging.info('Checking out %s to %s.' % (branch, dest))
cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
subprocess.check_call(cmd)
return dest
def _module_path(module):
return os.path.join(*module.split('.'))
def _src_path(src, module):
return os.path.join(src, 'charmhelpers', _module_path(module))
def _dest_path(dest, module):
return os.path.join(dest, _module_path(module))
def _is_pyfile(path):
return os.path.isfile(path + '.py')
def ensure_init(path):
'''
ensure that the directories leading up to path are importable, omitting the
parent directory, e.g. path='/hooks/helpers/foo':
hooks/
hooks/helpers/__init__.py
hooks/helpers/foo/__init__.py
'''
for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
_i = os.path.join(d, '__init__.py')
if not os.path.exists(_i):
logging.info('Adding missing __init__.py: %s' % _i)
open(_i, 'wb').close()
def sync_pyfile(src, dest):
src = src + '.py'
src_dir = os.path.dirname(src)
logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copy(src, dest)
if os.path.isfile(os.path.join(src_dir, '__init__.py')):
shutil.copy(os.path.join(src_dir, '__init__.py'),
dest)
ensure_init(dest)
def get_filter(opts=None):
opts = opts or []
if 'inc=*' in opts:
# do not filter any files, include everything
return None
def _filter(dir, ls):
incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
_filter = []
for f in ls:
_f = os.path.join(dir, f)
if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
if True not in [fnmatch(_f, inc) for inc in incs]:
logging.debug('Not syncing %s, does not match include '
'filters (%s)' % (_f, incs))
_filter.append(f)
else:
logging.debug('Including file, which matches include '
'filters (%s): %s' % (incs, _f))
elif (os.path.isfile(_f) and not _f.endswith('.py')):
logging.debug('Not syncing file: %s' % f)
_filter.append(f)
elif (os.path.isdir(_f) and not
os.path.isfile(os.path.join(_f, '__init__.py'))):
logging.debug('Not syncing directory: %s' % f)
_filter.append(f)
return _filter
return _filter
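To illustrate the include-filter behaviour above, a small hypothetical example (directory and patterns are made up): .py files are always kept, files matching an 'inc=' pattern are kept, everything else is ignored.
# Hypothetical example of the ignore callable returned by get_filter():
ignore = get_filter(['inc=*.json'])
ignore('templates', ['a.py', 'rules.json', 'notes.txt'])
# -> ['notes.txt']  (a.py is always synced, rules.json matches 'inc=*.json')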
def sync_directory(src, dest, opts=None):
if os.path.exists(dest):
logging.debug('Removing existing directory: %s' % dest)
shutil.rmtree(dest)
logging.info('Syncing directory: %s -> %s.' % (src, dest))
shutil.copytree(src, dest, ignore=get_filter(opts))
ensure_init(dest)
def sync(src, dest, module, opts=None):
# Sync charmhelpers/__init__.py for bootstrap code.
sync_pyfile(_src_path(src, '__init__'), dest)
# Sync other __init__.py files in the path leading to module.
m = []
steps = module.split('.')[:-1]
while steps:
m.append(steps.pop(0))
init = '.'.join(m + ['__init__'])
sync_pyfile(_src_path(src, init),
os.path.dirname(_dest_path(dest, init)))
# Sync the module, or maybe a .py file.
if os.path.isdir(_src_path(src, module)):
sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
elif _is_pyfile(_src_path(src, module)):
sync_pyfile(_src_path(src, module),
os.path.dirname(_dest_path(dest, module)))
else:
logging.warn('Could not sync: %s. Neither a pyfile nor a directory, '
'does it even exist?' % module)
def parse_sync_options(options):
if not options:
return []
return options.split(',')
def extract_options(inc, global_options=None):
global_options = global_options or []
if global_options and isinstance(global_options, six.string_types):
global_options = [global_options]
if '|' not in inc:
return (inc, global_options)
inc, opts = inc.split('|')
return (inc, parse_sync_options(opts) + global_options)
def sync_helpers(include, src, dest, options=None):
if not os.path.isdir(dest):
os.makedirs(dest)
global_options = parse_sync_options(options)
for inc in include:
if isinstance(inc, str):
inc, opts = extract_options(inc, global_options)
sync(src, dest, inc, opts)
elif isinstance(inc, dict):
# could also do nested dicts here.
for k, v in six.iteritems(inc):
if isinstance(v, list):
for m in v:
inc, opts = extract_options(m, global_options)
sync(src, dest, '%s.%s' % (k, inc), opts)
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-c', '--config', action='store', dest='config',
default=None, help='helper config file')
parser.add_option('-D', '--debug', action='store_true', dest='debug',
default=False, help='debug')
parser.add_option('-b', '--branch', action='store', dest='branch',
help='charm-helpers bzr branch (overrides config)')
parser.add_option('-d', '--destination', action='store', dest='dest_dir',
help='sync destination dir (overrides config)')
(opts, args) = parser.parse_args()
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if opts.config:
logging.info('Loading charm helper config from %s.' % opts.config)
config = parse_config(opts.config)
if not config:
logging.error('Could not parse config from %s.' % opts.config)
sys.exit(1)
else:
config = {}
if 'branch' not in config:
config['branch'] = CHARM_HELPERS_BRANCH
if opts.branch:
config['branch'] = opts.branch
if opts.dest_dir:
config['destination'] = opts.dest_dir
if 'destination' not in config:
logging.error('No destination dir. specified as option or config.')
sys.exit(1)
if 'include' not in config:
if not args:
logging.error('No modules to sync specified as option or config.')
sys.exit(1)
config['include'] = []
[config['include'].append(a) for a in args]
sync_options = None
if 'options' in config:
sync_options = config['options']
tmpd = tempfile.mkdtemp()
try:
checkout = clone_helpers(tmpd, config['branch'])
sync_helpers(config['include'], checkout, config['destination'],
options=sync_options)
except Exception as e:
logging.error("Could not sync: %s" % e)
raise e
finally:
logging.debug('Cleaning up %s' % tmpd)
shutil.rmtree(tmpd)
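For context, parse_config() above reads a small YAML file; a minimal sketch of the equivalent structure follows, written as a Python dict. The destination and module names are illustrative only, and the branch is just the CHARM_HELPERS_BRANCH default.
# Illustrative only: the structure parse_config()/sync_helpers() consume.
# 'include' entries may be plain module strings, strings with '|'-separated
# options (e.g. 'inc=*' disables file filtering), or dicts mapping a parent
# module to a list of submodules.
example_config = {
    'branch': 'lp:charm-helpers',
    'destination': 'hooks/charmhelpers',
    'include': [
        'core',
        'fetch',
        {'contrib': ['hardening|inc=*']},
    ],
}
With such a config, the script checks the branch out into a temporary directory and calls sync_helpers(config['include'], checkout, config['destination']).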


@@ -601,7 +601,7 @@ class AmuletUtils(object):
return ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(expected), len(actual)))
- for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
+ for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
zip(e_proc_names.items(), a_proc_names.items()):
if e_proc_name != a_proc_name:
return ('Process name mismatch. expected, actual: {}, '
@@ -610,25 +610,31 @@ class AmuletUtils(object):
a_pids_length = len(a_pids)
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
- e_pids_length, a_pids_length,
+ e_pids, a_pids_length,
a_pids))
- # If expected is not bool, ensure PID quantities match
- if not isinstance(e_pids_length, bool) and \
- a_pids_length != e_pids_length:
+ # If expected is a list, ensure at least one PID quantity match
+ if isinstance(e_pids, list) and \
+ a_pids_length not in e_pids:
return fail_msg
+ # If expected is not bool and not list,
+ # ensure PID quantities match
+ elif not isinstance(e_pids, bool) and \
+ not isinstance(e_pids, list) and \
+ a_pids_length != e_pids:
+ return fail_msg
# If expected is bool True, ensure 1 or more PIDs exist
- elif isinstance(e_pids_length, bool) and \
- e_pids_length is True and a_pids_length < 1:
+ elif isinstance(e_pids, bool) and \
+ e_pids is True and a_pids_length < 1:
return fail_msg
# If expected is bool False, ensure 0 PIDs exist
- elif isinstance(e_pids_length, bool) and \
- e_pids_length is False and a_pids_length != 0:
+ elif isinstance(e_pids, bool) and \
+ e_pids is False and a_pids_length != 0:
return fail_msg
else:
self.log.debug('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name, e_proc_name,
- e_pids_length, a_pids))
+ e_pids, a_pids))
return None
def validate_list_of_identical_dicts(self, list_of_dicts):
@@ -782,15 +788,20 @@ class AmuletUtils(object):
# amulet juju action helpers:
def run_action(self, unit_sentry, action,
- _check_output=subprocess.check_output):
+ _check_output=subprocess.check_output,
+ params=None):
"""Run the named action on a given unit sentry.
+ params a dict of parameters to use
_check_output parameter is used for dependency injection.
@return action_id.
"""
unit_id = unit_sentry.info["unit_name"]
command = ["juju", "action", "do", "--format=json", unit_id, action]
+ if params is not None:
+ for key, value in params.iteritems():
+ command.append("{}={}".format(key, value))
self.log.info("Running command: %s\n" % " ".join(command))
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
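To show what the new params support adds, a hypothetical call (the action name and parameter are made up):
# action_id = amulet_utils.run_action(sentry_unit, 'pause',
#                                     params={'force': 'true'})
# builds and runs: juju action do --format=json <unit> pause force=true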


@@ -0,0 +1,15 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -0,0 +1,19 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from os import path
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')


@@ -0,0 +1,31 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
from charmhelpers.contrib.hardening.apache.checks import config
def run_apache_checks():
log("Starting Apache hardening checks.", level=DEBUG)
checks = config.get_audits()
for check in checks:
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
check.ensure_compliance()
log("Apache hardening checks complete.", level=DEBUG)


@@ -0,0 +1,100 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import subprocess
from charmhelpers.core.hookenv import (
log,
INFO,
)
from charmhelpers.contrib.hardening.audits.file import (
FilePermissionAudit,
DirectoryPermissionAudit,
NoReadWriteForOther,
TemplatedFile,
)
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get Apache hardening config audits.
:returns: list of audits
"""
if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
log("Apache server does not appear to be installed on this node - "
"skipping apache hardening", level=INFO)
return []
context = ApacheConfContext()
settings = utils.get_settings('apache')
audits = [
FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root',
group='root', mode=0o0640),
TemplatedFile(os.path.join(settings['common']['apache_dir'],
'mods-available/alias.conf'),
context,
TEMPLATES_DIR,
mode=0o0755,
user='root',
service_actions=[{'service': 'apache2',
'actions': ['restart']}]),
TemplatedFile(os.path.join(settings['common']['apache_dir'],
'conf-enabled/hardening.conf'),
context,
TEMPLATES_DIR,
mode=0o0640,
user='root',
service_actions=[{'service': 'apache2',
'actions': ['restart']}]),
DirectoryPermissionAudit(settings['common']['apache_dir'],
user='root',
group='root',
mode=0o640),
DisabledModuleAudit(settings['hardening']['modules_to_disable']),
NoReadWriteForOther(settings['common']['apache_dir']),
]
return audits
class ApacheConfContext(object):
"""Defines the set of key/value pairs to set in a apache config file.
This context, when called, will return a dictionary containing the
key/value pairs of setting to specify in the
/etc/apache/conf-enabled/hardening.conf file.
"""
def __call__(self):
settings = utils.get_settings('apache')
ctxt = settings['hardening']
out = subprocess.check_output(['apache2', '-v'])
ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
out).group(1)
ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
ctxt['traceenable'] = settings['hardening']['traceenable']
return ctxt
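The settings lookups above imply a layout roughly like the following; the concrete values are assumptions for illustration, not the shipped defaults.
# Assumed shape of utils.get_settings('apache') based on the keys used above.
settings = {
    'common': {'apache_dir': '/etc/apache2'},
    'hardening': {
        'modules_to_disable': ['autoindex'],
        'traceenable': 'Off',
    },
}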


@@ -0,0 +1,63 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
class BaseAudit(object): # NO-QA
"""Base class for hardening checks.
The lifecycle of a hardening check is to first check to see if the system
is in compliance for the specified check. If it is not in compliance, the
comply method is called to bring the system back into compliance.
"""
def __init__(self, *args, **kwargs):
self.unless = kwargs.get('unless', None)
super(BaseAudit, self).__init__()
def ensure_compliance(self):
"""Checks to see if the current hardening check is in compliance or
not.
If the check that is performed is not in compliance, then an exception
should be raised.
"""
pass
def _take_action(self):
"""Determines whether to perform the action or not.
Checks whether or not an action should be taken. This is determined by
the truthy value for the unless parameter. If unless is a callback
method, it will be invoked with no parameters in order to determine
whether or not the action should be taken. Otherwise, the truthy value
of the unless attribute will determine if the action should be
performed.
"""
# Do the action if there isn't an unless override.
if self.unless is None:
return True
# Invoke the callback if there is one.
if hasattr(self.unless, '__call__'):
results = self.unless()
if results:
return False
else:
return True
if self.unless:
return False
else:
return True
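A small sketch of the 'unless' behaviour described above (the callback name is hypothetical):
# 'unless' as a callable: a truthy result skips the action.
def already_hardened():
    return False

BaseAudit(unless=already_hardened)._take_action()  # -> True, action proceeds
BaseAudit(unless=True)._take_action()              # -> False, action skipped
BaseAudit()._take_action()                         # -> True, no override given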


@@ -0,0 +1,100 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import re
import subprocess
from six import string_types
from charmhelpers.core.hookenv import (
log,
INFO,
ERROR,
)
from charmhelpers.contrib.hardening.audits import BaseAudit
class DisabledModuleAudit(BaseAudit):
"""Audits Apache2 modules.
Determines whether the apache2 modules are enabled. If any of the modules
are enabled, they are disabled during ensure_compliance.
"""
def __init__(self, modules):
if modules is None:
self.modules = []
elif isinstance(modules, string_types):
self.modules = [modules]
else:
self.modules = modules
def ensure_compliance(self):
"""Ensures that the modules are not loaded."""
if not self.modules:
return
try:
loaded_modules = self._get_loaded_modules()
non_compliant_modules = []
for module in self.modules:
if module in loaded_modules:
log("Module '%s' is enabled but should not be." %
(module), level=INFO)
non_compliant_modules.append(module)
if len(non_compliant_modules) == 0:
return
for module in non_compliant_modules:
self._disable_module(module)
self._restart_apache()
except subprocess.CalledProcessError as e:
log('Error occurred auditing apache module compliance. '
'This may have been already reported. '
'Output is: %s' % e.output, level=ERROR)
@staticmethod
def _get_loaded_modules():
"""Returns the modules which are enabled in Apache."""
output = subprocess.check_output(['apache2ctl', '-M'])
modules = []
for line in output.strip().split():
# Each line of the enabled module output looks like:
# module_name (static|shared)
# Plus a header line at the top of the output which is stripped
# out by the regex.
matcher = re.search(r'^ (\S*)', line)
if matcher:
modules.append(matcher.group(1))
return modules
@staticmethod
def _disable_module(module):
"""Disables the specified module in Apache."""
try:
subprocess.check_call(['a2dismod', module])
except subprocess.CalledProcessError as e:
# Note: catch error here to allow the attempt of disabling
# multiple modules in one go rather than failing after the
# first module fails.
log('Error occurred disabling module %s. '
'Output is: %s' % (module, e.output), level=ERROR)
@staticmethod
def _restart_apache():
"""Restarts the apache process"""
subprocess.check_output(['service', 'apache2', 'restart'])
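A hedged usage sketch (the module names are examples only):
# Disable these modules if apache2ctl reports them as loaded, then restart.
DisabledModuleAudit(['autoindex', 'status']).ensure_compliance()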


@@ -0,0 +1,105 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import # required for external apt import
from apt import apt_pkg
from six import string_types
from charmhelpers.fetch import (
apt_cache,
apt_purge
)
from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
)
from charmhelpers.contrib.hardening.audits import BaseAudit
class AptConfig(BaseAudit):
def __init__(self, config, **kwargs):
self.config = config
def verify_config(self):
apt_pkg.init()
for cfg in self.config:
value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
if value and value != cfg['expected']:
log("APT config '%s' has unexpected value '%s' "
"(expected='%s')" %
(cfg['key'], value, cfg['expected']), level=WARNING)
def ensure_compliance(self):
self.verify_config()
class RestrictedPackages(BaseAudit):
"""Class used to audit restricted packages on the system."""
def __init__(self, pkgs, **kwargs):
super(RestrictedPackages, self).__init__(**kwargs)
if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
self.pkgs = [pkgs]
else:
self.pkgs = pkgs
def ensure_compliance(self):
cache = apt_cache()
for p in self.pkgs:
if p not in cache:
continue
pkg = cache[p]
if not self.is_virtual_package(pkg):
if not pkg.current_ver:
log("Package '%s' is not installed." % pkg.name,
level=DEBUG)
continue
else:
log("Restricted package '%s' is installed" % pkg.name,
level=WARNING)
self.delete_package(cache, pkg)
else:
log("Checking restricted virtual package '%s' provides" %
pkg.name, level=DEBUG)
self.delete_package(cache, pkg)
def delete_package(self, cache, pkg):
"""Deletes the package from the system.
Deletes the package from the system, properly handling virtual
packages.
:param cache: the apt cache
:param pkg: the package to remove
"""
if self.is_virtual_package(pkg):
log("Package '%s' appears to be virtual - purging provides" %
pkg.name, level=DEBUG)
for _p in pkg.provides_list:
self.delete_package(cache, _p[2].parent_pkg)
elif not pkg.current_ver:
log("Package '%s' not installed" % pkg.name, level=DEBUG)
return
else:
log("Purging package '%s'" % pkg.name, level=DEBUG)
apt_purge(pkg.name)
def is_virtual_package(self, pkg):
return pkg.has_provides and not pkg.has_versions
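As a usage sketch (the package name is illustrative), the audit purges any listed package that is installed, following provides when the name is virtual:
# Hypothetical: make sure telnetd is not installed on the unit.
RestrictedPackages(['telnetd']).ensure_compliance()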


@@ -0,0 +1,552 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import grp
import os
import pwd
import re
from subprocess import (
CalledProcessError,
check_output,
check_call,
)
from traceback import format_exc
from six import string_types
from stat import (
S_ISGID,
S_ISUID
)
from charmhelpers.core.hookenv import (
log,
DEBUG,
INFO,
WARNING,
ERROR,
)
from charmhelpers.core import unitdata
from charmhelpers.core.host import file_hash
from charmhelpers.contrib.hardening.audits import BaseAudit
from charmhelpers.contrib.hardening.templating import (
get_template_path,
render_and_write,
)
from charmhelpers.contrib.hardening import utils
class BaseFileAudit(BaseAudit):
"""Base class for file audits.
Provides api stubs for compliance check flow that must be used by any class
that implemented this one.
"""
def __init__(self, paths, always_comply=False, *args, **kwargs):
"""
:param paths: string path or list of paths of files to which we want to
apply compliance checks or criteria.
:param always_comply: if true compliance criteria is always applied
else compliance is skipped for non-existent
paths.
"""
super(BaseFileAudit, self).__init__(*args, **kwargs)
self.always_comply = always_comply
if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
self.paths = [paths]
else:
self.paths = paths
def ensure_compliance(self):
"""Ensure that the all registered files comply to registered criteria.
"""
for p in self.paths:
if os.path.exists(p):
if self.is_compliant(p):
continue
log('File %s is not in compliance.' % p, level=INFO)
else:
if not self.always_comply:
log("Non-existent path '%s' - skipping compliance check"
% (p), level=INFO)
continue
if self._take_action():
log("Applying compliance criteria to '%s'" % (p), level=INFO)
self.comply(p)
def is_compliant(self, path):
"""Audits the path to see if it is compliance.
:param path: the path to the file that should be checked.
"""
raise NotImplementedError
def comply(self, path):
"""Enforces the compliance of a path.
:param path: the path to the file that should be enforced.
"""
raise NotImplementedError
@classmethod
def _get_stat(cls, path):
"""Returns the Posix st_stat information for the specified file path.
:param path: the path to get the st_stat information for.
:returns: an st_stat object for the path or None if the path doesn't
exist.
"""
return os.stat(path)
class FilePermissionAudit(BaseFileAudit):
"""Implements an audit for file permissions and ownership for a user.
This class implements functionality that ensures that a specific user/group
will own the file(s) specified and that the permissions specified are
applied properly to the file.
"""
def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
self.user = user
self.group = group
self.mode = mode
super(FilePermissionAudit, self).__init__(paths, user, group, mode,
**kwargs)
@property
def user(self):
return self._user
@user.setter
def user(self, name):
try:
user = pwd.getpwnam(name)
except KeyError:
log('Unknown user %s' % name, level=ERROR)
user = None
self._user = user
@property
def group(self):
return self._group
@group.setter
def group(self, name):
try:
group = None
if name:
group = grp.getgrnam(name)
else:
group = grp.getgrgid(self.user.pw_gid)
except KeyError:
log('Unknown group %s' % name, level=ERROR)
self._group = group
def is_compliant(self, path):
"""Checks if the path is in compliance.
Used to determine if the path specified meets the necessary
requirements to be in compliance with the check itself.
:param path: the file path to check
:returns: True if the path is compliant, False otherwise.
"""
stat = self._get_stat(path)
user = self.user
group = self.group
compliant = True
if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
log('File %s is not owned by %s:%s.' % (path, user.pw_name,
group.gr_name),
level=INFO)
compliant = False
# POSIX refers to the st_mode bits as corresponding to both the
# file type and file permission bits, where the least significant 12
# bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
# file permission bits (8-0)
perms = stat.st_mode & 0o7777
if perms != self.mode:
log('File %s has incorrect permissions, currently set to %s' %
(path, oct(stat.st_mode & 0o7777)), level=INFO)
compliant = False
return compliant
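A quick worked example of the permission mask used above:
# A regular file with st_mode 0o100644 reduces to its permission bits,
# which are then compared against the mode requested for this audit.
assert 0o100644 & 0o7777 == 0o644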
def comply(self, path):
"""Issues a chown and chmod to the file paths specified."""
utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
self.mode)
class DirectoryPermissionAudit(FilePermissionAudit):
"""Performs a permission check for the specified directory path."""
def __init__(self, paths, user, group=None, mode=0o600,
recursive=True, **kwargs):
super(DirectoryPermissionAudit, self).__init__(paths, user, group,
mode, **kwargs)
self.recursive = recursive
def is_compliant(self, path):
"""Checks if the directory is compliant.
Used to determine if the path specified and all of its children
directories are in compliance with the check itself.
:param path: the directory path to check
:returns: True if the directory tree is compliant, otherwise False.
"""
if not os.path.isdir(path):
log('Path specified %s is not a directory.' % path, level=ERROR)
raise ValueError("%s is not a directory." % path)
if not self.recursive:
return super(DirectoryPermissionAudit, self).is_compliant(path)
compliant = True
for root, dirs, _ in os.walk(path):
if len(dirs) > 0:
continue
if not super(DirectoryPermissionAudit, self).is_compliant(root):
compliant = False
continue
return compliant
def comply(self, path):
for root, dirs, _ in os.walk(path):
if len(dirs) > 0:
super(DirectoryPermissionAudit, self).comply(root)
class ReadOnly(BaseFileAudit):
"""Audits that files and folders are read only."""
def __init__(self, paths, *args, **kwargs):
super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
def is_compliant(self, path):
try:
output = check_output(['find', path, '-perm', '-go+w',
'-type', 'f']).strip()
# The find above will find any files which have permission sets
# which allow too broad of write access. As such, the path is
# compliant if there is no output.
if output:
return False
return True
except CalledProcessError as e:
log('Error occurred finding writable files for %s. '
'Error information is: command %s failed with returncode '
'%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
format_exc(e)), level=ERROR)
return False
def comply(self, path):
try:
check_output(['chmod', 'go-w', '-R', path])
except CalledProcessError as e:
log('Error occurred removing writeable permissions for %s. '
'Error information is: command %s failed with returncode '
'%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
format_exc(e)), level=ERROR)
class NoReadWriteForOther(BaseFileAudit):
"""Ensures that the files found under the base path are readable or
writable by anyone other than the owner or the group.
"""
def __init__(self, paths):
super(NoReadWriteForOther, self).__init__(paths)
def is_compliant(self, path):
try:
cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
'-perm', '-o+w', '-type', 'f']
output = check_output(cmd).strip()
# The find above here will find any files which have read or
# write permissions for other, meaning there is too broad of access
# to read/write the file. As such, the path is compliant if there's
# no output.
if output:
return False
return True
except CalledProcessError as e:
log('Error occurred while finding files which are readable or '
'writable to the world in %s. '
'Command output is: %s.' % (path, e.output), level=ERROR)
def comply(self, path):
try:
check_output(['chmod', '-R', 'o-rw', path])
except CalledProcessError as e:
log('Error occurred attempting to change modes of files under '
'path %s. Output of command is: %s' % (path, e.output))
class NoSUIDSGIDAudit(BaseFileAudit):
"""Audits that specified files do not have SUID/SGID bits set."""
def __init__(self, paths, *args, **kwargs):
super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
def is_compliant(self, path):
stat = self._get_stat(path)
if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
return False
return True
def comply(self, path):
try:
log('Removing suid/sgid from %s.' % path, level=DEBUG)
check_output(['chmod', '-s', path])
except CalledProcessError as e:
log('Error occurred removing suid/sgid from %s.'
'Error information is: command %s failed with returncode '
'%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
format_exc(e)), level=ERROR)
class TemplatedFile(BaseFileAudit):
"""The TemplatedFileAudit audits the contents of a templated file.
This audit renders a file from a template, sets the appropriate file
permissions, then generates a hashsum with which to check whether the
content has changed.
"""
def __init__(self, path, context, template_dir, mode, user='root',
group='root', service_actions=None, **kwargs):
self.context = context
self.user = user
self.group = group
self.mode = mode
self.template_dir = template_dir
self.service_actions = service_actions
super(TemplatedFile, self).__init__(paths=path, always_comply=True,
**kwargs)
def is_compliant(self, path):
"""Determines if the templated file is compliant.
A templated file is only compliant if it has not changed (as
determined by its sha256 hashsum) AND its file permissions are set
appropriately.
:param path: the path to check compliance.
"""
same_templates = self.templates_match(path)
same_content = self.contents_match(path)
same_permissions = self.permissions_match(path)
if same_content and same_permissions and same_templates:
return True
return False
def run_service_actions(self):
"""Run any actions on services requested."""
if not self.service_actions:
return
for svc_action in self.service_actions:
name = svc_action['service']
actions = svc_action['actions']
log("Running service '%s' actions '%s'" % (name, actions),
level=DEBUG)
for action in actions:
cmd = ['service', name, action]
try:
check_call(cmd)
except CalledProcessError as exc:
log("Service name='%s' action='%s' failed - %s" %
(name, action, exc), level=WARNING)
def comply(self, path):
"""Ensures the contents and the permissions of the file.
:param path: the path to correct
"""
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
self.pre_write()
render_and_write(self.template_dir, path, self.context())
utils.ensure_permissions(path, self.user, self.group, self.mode)
self.run_service_actions()
self.save_checksum(path)
self.post_write()
def pre_write(self):
"""Invoked prior to writing the template."""
pass
def post_write(self):
"""Invoked after writing the template."""
pass
def templates_match(self, path):
"""Determines if the template files are the same.
The template file equality is determined by the hashsum of the
template files themselves. If there is no stored hashsum, the content
cannot be assumed to be unchanged, so it is treated as changed.
Otherwise, return whether or not the hashsums are the same.
:param path: the path to check
:returns: boolean
"""
template_path = get_template_path(self.template_dir, path)
key = 'hardening:template:%s' % template_path
template_checksum = file_hash(template_path)
kv = unitdata.kv()
stored_tmplt_checksum = kv.get(key)
if not stored_tmplt_checksum:
kv.set(key, template_checksum)
kv.flush()
log('Saved template checksum for %s.' % template_path,
level=DEBUG)
# Since we don't have a template checksum, then assume it doesn't
# match and return that the template is different.
return False
elif stored_tmplt_checksum != template_checksum:
kv.set(key, template_checksum)
kv.flush()
log('Updated template checksum for %s.' % template_path,
level=DEBUG)
return False
# Here the template hasn't changed based upon the calculated
# checksum of the template and what was previously stored.
return True
def contents_match(self, path):
"""Determines if the file content is the same.
This is determined by comparing the hashsum of the file contents with
the saved hashsum. If there is no saved hashsum, the content cannot be
assumed to be unchanged, so it is treated as different.
Otherwise, return True if the hashsums are the same, False if they
are not the same.
:param path: the file to check.
"""
checksum = file_hash(path)
kv = unitdata.kv()
stored_checksum = kv.get('hardening:%s' % path)
if not stored_checksum:
# If the checksum hasn't been generated, return False to ensure
# the file is written and the checksum stored.
log('Checksum for %s has not been calculated.' % path, level=DEBUG)
return False
elif stored_checksum != checksum:
log('Checksum mismatch for %s.' % path, level=DEBUG)
return False
return True
def permissions_match(self, path):
"""Determines if the file owner and permissions match.
:param path: the path to check.
"""
audit = FilePermissionAudit(path, self.user, self.group, self.mode)
return audit.is_compliant(path)
def save_checksum(self, path):
"""Calculates and saves the checksum for the path specified.
:param path: the path of the file to save the checksum.
"""
checksum = file_hash(path)
kv = unitdata.kv()
kv.set('hardening:%s' % path, checksum)
kv.flush()
class DeletedFile(BaseFileAudit):
"""Audit to ensure that a file is deleted."""
def __init__(self, paths):
super(DeletedFile, self).__init__(paths)
def is_compliant(self, path):
return not os.path.exists(path)
def comply(self, path):
os.remove(path)
class FileContentAudit(BaseFileAudit):
"""Audit the contents of a file."""
def __init__(self, paths, cases, **kwargs):
# Cases we expect to pass
self.pass_cases = cases.get('pass', [])
# Cases we expect to fail
self.fail_cases = cases.get('fail', [])
super(FileContentAudit, self).__init__(paths, **kwargs)
def is_compliant(self, path):
"""
Given a set of content matching cases i.e. tuple(regex, bool) where
bool value denotes whether or not regex is expected to match, check that
all cases match as expected with the contents of the file. Cases can be
expected to pass or fail.
:param path: Path of file to check.
:returns: Boolean value representing whether or not all cases are
found to be compliant.
"""
log("Auditing contents of file '%s'" % (path), level=DEBUG)
with open(path, 'r') as fd:
contents = fd.read()
matches = 0
for pattern in self.pass_cases:
key = re.compile(pattern, flags=re.MULTILINE)
results = re.search(key, contents)
if results:
matches += 1
else:
log("Pattern '%s' was expected to pass but instead it failed"
% (pattern), level=WARNING)
for pattern in self.fail_cases:
key = re.compile(pattern, flags=re.MULTILINE)
results = re.search(key, contents)
if not results:
matches += 1
else:
log("Pattern '%s' was expected to fail but instead it passed"
% (pattern), level=WARNING)
total = len(self.pass_cases) + len(self.fail_cases)
log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
return matches == total
def comply(self, *args, **kwargs):
"""NOOP since we just issue warnings. This is to avoid the
NotImplementedError.
"""
log("Not applying any compliance criteria, only checks.", level=INFO)


@@ -0,0 +1,84 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
from collections import OrderedDict
from charmhelpers.core.hookenv import (
config,
log,
DEBUG,
WARNING,
)
from charmhelpers.contrib.hardening.host.checks import run_os_checks
from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
def harden(overrides=None):
"""Hardening decorator.
This is the main entry point for running the hardening stack. In order to
run modules of the stack you must add this decorator to charm hook(s) and
ensure that your charm config.yaml contains the 'harden' option set to
one or more of the supported modules. Setting these will cause the
corresponding hardening code to be run when the hook fires.
This decorator can and should be applied to more than one hook or function
such that hardening modules are called multiple times. This is because
subsequent calls will perform auditing checks that will report any changes
to resources hardened by the first run (and possibly perform compliance
actions as a result of any detected infractions).
:param overrides: Optional list of stack modules used to override those
provided with 'harden' config.
:returns: Returns value returned by decorated function once executed.
"""
def _harden_inner1(f):
log("Hardening function '%s'" % (f.__name__), level=DEBUG)
def _harden_inner2(*args, **kwargs):
RUN_CATALOG = OrderedDict([('os', run_os_checks),
('ssh', run_ssh_checks),
('mysql', run_mysql_checks),
('apache', run_apache_checks)])
enabled = overrides or (config("harden") or "").split()
if enabled:
modules_to_run = []
# modules will always be performed in the following order
for module, func in six.iteritems(RUN_CATALOG):
if module in enabled:
enabled.remove(module)
modules_to_run.append(func)
if enabled:
log("Unknown hardening modules '%s' - ignoring" %
(', '.join(enabled)), level=WARNING)
for hardener in modules_to_run:
log("Executing hardening module '%s'" %
(hardener.__name__), level=DEBUG)
hardener()
else:
log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)
return f(*args, **kwargs)
return _harden_inner2
return _harden_inner1
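A minimal usage sketch, assuming the charm's config.yaml exposes a 'harden' option (the hook name is illustrative):
# In a charm hook module: run the enabled hardening modules (e.g. config
# option harden: "os ssh") before the normal hook logic executes.
@harden()
def config_changed():
    pass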


@@ -0,0 +1,19 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from os import path
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')


@@ -0,0 +1,50 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
from charmhelpers.contrib.hardening.host.checks import (
apt,
limits,
login,
minimize_access,
pam,
profile,
securetty,
suid_sgid,
sysctl
)
def run_os_checks():
log("Starting OS hardening checks.", level=DEBUG)
checks = apt.get_audits()
checks.extend(limits.get_audits())
checks.extend(login.get_audits())
checks.extend(minimize_access.get_audits())
checks.extend(pam.get_audits())
checks.extend(profile.get_audits())
checks.extend(securetty.get_audits())
checks.extend(suid_sgid.get_audits())
checks.extend(sysctl.get_audits())
for check in checks:
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
check.ensure_compliance()
log("OS hardening checks complete.", level=DEBUG)


@@ -0,0 +1,39 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.contrib.hardening.utils import get_settings
from charmhelpers.contrib.hardening.audits.apt import (
AptConfig,
RestrictedPackages,
)
def get_audits():
"""Get OS hardening apt audits.
:returns: list of audits
"""
audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
'expected': 'false'}])]
settings = get_settings('os')
clean_packages = settings['security']['packages_clean']
if clean_packages:
security_packages = settings['security']['packages_list']
if security_packages:
audits.append(RestrictedPackages(security_packages))
return audits


@@ -0,0 +1,55 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.contrib.hardening.audits.file import (
DirectoryPermissionAudit,
TemplatedFile,
)
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get OS hardening security limits audits.
:returns: list of audits
"""
audits = []
settings = utils.get_settings('os')
# Ensure that the /etc/security/limits.d directory is only writable
# by the root user, but others can execute and read.
audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
user='root', group='root',
mode=0o755))
# If core dumps are not enabled, then don't allow core dumps to be
# created as they may contain sensitive information.
if not settings['security']['kernel_enable_core_dump']:
audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
SecurityLimitsContext(),
template_dir=TEMPLATES_DIR,
user='root', group='root', mode=0o0440))
return audits
class SecurityLimitsContext(object):
def __call__(self):
settings = utils.get_settings('os')
ctxt = {'disable_core_dump':
not settings['security']['kernel_enable_core_dump']}
return ctxt


@@ -0,0 +1,67 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from six import string_types
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get OS hardening login.defs audits.
:returns: list of audits
"""
audits = [TemplatedFile('/etc/login.defs', LoginContext(),
template_dir=TEMPLATES_DIR,
user='root', group='root', mode=0o0444)]
return audits
class LoginContext(object):
def __call__(self):
settings = utils.get_settings('os')
# Octal numbers in yaml end up being turned into decimal,
# so check if the umask is entered as a string (e.g. '027')
# or as an octal umask as we know it (e.g. 002). If it's not
# a string assume it to be octal and turn it into an octal
# string.
umask = settings['environment']['umask']
if not isinstance(umask, string_types):
umask = '%s' % oct(umask)
ctxt = {
'additional_user_paths':
settings['environment']['extra_user_paths'],
'umask': umask,
'pwd_max_age': settings['auth']['pw_max_age'],
'pwd_min_age': settings['auth']['pw_min_age'],
'uid_min': settings['auth']['uid_min'],
'sys_uid_min': settings['auth']['sys_uid_min'],
'sys_uid_max': settings['auth']['sys_uid_max'],
'gid_min': settings['auth']['gid_min'],
'sys_gid_min': settings['auth']['sys_gid_min'],
'sys_gid_max': settings['auth']['sys_gid_max'],
'login_retries': settings['auth']['retries'],
'login_timeout': settings['auth']['timeout'],
'chfn_restrict': settings['auth']['chfn_restrict'],
'allow_login_without_home': settings['auth']['allow_homeless']
}
return ctxt
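A worked example of the umask handling noted above, assuming the Python 2 semantics this code targets:
# A YAML value written as 027 is loaded as the octal integer 23;
# oct(23) == '027' on Python 2 (Python 3 would give '0o27'),
# restoring the string form that login.defs expects.
umask = 0o27
umask = '%s' % oct(umask)   # -> '027' under Python 2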


@@ -0,0 +1,52 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.contrib.hardening.audits.file import (
FilePermissionAudit,
ReadOnly,
)
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get OS hardening access audits.
:returns: list of audits
"""
audits = []
settings = utils.get_settings('os')
# Remove write permissions from $PATH folders for all regular users.
# This prevents changing system-wide commands from normal users.
path_folders = {'/usr/local/sbin',
'/usr/local/bin',
'/usr/sbin',
'/usr/bin',
'/bin'}
extra_user_paths = settings['environment']['extra_user_paths']
path_folders.update(extra_user_paths)
audits.append(ReadOnly(path_folders))
# Only allow the root user to have access to the shadow file.
audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
if 'change_user' not in settings['security']['users_allow']:
# su should only be accessible to user and group root, unless it is
# expressly defined to allow users to change to root via the
# security_users_allow config option.
audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
return audits


@@ -0,0 +1,134 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from subprocess import (
check_output,
CalledProcessError,
)
from charmhelpers.core.hookenv import (
log,
DEBUG,
ERROR,
)
from charmhelpers.fetch import (
apt_install,
apt_purge,
apt_update,
)
from charmhelpers.contrib.hardening.audits.file import (
TemplatedFile,
DeletedFile,
)
from charmhelpers.contrib.hardening import utils
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
def get_audits():
"""Get OS hardening PAM authentication audits.
:returns: list of audits
"""
audits = []
settings = utils.get_settings('os')
if settings['auth']['pam_passwdqc_enable']:
audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
if settings['auth']['retries']:
audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
else:
audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
return audits
class PasswdqcPAMContext(object):
def __call__(self):
ctxt = {}
settings = utils.get_settings('os')
ctxt['auth_pam_passwdqc_options'] = \
settings['auth']['pam_passwdqc_options']
return ctxt
class PasswdqcPAM(TemplatedFile):
"""The PAM Audit verifies the linux PAM settings."""
def __init__(self, path):
super(PasswdqcPAM, self).__init__(path=path,
template_dir=TEMPLATES_DIR,
context=PasswdqcPAMContext(),
user='root',
group='root',
mode=0o0640)
def pre_write(self):
# Always remove?
for pkg in ['libpam-ccreds', 'libpam-cracklib']:
log("Purging package '%s'" % pkg, level=DEBUG),
apt_purge(pkg)
apt_update(fatal=True)
for pkg in ['libpam-passwdqc']:
log("Installing package '%s'" % pkg, level=DEBUG),
apt_install(pkg)
def post_write(self):
"""Updates the PAM configuration after the file has been written"""
try:
check_output(['pam-auth-update', '--package'])
except CalledProcessError as e:
log('Error calling pam-auth-update: %s' % e, level=ERROR)
class Tally2PAMContext(object):
def __call__(self):
ctxt = {}
settings = utils.get_settings('os')
ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
ctxt['auth_retries'] = settings['auth']['retries']
return ctxt
class Tally2PAM(TemplatedFile):
"""The PAM Audit verifies the linux PAM settings."""
def __init__(self, path):
super(Tally2PAM, self).__init__(path=path,
template_dir=TEMPLATES_DIR,
context=Tally2PAMContext(),
user='root',
group='root',
mode=0o0640)
def pre_write(self):
# Always remove?
apt_purge('libpam-ccreds')
apt_update(fatal=True)
apt_install('libpam-modules')
def post_write(self):
"""Updates the PAM configuration after the file has been written"""
try:
check_output(['pam-auth-update', '--package'])
except CalledProcessError as e:
log('Error calling pam-auth-update: %s' % e, level=ERROR)


@@ -0,0 +1,45 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get OS hardening profile audits.
:returns: list of audits
"""
audits = []
settings = utils.get_settings('os')
# If core dumps are not enabled, then don't allow core dumps to be
# created as they may contain sensitive information.
if not settings['security']['kernel_enable_core_dump']:
audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
ProfileContext(),
template_dir=TEMPLATES_DIR,
mode=0o0755, user='root', group='root'))
return audits
class ProfileContext(object):
def __call__(self):
ctxt = {}
return ctxt


@@ -0,0 +1,39 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get OS hardening Secure TTY audits.
:returns: list of audits
"""
audits = []
audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
template_dir=TEMPLATES_DIR,
mode=0o0400, user='root', group='root'))
return audits
class SecureTTYContext(object):
def __call__(self):
settings = utils.get_settings('os')
ctxt = {'ttys': settings['auth']['root_ttys']}
return ctxt


@@ -0,0 +1,131 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from charmhelpers.core.hookenv import (
log,
INFO,
)
from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
from charmhelpers.contrib.hardening import utils
BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
'/usr/libexec/openssh/ssh-keysign',
'/usr/lib/openssh/ssh-keysign',
'/sbin/netreport',
'/usr/sbin/usernetctl',
'/usr/sbin/userisdnctl',
'/usr/sbin/pppd',
'/usr/bin/lockfile',
'/usr/bin/mail-lock',
'/usr/bin/mail-unlock',
'/usr/bin/mail-touchlock',
'/usr/bin/dotlockfile',
'/usr/bin/arping',
'/usr/sbin/uuidd',
'/usr/bin/mtr',
'/usr/lib/evolution/camel-lock-helper-1.2',
'/usr/lib/pt_chown',
'/usr/lib/eject/dmcrypt-get-device',
'/usr/lib/mc/cons.saver']
WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
'/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
'/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
'/usr/bin/passwd', '/usr/bin/ssh-agent',
'/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
'/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
'/bin/ping6', '/usr/bin/traceroute6.iputils',
'/sbin/mount.nfs', '/sbin/umount.nfs',
'/sbin/mount.nfs4', '/sbin/umount.nfs4',
'/usr/bin/crontab',
'/usr/bin/wall', '/usr/bin/write',
'/usr/bin/screen',
'/usr/bin/mlocate',
'/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
'/bin/fusermount',
'/usr/bin/pkexec',
'/usr/bin/sudo', '/usr/bin/sudoedit',
'/usr/sbin/postdrop', '/usr/sbin/postqueue',
'/usr/sbin/suexec',
'/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
'/usr/kerberos/bin/ksu',
'/usr/sbin/ccreds_validate',
'/usr/bin/Xorg',
'/usr/bin/X',
'/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
'/usr/lib/vte/gnome-pty-helper',
'/usr/lib/libvte9/gnome-pty-helper',
'/usr/lib/libvte-2.90-9/gnome-pty-helper']
def get_audits():
"""Get OS hardening suid/sgid audits.
:returns: list of audits
"""
checks = []
settings = utils.get_settings('os')
if not settings['security']['suid_sgid_enforce']:
log("Skipping suid/sgid hardening", level=INFO)
return checks
# Build the blacklist and whitelist of files for suid/sgid checks.
# There are a total of 4 lists:
# 1. the system blacklist
# 2. the system whitelist
# 3. the user blacklist
# 4. the user whitelist
#
# The blacklist is the set of paths which should NOT have the suid/sgid bit
# set and the whitelist is the set of paths which MAY have the suid/sgid
# bit set. The user whitelist/blacklist effectively override the system
# whitelist/blacklist.
u_b = settings['security']['suid_sgid_blacklist']
u_w = settings['security']['suid_sgid_whitelist']
blacklist = set(BLACKLIST) - set(u_w + u_b)
whitelist = set(WHITELIST) - set(u_b + u_w)
checks.append(NoSUIDSGIDAudit(blacklist))
dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
# If the policy is a dry_run (e.g. complain only) or remove unknown
# suid/sgid bits then find all of the paths which have the suid/sgid
# bit set and then remove the whitelisted paths.
root_path = settings['environment']['root_path']
unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
return checks
def find_paths_with_suid_sgid(root_path):
"""Finds all paths/files which have an suid/sgid bit enabled.
Starting with the root_path, this will recursively find all paths which
have an suid or sgid bit set.
"""
cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
'-type', 'f', '!', '-path', '/proc/*', '-print']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = p.communicate()
return set(out.split('\n'))
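For illustration (not part of the synced file): a small, self-contained example of the black/whitelist set arithmetic used in get_audits() above, with made-up user overrides:

# All paths and override values below are made up.
SYSTEM_BLACKLIST = {'/usr/bin/rcp', '/usr/bin/rsh', '/usr/bin/mtr'}
SYSTEM_WHITELIST = {'/bin/su', '/usr/bin/passwd', '/usr/bin/sudo'}

user_blacklist = ['/usr/bin/sudo']   # hypothetical entries from hardening.yaml
user_whitelist = ['/usr/bin/mtr']

# Paths named in either user list are removed from both system lists before
# the NoSUIDSGIDAudit checks are built, mirroring the logic in get_audits().
blacklist = SYSTEM_BLACKLIST - set(user_whitelist + user_blacklist)
whitelist = SYSTEM_WHITELIST - set(user_blacklist + user_whitelist)

print(sorted(blacklist))  # ['/usr/bin/rcp', '/usr/bin/rsh']
print(sorted(whitelist))  # ['/bin/su', '/usr/bin/passwd']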

View File

@ -0,0 +1,211 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import re
import six
import subprocess
from charmhelpers.core.hookenv import (
log,
INFO,
WARNING,
)
from charmhelpers.contrib.hardening import utils
from charmhelpers.contrib.hardening.audits.file import (
FilePermissionAudit,
TemplatedFile,
)
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
net.ipv4.conf.all.rp_filter=1
net.ipv4.conf.default.rp_filter=1
net.ipv4.icmp_echo_ignore_broadcasts=1
net.ipv4.icmp_ignore_bogus_error_responses=1
net.ipv4.icmp_ratelimit=100
net.ipv4.icmp_ratemask=88089
net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
net.ipv4.tcp_rfc1337=1
net.ipv4.tcp_syncookies=1
net.ipv4.conf.all.shared_media=1
net.ipv4.conf.default.shared_media=1
net.ipv4.conf.all.accept_source_route=0
net.ipv4.conf.default.accept_source_route=0
net.ipv4.conf.all.accept_redirects=0
net.ipv4.conf.default.accept_redirects=0
net.ipv6.conf.all.accept_redirects=0
net.ipv6.conf.default.accept_redirects=0
net.ipv4.conf.all.secure_redirects=0
net.ipv4.conf.default.secure_redirects=0
net.ipv4.conf.all.send_redirects=0
net.ipv4.conf.default.send_redirects=0
net.ipv4.conf.all.log_martians=0
net.ipv6.conf.default.router_solicitations=0
net.ipv6.conf.default.accept_ra_rtr_pref=0
net.ipv6.conf.default.accept_ra_pinfo=0
net.ipv6.conf.default.accept_ra_defrtr=0
net.ipv6.conf.default.autoconf=0
net.ipv6.conf.default.dad_transmits=0
net.ipv6.conf.default.max_addresses=1
net.ipv6.conf.all.accept_ra=0
net.ipv6.conf.default.accept_ra=0
kernel.modules_disabled=%(kernel_modules_disabled)s
kernel.sysrq=%(kernel_sysrq)s
fs.suid_dumpable=%(fs_suid_dumpable)s
kernel.randomize_va_space=2
"""
def get_audits():
"""Get OS hardening sysctl audits.
:returns: list of audits
"""
audits = []
settings = utils.get_settings('os')
# Apply the sysctl settings which are configured to be applied.
audits.append(SysctlConf())
# Make sure that only root has access to the sysctl.conf file, and
# that it is read-only.
audits.append(FilePermissionAudit('/etc/sysctl.conf',
user='root',
group='root', mode=0o0440))
# If module loading is not enabled, then ensure that the modules
# file has the appropriate permissions and rebuild the initramfs
if not settings['security']['kernel_enable_module_loading']:
audits.append(ModulesTemplate())
return audits
class ModulesContext(object):
def __call__(self):
settings = utils.get_settings('os')
with open('/proc/cpuinfo', 'r') as fd:
cpuinfo = fd.readlines()
# Fallback in case /proc/cpuinfo has no vendor_id line (e.g. non-x86).
vendor = ''
for line in cpuinfo:
match = re.search(r"^vendor_id\s+:\s+(.+)", line)
if match:
vendor = match.group(1)
if vendor == "GenuineIntel":
vendor = "intel"
elif vendor == "AuthenticAMD":
vendor = "amd"
ctxt = {'arch': platform.processor(),
'cpuVendor': vendor,
'desktop_enable': settings['general']['desktop_enable']}
return ctxt
class ModulesTemplate(TemplatedFile):
def __init__(self):
super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
ModulesContext(),
                                              template_dir=TEMPLATES_DIR,
user='root', group='root',
mode=0o0440)
def post_write(self):
subprocess.check_call(['update-initramfs', '-u'])
class SysCtlHardeningContext(object):
def __call__(self):
settings = utils.get_settings('os')
ctxt = {'sysctl': {}}
log("Applying sysctl settings", level=INFO)
extras = {'net_ipv4_ip_forward': 0,
'net_ipv6_conf_all_forwarding': 0,
'net_ipv6_conf_all_disable_ipv6': 1,
'net_ipv4_tcp_timestamps': 0,
'net_ipv4_conf_all_arp_ignore': 0,
'net_ipv4_conf_all_arp_announce': 0,
'kernel_sysrq': 0,
'fs_suid_dumpable': 0,
'kernel_modules_disabled': 1}
if settings['sysctl']['ipv6_enable']:
extras['net_ipv6_conf_all_disable_ipv6'] = 0
if settings['sysctl']['forwarding']:
extras['net_ipv4_ip_forward'] = 1
extras['net_ipv6_conf_all_forwarding'] = 1
if settings['sysctl']['arp_restricted']:
extras['net_ipv4_conf_all_arp_ignore'] = 1
extras['net_ipv4_conf_all_arp_announce'] = 2
if settings['security']['kernel_enable_module_loading']:
extras['kernel_modules_disabled'] = 0
if settings['sysctl']['kernel_enable_sysrq']:
sysrq_val = settings['sysctl']['kernel_secure_sysrq']
extras['kernel_sysrq'] = sysrq_val
if settings['security']['kernel_enable_core_dump']:
extras['fs_suid_dumpable'] = 1
settings.update(extras)
for d in (SYSCTL_DEFAULTS % settings).split():
d = d.strip().partition('=')
key = d[0].strip()
path = os.path.join('/proc/sys', key.replace('.', '/'))
if not os.path.exists(path):
log("Skipping '%s' since '%s' does not exist" % (key, path),
level=WARNING)
continue
ctxt['sysctl'][key] = d[2] or None
# Translate for python3
return {'sysctl_settings':
[(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
class SysctlConf(TemplatedFile):
"""An audit check for sysctl settings."""
def __init__(self):
self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
super(SysctlConf, self).__init__(self.conffile,
SysCtlHardeningContext(),
template_dir=TEMPLATES_DIR,
user='root', group='root',
mode=0o0440)
def post_write(self):
try:
subprocess.check_call(['sysctl', '-p', self.conffile])
except subprocess.CalledProcessError as e:
# NOTE: on some systems if sysctl cannot apply all settings it
# will return non-zero as well.
log("sysctl command returned an error (maybe some "
"keys could not be set) - %s" % (e),
level=WARNING)
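For illustration (not part of the synced file): a standalone sketch of the SYSCTL_DEFAULTS parsing performed by SysCtlHardeningContext above; the values are made up and the /proc/sys existence check is omitted:

# SYSCTL_DEFAULTS is interpolated with the (extended) settings dict, then each
# whitespace-separated "key=value" entry is split on the first '='.
DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
net.ipv4.tcp_syncookies=1
fs.suid_dumpable=%(fs_suid_dumpable)s
"""

values = {'net_ipv4_ip_forward': 0, 'fs_suid_dumpable': 0}

sysctl = {}
for entry in (DEFAULTS % values).split():
    key, _, value = entry.strip().partition('=')
    sysctl[key] = value or None

# The template context is handed a list of (key, value) pairs.
print(sorted(sysctl.items()))
# [('fs.suid_dumpable', '0'), ('net.ipv4.ip_forward', '0'),
#  ('net.ipv4.tcp_syncookies', '1')]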

View File

@ -0,0 +1,19 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from os import path
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')

View File

@ -0,0 +1,31 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
from charmhelpers.contrib.hardening.mysql.checks import config
def run_mysql_checks():
log("Starting MySQL hardening checks.", level=DEBUG)
checks = config.get_audits()
for check in checks:
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
check.ensure_compliance()
log("MySQL hardening checks complete.", level=DEBUG)

View File

@ -0,0 +1,89 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
import subprocess
from charmhelpers.core.hookenv import (
log,
WARNING,
)
from charmhelpers.contrib.hardening.audits.file import (
FilePermissionAudit,
DirectoryPermissionAudit,
TemplatedFile,
)
from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get MySQL hardening config audits.
:returns: list of audits
"""
if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
log("MySQL does not appear to be installed on this node - "
"skipping mysql hardening", level=WARNING)
return []
settings = utils.get_settings('mysql')
hardening_settings = settings['hardening']
my_cnf = hardening_settings['mysql-conf']
audits = [
FilePermissionAudit(paths=[my_cnf], user='root',
group='root', mode=0o0600),
TemplatedFile(hardening_settings['hardening-conf'],
MySQLConfContext(),
TEMPLATES_DIR,
mode=0o0750,
user='mysql',
group='root',
service_actions=[{'service': 'mysql',
'actions': ['restart']}]),
# MySQL and Percona charms do not allow configuration of the
# data directory, so use the default.
DirectoryPermissionAudit('/var/lib/mysql',
user='mysql',
group='mysql',
recursive=False,
mode=0o755),
DirectoryPermissionAudit('/etc/mysql',
user='root',
group='root',
recursive=False,
mode=0o700),
]
return audits
class MySQLConfContext(object):
"""Defines the set of key/value pairs to set in a mysql config file.
This context, when called, will return a dictionary containing the
key/value pairs of setting to specify in the
/etc/mysql/conf.d/hardening.cnf file.
"""
def __call__(self):
settings = utils.get_settings('mysql')
# Translate for python3
return {'mysql_settings':
[(k, v) for k, v in six.iteritems(settings['security'])]}

View File

@ -0,0 +1,19 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from os import path
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')

View File

@ -0,0 +1,31 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
from charmhelpers.contrib.hardening.ssh.checks import config
def run_ssh_checks():
log("Starting SSH hardening checks.", level=DEBUG)
checks = config.get_audits()
for check in checks:
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
check.ensure_compliance()
log("SSH hardening checks complete.", level=DEBUG)

View File

@ -0,0 +1,394 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
from charmhelpers.fetch import (
apt_install,
apt_update,
)
from charmhelpers.core.host import lsb_release
from charmhelpers.contrib.hardening.audits.file import (
TemplatedFile,
FileContentAudit,
)
from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get SSH hardening config audits.
:returns: list of audits
"""
audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
SSHDConfigFileContentAudit()]
return audits
class SSHConfigContext(object):
type = 'client'
def get_macs(self, allow_weak_mac):
if allow_weak_mac:
weak_macs = 'weak'
else:
weak_macs = 'default'
default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
macs = {'default': default,
'weak': default + ',hmac-sha1'}
default = ('hmac-sha2-512-etm@openssh.com,'
'hmac-sha2-256-etm@openssh.com,'
'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
macs_66 = {'default': default,
'weak': default + ',hmac-sha1'}
# Use newer MACs on Ubuntu Trusty and above
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
macs = macs_66
return macs[weak_macs]
def get_kexs(self, allow_weak_kex):
if allow_weak_kex:
weak_kex = 'weak'
else:
weak_kex = 'default'
default = 'diffie-hellman-group-exchange-sha256'
weak = (default + ',diffie-hellman-group14-sha1,'
'diffie-hellman-group-exchange-sha1,'
'diffie-hellman-group1-sha1')
kex = {'default': default,
'weak': weak}
default = ('curve25519-sha256@libssh.org,'
'diffie-hellman-group-exchange-sha256')
weak = (default + ',diffie-hellman-group14-sha1,'
'diffie-hellman-group-exchange-sha1,'
'diffie-hellman-group1-sha1')
kex_66 = {'default': default,
'weak': weak}
# Use newer kex on Ubuntu Trusty and above
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
log('Detected Ubuntu 14.04 or newer, using new key exchange '
'algorithms', level=DEBUG)
kex = kex_66
return kex[weak_kex]
def get_ciphers(self, cbc_required):
if cbc_required:
weak_ciphers = 'weak'
else:
weak_ciphers = 'default'
default = 'aes256-ctr,aes192-ctr,aes128-ctr'
cipher = {'default': default,
              'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
ciphers_66 = {'default': default,
'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
# Use newer ciphers on Ubuntu Trusty and above
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
log('Detected Ubuntu 14.04 or newer, using new ciphers',
level=DEBUG)
cipher = ciphers_66
return cipher[weak_ciphers]
def __call__(self):
settings = utils.get_settings('ssh')
if settings['common']['network_ipv6_enable']:
addr_family = 'any'
else:
addr_family = 'inet'
ctxt = {
'addr_family': addr_family,
'remote_hosts': settings['common']['remote_hosts'],
'password_auth_allowed':
settings['client']['password_authentication'],
'ports': settings['common']['ports'],
'ciphers': self.get_ciphers(settings['client']['cbc_required']),
'macs': self.get_macs(settings['client']['weak_hmac']),
'kexs': self.get_kexs(settings['client']['weak_kex']),
'roaming': settings['client']['roaming'],
}
return ctxt
class SSHConfig(TemplatedFile):
def __init__(self):
path = '/etc/ssh/ssh_config'
super(SSHConfig, self).__init__(path=path,
template_dir=TEMPLATES_DIR,
context=SSHConfigContext(),
user='root',
group='root',
mode=0o0644)
def pre_write(self):
settings = utils.get_settings('ssh')
apt_update(fatal=True)
apt_install(settings['client']['package'])
if not os.path.exists('/etc/ssh'):
os.mkdir('/etc/ssh')
# NOTE: don't recurse
utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
maxdepth=0)
def post_write(self):
# NOTE: don't recurse
utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
maxdepth=0)
class SSHDConfigContext(SSHConfigContext):
type = 'server'
def __call__(self):
settings = utils.get_settings('ssh')
if settings['common']['network_ipv6_enable']:
addr_family = 'any'
else:
addr_family = 'inet'
ctxt = {
'ssh_ip': settings['server']['listen_to'],
'password_auth_allowed':
settings['server']['password_authentication'],
'ports': settings['common']['ports'],
'addr_family': addr_family,
'ciphers': self.get_ciphers(settings['server']['cbc_required']),
'macs': self.get_macs(settings['server']['weak_hmac']),
'kexs': self.get_kexs(settings['server']['weak_kex']),
'host_key_files': settings['server']['host_key_files'],
'allow_root_with_key': settings['server']['allow_root_with_key'],
'password_authentication':
settings['server']['password_authentication'],
'use_priv_sep': settings['server']['use_privilege_separation'],
'use_pam': settings['server']['use_pam'],
'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
'print_motd': settings['server']['print_motd'],
'print_last_log': settings['server']['print_last_log'],
'client_alive_interval':
settings['server']['alive_interval'],
'client_alive_count': settings['server']['alive_count'],
'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
'allow_agent_forwarding':
settings['server']['allow_agent_forwarding'],
'deny_users': settings['server']['deny_users'],
'allow_users': settings['server']['allow_users'],
'deny_groups': settings['server']['deny_groups'],
'allow_groups': settings['server']['allow_groups'],
'use_dns': settings['server']['use_dns'],
'sftp_enable': settings['server']['sftp_enable'],
'sftp_group': settings['server']['sftp_group'],
'sftp_chroot': settings['server']['sftp_chroot'],
'max_auth_tries': settings['server']['max_auth_tries'],
'max_sessions': settings['server']['max_sessions'],
}
return ctxt
class SSHDConfig(TemplatedFile):
def __init__(self):
path = '/etc/ssh/sshd_config'
super(SSHDConfig, self).__init__(path=path,
template_dir=TEMPLATES_DIR,
context=SSHDConfigContext(),
user='root',
group='root',
mode=0o0600,
service_actions=[{'service': 'ssh',
'actions':
['restart']}])
def pre_write(self):
settings = utils.get_settings('ssh')
apt_update(fatal=True)
apt_install(settings['server']['package'])
if not os.path.exists('/etc/ssh'):
os.mkdir('/etc/ssh')
# NOTE: don't recurse
utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
maxdepth=0)
def post_write(self):
# NOTE: don't recurse
utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
maxdepth=0)
class SSHConfigFileContentAudit(FileContentAudit):
def __init__(self):
self.path = '/etc/ssh/ssh_config'
super(SSHConfigFileContentAudit, self).__init__(self.path, {})
def is_compliant(self, *args, **kwargs):
self.pass_cases = []
self.fail_cases = []
settings = utils.get_settings('ssh')
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
if not settings['server']['weak_hmac']:
self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
else:
self.pass_cases.append(r'^MACs.+,hmac-sha1$')
if settings['server']['weak_kex']:
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
else:
self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
if settings['server']['cbc_required']:
self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
else:
self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
else:
if not settings['client']['weak_hmac']:
self.fail_cases.append(r'^MACs.+,hmac-sha1$')
else:
self.pass_cases.append(r'^MACs.+,hmac-sha1$')
if settings['client']['weak_kex']:
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
else:
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
if settings['client']['cbc_required']:
self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
else:
self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
if settings['client']['roaming']:
self.pass_cases.append(r'^UseRoaming yes$')
else:
self.fail_cases.append(r'^UseRoaming yes$')
return super(SSHConfigFileContentAudit, self).is_compliant(*args,
**kwargs)
class SSHDConfigFileContentAudit(FileContentAudit):
def __init__(self):
self.path = '/etc/ssh/sshd_config'
super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
def is_compliant(self, *args, **kwargs):
self.pass_cases = []
self.fail_cases = []
settings = utils.get_settings('ssh')
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
if not settings['server']['weak_hmac']:
self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
else:
self.pass_cases.append(r'^MACs.+,hmac-sha1$')
if settings['server']['weak_kex']:
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
else:
self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
if settings['server']['cbc_required']:
self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
else:
self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
else:
if not settings['server']['weak_hmac']:
self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
else:
self.pass_cases.append(r'^MACs.+,hmac-sha1$')
if settings['server']['weak_kex']:
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
else:
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
if settings['server']['cbc_required']:
self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
else:
self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
if settings['server']['sftp_enable']:
self.pass_cases.append(r'^Subsystem\ssftp')
else:
self.fail_cases.append(r'^Subsystem\ssftp')
return super(SSHDConfigFileContentAudit, self).is_compliant(*args,
**kwargs)
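For illustration (not part of the synced file): a rough sketch of how the pass_cases/fail_cases regexes above are presumably evaluated against the rendered config. The check_lines helper and the sample lines are hypothetical; the real matching logic lives in FileContentAudit elsewhere in charm-helpers:

import re

def check_lines(lines, pass_cases, fail_cases):
    # Assumed semantics: every pass case must match at least one line,
    # and no fail case may match any line.
    passed = all(any(re.search(p, line) for line in lines) for p in pass_cases)
    failed = any(re.search(f, line) for line in lines for f in fail_cases)
    return passed and not failed

sshd_lines = [
    'Ciphers chacha20-poly1305@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr',
    'MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160',
]
print(check_lines(sshd_lines,
                  pass_cases=[r'^MACs.+,hmac-ripemd160$'],
                  fail_cases=[r'^Ciphers\s.*-cbc[,\s]?']))  # True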

View File

@ -0,0 +1,71 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
)
try:
from jinja2 import FileSystemLoader, Environment
except ImportError:
from charmhelpers.fetch import apt_install
from charmhelpers.fetch import apt_update
apt_update(fatal=True)
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment
# NOTE: function separated from main rendering code to facilitate easier
# mocking in unit tests.
def write(path, data):
with open(path, 'wb') as out:
out.write(data)
def get_template_path(template_dir, path):
"""Returns the template file which would be used to render the path.
The path to the template file is returned.
:param template_dir: the directory the templates are located in
:param path: the file path to be written to.
:returns: path to the template file
"""
return os.path.join(template_dir, os.path.basename(path))
def render_and_write(template_dir, path, context):
"""Renders the specified template into the file.
:param template_dir: the directory to load the template from
:param path: the path to write the templated contents to
:param context: the parameters to pass to the rendering engine
"""
env = Environment(loader=FileSystemLoader(template_dir))
template_file = os.path.basename(path)
template = env.get_template(template_file)
log('Rendering from template: %s' % template.name, level=DEBUG)
rendered_content = template.render(context)
if not rendered_content:
log("Render returned None - skipping '%s'" % path,
level=WARNING)
return
write(path, rendered_content.encode('utf-8').strip())
log('Wrote template %s' % path, level=DEBUG)
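For illustration (not part of the synced file): a self-contained sketch of the rendering flow above using jinja2 directly. The template name and context are made up; calling render_and_write() itself also requires a Juju hook environment for the log() calls. Note that the template is looked up by the basename of the destination path:

import os
import tempfile

from jinja2 import Environment, FileSystemLoader

# Illustrative template mirroring the lookup done by get_template_path().
tmpl_dir = tempfile.mkdtemp()
with open(os.path.join(tmpl_dir, 'hardening.conf'), 'w') as f:
    f.write('{% for k, v in sysctl_settings %}{{ k }}={{ v }}\n{% endfor %}')

env = Environment(loader=FileSystemLoader(tmpl_dir))
content = env.get_template('hardening.conf').render(
    {'sysctl_settings': [('kernel.sysrq', '0'), ('fs.suid_dumpable', '0')]})
print(content.strip())
# kernel.sysrq=0
# fs.suid_dumpable=0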

View File

@ -0,0 +1,157 @@
# Copyright 2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import glob
import grp
import os
import pwd
import six
import yaml
from charmhelpers.core.hookenv import (
log,
DEBUG,
INFO,
WARNING,
ERROR,
)
# Global settings cache. Since each hook fire entails a fresh module import it
# is safe to hold this in memory and not risk missing config changes (since
# they will result in a new hook fire and thus re-import).
__SETTINGS__ = {}
def _get_defaults(modules):
"""Load the default config for the provided modules.
:param modules: stack modules config defaults to lookup.
:returns: modules default config dictionary.
"""
default = os.path.join(os.path.dirname(__file__),
'defaults/%s.yaml' % (modules))
return yaml.safe_load(open(default))
def _get_schema(modules):
"""Load the config schema for the provided modules.
NOTE: this schema is intended to have a 1-1 relationship with the keys in
the default config and is used as a means to verify valid overrides provided
by the user.
:param modules: stack modules config schema to lookup.
:returns: modules default schema dictionary.
"""
schema = os.path.join(os.path.dirname(__file__),
'defaults/%s.yaml.schema' % (modules))
return yaml.safe_load(open(schema))
def _get_user_provided_overrides(modules):
"""Load user-provided config overrides.
:param modules: stack modules to lookup in user overrides yaml file.
:returns: overrides dictionary.
"""
overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
'hardening.yaml')
if os.path.exists(overrides):
log("Found user-provided config overrides file '%s'" %
(overrides), level=DEBUG)
settings = yaml.safe_load(open(overrides))
if settings and settings.get(modules):
log("Applying '%s' overrides" % (modules), level=DEBUG)
return settings.get(modules)
log("No overrides found for '%s'" % (modules), level=DEBUG)
else:
log("No hardening config overrides file '%s' found in charm "
"root dir" % (overrides), level=DEBUG)
return {}
def _apply_overrides(settings, overrides, schema):
"""Get overrides config overlayed onto modules defaults.
:param modules: require stack modules config.
:returns: dictionary of modules config with user overrides applied.
"""
if overrides:
for k, v in six.iteritems(overrides):
if k in schema:
if schema[k] is None:
settings[k] = v
elif type(schema[k]) is dict:
settings[k] = _apply_overrides(settings[k], overrides[k],
schema[k])
else:
raise Exception("Unexpected type found in schema '%s'" %
                                type(schema[k]))
else:
log("Unknown override key '%s' - ignoring" % (k), level=INFO)
return settings
def get_settings(modules):
global __SETTINGS__
if modules in __SETTINGS__:
return __SETTINGS__[modules]
schema = _get_schema(modules)
settings = _get_defaults(modules)
overrides = _get_user_provided_overrides(modules)
__SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
return __SETTINGS__[modules]
def ensure_permissions(path, user, group, permissions, maxdepth=-1):
"""Ensure permissions for path.
If path is a file, apply to file and return. If path is a directory,
apply recursively (if required) to directory contents and return.
:param path: path to apply the permissions to.
:param user: user name
:param group: group name
:param permissions: octal permissions
:param maxdepth: maximum recursion depth. A negative maxdepth allows
infinite recursion and maxdepth=0 means no recursion.
:returns: None
"""
if not os.path.exists(path):
log("File '%s' does not exist - cannot set permissions" % (path),
level=WARNING)
return
_user = pwd.getpwnam(user)
os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
os.chmod(path, permissions)
if maxdepth == 0:
log("Max recursion depth reached - skipping further recursion",
level=DEBUG)
return
elif maxdepth > 0:
maxdepth -= 1
if os.path.isdir(path):
contents = glob.glob("%s/*" % (path))
for c in contents:
ensure_permissions(c, user=user, group=group,
permissions=permissions, maxdepth=maxdepth)
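For illustration (not part of the synced file): a small sketch of the override layering used by get_settings(), with made-up defaults, schema and overrides; the helper below mirrors _apply_overrides() above:

def apply_overrides(settings, overrides, schema):
    # Only keys present in the schema are accepted; nested dicts merge
    # recursively; unknown keys are ignored (logged at INFO in the original).
    for k, v in (overrides or {}).items():
        if k not in schema:
            continue
        if isinstance(schema[k], dict):
            settings[k] = apply_overrides(settings[k], v, schema[k])
        else:
            settings[k] = v
    return settings

defaults = {'security': {'kernel_enable_core_dump': False,
                         'suid_sgid_enforce': True}}
schema = {'security': {'kernel_enable_core_dump': None,
                       'suid_sgid_enforce': None}}
overrides = {'security': {'kernel_enable_core_dump': True, 'bogus_key': 1}}

print(apply_overrides(defaults, overrides, schema))
# {'security': {'kernel_enable_core_dump': True, 'suid_sgid_enforce': True}}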

View File

@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
def resolve_network_cidr(ip_address):
'''
Resolves the full address cidr of an ip_address based on
configured network interfaces
'''
netmask = get_netmask_for_address(ip_address)
return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
def format_ipv6_addr(address):
"""If address is IPv6, wrap it in '[]' otherwise return None.
@ -456,3 +465,18 @@ def get_hostname(address, fqdn=True):
return result
else:
return result.split('.')[0]
def port_has_listener(address, port):
"""
Returns True if the address:port is open and being listened to,
else False.
@param address: an IP address or hostname
@param port: integer port
Note: calls the 'nc' binary via subprocess
"""
cmd = ['nc', '-z', address, str(port)]
result = subprocess.call(cmd)
return not(bool(result))
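For illustration (not part of the synced file): an example use of port_has_listener(); the address and port are arbitrary and the 'nc' binary must be installed:

from charmhelpers.contrib.network.ip import port_has_listener

if port_has_listener('127.0.0.1', 3306):
    print('something is listening on 127.0.0.1:3306')
else:
    print('127.0.0.1:3306 is not accepting connections')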

View File

@ -25,10 +25,14 @@ from charmhelpers.core.host import (
)
def add_bridge(name):
def add_bridge(name, datapath_type=None):
''' Add the named bridge to openvswitch '''
log('Creating bridge {}'.format(name))
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
if datapath_type is not None:
cmd += ['--', 'set', 'bridge', name,
'datapath_type={}'.format(datapath_type)]
subprocess.check_call(cmd)
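# Illustrative usage of the new datapath_type parameter (bridge names and the
# 'netdev' value are examples; ovs-vsctl must be installed):
#
#     add_bridge('br-int')                           # default kernel datapath
#     add_bridge('br-data', datapath_type='netdev')  # userspace datapath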
def del_bridge(name):

View File

@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
'cinder-backup']
'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:

View File

@ -27,7 +27,11 @@ import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from keystoneclient.auth.identity import v3 as keystone_id_v3
from keystoneclient import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3
import novaclient.client as nova_client
import pika
import swiftclient
@ -38,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG
ERROR = logging.ERROR
NOVA_CLIENT_VERSION = "2"
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@ -139,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
def validate_user_data(self, expected, actual, api_version=None):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
@ -150,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
for e in expected:
found = False
for act in actual:
if e['name'] == act.name:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
'email': act.email, 'id': act.id}
if api_version == 3:
a['default_project_id'] = getattr(act,
'default_project_id',
'none')
else:
a['tenantId'] = act.tenantId
found = True
ret = self._validate_dict_data(e, a)
if ret:
@ -188,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
tenant=None, api_version=None,
keystone_ip=None):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
service_ip = unit.relation('shared-db',
if not keystone_ip:
keystone_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
if not api_version or api_version == 2:
ep = base_ep + "/v2.0"
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
else:
ep = base_ep + "/v3"
auth = keystone_id_v3.Password(
user_domain_name='admin_domain',
username=user,
password=password,
domain_name='admin_domain',
auth_url=ep,
)
sess = keystone_session.Session(auth=auth)
return keystone_client_v3.Client(session=sess)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
@ -225,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):

View File

@ -20,7 +20,7 @@ import os
import re
import time
from base64 import b64decode
from subprocess import check_call
from subprocess import check_call, CalledProcessError
import six
import yaml
@ -45,6 +45,7 @@ from charmhelpers.core.hookenv import (
INFO,
WARNING,
ERROR,
status_set,
)
from charmhelpers.core.sysctl import create as sysctl_create
@ -410,6 +411,7 @@ class IdentityServiceContext(OSContextGenerator):
auth_host = format_ipv6_addr(auth_host) or auth_host
svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'),
'service_host': serv_host,
'auth_host': auth_host,
@ -418,7 +420,8 @@ class IdentityServiceContext(OSContextGenerator):
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol})
'auth_protocol': auth_protocol,
'api_version': api_version})
if self.context_complete(ctxt):
# NOTE(jamespage) this is required for >= icehouse
@ -1471,7 +1474,110 @@ class NetworkServiceContext(OSContextGenerator):
rdata.get('service_protocol') or 'http',
'auth_protocol':
rdata.get('auth_protocol') or 'http',
'api_version':
rdata.get('api_version') or '2.0',
}
if self.context_complete(ctxt):
return ctxt
return {}
class InternalEndpointContext(OSContextGenerator):
"""Internal endpoint context.
This context provides the endpoint type used for communication between
services e.g. between Nova and Cinder internally. Openstack uses Public
endpoints by default so this allows admins to optionally use internal
endpoints.
"""
def __call__(self):
return {'use_internal_endpoints': config('use-internal-endpoints')}
class AppArmorContext(OSContextGenerator):
"""Base class for apparmor contexts."""
def __init__(self):
self._ctxt = None
self.aa_profile = None
self.aa_utils_packages = ['apparmor-utils']
@property
def ctxt(self):
if self._ctxt is not None:
return self._ctxt
self._ctxt = self._determine_ctxt()
return self._ctxt
def _determine_ctxt(self):
"""
Validate that the aa-profile-mode setting is disable, enforce, or complain.
:return ctxt: Dictionary of the apparmor profile or None
"""
if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
ctxt = {'aa-profile-mode': config('aa-profile-mode')}
else:
ctxt = None
return ctxt
def __call__(self):
return self.ctxt
def install_aa_utils(self):
"""
Install packages required for apparmor configuration.
"""
log("Installing apparmor utils.")
ensure_packages(self.aa_utils_packages)
def manually_disable_aa_profile(self):
"""
Manually disable an apparmor profile.
If aa-profile-mode is set to disabled (the default) this is required because
the template has been written but apparmor is not yet aware of the profile,
so running aa-disable on it fails. Without this the profile would kick
into enforce mode on the next service restart.
"""
profile_path = '/etc/apparmor.d'
disable_path = '/etc/apparmor.d/disable'
if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
os.symlink(os.path.join(profile_path, self.aa_profile),
os.path.join(disable_path, self.aa_profile))
def setup_aa_profile(self):
"""
Setup an apparmor profile.
The ctxt dictionary will contain the apparmor profile mode and
the apparmor profile name.
Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
the apparmor profile.
"""
self()
if not self.ctxt:
log("Not enabling apparmor Profile")
return
self.install_aa_utils()
cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
cmd.append(self.ctxt['aa-profile'])
log("Setting up the apparmor profile for {} in {} mode."
"".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
try:
check_call(cmd)
except CalledProcessError as e:
# If aa-profile-mode is set to disabled (default) manual
# disabling is required as the template has been written but
# apparmor is not yet aware of the profile, so aa-disable on the profile
# fails. If aa-disable learns to read profile files first this can
# be removed.
if self.ctxt['aa-profile-mode'] == 'disable':
log("Manually disabling the apparmor profile for {}."
"".format(self.ctxt['aa-profile']))
self.manually_disable_aa_profile()
return
status_set('blocked', "Apparmor profile {} failed to be set to {}."
"".format(self.ctxt['aa-profile'],
self.ctxt['aa-profile-mode']))
raise e
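For illustration (not part of the synced file): a hedged sketch of how a charm might subclass AppArmorContext, supplying the profile name that setup_aa_profile() expects to find in the context. The class and profile name are hypothetical:

class MyServiceAppArmorContext(AppArmorContext):
    def __init__(self):
        super(MyServiceAppArmorContext, self).__init__()
        # Hypothetical apparmor profile shipped by the charm's templates.
        self.aa_profile = 'usr.bin.my-service'

    def _determine_ctxt(self):
        ctxt = super(MyServiceAppArmorContext, self)._determine_ctxt()
        if ctxt:
            ctxt['aa-profile'] = self.aa_profile
        return ctxt

# Typically called from a hook after the profile template has been rendered:
#     MyServiceAppArmorContext().setup_aa_profile()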

View File

@ -14,16 +14,19 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@ -33,16 +36,19 @@ ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured.
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoint type
"""
@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered:
if not net_addr:
# If no net-splits defined, we expect a single vip
resolved_address = vips[0]
else:
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "

View File

@ -237,10 +237,16 @@ def neutron_plugins():
plugins['midonet']['driver'] = (
'neutron.plugins.midonet.plugin.MidonetPluginV2')
if release >= 'liberty':
midonet_origin = config('midonet-origin')
if midonet_origin is not None and midonet_origin[4:5] == '1':
plugins['midonet']['driver'] = (
'midonet.neutron.plugin_v1.MidonetPluginV2')
plugins['midonet']['server_packages'].remove(
'python-neutron-plugin-midonet')
plugins['midonet']['server_packages'].append(
'python-networking-midonet')
plugins['plumgrid']['driver'] = (
'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
plugins['plumgrid']['server_packages'].remove(
'neutron-plugin-plumgrid')
return plugins

View File

@ -23,8 +23,11 @@ import json
import os
import sys
import re
import itertools
import functools
import six
import tempfile
import traceback
import uuid
import yaml
@ -41,6 +44,7 @@ from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
DEBUG,
INFO,
related_units,
relation_ids,
@ -58,6 +62,7 @@ from charmhelpers.contrib.storage.linux.lvm import (
from charmhelpers.contrib.network.ip import (
get_ipv6_addr,
is_ipv6,
port_has_listener,
)
from charmhelpers.contrib.python.packages import (
@ -65,7 +70,15 @@ from charmhelpers.contrib.python.packages import (
pip_install,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.core.host import (
lsb_release,
mounts,
umount,
service_running,
service_pause,
service_resume,
restart_on_change_helper,
)
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@ -124,7 +137,7 @@ SWIFT_CODENAMES = OrderedDict([
('liberty',
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
['2.5.0']),
['2.5.0', '2.6.0', '2.7.0']),
])
# >= Liberty version->codename mapping
@ -143,6 +156,7 @@ PACKAGE_CODENAMES = {
]),
'keystone': OrderedDict([
('8.0', 'liberty'),
('8.1', 'liberty'),
('9.0', 'mitaka'),
]),
'horizon-common': OrderedDict([
@ -347,12 +361,42 @@ def os_release(package, base='essex'):
def import_key(keyid):
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
key = keyid.strip()
if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
juju_log("Importing ASCII Armor PGP key", level=DEBUG)
with tempfile.NamedTemporaryFile() as keyfile:
with open(keyfile.name, 'w') as fd:
fd.write(key)
fd.write("\n")
cmd = ['apt-key', 'add', keyfile.name]
try:
subprocess.check_call(cmd.split(' '))
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
error_out("Error importing PGP key '%s'" % key)
else:
juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
juju_log("Importing PGP key from keyserver", level=DEBUG)
cmd = ['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
error_out("Error importing PGP key '%s'" % key)
def get_source_and_pgp_key(input):
"""Look for a pgp key ID or ascii-armor key in the given input."""
index = input.rfind('|')
if index < 0:
return input, None
key = input[index + 1:].strip('|')
source = input[:index]
return source, key
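# Illustrative inputs/outputs (URL and key ID are made up):
#
#     get_source_and_pgp_key('ppa:some-team/some-ppa')
#     => ('ppa:some-team/some-ppa', None)
#
#     get_source_and_pgp_key('deb http://archive.example.com/ubuntu trusty main|DEADBEEF')
#     => ('deb http://archive.example.com/ubuntu trusty main', 'DEADBEEF')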
def configure_installation_source(rel):
@ -364,16 +408,16 @@ def configure_installation_source(rel):
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:":
src = rel
src, key = get_source_and_pgp_key(rel)
if key:
import_key(key)
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
src, key = get_source_and_pgp_key(rel)
if key:
import_key(key)
elif l == 1:
src = rel
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
@ -729,7 +773,8 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
os.mkdir(parent_dir)
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
repo_dir = install_remote(
repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv')
@ -828,56 +873,155 @@ def os_workload_status(configs, required_interfaces, charm_func=None):
return wrap
def set_os_workload_status(configs, required_interfaces, charm_func=None):
"""
Set workload status based on complete contexts.
status-set missing or incomplete contexts
and juju-log details of missing required data.
charm_func is a charm specific function to run checking
for charm specific requirements such as a VIP setting.
"""
incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
state = 'active'
missing_relations = []
incomplete_relations = []
message = None
charm_state = None
charm_message = None
def set_os_workload_status(configs, required_interfaces, charm_func=None,
services=None, ports=None):
"""Set the state of the workload status for the charm.
for generic_interface in incomplete_rel_data.keys():
This calls _determine_os_workload_status() to get the new state, message
and sets the status using status_set()
@param configs: a templating.OSConfigRenderer() object
@param required_interfaces: {generic: [specific, specific2, ...]}
@param charm_func: a callable function that returns state, message. The
signature is charm_func(configs) -> (state, message)
@param services: list of strings OR dictionary specifying services/ports
@param ports: OPTIONAL list of port numbers.
@returns state, message: the new workload status, user message
"""
state, message = _determine_os_workload_status(
configs, required_interfaces, charm_func, services, ports)
status_set(state, message)
def _determine_os_workload_status(
configs, required_interfaces, charm_func=None,
services=None, ports=None):
"""Determine the state of the workload status for the charm.
This function returns the new workload status for the charm based
on the state of the interfaces, the paused state and whether the
services are actually running and any specified ports are open.
This checks:
1. if the unit should be paused, that it is actually paused. If so the
state is 'maintenance' + message, else 'broken'.
2. that the interfaces/relations are complete. If they are not then
it sets the state to either 'broken' or 'waiting' and an appropriate
message.
3. If all the relation data is set, then it checks that the actual
services really are running. If not it sets the state to 'broken'.
If everything is okay then the state returns 'active'.
@param configs: a templating.OSConfigRenderer() object
@param required_interfaces: {generic: [specific, specific2, ...]}
@param charm_func: a callable function that returns state, message. The
signature is charm_func(configs) -> (state, message)
@param services: list of strings OR dictionary specifying services/ports
@param ports: OPTIONAL list of port numbers.
@returns state, message: the new workload status, user message
"""
state, message = _ows_check_if_paused(services, ports)
if state is None:
state, message = _ows_check_generic_interfaces(
configs, required_interfaces)
if state != 'maintenance' and charm_func:
# _ows_check_charm_func() may modify the state, message
state, message = _ows_check_charm_func(
state, message, lambda: charm_func(configs))
if state is None:
state, message = _ows_check_services_running(services, ports)
if state is None:
state = 'active'
message = "Unit is ready"
juju_log(message, 'INFO')
return state, message
def _ows_check_if_paused(services=None, ports=None):
"""Check if the unit is supposed to be paused, and if so check that the
services/ports (if passed) are actually stopped/not being listened to.
if the unit isn't supposed to be paused, just return None, None
@param services: OPTIONAL services spec or list of service names.
@param ports: OPTIONAL list of port numbers.
@returns state, message or None, None
"""
if is_unit_paused_set():
state, message = check_actually_paused(services=services,
ports=ports)
if state is None:
# we're paused okay, so set maintenance and return
state = "maintenance"
message = "Paused. Use 'resume' action to resume normal service."
return state, message
return None, None
def _ows_check_generic_interfaces(configs, required_interfaces):
"""Check the complete contexts to determine the workload status.
- Checks for missing or incomplete contexts
- juju log details of missing required data.
- determines the correct workload status
- creates an appropriate message for status_set(...)
if there are no problems then the function returns None, None
@param configs: a templating.OSConfigRenderer() object
@param required_interfaces: {generic_interface: [specific_interface], }
@returns state, message or None, None
"""
incomplete_rel_data = incomplete_relation_data(configs,
required_interfaces)
state = None
message = None
missing_relations = set()
incomplete_relations = set()
for generic_interface, relations_states in incomplete_rel_data.items():
related_interface = None
missing_data = {}
# Related or not?
for interface in incomplete_rel_data[generic_interface]:
if incomplete_rel_data[generic_interface][interface].get('related'):
for interface, relation_state in relations_states.items():
if relation_state.get('related'):
related_interface = interface
missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
# No relation ID for the generic_interface
missing_data = relation_state.get('missing_data')
break
# No relation ID for the generic_interface?
if not related_interface:
juju_log("{} relation is missing and must be related for "
"functionality. ".format(generic_interface), 'WARN')
state = 'blocked'
if generic_interface not in missing_relations:
missing_relations.append(generic_interface)
missing_relations.add(generic_interface)
else:
# Relation ID exists but no related unit
# Relation ID exists but no related unit
if not missing_data:
# Edge case relation ID exists but departing
if ('departed' in hook_name() or 'broken' in hook_name()) \
and related_interface in hook_name():
# Edge case - relation ID exists but departing
_hook_name = hook_name()
if (('departed' in _hook_name or 'broken' in _hook_name) and
related_interface in _hook_name):
state = 'blocked'
if generic_interface not in missing_relations:
missing_relations.append(generic_interface)
missing_relations.add(generic_interface)
juju_log("{} relation's interface, {}, "
"relationship is departed or broken "
"and is required for functionality."
"".format(generic_interface, related_interface), "WARN")
"".format(generic_interface, related_interface),
"WARN")
# Normal case relation ID exists but no related unit
# (joining)
else:
juju_log("{} relations's interface, {}, is related but has"
" no units in the relation."
"".format(generic_interface, related_interface), "INFO")
"".format(generic_interface, related_interface),
"INFO")
# Related unit exists and data missing on the relation
else:
juju_log("{} relation's interface, {}, is related awaiting "
@ -886,9 +1030,8 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
", ".join(missing_data)), "INFO")
if state != 'blocked':
state = 'waiting'
if generic_interface not in incomplete_relations \
and generic_interface not in missing_relations:
incomplete_relations.append(generic_interface)
if generic_interface not in missing_relations:
incomplete_relations.add(generic_interface)
if missing_relations:
message = "Missing relations: {}".format(", ".join(missing_relations))
@ -901,9 +1044,22 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
"".format(", ".join(incomplete_relations))
state = 'waiting'
# Run charm specific checks
if charm_func:
charm_state, charm_message = charm_func(configs)
return state, message
def _ows_check_charm_func(state, message, charm_func_with_configs):
"""Run a custom check function for the charm to see if it wants to
change the state. This is only run if not in 'maintenance' and
tests to see if the new state is more important than the previous
one determined by the interfaces/relations check.
@param state: the previously determined state so far.
@param message: the user-facing message so far.
@param charm_func_with_configs: a callable that returns (state, message)
@returns state, message strings.
"""
if charm_func_with_configs:
charm_state, charm_message = charm_func_with_configs()
if charm_state != 'active' and charm_state != 'unknown':
state = workload_state_compare(state, charm_state)
if message:
@ -912,13 +1068,151 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
message = "{}, {}".format(message, charm_message)
else:
message = charm_message
return state, message
# Set to active if all requirements have been met
if state == 'active':
message = "Unit is ready"
juju_log(message, "INFO")
status_set(state, message)
def _ows_check_services_running(services, ports):
"""Check that the services that should be running are actually running
and that any ports specified are being listened to.
@param services: list of strings OR dictionary specifying services/ports
@param ports: list of ports
@returns state, message: strings or None, None
"""
messages = []
state = None
if services is not None:
services = _extract_services_list_helper(services)
services_running, running = _check_running_services(services)
if not all(running):
messages.append(
"Services not running that should be: {}"
.format(", ".join(_filter_tuples(services_running, False))))
state = 'blocked'
# also verify that the ports that should be open are open
# NB, that ServiceManager objects only OPTIONALLY have ports
map_not_open, ports_open = (
_check_listening_on_services_ports(services))
if not all(ports_open):
# find which service has missing ports. They are in service
# order which makes it a bit easier.
message_parts = {service: ", ".join([str(v) for v in open_ports])
for service, open_ports in map_not_open.items()}
message = ", ".join(
["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
messages.append(
"Services with ports not open that should be: {}"
.format(message))
state = 'blocked'
if ports is not None:
# and we can also check ports which we don't know the service for
ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
if not all(ports_open_bools):
messages.append(
"Ports which should be open, but are not: {}"
.format(", ".join([str(p) for p, v in ports_open
if not v])))
state = 'blocked'
if state is not None:
message = "; ".join(messages)
return state, message
return None, None
def _extract_services_list_helper(services):
"""Extract a OrderedDict of {service: [ports]} of the supplied services
for use by the other functions.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param services: see above
@returns OrderedDict(service: [ports], ...)
"""
if services is None:
return {}
if isinstance(services, dict):
services = services.values()
# either extract the list of services from the dictionary, or if
# it is a simple string, use that. i.e. works with mixed lists.
_s = OrderedDict()
for s in services:
if isinstance(s, dict) and 'service' in s:
_s[s['service']] = s.get('ports', [])
if isinstance(s, str):
_s[s] = []
return _s
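# Illustrative checks, assuming hypothetical service names: every accepted
# "services" shape normalises to an OrderedDict of {service_name: [ports]}.
assert _extract_services_list_helper(['apache2']) == {'apache2': []}
assert _extract_services_list_helper(
    [{'service': 'haproxy', 'ports': [80, 443]}]) == {'haproxy': [80, 443]}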
def _check_running_services(services):
"""Check that the services dict provided is actually running and provide
a list of (service, boolean) tuples for each service.
Returns both a zipped list of (service, boolean) and a list of booleans
in the same order as the services.
@param services: OrderedDict of strings: [ports], one for each service to
check.
@returns [(service, boolean), ...], : results for checks
[boolean] : just the result of the service checks
"""
services_running = [service_running(s) for s in services]
return list(zip(services, services_running)), services_running
def _check_listening_on_services_ports(services, test=False):
"""Check that the unit is actually listening (has the port open) on the
ports that the service specifies are open. If test is True then the
function returns the services with ports that are open rather than
closed.
Returns an OrderedDict of service: ports and a list of booleans
@param services: OrderedDict(service: [port, ...], ...)
@param test: default=False, if False, test for closed, otherwise open.
@returns OrderedDict(service: [port-not-open, ...]...), [boolean]
"""
test = not(not(test)) # ensure test is True or False
all_ports = list(itertools.chain(*services.values()))
ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
map_ports = OrderedDict()
matched_ports = [p for p, opened in zip(all_ports, ports_states)
if opened == test] # essentially opened xor test
for service, ports in services.items():
set_ports = set(ports).intersection(matched_ports)
if set_ports:
map_ports[service] = set_ports
return map_ports, ports_states
def _check_listening_on_ports_list(ports):
"""Check that the ports list given are being listened to
Returns a list of ports being listened to and a list of the
booleans.
@param ports: LIST of port numbers.
@returns [(port_num, boolean), ...], [boolean]
"""
ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
return zip(ports, ports_open), ports_open
def _filter_tuples(services_states, state):
"""Return a simple list from a list of tuples according to the condition
@param services_states: LIST of (string, boolean): service and running
state.
@param state: Boolean to match the tuple against.
@returns [LIST of strings] that matched the tuple RHS.
"""
return [s for s, b in services_states if b == state]
def workload_state_compare(current_workload_state, workload_state):
@ -943,8 +1237,7 @@ def workload_state_compare(current_workload_state, workload_state):
def incomplete_relation_data(configs, required_interfaces):
"""
Check complete contexts against required_interfaces
"""Check complete contexts against required_interfaces
Return dictionary of incomplete relation data.
configs is an OSConfigRenderer object with configs registered
@ -969,19 +1262,13 @@ def incomplete_relation_data(configs, required_interfaces):
'shared-db': {'related': True}}}
"""
complete_ctxts = configs.complete_contexts()
incomplete_relations = []
for svc_type in required_interfaces.keys():
# Avoid duplicates
found_ctxt = False
for interface in required_interfaces[svc_type]:
if interface in complete_ctxts:
found_ctxt = True
if not found_ctxt:
incomplete_relations.append(svc_type)
incomplete_context_data = {}
for i in incomplete_relations:
incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
return incomplete_context_data
incomplete_relations = [
svc_type
for svc_type, interfaces in required_interfaces.items()
if not set(interfaces).intersection(complete_ctxts)]
return {
i: configs.get_incomplete_context_data(required_interfaces[i])
for i in incomplete_relations}
def do_action_openstack_upgrade(package, upgrade_callback, configs):
@ -1042,3 +1329,247 @@ def remote_restart(rel_name, remote_service=None):
relation_set(relation_id=rid,
relation_settings=trigger,
)
def check_actually_paused(services=None, ports=None):
"""Check that services listed in the services object and and ports
are actually closed (not listened to), to verify that the unit is
properly paused.
@param services: See _extract_services_list_helper
@returns status, : string for status (None if okay)
message : string for problem for status_set
"""
state = None
message = None
messages = []
if services is not None:
services = _extract_services_list_helper(services)
services_running, services_states = _check_running_services(services)
if any(services_states):
# there shouldn't be any running so this is a problem
messages.append("these services running: {}"
.format(", ".join(
_filter_tuples(services_running, True))))
state = "blocked"
ports_open, ports_open_bools = (
_check_listening_on_services_ports(services, True))
if any(ports_open_bools):
message_parts = {service: ", ".join([str(v) for v in open_ports])
for service, open_ports in ports_open.items()}
message = ", ".join(
["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
messages.append(
"these service:ports are open: {}".format(message))
state = 'blocked'
if ports is not None:
ports_open, bools = _check_listening_on_ports_list(ports)
if any(bools):
messages.append(
"these ports which should be closed, but are open: {}"
.format(", ".join([str(p) for p, v in ports_open if v])))
state = 'blocked'
if messages:
message = ("Services should be paused but {}"
.format(", ".join(messages)))
return state, message
def set_unit_paused():
"""Set the unit to a paused state in the local kv() store.
This does NOT actually pause the unit
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-paused', True)
def clear_unit_paused():
"""Clear the unit from a paused state in the local kv() store
This does NOT actually restart any services - it only clears the
local state.
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-paused', False)
def is_unit_paused_set():
"""Return the state of the kv().get('unit-paused').
This does NOT verify that the unit really is paused.
To help with units that don't have HookData() (e.g. in testing), return
False if an exception is raised.
"""
try:
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused')))
except:
return False
def pause_unit(assess_status_func, services=None, ports=None,
charm_func=None):
"""Pause a unit by stopping the services and setting 'unit-paused'
in the local kv() store.
Also checks that the services have stopped and ports are no longer
being listened to.
An optional charm_func() can be called that can either raise an
Exception or return a non-None message to indicate that the unit
didn't pause cleanly.
The signature for charm_func is:
charm_func() -> message: string
charm_func() is executed after any services are stopped, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param assess_status_func: (f() -> message: string | None) or None
@param services: OPTIONAL see above
@param ports: OPTIONAL list of ports
@param charm_func: function to run for custom charm pausing.
@returns None
@raises Exception(message) on an error for action_fail().
"""
services = _extract_services_list_helper(services)
messages = []
if services:
for service in services.keys():
stopped = service_pause(service)
if not stopped:
messages.append("{} didn't stop cleanly.".format(service))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
messages.append(str(e))
set_unit_paused()
if assess_status_func:
message = assess_status_func()
if message:
messages.append(message)
if messages:
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
def resume_unit(assess_status_func, services=None, ports=None,
charm_func=None):
"""Resume a unit by starting the services and clearning 'unit-paused'
in the local kv() store.
Also checks that the services have started and ports are being listened to.
An optional charm_func() can be called that can either raise an
Exception or return a non-None message to indicate that the unit
didn't resume cleanly.
The signature for charm_func is:
charm_func() -> message: string
charm_func() is executed after any services are started, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param assess_status_func: (f() -> message: string | None) or None
@param services: OPTIONAL see above
@param ports: OPTIONAL list of ports
@param charm_func: function to run for custom charm resuming.
@returns None
@raises Exception(message) on an error for action_fail().
"""
services = _extract_services_list_helper(services)
messages = []
if services:
for service in services.keys():
started = service_resume(service)
if not started:
messages.append("{} didn't start cleanly.".format(service))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
messages.append(str(e))
clear_unit_paused()
if assess_status_func:
message = assess_status_func()
if message:
messages.append(message)
if messages:
raise Exception("Couldn't resume: {}".format("; ".join(messages)))
def make_assess_status_func(*args, **kwargs):
"""Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the caller. This is so an action that doesn't result in either a
complete pause or complete resume can signal failure with an action_fail()
"""
def _assess_status_func():
state, message = _determine_os_workload_status(*args, **kwargs)
status_set(state, message)
if state not in ['maintenance', 'active']:
return message
return None
return _assess_status_func
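# Illustrative wiring sketch, assuming a hypothetical charm that runs
# 'apache2' on port 8080; both actions share the same assess function built
# from the arguments normally passed to set_os_workload_status().
def hypothetical_pause_action(configs):
    required_interfaces = {'messaging': ['amqp']}
    pause_unit(
        make_assess_status_func(configs, required_interfaces,
                                services=['apache2'], ports=[8080]),
        services=['apache2'], ports=[8080])


def hypothetical_resume_action(configs):
    required_interfaces = {'messaging': ['amqp']}
    resume_unit(
        make_assess_status_func(configs, required_interfaces,
                                services=['apache2'], ports=[8080]),
        services=['apache2'], ports=[8080])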
def pausable_restart_on_change(restart_map, stopstart=False,
restart_functions=None):
"""A restart_on_change decorator that checks to see if the unit is
paused. If it is paused, the decorated function still runs, but no services are restarted.
This is provided as a helper, as the @restart_on_change(...) decorator
is in core.host, yet the openstack specific helpers are in this file
(contrib.openstack.utils). Thus, this needs to be an optional feature
for openstack charms (or charms that wish to use the openstack
pause/resume type features).
It is used as follows:
from contrib.openstack.utils import (
pausable_restart_on_change as restart_on_change)
@restart_on_change(restart_map, stopstart=<boolean>)
def some_hook(...):
pass
see core.utils.restart_on_change() for more details.
@param f: the function to decorate
@param restart_map: the restart map {conf_file: [services]}
@param stopstart: DEFAULT false; whether to stop, start or just restart
@returns decorator to use a restart_on_change with pausability
"""
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
if is_unit_paused_set():
return f(*args, **kwargs)
# otherwise, normal restart_on_change functionality
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap

View File

@ -19,18 +19,33 @@
import os
import subprocess
import sys
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import charm_dir, log
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def pip_execute(*args, **kwargs):
"""Overriden pip_execute() to stop sys.path being changed.
The act of importing main from the pip module seems to cause add wheels
from the /usr/share/python-wheels which are installed by various tools.
This function ensures that sys.path remains the same after the call is
executed.
"""
try:
from pip import main as pip_execute
_path = sys.path
try:
from pip import main as _pip_execute
except ImportError:
apt_update()
apt_install('python-pip')
from pip import main as pip_execute
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
from pip import main as _pip_execute
_pip_execute(*args, **kwargs)
finally:
sys.path = _path
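# Illustrative call, assuming a hypothetical package name; behaves like
# "pip install" but restores sys.path afterwards:
#   pip_execute(['install', '--upgrade', 'some-package'])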
def parse_options(given, available):

View File

@ -24,6 +24,8 @@
# Adam Gandelman <adamg@ubuntu.com>
#
import bisect
import errno
import hashlib
import six
import os
@ -120,6 +122,7 @@ class PoolCreationError(Exception):
"""
A custom error to inform the caller that a pool creation failed. Provides an error message
"""
def __init__(self, message):
super(PoolCreationError, self).__init__(message)
@ -129,6 +132,7 @@ class Pool(object):
An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
"""
def __init__(self, service, name):
self.service = service
self.name = name
@ -161,15 +165,22 @@ class Pool(object):
:return: None
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(cache_pool)
mode = get_cache_mode(self.service, cache_pool)
version = ceph_version()
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
elif mode == 'writeback':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
if version >= '10.1':
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
check_call(pool_forward_cmd)
# Flush the cache and wait for it to return
check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@ -180,38 +191,47 @@ class Pool(object):
:return: int. The number of pgs to use.
"""
validator(value=pool_size, valid_type=int)
osds = get_osds(self.service)
if not osds:
osd_list = get_osds(self.service)
if not osd_list:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
return 200
osd_list_length = len(osd_list)
# Calculate based on Ceph best practices
if osds < 5:
if osd_list_length < 5:
return 128
elif 5 < osds < 10:
elif 5 < osd_list_length < 10:
return 512
elif 10 < osds < 50:
elif 10 < osd_list_length < 50:
return 4096
else:
estimate = (osds * 100) / pool_size
estimate = (osd_list_length * 100) / pool_size
# Return the next nearest power of 2
index = bisect.bisect_right(powers_of_two, estimate)
return powers_of_two[index]
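# Worked example (hypothetical cluster sizes) for the thresholds above:
#   3 OSDs                -> 128 pgs
#   20 OSDs               -> 4096 pgs
#   60 OSDs, pool_size=3  -> estimate (60 * 100) / 3 = 2000 -> next power of two = 2048 pgs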
class ReplicatedPool(Pool):
def __init__(self, service, name, replicas=2):
def __init__(self, service, name, pg_num=None, replicas=2):
super(ReplicatedPool, self).__init__(service=service, name=name)
self.replicas = replicas
if pg_num is None:
self.pg_num = self.get_pgs(self.replicas)
else:
self.pg_num = pg_num
def create(self):
if not pool_exists(self.service, self.name):
# Create it
pgs = self.get_pgs(self.replicas)
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(self.pg_num)]
try:
check_call(cmd)
# Set the pool replica size
update_pool(client=self.service,
pool=self.name,
settings={'size': str(self.replicas)})
except CalledProcessError:
raise
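# Illustrative sketch, assuming a hypothetical 'admin' cephx user and pool
# name; the new pg_num argument overrides the calculated placement-group count:
#   ReplicatedPool(service='admin', name='data', replicas=3).create()
#   ReplicatedPool(service='admin', name='data', pg_num=64, replicas=3).create()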
@ -241,7 +261,7 @@ class ErasurePool(Pool):
pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
# Create it
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
'erasure', self.erasure_code_profile]
try:
check_call(cmd)
@ -252,6 +272,134 @@ class ErasurePool(Pool):
Returns json formatted output"""
def get_mon_map(service):
"""
Returns the current monitor map.
:param service: six.string_types. The Ceph user name to run the command under
:return: dict parsed from the json mon_status output. :raise: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
"""
try:
mon_status = check_output(
['ceph', '--id', service,
'mon_status', '--format=json'])
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
raise
def hash_monitor_names(service):
"""
Uses the get_mon_map() function to get information about the monitor
cluster.
Hash the name of each monitor. Return a sorted list of monitor hashes
in an ascending order.
:param service: six.string_types. The Ceph user name to run the command under
:returns: list. A sorted list of hashed monitor names, or None if there
are no monitors. Each underlying monitor map entry looks like: {
'name': 'ip-172-31-13-165',
'rank': 0,
'addr': '172.31.13.165:6789/0'}
"""
try:
hash_list = []
monitor_list = get_mon_map(service=service)
if monitor_list['monmap']['mons']:
for mon in monitor_list['monmap']['mons']:
hash_list.append(
hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
return sorted(hash_list)
else:
return None
except (ValueError, CalledProcessError):
raise
def monitor_key_delete(service, key):
"""
Delete a key and value pair from the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to delete.
"""
try:
check_output(
['ceph', '--id', service,
'config-key', 'del', str(key)])
except CalledProcessError as e:
log("Monitor config-key put failed with message: {}".format(
e.output))
raise
def monitor_key_set(service, key, value):
"""
Sets a key value pair on the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to set.
:param value: The value to set. This will be converted to a string
before setting
"""
try:
check_output(
['ceph', '--id', service,
'config-key', 'put', str(key), str(value)])
except CalledProcessError as e:
log("Monitor config-key put failed with message: {}".format(
e.output))
raise
def monitor_key_get(service, key):
"""
Gets the value of an existing key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for.
:return: Returns the value of that key or None if not found.
"""
try:
output = check_output(
['ceph', '--id', service,
'config-key', 'get', str(key)])
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
e.output))
return None
def monitor_key_exists(service, key):
"""
Searches for the existence of a key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for
:return: Returns True if the key exists, False if not and raises an
exception if an unknown error occurs. :raise: CalledProcessError if
an unknown error occurs
"""
try:
check_call(
['ceph', '--id', service,
'config-key', 'exists', str(key)])
# A successful call means the key exists; Ceph returns
# ENOENT when the key is not found
return True
except CalledProcessError as e:
if e.returncode == errno.ENOENT:
return False
else:
log("Unknown error from ceph config-get exists: {} {}".format(
e.returncode, e.output))
raise
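# Illustrative sketch, assuming a hypothetical 'admin' cephx user and flag
# name; the config-key helpers can implement a simple cluster-wide flag.
def hypothetical_mark_upgraded(service='admin', flag='upgraded-to-jewel'):
    # set the flag once, then read it back from the monitor cluster
    if not monitor_key_exists(service, flag):
        monitor_key_set(service, flag, 'true')
    return monitor_key_get(service, flag)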
def get_erasure_profile(service, name):
"""
:param service: six.string_types. The Ceph user name to run the command under
@ -322,7 +470,8 @@ def set_pool_quota(service, pool_name, max_bytes):
:return: None. Can raise CalledProcessError
"""
# Set a byte quota on a RADOS pool in ceph.
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
'max_bytes', str(max_bytes)]
try:
check_call(cmd)
except CalledProcessError:
@ -343,7 +492,25 @@ def remove_pool_quota(service, pool_name):
raise
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
def remove_erasure_profile(service, profile_name):
"""
Remove an existing erasure code profile. Please see
http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
for more details
:param service: six.string_types. The Ceph user name to run the command under
:param profile_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
profile_name]
try:
check_call(cmd)
except CalledProcessError:
raise
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
failure_domain='host',
data_chunks=2, coding_chunks=1,
locality=None, durability_estimator=None):
"""
@ -448,7 +615,7 @@ def pool_exists(service, name):
except CalledProcessError:
return False
return name in out
return name in out.split()
def get_osds(service):

View File

@ -64,8 +64,8 @@ def is_device_mounted(device):
:returns: boolean: True if the path represents a mounted device, False if
it doesn't.
'''
is_partition = bool(re.search(r".*[0-9]+\b", device))
out = check_output(['mount']).decode('UTF-8')
if is_partition:
return bool(re.search(device + r"\b", out))
return bool(re.search(device + r"[0-9]*\b", out))
try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
except:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))
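# Illustrative call, assuming a hypothetical block device; with the
# lsblk-based check a device counts as mounted if it, or any of its
# partitions, has a mount point:
#   is_device_mounted('/dev/vdb')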

View File

@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status):
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def resource_get(name):
"""used to fetch the resource path of the given name.
<name> must match a name of defined resource in metadata.yaml
returns either a path or False if resource not available
"""
if not name:
return False
cmd = ['resource-get', name]
try:
return subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError:
return False
@cached
def juju_version():
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
@ -976,3 +994,16 @@ def _run_atexit():
for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs)
del _atexit[:]
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation or extra-binding
:return: string. The primary IP address for the named binding
:raise: NotImplementedError if run on Juju < 2.0
'''
cmd = ['network-get', '--primary-address', binding]
return subprocess.check_output(cmd).strip()
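# Illustrative call, assuming a hypothetical 'public' binding; requires Juju 2.0+:
#   addr = network_get_primary_address('public')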

View File

@ -30,6 +30,8 @@ import random
import string
import subprocess
import hashlib
import functools
import itertools
from contextlib import contextmanager
from collections import OrderedDict
@ -126,6 +128,13 @@ def service(action, service_name):
return subprocess.call(cmd) == 0
def systemv_services_running():
output = subprocess.check_output(
['service', '--status-all'],
stderr=subprocess.STDOUT).decode('UTF-8')
return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
def service_running(service_name):
"""Determine whether a system service is running"""
if init_is_systemd():
@ -138,10 +147,14 @@ def service_running(service_name):
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running 'start/running'
if ("start/running" in output or "is running" in output or
"up and running" in output):
return True
else:
# Check System V scripts init script return codes
if service_name in systemv_services_running():
return True
return False
@ -410,7 +423,7 @@ class ChecksumError(ValueError):
pass
def restart_on_change(restart_map, stopstart=False):
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
"""Restart services based on configuration files changing
This function is used a decorator, for example::
@ -428,27 +441,58 @@ def restart_on_change(restart_map, stopstart=False):
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
@param restart_map: {path_file_name: [service_name, ...]}
@param stopstart: DEFAULT false; whether to stop, start OR restart
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result from decorated function
"""
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
checksums = {path: path_hash(path) for path in restart_map}
f(*args, **kwargs)
restarts = []
for path in restart_map:
if path_hash(path) != checksums[path]:
restarts += restart_map[path]
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
restart_functions=None):
"""Helper function to perform the restart_on_change function.
This is provided for decorators to restart services if files described
in the restart_map have changed after an invocation of lambda_f().
@param lambda_f: function to call.
@param restart_map: {file: [service, ...]}
@param stopstart: whether to stop, start or restart a service
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result of lambda_f()
"""
if restart_functions is None:
restart_functions = {}
checksums = {path: path_hash(path) for path in restart_map}
r = lambda_f()
# create a list of lists of the services to restart
restarts = [restart_map[path]
for path in restart_map
if path_hash(path) != checksums[path]]
# create a flat list of ordered services without duplicates from lists
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
if services_list:
actions = ('stop', 'start') if stopstart else ('restart',)
for service_name in services_list:
if service_name in restart_functions:
restart_functions[service_name](service_name)
else:
for action in actions:
service(action, service_name)
return r
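# Illustrative sketch, assuming hypothetical config/service names; the
# restart_functions map substitutes a custom restart (here a reload) for
# selected services while the rest go through the normal service() call.
def hypothetical_reload_haproxy(service_name):
    service('reload', service_name)

# restart_on_change_helper(
#     (lambda: hypothetical_write_configs()),
#     {'/etc/haproxy/haproxy.cfg': ['haproxy']},
#     restart_functions={'haproxy': hypothetical_reload_haproxy})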
def lsb_release():
"""Return /etc/lsb-release in a dict"""
d = {}