From c50fb62b97cffa53ca08d645199341da36d42380 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 14 Feb 2017 11:45:17 -0800 Subject: [PATCH] Pre-release charm-helpers sync 17.02 Get each charm up to date with lp:charm-helpers for release testing. Change-Id: I2b3e5889be3f509ac8b0f317387feba12314cc11 --- charm-helpers-tests.yaml | 1 + hooks/charmhelpers/contrib/network/ip.py | 6 +- hooks/charmhelpers/contrib/openstack/utils.py | 70 +- .../contrib/storage/linux/ceph.py | 16 +- hooks/charmhelpers/core/hookenv.py | 45 + hooks/charmhelpers/core/host.py | 227 +++- hooks/charmhelpers/osplatform.py | 6 + tests/basic_deployment.py | 2 - tests/charmhelpers/contrib/amulet/utils.py | 3 +- .../contrib/openstack/amulet/utils.py | 174 ++- tests/charmhelpers/core/__init__.py | 13 + tests/charmhelpers/core/decorators.py | 55 + tests/charmhelpers/core/files.py | 43 + tests/charmhelpers/core/fstab.py | 132 ++ tests/charmhelpers/core/hookenv.py | 1068 +++++++++++++++++ tests/charmhelpers/core/host.py | 918 ++++++++++++++ .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 + .../charmhelpers/core/host_factory/ubuntu.py | 56 + tests/charmhelpers/core/hugepage.py | 69 ++ tests/charmhelpers/core/kernel.py | 72 ++ .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + tests/charmhelpers/core/services/__init__.py | 16 + tests/charmhelpers/core/services/base.py | 351 ++++++ tests/charmhelpers/core/services/helpers.py | 290 +++++ tests/charmhelpers/core/strutils.py | 70 ++ tests/charmhelpers/core/sysctl.py | 54 + tests/charmhelpers/core/templating.py | 84 ++ tests/charmhelpers/core/unitdata.py | 518 ++++++++ 31 files changed, 4387 insertions(+), 58 deletions(-) create mode 100644 tests/charmhelpers/core/__init__.py create mode 100644 tests/charmhelpers/core/decorators.py create mode 100644 tests/charmhelpers/core/files.py create mode 100644 tests/charmhelpers/core/fstab.py create mode 100644 tests/charmhelpers/core/hookenv.py create mode 100644 tests/charmhelpers/core/host.py create mode 100644 tests/charmhelpers/core/host_factory/__init__.py create mode 100644 tests/charmhelpers/core/host_factory/centos.py create mode 100644 tests/charmhelpers/core/host_factory/ubuntu.py create mode 100644 tests/charmhelpers/core/hugepage.py create mode 100644 tests/charmhelpers/core/kernel.py create mode 100644 tests/charmhelpers/core/kernel_factory/__init__.py create mode 100644 tests/charmhelpers/core/kernel_factory/centos.py create mode 100644 tests/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 tests/charmhelpers/core/services/__init__.py create mode 100644 tests/charmhelpers/core/services/base.py create mode 100644 tests/charmhelpers/core/services/helpers.py create mode 100644 tests/charmhelpers/core/strutils.py create mode 100644 tests/charmhelpers/core/sysctl.py create mode 100644 tests/charmhelpers/core/templating.py create mode 100644 tests/charmhelpers/core/unitdata.py diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml index 48b12f6..e506325 100644 --- a/charm-helpers-tests.yaml +++ b/charm-helpers-tests.yaml @@ -3,3 +3,4 @@ destination: tests/charmhelpers include: - contrib.amulet - contrib.openstack.amulet + - core diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 2d2026e..e141fc1 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -424,7 +424,11 @@ def ns_query(address): else: return 
None - answers = dns.resolver.query(address, rtype) + try: + answers = dns.resolver.query(address, rtype) + except dns.resolver.NXDOMAIN: + return None + if answers: return str(answers[0]) return None diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 6d544e7..80219d6 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ SWIFT_CODENAMES = OrderedDict([ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0']), + ['2.11.0', '2.12.0']), ]) # >= Liberty version->codename mapping @@ -549,9 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', - 'zesty': 'zesty-updates/ocata', - 'zesty/updates': 'xenial-updates/ocata', - 'zesty/proposed': 'xenial-proposed/ocata', + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', } try: @@ -1119,6 +1119,35 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(service_source, service_dest) +def git_determine_usr_bin(): + """Return the /usr/bin path for Apache2 config. + + The /usr/bin path will be located in the virtualenv if the charm + is configured to deploy from source. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') + else: + return '/usr/bin' + + +def git_determine_python_path(): + """Return the python-path for Apache2 config. + + Returns 'None' unless the charm is configured to deploy from source, + in which case the path of the virtualenv's site-packages is returned. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), + 'lib/python2.7/site-packages') + else: + return None + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1925,3 +1954,36 @@ def os_application_version_set(package): application_version_set(os_release(package)) else: application_version_set(application_version) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package, base='icehouse') + if not _release: + _release = get_os_codename_install_source(source) + + # TODO: this should be changed to a numeric comparison using a known list + # of releases and comparing by index. 
+ return _release >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index edb536c..ae7f3f9 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ from subprocess import ( ) from charmhelpers.core.hookenv import ( config, + service_name, local_unit, relation_get, relation_ids, @@ -1043,8 +1044,18 @@ class CephBrokerRq(object): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools + """ + self.ops.append({'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, 'name': key_name or service_name(), + 'group-permission': permission}) + def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None): + weight=None, group=None, namespace=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1058,7 +1069,8 @@ class CephBrokerRq(object): self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight}) + 'weight': weight, 'group': group, + 'group-namespace': namespace}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 94fc996..e44e22b 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1021,3 +1035,34 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. 
May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 04cadb3..edbb72f 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -54,38 +54,138 @@ elif __platform__ == "centos": cmp_pkgrevno, ) # flake8: noqa -- ignore F401 for this import +UPDATEDB_PATH = '/etc/updatedb.conf' -def service_start(service_name): - """Start a system service""" - return service('start', service_name) +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('start', service_name, **kwargs) -def service_stop(service_name): - """Stop a system service""" - return service('stop', service_name) +def service_stop(service_name, **kwargs): + """Stop a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
+ """ + return service('stop', service_name, **kwargs) -def service_restart(service_name): - """Restart a system service""" +def service_restart(service_name, **kwargs): + """Restart a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_restart('ceph-osd', id=4) + + :param service_name: the name of the service to restart + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ return service('restart', service_name) -def service_reload(service_name, restart_on_failure=False): +def service_reload(service_name, restart_on_failure=False, **kwargs): """Reload a system service, optionally falling back to restart if - reload fails""" - service_result = service('reload', service_name) + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) if not service_result and restart_on_failure: - service_result = service('restart', service_name) + service_result = service('restart', service_name, **kwargs) return service_result -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): """Pause a system service. - Stop it, and prevent it from starting again at boot.""" + Stop it, and prevent it from starting again at boot. 
+ + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ stopped = True - if service_running(service_name): - stopped = service_stop(service_name) + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -106,10 +206,19 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d"): + initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service""" + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -126,19 +235,28 @@ def service_resume(service_name, init_dir="/etc/init", "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}".format( service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) - started = service_running(service_name) if not started: - started = service_start(service_name) + started = service_start(service_name, **kwargs) return started -def service(action, service_name): - """Control a system service""" +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ if init_is_systemd(): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -146,15 +264,26 @@ _UPSTART_CONF = "/etc/init/{}.conf" _INIT_D_CONF = "/etc/init.d/{}" -def service_running(service_name): - """Determine whether a system service is running""" +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. 
+ """ if init_is_systemd(): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): try: - output = subprocess.check_output( - ['status', service_name], + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False @@ -306,15 +435,17 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) -def rsync(from_path, to_path, flags='-r', options=None): +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd cmd.extend(options) cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() def symlink(source, destination): @@ -684,7 +815,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also Chown links if True + :param bool follow_links: Also follow and chown links if True :param bool chowntopdir: Also chown path itself if True """ uid = pwd.getpwnam(owner).pw_uid @@ -698,7 +829,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): broken_symlink = os.path.lexists(path) and not os.path.exists(path) if not broken_symlink: chown(path, uid, gid) - for root, dirs, files in os.walk(path): + for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) @@ -718,6 +849,20 @@ def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + def get_total_ram(): """The total amount of system RAM in bytes. 
@@ -749,3 +894,25 @@ def is_container(): else: # Detect using upstart container file marker return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py index ea490bb..d9a4d5c 100644 --- a/hooks/charmhelpers/osplatform.py +++ b/hooks/charmhelpers/osplatform.py @@ -8,12 +8,18 @@ def get_platform(): will be returned (which is the name of the module). This string is used to decide which platform module should be imported. """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warings *not* disabled, as we certainly need to fix this. tuple_platform = platform.linux_distribution() current_platform = tuple_platform[0] if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 66cb8ec..a1e96f1 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -76,13 +76,11 @@ class CephBasicDeployment(OpenStackAmuletDeployment): 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } - radosgw_config = {"use-embedded-webserver": True} proxy_config = { 'source': self.source } configs = {'ceph-mon': ceph_config, 'ceph-osd': ceph_osd_config, - 'ceph-radosgw': radosgw_config, 'ceph-proxy': proxy_config} super(CephBasicDeployment, self)._configure_services(configs) diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py index 8e13ab1..f9e4c3a 100644 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -148,7 +148,8 @@ class AmuletUtils(object): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba83..401c032 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import re import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ import swiftclient from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG 
ERROR = logging.ERROR @@ -303,6 +305,46 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. + """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ class OpenStackAmuletUtils(AmuletUtils): ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ class OpenStackAmuletUtils(AmuletUtils): if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, 
password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ class OpenStackAmuletUtils(AmuletUtils): else: msg = 'No message retrieved.' amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. 
{} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/tests/charmhelpers/core/__init__.py b/tests/charmhelpers/core/__init__.py new file mode 100644 index 0000000..d7567b8 --- /dev/null +++ b/tests/charmhelpers/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/charmhelpers/core/decorators.py b/tests/charmhelpers/core/decorators.py new file mode 100644 index 0000000..6ad41ee --- /dev/null +++ b/tests/charmhelpers/core/decorators.py @@ -0,0 +1,55 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. 
+ """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/tests/charmhelpers/core/files.py b/tests/charmhelpers/core/files.py new file mode 100644 index 0000000..fdd82b7 --- /dev/null +++ b/tests/charmhelpers/core/files.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/tests/charmhelpers/core/fstab.py b/tests/charmhelpers/core/fstab.py new file mode 100644 index 0000000..d9fa915 --- /dev/null +++ b/tests/charmhelpers/core/fstab.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +__author__ = 'Jorge Niedbalski R. 
' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py new file mode 100644 index 0000000..e44e22b --- /dev/null +++ b/tests/charmhelpers/core/hookenv.py @@ -0,0 +1,1068 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. 
+# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from functools import wraps +import glob +import os +import json +import yaml +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? 
+ >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. 
+ if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. + with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] + return [] + + +@cached +def related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json represenation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + rel_types = [] + md = metadata() + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + return rel_types + + +@cached +def peer_relation_id(): + '''Get the peers relation id if a peers relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + 
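A short sketch of how the relation helpers above compose inside a hook (illustrative only, not part of the sync); the 'cluster' relation name is hypothetical rather than taken from this charm's metadata.

    # Illustrative sketch: read private-address from every remote unit on a
    # hypothetical 'cluster' relation.
    from charmhelpers.core.hookenv import relation_ids, related_units, relation_get

    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            addr = relation_get('private-address', unit=unit, rid=rid)
            # addr is None until the remote unit publishes it
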
+@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). 
If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
+ if config_save is not None: + config().implicit_save = config_save + + def register(self, name, function): + """Register a hook""" + self._hooks[name] = function + + def execute(self, args): + """Execute a registered hook based on args[0]""" + _run_atstart() + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + """Decorator, registering them as hooks""" + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + """Return the root directory of the current charm""" + return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to failed and sets the error message. + + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) + + +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + +def status_set(workload_state, message): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message unstead. + + workload_state -- valid juju workload state. 
+ message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" + + """ + cmd = ['status-get', "--format=json", "--include-data"] + try: + raw_status = subprocess.check_output(cmd) + except OSError as e: + if e.errno == errno.ENOENT: + return ('unknown', "") + else: + raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + @wraps(f) + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. """ + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. 
The klass and pid provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """Update the current status of a registered payload. + The klass and pid provided must match a payload that has been previously + registered with juju using payload-register. The status must be one of the + following: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def resource_get(name): + """Fetch the resource path of the given name. + + The name must match a resource defined in metadata.yaml. + + Returns either a path or False if the resource is not available. + """ + if not name: + return False + + cmd = ['resource-get', name] + try: + return subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError: + return False + + +@cached +def juju_version(): + """Full version string (eg. '1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. In particular: + + - Run common code before all of your hooks, such as logging + the hook name or interesting relation data. + - Defer object or module initialization that requires a hook + context until we know there actually is a hook context, + making testing easier. + - Rather than requiring charm authors to include boilerplate to + invoke your helper's behavior, have it run automatically if + your object is instantiated or module imported. + + This is not at all useful after your hook framework has been launched. + ''' + global _atstart + _atstart.append((callback, args, kwargs)) + + +def atexit(callback, *args, **kwargs): + '''Schedule a callback to run on successful hook completion. + + Callbacks are run in the reverse order that they were added.''' + _atexit.append((callback, args, kwargs)) + + +def _run_atstart(): + '''Hook frameworks must invoke this before running the main hook body.''' + global _atstart + for callback, args, kwargs in _atstart: + callback(*args, **kwargs) + del _atstart[:] + + +def _run_atexit(): + '''Hook frameworks must invoke this after the main hook body has + successfully completed. Do not invoke it if the hook fails.''' + global _atexit + for callback, args, kwargs in reversed(_atexit): + callback(*args, **kwargs) + del _atexit[:] + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get_primary_address(binding): + ''' + Retrieve the primary network address for a named binding + + :param binding: string. The name of a relation or extra-binding + :return: string.
The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py new file mode 100644 index 0000000..edbb72f --- /dev/null +++ b/tests/charmhelpers/core/host.py @@ -0,0 +1,918 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import re +import pwd +import glob +import grp +import random +import string +import subprocess +import hashlib +import functools +import itertools +import six + +from contextlib import contextmanager +from collections import OrderedDict +from .hookenv import log +from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import + +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. 
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be started. The follow- + ing example starts the ceph-osd service for instance id=4: + + service_start('ceph-osd', id=4) + + :param service_name: the name of the service to start + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('start', service_name, **kwargs) + + +def service_stop(service_name, **kwargs): + """Stop a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be stopped. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('stop', service_name, **kwargs) + + +def service_restart(service_name, **kwargs): + """Restart a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_restart('ceph-osd', id=4) + + :param service_name: the name of the service to restart + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + return service('restart', service_name, **kwargs) + + +def service_reload(service_name, restart_on_failure=False, **kwargs): + """Reload a system service, optionally falling back to restart if + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them.
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) + if not service_result and restart_on_failure: + service_result = service('restart', service_name, **kwargs) + return service_result + + +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): + """Pause a system service. + + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
+ """ + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) + + if not started: + started = service_start(service_name, **kwargs) + return started + + +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + return subprocess.call(cmd) == 0 + + +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" + + +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. + """ + if init_is_systemd(): + return service('is-active', service_name) + else: + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): + # Check System V scripts init script return codes + return service('status', service_name) + return False + + +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + os.makedirs(realpath, perms) + elif not path_exists: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) + + +def write_file(path, content, owner='root', group='root', perms=0o444): + """Create or overwrite a file with the contents of a byte string.""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab""" + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file""" + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + + if persist: + return fstab_add(device, mountpoint, filesystem, options=options) + return True + + +def umount(mountpoint, persist=False): + """Unmount a filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting 
{}\n{}'.format(mountpoint, e.output)) + return False + + if persist: + return fstab_remove(mountpoint) + return True + + +def mounts(): + """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + +def file_hash(path, hash_type='md5'): + """Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash algorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ + if os.path.exists(path): + h = getattr(hashlib, hash_type)() + with open(path, 'rb') as source: + h.update(source.read()) + return h.hexdigest() + else: + return None + + +def path_hash(path): + """Generate a hash checksum of all files matching 'path'. Standard + wildcards like '*' and '?' are supported, see documentation for the 'glob' + module for more information. + + :return: dict: A { filename: hash } dictionary for all matched files. + Empty if none found. + """ + return { + filename: file_hash(filename) + for filename in glob.iglob(path) + } + + +def check_hash(path, checksum, hash_type='md5'): + """Validate a file using a cryptographic checksum. + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash algorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + """A class derived from ValueError to indicate the checksum failed.""" + pass + + +def restart_on_change(restart_map, stopstart=False, restart_functions=None): + """Restart services based on configuration files changing + + This function is used as a decorator, for example:: + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ], + '/etc/apache/sites-enabled/*': [ 'apache2' ] + }) + def config_changed(): + pass # your code here + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. The apache2 service would be + restarted if any file matching the pattern got changed, created + or removed. Standard wildcards are supported, see documentation + for the 'glob' module for more information. + + @param restart_map: {path_file_name: [service_name, ...]} + @param stopstart: DEFAULT false; whether to stop, start OR restart + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} + @returns result from decorated function + """ + def wrap(f): + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), restart_map, stopstart, + restart_functions) + return wrapped_f + return wrap + + +def restart_on_change_helper(lambda_f, restart_map, stopstart=False, + restart_functions=None): + """Helper function to perform the restart_on_change function.
+ + This is provided for decorators to restart services if files described + in the restart_map have changed after an invocation of lambda_f(). + + @param lambda_f: function to call. + @param restart_map: {file: [service, ...]} + @param stopstart: whether to stop, start or restart a service + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} + @returns result of lambda_f() + """ + if restart_functions is None: + restart_functions = {} + checksums = {path: path_hash(path) for path in restart_map} + r = lambda_f() + # create a list of lists of the services to restart + restarts = [restart_map[path] + for path in restart_map + if path_hash(path) != checksums[path]] + # create a flat list of ordered services without duplicates from lists + services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) + if services_list: + actions = ('stop', 'start') if stopstart else ('restart',) + for service_name in services_list: + if service_name in restart_functions: + restart_functions[service_name](service_name) + else: + for action in actions: + service(action, service_name) + return r + + +def pwgen(length=None): + """Generate a random pasword.""" + if length is None: + # A random length is ok to use a weak PRNG + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.ascii_letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the + # actual password + random_generator = random.SystemRandom() + random_chars = [ + random_generator.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) + + +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. 
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): + """Return a list of nics of given type(s)""" + if isinstance(nic_type, six.string_types): + int_types = [nic_type] + else: + int_types = nic_type + + interfaces = [] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') + for line in ip_output: + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) + + return interfaces + + +def set_nic_mtu(nic, mtu): + """Set the Maximum Transmission Unit (MTU) on a network interface.""" + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr + + +@contextmanager +def chdir(directory): + """Change the current working directory to a different directory for a code + block and return the previous directory after the block exits. Useful to + run commands from a specificed directory. + + :param str directory: The directory path to change to for this context. + """ + cur = os.getcwd() + try: + yield os.chdir(directory) + finally: + os.chdir(cur) + + +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. 
+ :param bool follow_links: Also follow and chown links if True + :param bool chowntopdir: Also chown path itself if True + """ + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) + for root, dirs, files in os.walk(path, followlinks=follow_links): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ + chownr(path, owner, group, follow_links=False) + + +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
+ raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/tests/charmhelpers/core/host_factory/__init__.py b/tests/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/charmhelpers/core/host_factory/centos.py b/tests/charmhelpers/core/host_factory/centos.py new file mode 100644 index 0000000..902d469 --- /dev/null +++ b/tests/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 0000000..8c66af5 --- /dev/null +++ b/tests/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/tests/charmhelpers/core/hugepage.py b/tests/charmhelpers/core/hugepage.py new file mode 100644 index 0000000..54b5b5e --- /dev/null +++ b/tests/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. 
+ + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/tests/charmhelpers/core/kernel.py b/tests/charmhelpers/core/kernel.py new file mode 100644 index 0000000..2d40452 --- /dev/null +++ b/tests/charmhelpers/core/kernel.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import re +import subprocess + +from charmhelpers.osplatform import get_platform +from charmhelpers.core.hookenv import ( + log, + INFO +) + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + subprocess.check_call(cmd) + if persist: + persistent_modprobe(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return subprocess.check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return subprocess.check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 diff --git a/tests/charmhelpers/core/kernel_factory/__init__.py b/tests/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/charmhelpers/core/kernel_factory/centos.py b/tests/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 0000000..1c402c1 --- /dev/null +++ b/tests/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/tests/charmhelpers/core/kernel_factory/ubuntu.py b/tests/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 0000000..3de372f --- /dev/null +++ b/tests/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module + "\n") + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/tests/charmhelpers/core/services/__init__.py b/tests/charmhelpers/core/services/__init__.py new file mode 100644 index 0000000..61fd074 --- /dev/null +++ b/tests/charmhelpers/core/services/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py new file mode 100644 index 0000000..ca9dc99 --- /dev/null +++ b/tests/charmhelpers/core/services/base.py @@ -0,0 +1,351 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import json +from inspect import getargspec +from collections import Iterable, OrderedDict + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. 
+ + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = OrderedDict() + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. 
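+
+        A minimal provider sketch (illustrative only; ``WebsiteProvider`` is
+        a made-up name, not part of charm-helpers)::
+
+            class WebsiteProvider(object):
+                name = 'website'
+
+                def provide_data(self, remote_service, service_ready):
+                    # Accepting both optional parameters means they will be
+                    # passed in (the argspec is inspected below).
+                    return {'hostname': hookenv.unit_get('private-address')}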
+ """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/tests/charmhelpers/core/services/helpers.py b/tests/charmhelpers/core/services/helpers.py new file mode 100644 index 0000000..3e6e30d --- /dev/null +++ b/tests/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. 
The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = None + interface = None + + def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + + if name is not None: + self.name = name + if additional_required_keys: + self.required_keys.extend(additional_required_keys) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. 
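+
+    A usage sketch (illustrative; 'myapp' and the file names are made up)::
+
+        manager = ServiceManager([{
+            'service': 'myapp',
+            'required_data': [MysqlRelation()],
+            'data_ready': template(source='myapp.conf.j2',
+                                   target='/etc/myapp.conf'),
+        }])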
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
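+
+    A sketch of a service definition entry using it (the service and file
+    names are illustrative)::
+
+        'data_ready': TemplateCallback(
+            source='myapp.conf.j2',
+            target='/etc/myapp/myapp.conf',
+            on_change_action=(lambda: host.service_reload('myapp'))),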
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to (or None) + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader + + def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) + service = manager.get_service(service_name) + context = {'ctx': {}} + for ctx in service.get('required_data', []): + context.update(ctx) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() + + return result + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/tests/charmhelpers/core/strutils.py b/tests/charmhelpers/core/strutils.py new file mode 100644 index 0000000..dd9b971 --- /dev/null +++ b/tests/charmhelpers/core/strutils.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
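+
+    The value must be an integer followed by one of the suffixes listed in
+    ``BYTE_POWER`` below (K, KB, M, MB, ..., PB), e.g. (illustrative)::
+
+        bytes_from_string('10KB')   # -> 10240
+        bytes_from_string('1G')     # -> 1073741824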
+ + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py new file mode 100644 index 0000000..6e413e3 --- /dev/null +++ b/tests/charmhelpers/core/sysctl.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) + +__author__ = 'Jorge Niedbalski R. ' + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict_parsed.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/tests/charmhelpers/core/templating.py b/tests/charmhelpers/core/templating.py new file mode 100644 index 0000000..7b801a3 --- /dev/null +++ b/tests/charmhelpers/core/templating.py @@ -0,0 +1,84 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. 
It can also be `None`, in which + case no file will be written. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + The rendered template will be written to the file as well as being returned + as a string. + + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py new file mode 100644 index 0000000..54ec969 --- /dev/null +++ b/tests/charmhelpers/core/unitdata.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import itertools +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are not persisted unless :meth:`flush` is called. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. 
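+
+    A quick sketch (the path is illustrative)::
+
+        db = Storage('/tmp/example-state.db')
+        db.set('answer', 42)      # returns the value that was set
+        db.flush()                # changes are not persisted until flush()
+        db.get('answer')          # -> 42
+        db.close()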
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
+ + Records all unit information, and stores deltas for processing + by the hook. + + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', dict(data['env'])) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV
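+
+
+# Typical access pattern (an illustrative sketch; it mirrors the module
+# docstring above):
+#
+#     db = kv()                       # shared, lazily-created Storage()
+#     with db.hook_scope('install'):  # committed automatically on success
+#         db.set('installed', True)
+#     db.get('installed')             # -> True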