From 591c084532c2effea00516d115c255ef10c26730 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Apr 2018 17:50:09 +0100 Subject: [PATCH] Add support for block device encryption Add new secrets-storage relation to vault, supporting the use of block device encryption using dm-crypt/vaultlocker. Prepared devices are now recorded in the local unit kv store; this information is used to provide a list of configured block devices to the swift-proxy charm, rather than the previous best guess provided by determine_block_devices. This allows us to use the dm-crypt device name, rather than the underlying block device. Encrypted block devices are unlocked on boot using vaultlocker-decrypt systemd units (enabled by vaultlocker); /etc/fstab entries for such devices make use of a x-systemd.requires option to ensure that the block device is unlocked prior to attempting to mount it. Add new storage binding to allow charm to be used with Juju storage. Add new ephemeral-unmount configuration option to allow cloud ephemeral storage to be used for testing purposes; update functional testing to use this option. The behaviour of 'overwrite' was changed to accommodate the use of encrypted block devices. 
Change-Id: I9b3f8cd2de412ee96e0139dba4d4abdf998ecaf2 --- .gitignore | 2 + charmhelpers/contrib/hahelpers/apache.py | 5 +- charmhelpers/contrib/hahelpers/cluster.py | 14 +- .../contrib/openstack/amulet/utils.py | 11 +- charmhelpers/contrib/openstack/context.py | 9 +- .../contrib/openstack/templates/haproxy.cfg | 1 + .../templates/section-oslo-middleware | 5 + .../templates/section-oslo-notifications | 3 + charmhelpers/contrib/openstack/utils.py | 2 +- charmhelpers/contrib/openstack/vaultlocker.py | 126 ++++++++++++++++ charmhelpers/contrib/storage/linux/ceph.py | 43 +++++- charmhelpers/contrib/storage/linux/lvm.py | 29 ++++ charmhelpers/contrib/storage/linux/utils.py | 16 ++ charmhelpers/core/hookenv.py | 138 +++++++++++++----- charmhelpers/core/host.py | 11 +- charmhelpers/core/services/base.py | 25 +++- charmhelpers/core/sysctl.py | 18 ++- charmhelpers/core/unitdata.py | 9 +- charmhelpers/fetch/ubuntu.py | 1 + config.yaml | 17 +++ hooks/block-devices-storage-attached | 1 + hooks/block-devices-storage-detached | 1 + hooks/secrets-storage-relation-broken | 1 + hooks/secrets-storage-relation-changed | 1 + hooks/secrets-storage-relation-departed | 1 + hooks/secrets-storage-relation-joined | 1 + hooks/storage.bootstrap | 8 + hooks/storage.real | 1 + hooks/swift_storage_hooks.py | 75 +++++++++- lib/swift_storage_utils.py | 94 +++++++++--- metadata.yaml | 9 ++ templates/vaultlocker.conf.j2 | 6 + test-requirements.txt | 4 +- tests/basic_deployment.py | 1 + .../contrib/openstack/amulet/utils.py | 11 +- tests/charmhelpers/core/hookenv.py | 138 +++++++++++++----- tests/charmhelpers/core/host.py | 11 +- tests/charmhelpers/core/services/base.py | 25 +++- tests/charmhelpers/core/sysctl.py | 18 ++- tests/charmhelpers/core/unitdata.py | 9 +- tox.ini | 5 + unit_tests/test_swift_storage_relations.py | 18 +-- unit_tests/test_swift_storage_utils.py | 102 +++++++++++-- unit_tests/test_utils.py | 17 +++ 44 files changed, 861 insertions(+), 182 deletions(-) create mode 100644 
charmhelpers/contrib/openstack/templates/section-oslo-middleware create mode 100644 charmhelpers/contrib/openstack/vaultlocker.py create mode 120000 hooks/block-devices-storage-attached create mode 120000 hooks/block-devices-storage-detached create mode 120000 hooks/secrets-storage-relation-broken create mode 120000 hooks/secrets-storage-relation-changed create mode 120000 hooks/secrets-storage-relation-departed create mode 120000 hooks/secrets-storage-relation-joined create mode 100755 hooks/storage.bootstrap create mode 120000 hooks/storage.real create mode 100644 templates/vaultlocker.conf.j2 diff --git a/.gitignore b/.gitignore index e9ea010..89b0b2d 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,5 @@ tags *.pyc tests/cirros-* func-results.json +.settings +**/__pycache__ diff --git a/charmhelpers/contrib/hahelpers/apache.py b/charmhelpers/contrib/hahelpers/apache.py index 22acb68..605a1be 100644 --- a/charmhelpers/contrib/hahelpers/apache.py +++ b/charmhelpers/contrib/hahelpers/apache.py @@ -65,7 +65,8 @@ def get_ca_cert(): if ca_cert is None: log("Inspecting identity-service relations for CA SSL certificate.", level=INFO) - for r_id in relation_ids('identity-service'): + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): for unit in relation_list(r_id): if ca_cert is None: ca_cert = relation_get('ca_cert', @@ -76,7 +77,7 @@ def get_ca_cert(): def retrieve_ca_cert(cert_file): cert = None if os.path.isfile(cert_file): - with open(cert_file, 'r') as crt: + with open(cert_file, 'rb') as crt: cert = crt.read() return cert diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py index 4207e42..47facd9 100644 --- a/charmhelpers/contrib/hahelpers/cluster.py +++ b/charmhelpers/contrib/hahelpers/cluster.py @@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): ''' Distribute operations by waiting based on modulo_distribution If modulo and or wait 
are not set, check config_get for those values. + If config values are not set, default to modulo=3 and wait=30. :param modulo: int The modulo number creates the group distribution :param wait: int The constant time wait value @@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): :side effect: Calls time.sleep() ''' if modulo is None: - modulo = config_get('modulo-nodes') + modulo = config_get('modulo-nodes') or 3 if wait is None: - wait = config_get('known-wait') - calculated_wait = modulo_distribution(modulo=modulo, wait=wait) + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) msg = "Waiting {} seconds for {} ...".format(calculated_wait, operation_name) log(msg, DEBUG) diff --git a/charmhelpers/contrib/openstack/amulet/utils.py b/charmhelpers/contrib/openstack/amulet/utils.py index 5fdcead..84e87f5 100644 --- a/charmhelpers/contrib/openstack/amulet/utils.py +++ b/charmhelpers/contrib/openstack/amulet/utils.py @@ -441,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils): if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." 
- "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -463,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils): deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py index 6c4497b..2d91f0a 100644 --- a/charmhelpers/contrib/openstack/context.py +++ b/charmhelpers/contrib/openstack/context.py @@ -797,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert)) + content=b64decode(cert), perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key)) + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1873,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator): context is needed to do that before rendering a template. 
''' - def __init__(self, dirname): + def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' self.dirname = dirname + self.kwargs = kwargs def __call__(self): - mkdir(self.dirname) + mkdir(self.dirname, **self.kwargs) return {} diff --git a/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charmhelpers/contrib/openstack/templates/haproxy.cfg index d36af2a..f99d99f 100644 --- a/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -6,6 +6,7 @@ global group haproxy spread-checks 0 stats socket /var/run/haproxy/admin.sock mode 600 level admin + stats socket /var/run/haproxy/operator.sock mode 600 level operator stats timeout 2m defaults diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/charmhelpers/contrib/openstack/templates/section-oslo-middleware new file mode 100644 index 0000000..dd73230 --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/section-oslo-middleware @@ -0,0 +1,5 @@ +[oslo_middleware] + +# Bug #1758675 +enable_proxy_headers_parsing = true + diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/charmhelpers/contrib/openstack/templates/section-oslo-notifications index 5dccd4b..021a3c2 100644 --- a/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ b/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -5,4 +5,7 @@ transport_url = {{ transport_url }} {% if notification_topics -%} topics = {{ notification_topics }} {% endif -%} +{% if notification_format -%} +notification_format = {{ notification_format }} +{% endif -%} {% endif -%} diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index e719426..6184abd 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -306,7 +306,7 @@ def get_os_codename_install_source(src): if src.startswith('cloud:'): ca_rel = 
src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + ca_rel = ca_rel.split('-')[1].split('/')[0] return ca_rel # Best guess match based on deb string provided diff --git a/charmhelpers/contrib/openstack/vaultlocker.py b/charmhelpers/contrib/openstack/vaultlocker.py new file mode 100644 index 0000000..a8e4bf8 --- /dev/null +++ b/charmhelpers/contrib/openstack/vaultlocker.py @@ -0,0 +1,126 @@ +# Copyright 2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os + +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.contrib.openstack.context as context + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.templating as templating +import charmhelpers.core.unitdata as unitdata + +VAULTLOCKER_BACKEND = 'charm-vaultlocker' + + +class VaultKVContext(context.OSContextGenerator): + """Vault KV context for interaction with vault-kv interfaces""" + interfaces = ['secrets-storage'] + + def __init__(self, secret_backend=None): + super(context.OSContextGenerator, self).__init__() + self.secret_backend = ( + secret_backend or 'charm-{}'.format(hookenv.service_name()) + ) + + def __call__(self): + db = unitdata.kv() + last_token = db.get('last-token') + secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): + for unit in hookenv.related_units(relation_id): + data = hookenv.relation_get(unit=unit, + rid=relation_id) + vault_url = data.get('vault_url') + role_id = data.get('{}_role_id'.format(hookenv.local_unit())) + token = data.get('{}_token'.format(hookenv.local_unit())) + + if all([vault_url, role_id, token]): + token = json.loads(token) + vault_url = json.loads(vault_url) + + # Tokens may change when secret_id's are being + # reissued - if so use token to get new secret_id + if token != last_token: + secret_id = retrieve_secret_id( + url=vault_url, + token=token + ) + db.set('secret-id', secret_id) + db.set('last-token', token) + db.flush() + + ctxt = { + 'vault_url': vault_url, + 'role_id': json.loads(role_id), + 'secret_id': secret_id, + 'secret_backend': self.secret_backend, + } + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + self.complete = True + return ctxt + return {} + + +def write_vaultlocker_conf(context, priority=100): + """Write vaultlocker configuration to disk and install alternative + + :param context: Dict of data 
from vault-kv relation + :ptype: context: dict + :param priority: Priority of alternative configuration + :ptype: priority: int""" + charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format( + hookenv.service_name() + ) + host.mkdir(os.path.dirname(charm_vl_path), perms=0o700) + templating.render(source='vaultlocker.conf.j2', + target=charm_vl_path, + context=context, perms=0o600), + alternatives.install_alternative('vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + charm_vl_path, priority) + + +def vault_relation_complete(backend=None): + """Determine whether vault relation is complete + + :param backend: Name of secrets backend requested + :ptype backend: string + :returns: whether the relation to vault is complete + :rtype: bool""" + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + + +# TODO: contrib a high level unwrap method to hvac that works +def retrieve_secret_id(url, token): + """Retrieve a response-wrapped secret_id from Vault + + :param url: URL to Vault Server + :ptype url: str + :param token: One shot Token to use + :ptype token: str + :returns: secret_id to use for Vault Access + :rtype: str""" + import hvac + client = hvac.Client(url=url, token=token) + response = client._post('/v1/sys/wrapping/unwrap') + if response.status_code == 200: + data = response.json() + return data['data']['secret_id'] diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py index e13e60a..7682820 100644 --- a/charmhelpers/contrib/storage/linux/ceph.py +++ b/charmhelpers/contrib/storage/linux/ceph.py @@ -291,7 +291,7 @@ class Pool(object): class ReplicatedPool(Pool): def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0): + percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas if pg_num: @@ -301,6 +301,10 @@ class ReplicatedPool(Pool): self.pg_num = 
min(pg_num, max_pgs) else: self.pg_num = self.get_pgs(self.replicas, percent_data) + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def create(self): if not pool_exists(self.service, self.name): @@ -313,6 +317,12 @@ class ReplicatedPool(Pool): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) except CalledProcessError: raise @@ -320,10 +330,14 @@ class ReplicatedPool(Pool): # Default jerasure erasure coded pool class ErasurePool(Pool): def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0): + percent_data=10.0, app_name=None): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile self.percent_data = percent_data + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def create(self): if not pool_exists(self.service, self.name): @@ -355,6 +369,12 @@ class ErasurePool(Pool): 'erasure', self.erasure_code_profile] try: check_call(cmd) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) except CalledProcessError: raise @@ -778,6 +798,25 @@ def update_pool(client, pool, settings): check_call(cmd) +def set_app_name_for_pool(client, pool, name): + """ + Calls `osd pool application enable` for the specified pool name + + :param client: Name of the ceph client to use + :type client: str + :param pool: Pool to set app name for + :type pool: str + :param name: app name for the specified pool + :type name: str + + :raises: CalledProcessError if ceph call fails + """ + if ceph_version() >= '12.0.0': + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 
'enable', pool, name] + check_call(cmd) + + def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): diff --git a/charmhelpers/contrib/storage/linux/lvm.py b/charmhelpers/contrib/storage/linux/lvm.py index 79a7a24..c8bde69 100644 --- a/charmhelpers/contrib/storage/linux/lvm.py +++ b/charmhelpers/contrib/storage/linux/lvm.py @@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device): ''' cmd = ['lvextend', lv_name, block_device] check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. + :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py index c942889..6f846b0 100644 --- a/charmhelpers/contrib/storage/linux/utils.py +++ b/charmhelpers/contrib/storage/linux/utils.py @@ -67,3 +67,19 @@ def is_device_mounted(device): except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. 
+ :param device: Full path to device to format + :ptype device: tr + :param force: Force operation + :ptype: force: boolean""" + cmd = ['mkfs.xfs'] + if force: + cmd.append("-f") + + cmd += ['-i', 'size=1024', device] + check_call(cmd) diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py index 7ed1cc4..627d8f7 100644 --- a/charmhelpers/core/hookenv.py +++ b/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import glob import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def cached(func): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -289,7 +290,7 @@ class Config(dict): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -309,7 +310,11 @@ class Config(dict): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -353,22 +358,40 @@ class Config(dict): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. 
+ :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. 
The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. 
Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. 
['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py index fd14d60..322ab2a 100644 --- a/charmhelpers/core/host.py +++ b/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py index ca9dc99..179ad4f 100644 --- a/charmhelpers/core/services/base.py +++ b/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/charmhelpers/core/sysctl.py b/charmhelpers/core/sysctl.py index 6e413e3..1f188d8 100644 --- a/charmhelpers/core/sysctl.py +++ 
b/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. ' def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/charmhelpers/core/unitdata.py b/charmhelpers/core/unitdata.py index 6d7b494..ab55432 100644 --- a/charmhelpers/core/unitdata.py +++ b/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. 
""" def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ class Storage(object): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py index 910e96a..653d58f 100644 --- a/charmhelpers/fetch/ubuntu.py +++ b/charmhelpers/fetch/ubuntu.py @@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = { 'x86_64': PROPOSED_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, } CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' diff --git a/config.yaml b/config.yaml index 54843eb..2af23ec 100644 --- a/config.yaml +++ b/config.yaml @@ -40,6 +40,17 @@ options: description: | If true, charm will attempt to unmount and overwrite existing and in-use block-devices (WARNING). + ephemeral-unmount: + type: string + default: + description: | + Cloud instances provide ephermeral storage which is normally mounted + on /mnt. + . + Setting this option to the path of the ephemeral mountpoint will force + an unmount of the corresponding device so that it can be used as a swift + storage device. This is useful for testing purposes (cloud deployment + is not a typical use case). zone: default: 1 type: int @@ -189,3 +200,9 @@ options: be loaded, the charm will fail to install. type: boolean default: False + encrypt: + default: false + type: boolean + description: | + Encrypt block devices used by swift using dm-crypt, making use of + vault for encryption key management; requires a relation to vault. 
diff --git a/hooks/block-devices-storage-attached b/hooks/block-devices-storage-attached new file mode 120000 index 0000000..b5d94ae --- /dev/null +++ b/hooks/block-devices-storage-attached @@ -0,0 +1 @@ +storage.bootstrap \ No newline at end of file diff --git a/hooks/block-devices-storage-detached b/hooks/block-devices-storage-detached new file mode 120000 index 0000000..b5d94ae --- /dev/null +++ b/hooks/block-devices-storage-detached @@ -0,0 +1 @@ +storage.bootstrap \ No newline at end of file diff --git a/hooks/secrets-storage-relation-broken b/hooks/secrets-storage-relation-broken new file mode 120000 index 0000000..c5c04a7 --- /dev/null +++ b/hooks/secrets-storage-relation-broken @@ -0,0 +1 @@ +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/secrets-storage-relation-changed b/hooks/secrets-storage-relation-changed new file mode 120000 index 0000000..c5c04a7 --- /dev/null +++ b/hooks/secrets-storage-relation-changed @@ -0,0 +1 @@ +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/secrets-storage-relation-departed b/hooks/secrets-storage-relation-departed new file mode 120000 index 0000000..c5c04a7 --- /dev/null +++ b/hooks/secrets-storage-relation-departed @@ -0,0 +1 @@ +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/secrets-storage-relation-joined b/hooks/secrets-storage-relation-joined new file mode 120000 index 0000000..c5c04a7 --- /dev/null +++ b/hooks/secrets-storage-relation-joined @@ -0,0 +1 @@ +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/storage.bootstrap b/hooks/storage.bootstrap new file mode 100755 index 0000000..c0ec03f --- /dev/null +++ b/hooks/storage.bootstrap @@ -0,0 +1,8 @@ +#!/bin/sh + +if ! dpkg -s swift > /dev/null 2>&1; then + juju-log "Swift not yet installed." 
+ exit 0 +fi + +./hooks/storage.real diff --git a/hooks/storage.real b/hooks/storage.real new file mode 120000 index 0000000..c5c04a7 --- /dev/null +++ b/hooks/storage.real @@ -0,0 +1 @@ +swift_storage_hooks.py \ No newline at end of file diff --git a/hooks/swift_storage_hooks.py b/hooks/swift_storage_hooks.py index 49dceea..953ca00 100755 --- a/hooks/swift_storage_hooks.py +++ b/hooks/swift_storage_hooks.py @@ -14,16 +14,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +import base64 +import copy +import json import os import shutil import sys +import socket +import subprocess import tempfile from lib.swift_storage_utils import ( PACKAGES, RESTART_MAP, SWIFT_SVCS, - determine_block_devices, do_openstack_upgrade, ensure_swift_directories, fetch_swift_rings, @@ -53,16 +57,20 @@ from charmhelpers.core.hookenv import ( relations_of_type, status_set, ingress_address, + DEBUG, ) from charmhelpers.fetch import ( apt_install, apt_update, + add_source, filter_installed_packages ) from charmhelpers.core.host import ( add_to_updatedb_prunepath, rsync, + write_file, + umount, ) from charmhelpers.core.sysctl import create as create_sysctl @@ -81,9 +89,12 @@ from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.network import ufw from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden +from charmhelpers.core.unitdata import kv from distutils.dir_util import mkpath +import charmhelpers.contrib.openstack.vaultlocker as vaultlocker + hooks = Hooks() CONFIGS = register_configs() NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' @@ -173,8 +184,6 @@ def install(): apt_update() apt_install(PACKAGES, fatal=True) initialize_ufw() - status_set('maintenance', 'Setting up storage') - setup_storage() ensure_swift_directories() @@ -186,6 +195,10 @@ def config_changed(): initialize_ufw() else: ufw.disable() + + if config('ephemeral-unmount'): + 
umount(config('ephemeral-unmount'), persist=True) + if config('prefer-ipv6'): status_set('maintenance', 'Configuring ipv6') assert_charm_supports_ipv6() @@ -198,10 +211,9 @@ def config_changed(): status_set('maintenance', 'Running openstack upgrade') do_openstack_upgrade(configs=CONFIGS) - setup_storage() + install_vaultlocker() - for rid in relation_ids('swift-storage'): - swift_storage_relation_joined(rid=rid) + configure_storage() CONFIGS.write_all() @@ -216,6 +228,17 @@ def config_changed(): add_to_updatedb_prunepath(STORAGE_MOUNT_PATH) +def install_vaultlocker(): + """Determine whether vaultlocker is required and install""" + if config('encrypt'): + pkgs = ['vaultlocker', 'python-hvac'] + installed = len(filter_installed_packages(pkgs)) == 0 + if not installed: + add_source('ppa:openstack-charmers/vaultlocker') + apt_update(fatal=True) + apt_install(pkgs, fatal=True) + + @hooks.hook('upgrade-charm') @harden() def upgrade_charm(): @@ -227,6 +250,10 @@ def upgrade_charm(): @hooks.hook() def swift_storage_relation_joined(rid=None): + if config('encrypt') and not vaultlocker.vault_relation_complete(): + log('Encryption configured and vault not ready, deferring', + level=DEBUG) + return rel_settings = { 'zone': config('zone'), 'object_port': config('object-server-port'), @@ -234,7 +261,8 @@ def swift_storage_relation_joined(rid=None): 'account_port': config('account-server-port'), } - devs = determine_block_devices() or [] + db = kv() + devs = db.get('prepared-devices', []) devs = [os.path.basename(d) for d in devs] rel_settings['device'] = ':'.join(devs) # Keep a reference of devices we are adding to the ring @@ -272,6 +300,34 @@ def swift_storage_relation_departed(): revoke_access(removed_client, port) +@hooks.hook('secrets-storage-relation-joined') +def secrets_storage_joined(relation_id=None): + relation_set(relation_id=relation_id, + secret_backend='charm-vaultlocker', + isolated=True, + access_address=get_relation_ip('secrets-storage'), + 
hostname=socket.gethostname()) + + +@hooks.hook('secrets-storage-relation-changed') +def secrets_storage_changed(): + vault_ca = relation_get('vault_ca') + if vault_ca: + vault_ca = base64.decodestring(json.loads(vault_ca).encode()) + write_file('/usr/local/share/ca-certificates/vault-ca.crt', + vault_ca, perms=0o644) + subprocess.check_call(['update-ca-certificates', '--fresh']) + configure_storage() + + +@hooks.hook('storage.real') +def configure_storage(): + setup_storage(config('encrypt')) + + for rid in relation_ids('swift-storage'): + swift_storage_relation_joined(rid=rid) + + @hooks.hook('nrpe-external-master-relation-joined') @hooks.hook('nrpe-external-master-relation-changed') def update_nrpe_config(): @@ -318,7 +374,10 @@ def main(): hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) - set_os_workload_status(CONFIGS, REQUIRED_INTERFACES, + required_interfaces = copy.deepcopy(REQUIRED_INTERFACES) + if config('encrypt'): + required_interfaces['vault'] = ['secrets-storage'] + set_os_workload_status(CONFIGS, required_interfaces, charm_func=assess_status) os_application_version_set(VERSION_PACKAGE) diff --git a/lib/swift_storage_utils.py b/lib/swift_storage_utils.py index b1b3394..e1fd894 100644 --- a/lib/swift_storage_utils.py +++ b/lib/swift_storage_utils.py @@ -4,6 +4,7 @@ import re import subprocess import shutil import tempfile +import uuid from subprocess import check_call, call, CalledProcessError, check_output @@ -54,6 +55,8 @@ from charmhelpers.core.hookenv import ( relation_ids, iter_units_for_relation_name, ingress_address, + storage_list, + storage_get, ) from charmhelpers.contrib.network import ufw @@ -62,6 +65,7 @@ from charmhelpers.contrib.network.ip import get_host_ip from charmhelpers.contrib.storage.linux.utils import ( is_block_device, is_device_mounted, + mkfs_xfs, ) from charmhelpers.contrib.storage.linux.loopback import ( @@ -84,6 +88,10 @@ from charmhelpers.core.decorators import ( 
retry_on_exception, ) +import charmhelpers.contrib.openstack.vaultlocker as vaultlocker + +from charmhelpers.core.unitdata import kv + PACKAGES = [ 'swift', 'swift-account', 'swift-container', 'swift-object', 'xfsprogs', 'gdisk', 'lvm2', 'python-jinja2', 'python-psutil', @@ -162,11 +170,14 @@ def register_configs(): [SwiftStorageContext()]) configs.register('/etc/rsync-juju.d/050-swift-storage.conf', [RsyncContext(), SwiftStorageServerContext()]) + # NOTE: add VaultKVContext so interface status can be assessed for server in ['account', 'object', 'container']: configs.register('/etc/swift/%s-server.conf' % server, [SwiftStorageServerContext(), context.BindHostContext(), - context.WorkerConfigContext()]), + context.WorkerConfigContext(), + vaultlocker.VaultKVContext( + vaultlocker.VAULTLOCKER_BACKEND)]), return configs @@ -269,6 +280,12 @@ def determine_block_devices(): else: bdevs = block_device.split(' ') + # List storage instances for the 'block-devices' + # store declared for this charm too, and add + # their block device paths to the list. + storage_ids = storage_list('block-devices') + bdevs.extend((storage_get('location', s) for s in storage_ids)) + bdevs = list(set(bdevs)) # attempt to ensure block devices, but filter out missing devs _none = ['None', 'none'] @@ -279,19 +296,6 @@ def determine_block_devices(): return valid_bdevs -def mkfs_xfs(bdev, force=False): - """Format device with XFS filesystem. - - By default this should fail if the device already has a filesystem on it. - """ - cmd = ['mkfs.xfs'] - if force: - cmd.append("-f") - - cmd += ['-i', 'size=1024', bdev] - check_call(cmd) - - def devstore_safe_load(devstore): """Attempt to decode json data and return None if an error occurs while also printing a log. 
@@ -446,21 +450,73 @@ def ensure_devs_tracked(): is_device_in_ring(dev, skip_rel_check=True) -def setup_storage(): +def setup_storage(encrypt=False): + # Preflight check vault relation if encryption is enabled + vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND) + context = vault_kv() + if encrypt and not vault_kv.complete: + log("Encryption requested but vault relation not complete", + level=DEBUG) + return + elif encrypt and vault_kv.complete: + # NOTE: only write vaultlocker configuration once relation is complete + # otherwise we run the chance of an empty configuration file + # being installed on a machine with other vaultlocker based + # services + vaultlocker.write_vaultlocker_conf(context, priority=90) + # Ensure /srv/node exists just in case no disks # are detected and used. mkdir(os.path.join('/srv', 'node'), owner='swift', group='swift', perms=0o755) reformat = str(config('overwrite')).lower() == "true" + + db = kv() + prepared_devices = db.get('prepared-devices', []) + for dev in determine_block_devices(): + if dev in prepared_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + continue + if is_device_in_ring(os.path.basename(dev)): log("Device '%s' already in the ring - ignoring" % (dev)) + # NOTE: record existing use of device dealing with + # upgrades from older versions of charms without + # this feature + prepared_devices.append(dev) + db.set('prepared-devices', prepared_devices) + db.flush() + continue + + # NOTE: this deals with a dm-crypt'ed block device already in + # use + if is_device_mounted(dev): + log("Device '{}' is already mounted, ignoring".format(dev)) continue if reformat: clean_storage(dev) + loopback_device = is_mapped_loopback_device(dev) + options = None + + if encrypt and not loopback_device: + dev_uuid = str(uuid.uuid4()) + check_call(['vaultlocker', 'encrypt', + '--uuid', dev_uuid, + dev]) + dev = '/dev/mapper/crypt-{}'.format(dev_uuid) + options = ','.join([ + "defaults", + 
"nofail", + ("x-systemd.requires=" + "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)), + "comment=vaultlocker", + ]) + try: # If not cleaned and in use, mkfs should fail. mkfs_xfs(dev, force=reformat) @@ -475,8 +531,6 @@ def setup_storage(): _mp = os.path.join('/srv', 'node', basename) mkdir(_mp, owner='swift', group='swift') - options = None - loopback_device = is_mapped_loopback_device(dev) mountpoint = '/srv/node/%s' % basename if loopback_device: # If an exiting fstab entry exists using the image file as the @@ -497,6 +551,12 @@ def setup_storage(): check_call(['chown', '-R', 'swift:swift', mountpoint]) check_call(['chmod', '-R', '0755', mountpoint]) + # NOTE: record preparation of device - this will be used when + # providing block device configuration for ring builders. + prepared_devices.append(dev) + db.set('prepared-devices', prepared_devices) + db.flush() + @retry_on_exception(3, base_delay=2, exc_type=CalledProcessError) def fetch_swift_rings(rings_url): diff --git a/metadata.yaml b/metadata.yaml index acf08ec..8cd181d 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -28,3 +28,12 @@ provides: scope: container swift-storage: interface: swift +requires: + secrets-storage: + interface: vault-kv +storage: + block-devices: + type: block + multiple: + range: 0- + minimum-size: 1G diff --git a/templates/vaultlocker.conf.j2 b/templates/vaultlocker.conf.j2 new file mode 100644 index 0000000..f4b8f42 --- /dev/null +++ b/templates/vaultlocker.conf.j2 @@ -0,0 +1,6 @@ +# vaultlocker configuration from swift-proxy charm +[vault] +url = {{ vault_url }} +approle = {{ role_id }} +backend = {{ secret_backend }} +secret_id = {{ secret_id }} diff --git a/test-requirements.txt b/test-requirements.txt index 9edd4bb..6757a47 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,12 +5,12 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0 +charm-tools>=2.0.0;python_version=='2.7' # cheetah templates aren't availble in 
Python 3+ requests==2.6.0 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0 +bundletester>=0.6.1,<1.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 2d563ef..c010512 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -96,6 +96,7 @@ class SwiftStorageBasicDeployment(OpenStackAmuletDeployment): 'zone': '1', 'block-device': 'vdb', 'overwrite': 'true', + 'ephemeral-unmount': '/mnt', } pxc_config = { 'innodb-buffer-pool-size': '256M', diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 5fdcead..84e87f5 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -441,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils): if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -463,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils): deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. 
- keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py index 7ed1cc4..627d8f7 100644 --- a/tests/charmhelpers/core/hookenv.py +++ b/tests/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import glob import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def cached(func): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -289,7 +290,7 @@ class Config(dict): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -309,7 +310,11 @@ class Config(dict): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -353,22 +358,40 @@ class Config(dict): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') 
+ """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. 
The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. 
+ Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. 
['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py index fd14d60..322ab2a 100644 --- a/tests/charmhelpers/core/host.py +++ b/tests/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py index ca9dc99..179ad4f 100644 --- a/tests/charmhelpers/core/services/base.py +++ b/tests/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py index 6e413e3..1f188d8 100644 --- 
a/tests/charmhelpers/core/sysctl.py +++ b/tests/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. ' def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py index 6d7b494..ab55432 100644 --- a/tests/charmhelpers/core/unitdata.py +++ b/tests/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. 
""" def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ class Storage(object): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/tox.ini b/tox.ini index 4319064..acb73c0 100644 --- a/tox.ini +++ b/tox.ini @@ -26,6 +26,11 @@ basepython = python3.5 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt diff --git a/unit_tests/test_swift_storage_relations.py b/unit_tests/test_swift_storage_relations.py index ba7ee3b..d7c53a3 100644 --- a/unit_tests/test_swift_storage_relations.py +++ b/unit_tests/test_swift_storage_relations.py @@ -18,7 +18,7 @@ import json import uuid import tempfile -from test_utils import CharmTestCase, patch_open +from test_utils import CharmTestCase, TestKV, patch_open with patch('hooks.charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: @@ -47,7 +47,6 @@ TO_PATCH = [ 'configure_installation_source', 'openstack_upgrade_available', # swift_storage_utils - 'determine_block_devices', 'do_openstack_upgrade', 'ensure_swift_directories', 'execd_preinstall', @@ -66,6 +65,7 @@ TO_PATCH = [ 'ufw', 'setup_ufw', 'revoke_access', + 'kv', ] @@ -93,6 +93,8 @@ class SwiftStorageRelationsTests(CharmTestCase): self.config.side_effect = self.test_config.get self.relation_get.side_effect = self.test_relation.get self.get_relation_ip.return_value = '10.10.10.2' + self.test_kv = TestKV() + self.kv.return_value = self.test_kv @patch.object(hooks, 
'add_ufw_gre_rule', lambda *args: None) def test_prunepath(self): @@ -108,8 +110,6 @@ class SwiftStorageRelationsTests(CharmTestCase): ) self.assertTrue(self.apt_update.called) self.apt_install.assert_called_with(PACKAGES, fatal=True) - - self.assertTrue(self.setup_storage.called) self.assertTrue(self.execd_preinstall.called) @patch.object(hooks, 'add_ufw_gre_rule', lambda *args: None) @@ -197,7 +197,7 @@ class SwiftStorageRelationsTests(CharmTestCase): kvstore = mock_kvstore.return_value kvstore.__enter__.return_value = kvstore kvstore.get.return_value = None - self.determine_block_devices.return_value = ['/dev/vdb'] + self.test_kv.set('prepared-devices', ['/dev/vdb']) hooks.swift_storage_relation_joined() @@ -254,8 +254,8 @@ class SwiftStorageRelationsTests(CharmTestCase): test_uuid = uuid.uuid4() test_environ = {'JUJU_ENV_UUID': test_uuid} mock_environ.get.side_effect = test_environ.get - self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc', - '/dev/vdd'] + self.test_kv.set('prepared-devices', ['/dev/vdb', '/dev/vdc', + '/dev/vdd']) mock_local_unit.return_value = 'test/0' kvstore = mock_kvstore.return_value kvstore.__enter__.return_value = kvstore @@ -298,8 +298,8 @@ class SwiftStorageRelationsTests(CharmTestCase): test_uuid = uuid.uuid4() test_environ = {'JUJU_ENV_UUID': test_uuid} mock_environ.get.side_effect = test_environ.get - self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc', - '/dev/vdd'] + self.test_kv.set('prepared-devices', ['/dev/vdb', '/dev/vdc', + '/dev/vdd']) mock_local_unit.return_value = 'test/0' kvstore = mock_kvstore.return_value kvstore.__enter__.return_value = kvstore diff --git a/unit_tests/test_swift_storage_utils.py b/unit_tests/test_swift_storage_utils.py index 03a61d9..4513e28 100644 --- a/unit_tests/test_swift_storage_utils.py +++ b/unit_tests/test_swift_storage_utils.py @@ -17,7 +17,7 @@ import tempfile from collections import namedtuple from mock import call, patch, MagicMock -from test_utils import 
CharmTestCase, patch_open +from test_utils import CharmTestCase, TestKV, patch_open import lib.swift_storage_utils as swift_utils @@ -50,6 +50,8 @@ TO_PATCH = [ 'iter_units_for_relation_name', 'ingress_address', 'relation_ids', + 'vaultlocker', + 'kv', ] @@ -104,11 +106,14 @@ TARGET SOURCE FSTYPE OPTIONS """ + class SwiftStorageUtilsTests(CharmTestCase): def setUp(self): super(SwiftStorageUtilsTests, self).setUp(swift_utils, TO_PATCH) self.config.side_effect = self.test_config.get + self.test_kv = TestKV() + self.kv.return_value = self.test_kv def test_ensure_swift_directories(self): with patch('os.path.isdir') as isdir: @@ -229,18 +234,6 @@ class SwiftStorageUtilsTests(CharmTestCase): self.assertTrue(_find.called) self.assertEqual(result, []) - def test_mkfs_xfs(self): - swift_utils.mkfs_xfs('/dev/sdb') - self.check_call.assert_called_with( - ['mkfs.xfs', '-i', 'size=1024', '/dev/sdb'] - ) - - def test_mkfs_xfs_force(self): - swift_utils.mkfs_xfs('/dev/sdb', force=True) - self.check_call.assert_called_with( - ['mkfs.xfs', '-f', '-i', 'size=1024', '/dev/sdb'] - ) - @patch.object(swift_utils.charmhelpers.core.fstab, "Fstab") @patch.object(swift_utils, 'is_device_in_ring') @patch.object(swift_utils, 'clean_storage') @@ -249,6 +242,7 @@ class SwiftStorageUtilsTests(CharmTestCase): def test_setup_storage_no_overwrite(self, determine, mkfs, clean, mock_is_device_in_ring, mock_Fstab): mock_is_device_in_ring.return_value = False + self.is_device_mounted.return_value = False determine.return_value = ['/dev/vdb'] swift_utils.setup_storage() self.assertFalse(clean.called) @@ -260,6 +254,8 @@ class SwiftStorageUtilsTests(CharmTestCase): perms=0o755), call('/srv/node/vdb', group='swift', owner='swift') ]) + self.assertEqual(self.test_kv.get('prepared-devices'), + ['/dev/vdb']) @patch.object(swift_utils, 'is_device_in_ring') @patch.object(swift_utils, 'clean_storage') @@ -270,6 +266,7 @@ class SwiftStorageUtilsTests(CharmTestCase): self.test_config.set('overwrite', True) 
mock_is_device_in_ring.return_value = False self.is_mapped_loopback_device.return_value = None + self.is_device_mounted.return_value = False determine.return_value = ['/dev/vdb'] swift_utils.setup_storage() clean.assert_called_with('/dev/vdb') @@ -288,6 +285,8 @@ class SwiftStorageUtilsTests(CharmTestCase): perms=0o755), call('/srv/node/vdb', group='swift', owner='swift') ]) + self.assertEqual(self.test_kv.get('prepared-devices'), + ['/dev/vdb']) @patch.object(swift_utils, 'is_device_in_ring') @patch.object(swift_utils, 'determine_block_devices') @@ -304,6 +303,71 @@ class SwiftStorageUtilsTests(CharmTestCase): swift_utils.setup_storage() self.assertEqual(self.check_call.call_count, 0) + @patch.object(swift_utils, "uuid") + @patch.object(swift_utils, "vaultlocker") + @patch.object(swift_utils.charmhelpers.core.fstab, "Fstab") + @patch.object(swift_utils, 'is_device_in_ring') + @patch.object(swift_utils, 'clean_storage') + @patch.object(swift_utils, 'mkfs_xfs') + @patch.object(swift_utils, 'determine_block_devices') + def test_setup_storage_encrypt(self, determine, mkfs, clean, + mock_is_device_in_ring, mock_Fstab, + mock_vaultlocker, mock_uuid): + mock_context = MagicMock() + mock_context.complete = True + mock_context.return_value = 'test_context' + mock_vaultlocker.VaultKVContext.return_value = mock_context + mock_uuid.uuid4.return_value = '7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe' + mock_is_device_in_ring.return_value = False + self.is_device_mounted.return_value = False + self.is_mapped_loopback_device.return_value = None + determine.return_value = ['/dev/vdb'] + swift_utils.setup_storage(encrypt=True) + self.assertFalse(clean.called) + calls = [ + call(['vaultlocker', 'encrypt', + '--uuid', '7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe', + '/dev/vdb']), + call(['chown', '-R', 'swift:swift', + '/srv/node/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe']), + call(['chmod', '-R', '0755', + '/srv/node/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe']) + ] + 
self.check_call.assert_has_calls(calls) + self.mkdir.assert_has_calls([ + call('/srv/node', owner='swift', group='swift', + perms=0o755), + call('/srv/node/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe', + group='swift', owner='swift') + ]) + self.assertEqual(self.test_kv.get('prepared-devices'), + ['/dev/mapper/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe']) + mock_vaultlocker.write_vaultlocker_conf.assert_called_with( + 'test_context', + priority=90 + ) + + @patch.object(swift_utils, "uuid") + @patch.object(swift_utils, "vaultlocker") + @patch.object(swift_utils.charmhelpers.core.fstab, "Fstab") + @patch.object(swift_utils, 'is_device_in_ring') + @patch.object(swift_utils, 'clean_storage') + @patch.object(swift_utils, 'mkfs_xfs') + @patch.object(swift_utils, 'determine_block_devices') + def test_setup_storage_encrypt_noready(self, determine, mkfs, clean, + mock_is_device_in_ring, mock_Fstab, + mock_vaultlocker, mock_uuid): + mock_context = MagicMock() + mock_context.complete = False + mock_context.return_value = {} + mock_vaultlocker.VaultKVContext.return_value = mock_context + swift_utils.setup_storage(encrypt=True) + mock_vaultlocker.write_vaultlocker_conf.assert_not_called() + clean.assert_not_called() + self.check_call.assert_not_called() + self.mkdir.assert_not_called() + self.assertEqual(self.test_kv.get('prepared-devices'), None) + def _fake_is_device_mounted(self, device): if device in ["/dev/sda", "/dev/vda", "/dev/cciss/c0d0"]: return True @@ -373,6 +437,7 @@ class SwiftStorageUtilsTests(CharmTestCase): server.return_value = 'swift_server_context' bind_context.return_value = 'bind_host_context' worker_context.return_value = 'worker_context' + self.vaultlocker.VaultKVContext.return_value = 'vl_context' self.get_os_codename_package.return_value = 'grizzly' configs = MagicMock() configs.register = MagicMock() @@ -386,13 +451,16 @@ class SwiftStorageUtilsTests(CharmTestCase): ['rsync_context', 'swift_context']), call('/etc/swift/account-server.conf', 
['swift_context', 'bind_host_context', - 'worker_context']), + 'worker_context', + 'vl_context']), call('/etc/swift/object-server.conf', ['swift_context', 'bind_host_context', - 'worker_context']), + 'worker_context', + 'vl_context']), call('/etc/swift/container-server.conf', ['swift_context', 'bind_host_context', - 'worker_context']) + 'worker_context', + 'vl_context']) ] self.assertEqual(ex, configs.register.call_args_list) @@ -434,6 +502,7 @@ class SwiftStorageUtilsTests(CharmTestCase): mock_is_device_in_ring.return_value = False determine.return_value = ["/dev/loop0", ] self.is_mapped_loopback_device.return_value = "/srv/test.img" + self.is_device_mounted.return_value = False swift_utils.setup_storage() self.mount.assert_called_with( "/dev/loop0", @@ -476,6 +545,7 @@ class SwiftStorageUtilsTests(CharmTestCase): mock_is_device_in_ring.return_value = False determine.return_value = ["/dev/loop0", ] self.is_mapped_loopback_device.return_value = "/srv/test.img" + self.is_device_mounted.return_value = False swift_utils.setup_storage() self.mount.assert_called_with( "/srv/test.img", diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py index 6f15bf2..ee81104 100644 --- a/unit_tests/test_utils.py +++ b/unit_tests/test_utils.py @@ -117,6 +117,23 @@ class TestRelation(object): return None +class TestKV(dict): + + def __init__(self): + super(TestKV, self).__init__() + self.flushed = False + self.data = {} + + def get(self, attribute, default=None): + return self.data.get(attribute, default) + + def set(self, attribute, value): + self.data[attribute] = value + + def flush(self): + self.flushed = True + + @contextmanager def patch_open(): '''Patch open() to allow mocking both open() itself and the file that is