diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 8a541d4..e1d852d 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -392,6 +392,8 @@ def get_swift_codename(version):
         releases = UBUNTU_OPENSTACK_RELEASE
         release = [k for k, v in six.iteritems(releases) if codename in v]
         ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+        if six.PY3:
+            ret = ret.decode('UTF-8')
         if codename in ret or release[0] in ret:
             return codename
     elif len(codenames) == 1:
@@ -2043,14 +2045,25 @@ def token_cache_pkgs(source=None, release=None):
 
 def update_json_file(filename, items):
     """Updates the json `filename` with a given dict.
-    :param filename: json filename (i.e.: /etc/glance/policy.json)
+    :param filename: path to json file (e.g. /etc/glance/policy.json)
     :param items: dict of items to update
     """
+    if not items:
+        return
+
     with open(filename) as fd:
         policy = json.load(fd)
+
+    # Compare before and after and if nothing has changed don't write the
+    # file since that could cause unnecessary service restarts.
+    before = json.dumps(policy, indent=4, sort_keys=True)
     policy.update(items)
+    after = json.dumps(policy, indent=4, sort_keys=True)
+    if before == after:
+        return
+
     with open(filename, "w") as fd:
-        fd.write(json.dumps(policy, indent=4))
+        fd.write(after)
 
 
 @cached
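Reviewer note: the `update_json_file()` change above skips the write entirely when the merged policy is unchanged. A minimal sketch of what that buys, assuming `charmhelpers.contrib.openstack.utils` is importable and using a throwaway temp file in place of a real policy.json:

```python
import json
import os
import tempfile

from charmhelpers.contrib.openstack.utils import update_json_file

# Hypothetical demo (temp file stands in for e.g. /etc/glance/policy.json):
# updating with values the file already contains must not rewrite it, so
# file-watch based service restarts are not triggered needlessly.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fd:
    json.dump({'context_is_admin': 'role:admin'}, fd, indent=4, sort_keys=True)
    path = fd.name

mtime_before = os.stat(path).st_mtime_ns
update_json_file(path, {'context_is_admin': 'role:admin'})  # no-op update
assert os.stat(path).st_mtime_ns == mtime_before  # file was not rewritten
```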
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 3923161..e13e60a 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
         assert isinstance(valid_range, list), \
             "valid_range must be a list, was given {}".format(valid_range)
         # If we're dealing with strings
-        if valid_type is six.string_types:
+        if isinstance(value, six.string_types):
             assert value in valid_range, \
                 "{} is not in the list {}".format(value, valid_range)
         # Integer, float should have a min and max
@@ -377,12 +377,12 @@ def get_mon_map(service):
         try:
             return json.loads(mon_status)
         except ValueError as v:
-            log("Unable to parse mon_status json: {}. Error: {}".format(
-                mon_status, v.message))
+            log("Unable to parse mon_status json: {}. Error: {}"
+                .format(mon_status, str(v)))
             raise
     except CalledProcessError as e:
-        log("mon_status command failed with message: {}".format(
-            e.message))
+        log("mon_status command failed with message: {}"
+            .format(str(e)))
         raise
 
 
@@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value):
     :param value:
     :return: None. Can raise CalledProcessError
     """
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+           str(value).lower()]
     try:
         check_call(cmd)
     except CalledProcessError:
@@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param durability_estimator: int
     :return: None. Can raise CalledProcessError
     """
+    version = ceph_version()
+
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
 
     cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
-           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
-           'ruleset_failure_domain=' + failure_domain]
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
+           ]
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
+    # failure_domain changed in luminous
+    if version and version >= '12.0.0':
+        cmd.append('crush-failure-domain=' + failure_domain)
+    else:
+        cmd.append('ruleset-failure-domain=' + failure_domain)
+
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
@@ -1064,14 +1073,24 @@ class CephBrokerRq(object):
         self.ops = []
 
     def add_op_request_access_to_group(self, name, namespace=None,
-                                       permission=None, key_name=None):
+                                       permission=None, key_name=None,
+                                       object_prefix_permissions=None):
         """
         Adds the requested permissions to the current service's Ceph key,
-        allowing the key to access only the specified pools
+        allowing the key to access only the specified pools or
+        object prefixes. object_prefix_permissions should be a dictionary
+        keyed on the permission with the corresponding value being a list
+        of prefixes to apply that permission to.
+            {
+                'rwx': ['prefix1', 'prefix2'],
+                'class-read': ['prefix3']}
         """
-        self.ops.append({'op': 'add-permissions-to-key', 'group': name,
-                         'namespace': namespace, 'name': key_name or service_name(),
-                         'group-permission': permission})
+        self.ops.append({
+            'op': 'add-permissions-to-key', 'group': name,
+            'namespace': namespace,
+            'name': key_name or service_name(),
+            'group-permission': permission,
+            'object-prefix-permissions': object_prefix_permissions})
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None):
@@ -1107,7 +1126,10 @@ class CephBrokerRq(object):
     def _ops_equal(self, other):
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
+                for key in [
+                        'replicas', 'name', 'op', 'pg_num', 'weight',
+                        'group', 'group-namespace', 'group-permission',
+                        'object-prefix-permissions']:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
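Reviewer note: a hedged usage sketch for the new `object_prefix_permissions` argument; the group name 'images' and the prefixes mirror the shape shown in the docstring rather than any real deployment:

```python
from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

# Illustrative broker request: grant this unit's Ceph key rwx on two
# object prefixes and class-read on a third, alongside the usual
# group-level permission.
rq = CephBrokerRq()
rq.add_op_request_access_to_group(
    name='images',
    permission='rwx',
    object_prefix_permissions={
        'rwx': ['prefix1', 'prefix2'],
        'class-read': ['prefix3'],
    })
```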
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
index 7f2a060..79a7a24 100644
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import functools
 from subprocess import (
     CalledProcessError,
     check_call,
@@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device):
     :block_device: str: Full path of PV-initialized block device.
     '''
     check_call(['vgcreate', volume_group, block_device])
+
+
+def list_logical_volumes(select_criteria=None, path_mode=False):
+    '''
+    List logical volumes
+
+    :param select_criteria: str: Limit list to those volumes matching this
+                                 criteria (see 'lvs -S help' for more details)
+    :param path_mode: bool: return logical volume name in 'vg/lv' format, this
+                            format is required for some commands like lvextend
+    :returns: [str]: List of logical volumes
+    '''
+    lv_display_attr = 'lv_name'
+    if path_mode:
+        # The output-parsing logic relies on this column order
+        lv_display_attr = 'vg_name,' + lv_display_attr
+    cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
+    if select_criteria:
+        cmd.extend(['--select', select_criteria])
+    lvs = []
+    for lv in check_output(cmd).decode('UTF-8').splitlines():
+        if not lv:
+            continue
+        if path_mode:
+            lvs.append('/'.join(lv.strip().split()))
+        else:
+            lvs.append(lv.strip())
+    return lvs
+
+
+list_thin_logical_volume_pools = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^t')
+
+list_thin_logical_volumes = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^V')
+
+
+def extend_logical_volume_by_device(lv_name, block_device):
+    '''
+    Extends the size of logical volume lv_name by the amount of free space on
+    physical volume block_device.
+
+    :param lv_name: str: name of logical volume to be extended (vg/lv format)
+    :param block_device: str: name of block_device to be allocated to lv_name
+    '''
+    cmd = ['lvextend', lv_name, block_device]
+    check_call(cmd)
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 5cc5c86..fd14d60 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
         with open(path, 'wb') as target:
             os.fchown(target.fileno(), uid, gid)
             os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
             target.write(content)
         return
     # the contents were the same, but we might still need to change the
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..99451b5 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
+    'bionic',
 )
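Reviewer note: the new LVM helpers compose as below. A sketch only: the volume group name ('cinder-volumes') and spare device ('/dev/vdc') are assumptions, and the `-S` selection string is whatever the installed lvs accepts:

```python
from charmhelpers.contrib.storage.linux.lvm import (
    extend_logical_volume_by_device,
    list_logical_volumes,
    list_thin_logical_volume_pools,
)

# List LVs in an assumed VG, in the 'vg/lv' form that lvextend understands.
for lv in list_logical_volumes(select_criteria='vg_name=cinder-volumes',
                               path_mode=True):
    print(lv)

# Grow the first thin pool (if any) onto a spare PV; path_mode=True is
# needed because extend_logical_volume_by_device wants 'vg/lv' names.
pools = list_thin_logical_volume_pools(path_mode=True)
if pools:
    extend_logical_volume_by_device(pools[0], '/dev/vdc')
```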
""" - self.log.info('Waiting for extended status on units...') + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) all_services = self.d.services.keys() @@ -252,9 +256,9 @@ class OpenStackAmuletDeployment(AmuletDeployment): service_messages = {service: message for service in services} # Check for idleness - self.d.sentry.wait() + self.d.sentry.wait(timeout=timeout) # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services) + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b1..87f364d 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py index 5cc5c86..fd14d60 100644 --- a/tests/charmhelpers/core/host.py +++ b/tests/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py index d8dc378..99451b5 100644 --- a/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/tests/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ UBUNTU_RELEASES = ( 'yakkety', 'zesty', 'artful', + 'bionic', )