Sync charm-helpers for Stein release

As part of the Stein release, we need to ensure
that charmhelpers is up to date.

Change-Id: Ic037cadc6eab7ba6fa6379f3b5cc822297cd01a4
Chris MacNaughton 2019-04-04 10:11:48 +02:00
parent 8e4fe57295
commit fe3796593c
12 changed files with 254 additions and 125 deletions

View File

@@ -19,9 +19,16 @@ from charmhelpers.core import unitdata
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
nested = subparser.add_subparsers()
get_cmd = nested.add_parser('get', help='Retrieve data')
get_cmd.add_argument('key', help='Key to retrieve the value of')
get_cmd.set_defaults(action='get', value=None)
getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
getrange_cmd.add_argument('key', metavar='prefix',
help='Prefix of the keys to retrieve')
getrange_cmd.set_defaults(action='getrange', value=None)
set_cmd = nested.add_parser('set', help='Store data')
set_cmd.add_argument('key', help='Key to set')
set_cmd.add_argument('value', help='Value to store')
@@ -30,6 +37,8 @@ def unitdata_cmd(subparser):
def _unitdata_cmd(action, key, value):
if action == 'get':
return unitdata.kv().get(key)
elif action == 'getrange':
return unitdata.kv().getrange(key)
elif action == 'set':
unitdata.kv().set(key, value)
unitdata.kv().flush()
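
For reference, the new 'getrange' subcommand is a thin wrapper around the existing key/value store in charmhelpers.core.unitdata. A minimal sketch of the underlying calls (run inside a charm hook environment; the keys and values below are made up):

from charmhelpers.core import unitdata

kv = unitdata.kv()
kv.set('config.vip', '10.0.0.10')   # illustrative keys/values only
kv.set('config.port', 8776)
kv.flush()                          # persist pending writes

print(kv.get('config.vip'))         # single-key lookup
print(kv.getrange('config.'))       # every key sharing the 'config.' prefix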

View File

@@ -30,14 +30,20 @@ from charmhelpers.core.hookenv import (
cached,
)
"""
The Security Guide suggests a specific list of files inside each
service's config directory that should be mode 640, but by also
ensuring the containing directory is 750, only the owner can write,
and only the group can read, files within the directory. Restricting
access to the containing directory guards more effectively against
accidental leakage when a new file is added to the service without
being added to the security guide, and therefore to this check.
"""
FILE_ASSERTIONS = {
'barbican': {
# From security guide
'/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'},
'/etc/barbican/barbican-api-paste.ini':
{'group': 'barbican', 'mode': '640'},
'/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'},
'/etc/barbican': {'group': 'barbican', 'mode': '750'},
},
'ceph-mon': {
'/var/lib/charm/ceph-mon/ceph.conf':
@@ -60,82 +66,29 @@ FILE_ASSERTIONS = {
{'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
},
'cinder': {
# From security guide
'/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'},
'/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'},
'/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'},
'/etc/cinder': {'group': 'cinder', 'mode': '750'},
},
'glance': {
# From security guide
'/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-registry-paste.ini':
{'group': 'glance', 'mode': '640'},
'/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-swift-store.conf':
{'group': 'glance', 'mode': '640'},
'/etc/glance/policy.json': {'group': 'glance', 'mode': '640'},
'/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'},
'/etc/glance/schema.json': {'group': 'glance', 'mode': '640'},
'/etc/glance': {'group': 'glance', 'mode': '750'},
},
'keystone': {
# From security guide
'/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'},
'/etc/keystone/keystone-paste.ini':
{'group': 'keystone', 'mode': '640'},
'/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'},
'/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'},
'/etc/keystone/ssl/certs/signing_cert.pem':
{'group': 'keystone', 'mode': '640'},
'/etc/keystone/ssl/private/signing_key.pem':
{'group': 'keystone', 'mode': '640'},
'/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'},
'/etc/keystone':
{'owner': 'keystone', 'group': 'keystone', 'mode': '750'},
},
'manila': {
# From security guide
'/etc/manila/manila.conf': {'group': 'manila', 'mode': '640'},
'/etc/manila/api-paste.ini': {'group': 'manila', 'mode': '640'},
'/etc/manila/policy.json': {'group': 'manila', 'mode': '640'},
'/etc/manila/rootwrap.conf': {'group': 'manila', 'mode': '640'},
'/etc/manila': {'group': 'manila', 'mode': '750'},
},
'neutron-gateway': {
'/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
'/etc/neutron/rootwrap.conf': {'mode': '640'},
'/etc/neutron/rootwrap.d': {'mode': '755'},
'/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
'/etc/neutron': {'group': 'neutron', 'mode': '750'},
},
'neutron-api': {
# From security guide
'/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
'/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'},
'/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'},
# Additional validations
'/etc/neutron/rootwrap.d': {'mode': '755'},
'/etc/neutron/neutron_lbaas.conf': {'mode': '644'},
'/etc/neutron/neutron_vpnaas.conf': {'mode': '644'},
'/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
'/etc/neutron/': {'group': 'neutron', 'mode': '750'},
},
'nova-cloud-controller': {
# From security guide
'/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
'/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'},
'/etc/nova/*': {'group': 'nova', 'mode': '640'},
# Additional validations
'/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
'/etc/nova': {'group': 'nova', 'mode': '750'},
},
'nova-compute': {
# From security guide
'/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'},
'/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
'/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'},
# Additional Validations
'/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'},
'/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
'/etc/nova/nm.conf': {'mode': '644'},
'/etc/nova/*': {'group': 'nova', 'mode': '640'},
'/etc/nova/': {'group': 'nova', 'mode': '750'},
},
'openstack-dashboard': {
# From security guide
@@ -178,7 +131,7 @@ def _config_ini(path):
return dict(conf)
def _validate_file_ownership(owner, group, file_name):
def _validate_file_ownership(owner, group, file_name, optional=False):
"""
Validate that a specified file is owned by `owner:group`.
@@ -188,12 +141,16 @@ def _validate_file_ownership(owner, group, file_name):
:type group: str
:param file_name: Path to the file to verify
:type file_name: str
:param optional: Is this file optional, i.e. should a missing file
be tolerated rather than failing this check
:type optional: bool
"""
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
assert False, "Specified file does not exist: {}".format(file_name)
if not optional:
assert False, "Specified file does not exist: {}".format(file_name)
assert owner == ownership.owner, \
"{} has an incorrect owner: {} should be {}".format(
file_name, ownership.owner, owner)
@@ -203,7 +160,7 @@ def _validate_file_ownership(owner, group, file_name):
print("Validate ownership of {}: PASS".format(file_name))
def _validate_file_mode(mode, file_name):
def _validate_file_mode(mode, file_name, optional=False):
"""
Validate that a specified file has the specified permissions.
@@ -211,12 +168,16 @@ def _validate_file_mode(mode, file_name):
:type owner: str
:param file_name: Path to the file to verify
:type file_name: str
:param optional: Is this file optional, i.e. should a missing file
be tolerated rather than failing this check
:type optional: bool
"""
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
assert False, "Specified file does not exist: {}".format(file_name)
if not optional:
assert False, "Specified file does not exist: {}".format(file_name)
assert mode == ownership.mode, \
"{} has an incorrect mode: {} should be {}".format(
file_name, ownership.mode, mode)
@@ -243,14 +204,15 @@ def validate_file_ownership(config):
"Invalid ownership configuration: {}".format(key))
owner = options.get('owner', config.get('owner', 'root'))
group = options.get('group', config.get('group', 'root'))
optional = options.get('optional', config.get('optional', False))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
if os.path.isfile(file):
_validate_file_ownership(owner, group, file)
_validate_file_ownership(owner, group, file, optional)
else:
if os.path.isfile(file_name):
_validate_file_ownership(owner, group, file_name)
_validate_file_ownership(owner, group, file_name, optional)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
@@ -264,14 +226,15 @@ def validate_file_permissions(config):
raise RuntimeError(
"Invalid ownership configuration: {}".format(key))
mode = options.get('mode', config.get('permissions', '600'))
optional = options.get('optional', config.get('optional', False))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
if os.path.isfile(file):
_validate_file_mode(mode, file)
_validate_file_mode(mode, file, optional)
else:
if os.path.isfile(file_name):
_validate_file_mode(mode, file_name)
_validate_file_mode(mode, file_name, optional)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
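
The effect of the new optional flag, roughly: an entry carrying 'optional': True is validated only when the file exists, instead of failing the audit outright when it is missing. A hedged illustration of how an assertion table could use it (the charm name and paths below are invented, not taken from the upstream FILE_ASSERTIONS):

# Illustrative only -- not an entry from the real FILE_ASSERTIONS table.
EXAMPLE_ASSERTIONS = {
    'my-charm': {
        # Must exist with the stated group and mode.
        '/etc/mycharm/mycharm.conf': {'group': 'mycharm', 'mode': '640'},
        # May legitimately be absent; only checked when present.
        '/etc/mycharm/policy.json':
            {'group': 'mycharm', 'mode': '640', 'optional': True},
    },
}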

View File

@@ -180,13 +180,17 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
os.symlink(hostname_key, custom_key)
def install_certs(ssl_dir, certs, chain=None):
def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
"""Install the certs passed into the ssl dir and append the chain if
provided.
:param ssl_dir: str Directory to create symlinks in
:param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
:param chain: str Chain to be appended to certs
:param user: (Optional) Owner of certificate files. Defaults to 'root'
:type user: str
:param group: (Optional) Group of certificate files. Defaults to 'root'
:type group: str
"""
for cn, bundle in certs.items():
cert_filename = 'cert_{}'.format(cn)
@@ -197,21 +201,25 @@ def install_certs(ssl_dir, certs, chain=None):
# trust certs signed by an intermediate in the chain
cert_data = cert_data + os.linesep + chain
write_file(
path=os.path.join(ssl_dir, cert_filename),
path=os.path.join(ssl_dir, cert_filename), owner=user, group=group,
content=cert_data, perms=0o640)
write_file(
path=os.path.join(ssl_dir, key_filename),
path=os.path.join(ssl_dir, key_filename), owner=user, group=group,
content=bundle['key'], perms=0o640)
def process_certificates(service_name, relation_id, unit,
custom_hostname_link=None):
custom_hostname_link=None, user='root', group='root'):
"""Process the certificates supplied down the relation
:param service_name: str Name of service the certificates are for.
:param relation_id: str Relation id providing the certs
:param unit: str Unit providing the certs
:param custom_hostname_link: str Name of custom link to create
:param user: (Optional) Owner of certificate files. Defaults to 'root'
:type user: str
:param group: (Optional) Group of certificate files. Defaults to 'root'
:type group: str
"""
data = relation_get(rid=relation_id, unit=unit)
ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
@@ -223,7 +231,7 @@ def process_certificates(service_name, relation_id, unit,
if certs:
certs = json.loads(certs)
install_ca_cert(ca.encode())
install_certs(ssl_dir, certs, chain)
install_certs(ssl_dir, certs, chain, user=user, group=group)
create_ip_cert_links(
ssl_dir,
custom_hostname_link=custom_hostname_link)
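
With the new user/group arguments, a charm whose Apache workers run as a non-root user can keep the installed certificates readable by that user. A hedged sketch of the call site (the service name, relation id and unit are placeholders):

from charmhelpers.contrib.openstack.cert_utils import process_certificates

# Typically invoked from a certificates-relation-changed hook; the
# 'keystone' service, relation id and unit below are placeholders.
process_certificates('keystone', 'certificates:0', 'vault/0',
                     user='keystone', group='keystone')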

View File

@@ -792,6 +792,7 @@ class ApacheSSLContext(OSContextGenerator):
# and service namespace accordingly.
external_ports = []
service_namespace = None
user = group = 'root'
def enable_modules(self):
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
@@ -810,9 +811,11 @@ class ApacheSSLContext(OSContextGenerator):
key_filename = 'key'
write_file(path=os.path.join(ssl_dir, cert_filename),
content=b64decode(cert), perms=0o640)
content=b64decode(cert), owner=self.user,
group=self.group, perms=0o640)
write_file(path=os.path.join(ssl_dir, key_filename),
content=b64decode(key), perms=0o640)
content=b64decode(key), owner=self.user,
group=self.group, perms=0o640)
def configure_ca(self):
ca_cert = get_ca_cert()
@@ -1932,3 +1935,30 @@ class VersionsContext(OSContextGenerator):
return {
'openstack_release': ostack,
'operating_system_release': osystem}
class LogrotateContext(OSContextGenerator):
"""Common context generator for logrotate."""
def __init__(self, location, interval, count):
"""
:param location: Absolute path for the logrotate config file
:type location: str
:param interval: The interval for the rotations. Valid values are
'daily', 'weekly', 'monthly', 'yearly'
:type interval: str
:param count: The logrotate count option configures how many times
the log files are rotated before being removed
:type count: int
"""
self.location = location
self.interval = interval
self.count = 'rotate {}'.format(count)
def __call__(self):
ctxt = {
'logrotate_logs_location': self.location,
'logrotate_interval': self.interval,
'logrotate_count': self.count,
}
return ctxt
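
A short sketch of the new context generator in use; the log path and rotation values are placeholders, and the rendered dict is exactly what __call__ above returns:

from charmhelpers.contrib.openstack.context import LogrotateContext

ctxt = LogrotateContext(location='/var/log/mycharm/*.log',
                        interval='weekly', count=4)
print(ctxt())
# {'logrotate_logs_location': '/var/log/mycharm/*.log',
#  'logrotate_interval': 'weekly',
#  'logrotate_count': 'rotate 4'}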

View File

@@ -159,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
except (NotImplementedError, NoNetworkBinding):
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
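
For context, network-get raises NotImplementedError on older Juju versions and NoNetworkBinding when an endpoint has no space bound; either way the charm should fall back to the single configured VIP. A minimal sketch of that pattern using the charmhelpers names (the binding and VIP list are placeholders, not the actual resolve_address logic):

from charmhelpers.core.hookenv import (
    NoNetworkBinding,
    network_get_primary_address,
)

def pick_vip(binding, vips):
    # Placeholder helper illustrating the widened except clause above.
    try:
        return network_get_primary_address(binding)
    except (NotImplementedError, NoNetworkBinding):
        # No binding information available; assume a single-VIP deployment.
        return vips[0]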

View File

@@ -194,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
('rocky',
['2.18.0', '2.19.0']),
('stein',
['2.19.0']),
['2.20.0']),
])
# >= Liberty version->codename mapping
@@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(avail_vers, cur_vers) == 1
return apt.version_compare(avail_vers, cur_vers) >= 1
def ensure_block_device(block_device):
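
The switch from '== 1' to '>= 1' matters because apt's comparison only guarantees the sign of its result, not the magnitude. A small sketch with the python-apt bindings (assumed to be installed; the version strings are arbitrary):

import apt_pkg

apt_pkg.init()

# Positive means the first version is newer, zero equal, negative older;
# the exact value is unspecified, hence '>= 1' rather than '== 1'.
print(apt_pkg.version_compare('2:13.0.0', '2:12.0.0'))   # > 0
print(apt_pkg.version_compare('2:12.0.0', '2:12.0.0'))   # == 0
print(apt_pkg.version_compare('2:11.0.0', '2:12.0.0'))   # < 0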

View File

@@ -186,7 +186,7 @@ class Pool(object):
elif mode == 'writeback':
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
if cmp_pkgrevno('ceph', '10.1') >= 0:
if cmp_pkgrevno('ceph-common', '10.1') >= 0:
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
@@ -582,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
raise
# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param max_bytes: int or long
:return: None. Can raise CalledProcessError
:param service: The Ceph user name to run the command under
:type service: str
:param pool_name: Name of pool
:type pool_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
:raises: subprocess.CalledProcessError
"""
# Set a byte quota on a RADOS pool in ceph.
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
'max_bytes', str(max_bytes)]
try:
check_call(cmd)
except CalledProcessError:
raise
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
if max_bytes:
cmd = cmd + ['max_bytes', str(max_bytes)]
if max_objects:
cmd = cmd + ['max_objects', str(max_objects)]
check_call(cmd)
def remove_pool_quota(service, pool_name):
@@ -661,7 +664,7 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
# failure_domain changed in luminous
if luminous_or_later:
cmd.append('crush-failure-domain=' + failure_domain)
@@ -766,7 +769,7 @@ def get_osds(service, device_class=None):
:param device_class: Class of storage device for OSD's
:type device_class: str
"""
luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
if luminous_or_later and device_class:
out = check_output(['ceph', '--id', service,
'osd', 'crush', 'class',
@@ -832,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
:raises: CalledProcessError if ceph call fails
"""
if cmp_pkgrevno('ceph', '12.0.0') >= 0:
if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
cmd = ['ceph', '--id', client, 'osd', 'pool',
'application', 'enable', pool, name]
check_call(cmd)
@@ -1153,19 +1156,46 @@ class CephBrokerRq(object):
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None,
app_name=None):
"""Adds an operation to create a pool.
app_name=None, max_bytes=None, max_objects=None):
"""DEPRECATED: Use ``add_op_create_replicated_pool()`` or
``add_op_create_erasure_pool()`` instead.
"""
return self.add_op_create_replicated_pool(
name, replica_count=replica_count, pg_num=pg_num, weight=weight,
group=group, namespace=namespace, app_name=app_name,
max_bytes=max_bytes, max_objects=max_objects)
@param pg_num setting: optional setting. If not provided, this value
will be calculated by the broker based on how many OSDs are in the
cluster at the time of creation. Note that, if provided, this value
will be capped at the current available maximum.
@param weight: the percentage of data the pool makes up
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None,
app_name=None, max_bytes=None,
max_objects=None):
"""Adds an operation to create a replicated pool.
:param name: Name of pool to create
:type name: str
:param replica_count: Number of copies Ceph should keep of your data.
:type replica_count: int
:param pg_num: Request specific number of Placement Groups to create
for pool.
:type pg_num: int
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
Used to calculate number of Placement Groups to create
for pool.
:type weight: float
:param group: Group to add pool to
:type group: str
:param namespace: Group namespace
:type namespace: str
:param app_name: (Optional) Tag pool with application name. Note that
there are certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
if pg_num and weight:
raise ValueError('pg_num and weight are mutually exclusive')
@@ -1173,7 +1203,41 @@ class CephBrokerRq(object):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count, 'pg_num': pg_num,
'weight': weight, 'group': group,
'group-namespace': namespace, 'app-name': app_name})
'group-namespace': namespace, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def add_op_create_erasure_pool(self, name, erasure_profile=None,
weight=None, group=None, app_name=None,
max_bytes=None, max_objects=None):
"""Adds an operation to create a erasure coded pool.
:param name: Name of pool to create
:type name: str
:param erasure_profile: Name of erasure code profile to use. If not
set the ceph-mon unit handling the broker
request will set its default value.
:type erasure_profile: str
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
:type weight: float
:param group: Group to add pool to
:type group: str
:param app_name: (Optional) Tag pool with application name. Note that
there are certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
self.ops.append({'op': 'create-pool', 'name': name,
'pool-type': 'erasure',
'erasure-profile': erasure_profile,
'weight': weight,
'group': group, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def set_ops(self, ops):
"""Set request ops to provided value.

View File

@@ -17,12 +17,53 @@ import re
from stat import S_ISBLK
from subprocess import (
CalledProcessError,
check_call,
check_output,
call
)
def _luks_uuid(dev):
"""
Check to see if dev is a LUKS encrypted volume, returning the UUID
of volume if it is.
:param: dev: path to block device to check.
:returns: str. UUID of LUKS device or None if not a LUKS device
"""
try:
cmd = ['cryptsetup', 'luksUUID', dev]
return check_output(cmd).decode('UTF-8').strip()
except CalledProcessError:
return None
def is_luks_device(dev):
"""
Determine if dev is a LUKS-formatted block device.
:param: dev: A full path to a block device to check for LUKS header
presence
:returns: boolean: indicates whether a device is used based on LUKS header.
"""
return True if _luks_uuid(dev) else False
def is_mapped_luks_device(dev):
"""
Determine if dev is a mapped LUKS device
:param: dev: A full path to a block device to be checked
:returns: boolean: indicates whether a device is mapped
"""
_, dirs, _ = next(os.walk(
'/sys/class/block/{}/holders/'
.format(os.path.basename(os.path.realpath(dev))))
)
is_held = len(dirs) > 0
return is_held and is_luks_device(dev)
def is_block_device(path):
'''
Confirm device at path is a valid block device node.
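
How the two new LUKS checks compose in practice, as a rough sketch (the device path is a placeholder; cryptsetup must be installed and the calls generally need root):

from charmhelpers.contrib.storage.linux.utils import (
    is_luks_device,
    is_mapped_luks_device,
)

dev = '/dev/vdb'   # placeholder block device
if not is_luks_device(dev):
    print('{} carries no LUKS header'.format(dev))
elif is_mapped_luks_device(dev):
    print('{} is LUKS and already opened/mapped'.format(dev))
else:
    print('{} is LUKS but not yet opened'.format(dev))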

View File

@@ -47,6 +47,7 @@ if __platform__ == "ubuntu":
cmp_pkgrevno,
CompareHostReleases,
get_distrib_codename,
arch
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import ( # NOQA:F401

View File

@@ -1,5 +1,6 @@
import subprocess
from charmhelpers.core.hookenv import cached
from charmhelpers.core.strutils import BasicStringComparator
@@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
@cached
def arch():
"""Return the package architecture as a string.
:returns: the architecture
:rtype: str
:raises: subprocess.CalledProcessError if dpkg command fails
"""
return subprocess.check_output(
['dpkg', '--print-architecture']
).rstrip().decode('UTF-8')
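
The new helper is re-exported through charmhelpers.core.host on Ubuntu (see the import added in the previous file), so a charm can branch on package architecture without shelling out itself. For instance, assuming an Ubuntu unit with dpkg available:

from charmhelpers.core.host import arch

# Returns e.g. 'amd64', 'arm64' or 's390x'; the result is @cached, so
# repeated calls do not re-run dpkg.
if arch() == 's390x':
    print('skipping packages that are not built for s390x')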

View File

@@ -28,7 +28,7 @@ from charmhelpers.core.hookenv import (
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
def create(sysctl_dict, sysctl_file, ignore=False):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a dict or YAML-formatted string of sysctl
@@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file):
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
:param ignore: If True, ignore "unknown variable" errors.
:type ignore: bool
:returns: None
"""
if type(sysctl_dict) is not dict:
@@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file):
for key, value in sysctl_dict_parsed.items():
fd.write("{}={}\n".format(key, value))
log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
log("Updating sysctl_file: {} values: {}".format(sysctl_file,
sysctl_dict_parsed),
level=DEBUG)
check_call(["sysctl", "-p", sysctl_file])
call = ["sysctl", "-p", sysctl_file]
if ignore:
call.append("-e")
check_call(call)
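
From the caller's side, ignore=True still writes the file but runs 'sysctl -p' with '-e', so keys the running kernel does not expose (common inside containers) no longer abort the hook. A hedged sketch (the target file and values are placeholders and the call needs root):

from charmhelpers.core.sysctl import create

create({'net.ipv4.ip_forward': 1, 'vm.swappiness': 10},
       '/etc/sysctl.d/50-mycharm.conf',
       ignore=True)   # tolerate "unknown variable" errors, e.g. in LXD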

View File

@@ -20,10 +20,8 @@ import six
import time
import subprocess
from charmhelpers.core.host import (
get_distrib_codename,
CompareHostReleases,
)
from charmhelpers.core.host import get_distrib_codename
from charmhelpers.core.hookenv import (
log,
DEBUG,
@@ -362,14 +360,8 @@ def _get_keyid_by_gpg_key(key_material):
:returns: A GPG key fingerprint
:rtype: str
"""
# trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change
release = get_distrib_codename()
is_gpgv2_distro = CompareHostReleases(release) >= "bionic"
if is_gpgv2_distro:
# --import is mandatory, otherwise fingerprint is not printed
cmd = 'gpg --with-colons --import-options show-only --import --dry-run'
else:
cmd = 'gpg --with-colons --with-fingerprint'
# Use the same gpg command for both Xenial and Bionic
cmd = 'gpg --with-colons --with-fingerprint'
ps = subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
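
For reference, the single command now used on every supported series emits machine-readable records, and the key fingerprint sits in the tenth colon-separated field of the 'fpr' lines. A hedged sketch of extracting it outside charmhelpers (the key material is a placeholder; gpg must be installed):

import subprocess

def fingerprint_from_key(key_material):
    # key_material: ASCII-armoured public key text (placeholder input).
    ps = subprocess.Popen(['gpg', '--with-colons', '--with-fingerprint'],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    out, _ = ps.communicate(input=key_material.encode('utf-8'))
    for line in out.decode('utf-8').splitlines():
        if line.startswith('fpr:'):
            return line.split(':')[9]   # field 10 holds the fingerprint
    return None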