Sync charm-helpers for Stein release

As a part of the Stein release, we need to ensure
that charmhelpers is up to date.

Change-Id: I2dc2febdaa4a6c4a5b815f95b772cf4c0dda2606
This commit is contained in:
Chris MacNaughton 2019-04-04 10:17:53 +02:00
parent cd505a45e8
commit e0e59083e2
10 changed files with 429 additions and 102 deletions

View File

@ -19,9 +19,16 @@ from charmhelpers.core import unitdata
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
nested = subparser.add_subparsers()
get_cmd = nested.add_parser('get', help='Retrieve data')
get_cmd.add_argument('key', help='Key to retrieve the value of')
get_cmd.set_defaults(action='get', value=None)
getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
getrange_cmd.add_argument('key', metavar='prefix',
help='Prefix of the keys to retrieve')
getrange_cmd.set_defaults(action='getrange', value=None)
set_cmd = nested.add_parser('set', help='Store data')
set_cmd.add_argument('key', help='Key to set')
set_cmd.add_argument('value', help='Value to store')
@ -30,6 +37,8 @@ def unitdata_cmd(subparser):
def _unitdata_cmd(action, key, value):
if action == 'get':
return unitdata.kv().get(key)
elif action == 'getrange':
return unitdata.kv().getrange(key)
elif action == 'set':
unitdata.kv().set(key, value)
unitdata.kv().flush()

View File

@ -13,6 +13,7 @@
# limitations under the License.
from charmhelpers.core.hookenv import (
NoNetworkBinding,
config,
unit_get,
service_name,
@ -158,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
except (NotImplementedError, NoNetworkBinding):
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
except (NotImplementedError, NoNetworkBinding):
resolved_address = fallback_addr
if resolved_address is None:

View File

@ -194,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
('rocky',
['2.18.0', '2.19.0']),
('stein',
['2.19.0']),
['2.20.0']),
])
# >= Liberty version->codename mapping
@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(avail_vers, cur_vers) == 1
return apt.version_compare(avail_vers, cur_vers) >= 1
def ensure_block_device(block_device):

View File

@ -59,6 +59,7 @@ from charmhelpers.core.host import (
service_stop,
service_running,
umount,
cmp_pkgrevno,
)
from charmhelpers.fetch import (
apt_install,
@ -178,7 +179,6 @@ class Pool(object):
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(self.service, cache_pool)
version = ceph_version()
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@ -186,7 +186,7 @@ class Pool(object):
elif mode == 'writeback':
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
if version >= '10.1':
if cmp_pkgrevno('ceph-common', '10.1') >= 0:
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
@ -196,7 +196,8 @@ class Pool(object):
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
device_class=None):
"""Return the number of placement groups to use when creating the pool.
Returns the number of placement groups which should be specified when
@ -229,6 +230,9 @@ class Pool(object):
increased. NOTE: the default is primarily to handle the scenario
where related charms requiring pools has not been upgraded to
include an update to indicate their relative usage of the pools.
:param device_class: str. class of storage to use for basis of pgs
calculation; ceph supports nvme, ssd and hdd by default based
on presence of devices of each type in the deployment.
:return: int. The number of pgs to use.
"""
@ -243,17 +247,20 @@ class Pool(object):
# If the expected-osd-count is specified, then use the max between
# the expected-osd-count and the actual osd_count
osd_list = get_osds(self.service)
osd_list = get_osds(self.service, device_class)
expected = config('expected-osd-count') or 0
if osd_list:
osd_count = max(expected, len(osd_list))
if device_class:
osd_count = len(osd_list)
else:
osd_count = max(expected, len(osd_list))
# Log a message to provide some insight if the calculations claim
# to be off because someone is setting the expected count and
# there are more OSDs in reality. Try to make a proper guess
# based upon the cluster itself.
if expected and osd_count != expected:
if not device_class and expected and osd_count != expected:
log("Found more OSDs than provided expected count. "
"Using the actual count instead", INFO)
elif expected:
@ -575,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
raise
# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param max_bytes: int or long
:return: None. Can raise CalledProcessError
:param service: The Ceph user name to run the command under
:type service: str
:param pool_name: Name of pool
:type pool_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
:raises: subprocess.CalledProcessError
"""
# Set a byte quota on a RADOS pool in ceph.
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
'max_bytes', str(max_bytes)]
try:
check_call(cmd)
except CalledProcessError:
raise
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
if max_bytes:
cmd = cmd + ['max_bytes', str(max_bytes)]
if max_objects:
cmd = cmd + ['max_objects', str(max_objects)]
check_call(cmd)
def remove_pool_quota(service, pool_name):
@ -626,7 +636,8 @@ def remove_erasure_profile(service, profile_name):
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
failure_domain='host',
data_chunks=2, coding_chunks=1,
locality=None, durability_estimator=None):
locality=None, durability_estimator=None,
device_class=None):
"""
Create a new erasure code profile if one does not already exist for it. Updates
the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@ -640,10 +651,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param coding_chunks: int
:param locality: int
:param durability_estimator: int
:param device_class: six.string_types
:return: None. Can raise CalledProcessError
"""
version = ceph_version()
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@ -654,12 +664,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
# failure_domain changed in luminous
if version and version >= '12.0.0':
if luminous_or_later:
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
# device class new in luminous
if luminous_or_later and device_class:
cmd.append('crush-device-class={}'.format(device_class))
else:
log('Skipping device class configuration (ceph < 12.0.0)',
level=DEBUG)
# Add plugin specific information
if locality is not None:
# For local erasure codes
@ -744,20 +762,26 @@ def pool_exists(service, name):
return name in out.split()
def get_osds(service):
def get_osds(service, device_class=None):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
cluster (optionally filtered by storage device class).
:param device_class: Class of storage device for OSD's
:type device_class: str
"""
version = ceph_version()
if version and version >= '0.56':
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
if luminous_or_later and device_class:
out = check_output(['ceph', '--id', service,
'osd', 'crush', 'class',
'ls-osd', device_class,
'--format=json'])
else:
out = check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
return None
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
def install():
@ -811,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
:raises: CalledProcessError if ceph call fails
"""
if ceph_version() >= '12.0.0':
if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
cmd = ['ceph', '--id', client, 'osd', 'pool',
'application', 'enable', pool, name]
check_call(cmd)
@ -1091,22 +1115,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
return True
def ceph_version():
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
if six.PY3:
output = output.decode('UTF-8')
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None
class CephBrokerRq(object):
"""Ceph broker request.
@ -1147,14 +1155,47 @@ class CephBrokerRq(object):
'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
"""Adds an operation to create a pool.
weight=None, group=None, namespace=None,
app_name=None, max_bytes=None, max_objects=None):
"""DEPRECATED: Use ``add_op_create_replicated_pool()`` or
``add_op_create_erasure_pool()`` instead.
"""
return self.add_op_create_replicated_pool(
name, replica_count=replica_count, pg_num=pg_num, weight=weight,
group=group, namespace=namespace, app_name=app_name,
max_bytes=max_bytes, max_objects=max_objects)
@param pg_num setting: optional setting. If not provided, this value
will be calculated by the broker based on how many OSDs are in the
cluster at the time of creation. Note that, if provided, this value
will be capped at the current available maximum.
@param weight: the percentage of data the pool makes up
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None,
app_name=None, max_bytes=None,
max_objects=None):
"""Adds an operation to create a replicated pool.
:param name: Name of pool to create
:type name: str
:param replica_count: Number of copies Ceph should keep of your data.
:type replica_count: int
:param pg_num: Request specific number of Placement Groups to create
for pool.
:type pg_num: int
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
Used to calculate number of Placement Groups to create
for pool.
:type weight: float
:param group: Group to add pool to
:type group: str
:param namespace: Group namespace
:type namespace: str
:param app_name: (Optional) Tag pool with application name. Note that
there are certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
if pg_num and weight:
raise ValueError('pg_num and weight are mutually exclusive')
@ -1162,7 +1203,41 @@ class CephBrokerRq(object):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count, 'pg_num': pg_num,
'weight': weight, 'group': group,
'group-namespace': namespace})
'group-namespace': namespace, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def add_op_create_erasure_pool(self, name, erasure_profile=None,
weight=None, group=None, app_name=None,
max_bytes=None, max_objects=None):
"""Adds an operation to create an erasure coded pool.
:param name: Name of pool to create
:type name: str
:param erasure_profile: Name of erasure code profile to use. If not
set the ceph-mon unit handling the broker
request will set its default value.
:type erasure_profile: str
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
:type weight: float
:param group: Group to add pool to
:type group: str
:param app_name: (Optional) Tag pool with application name. Note that
there are certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
self.ops.append({'op': 'create-pool', 'name': name,
'pool-type': 'erasure',
'erasure-profile': erasure_profile,
'weight': weight,
'group': group, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def set_ops(self, ops):
"""Set request ops to provided value.

View File

@ -17,12 +17,53 @@ import re
from stat import S_ISBLK
from subprocess import (
CalledProcessError,
check_call,
check_output,
call
)
def _luks_uuid(dev):
"""
Check to see if dev is a LUKS encrypted volume, returning the UUID
of volume if it is.
:param: dev: path to block device to check.
:returns: str. UUID of LUKS device or None if not a LUKS device
"""
try:
cmd = ['cryptsetup', 'luksUUID', dev]
return check_output(cmd).decode('UTF-8').strip()
except CalledProcessError:
return None
def is_luks_device(dev):
"""
Determine if dev is a LUKS-formatted block device.
:param: dev: A full path to a block device to check for LUKS header
presence
:returns: boolean: indicates whether a device is used based on LUKS header.
"""
return True if _luks_uuid(dev) else False
def is_mapped_luks_device(dev):
"""
Determine if dev is a mapped LUKS device
:param: dev: A full path to a block device to be checked
:returns: boolean: indicates whether a device is mapped
"""
_, dirs, _ = next(os.walk(
'/sys/class/block/{}/holders/'
.format(os.path.basename(os.path.realpath(dev))))
)
is_held = len(dirs) > 0
return is_held and is_luks_device(dev)
def is_block_device(path):
'''
Confirm device at path is a valid block device node.

View File

@ -50,6 +50,11 @@ TRACE = "TRACE"
MARKER = object()
SH_MAX_ARG = 131071
RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
'This may not be compatible with software you are '
'running in your shell.')
cache = {}
@ -1414,3 +1419,72 @@ def unit_doomed(unit=None):
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')
def env_proxy_settings(selected_settings=None):
"""Get proxy settings from process environment variables.
Get charm proxy settings from environment variables that correspond to
juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
see lp:1782236) in a format suitable for passing to an application that
reacts to proxy settings passed as environment variables. Some applications
support lowercase or uppercase notation (e.g. curl), some support only
lowercase (e.g. wget), there are also subjectively rare cases of only
uppercase notation support. no_proxy CIDR and wildcard support also varies
between runtimes and applications as there is no enforced standard.
Some applications may connect to multiple destinations and expose config
options that would affect only proxy settings for a specific destination
these should be handled in charms in an application-specific manner.
:param selected_settings: format only a subset of possible settings
:type selected_settings: list
:rtype: Option(None, dict[str, str])
"""
SUPPORTED_SETTINGS = {
'http': 'HTTP_PROXY',
'https': 'HTTPS_PROXY',
'no_proxy': 'NO_PROXY',
'ftp': 'FTP_PROXY'
}
if selected_settings is None:
selected_settings = SUPPORTED_SETTINGS
selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
if k in selected_settings]
proxy_settings = {}
for var in selected_vars:
var_val = os.getenv(var)
if var_val:
proxy_settings[var] = var_val
proxy_settings[var.lower()] = var_val
# Now handle juju-prefixed environment variables. The legacy vs new
# environment variable usage is mutually exclusive
charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
if charm_var_val:
proxy_settings[var] = charm_var_val
proxy_settings[var.lower()] = charm_var_val
if 'no_proxy' in proxy_settings:
if _contains_range(proxy_settings['no_proxy']):
log(RANGE_WARNING, level=WARNING)
return proxy_settings if proxy_settings else None
def _contains_range(addresses):
"""Check for cidr or wildcard domain in a string.
Given a string comprising a comma separated list of ip addresses
and domain names, determine whether the string contains IP ranges
or wildcard domains.
:param addresses: comma separated list of domains and ip addresses.
:type addresses: str
"""
return (
# Test for cidr (e.g. 10.20.20.0/24)
"/" in addresses or
# Test for wildcard domains (*.foo.com or .foo.com)
"*" in addresses or
addresses.startswith(".") or
",." in addresses or
" ." in addresses)

View File

@ -47,6 +47,7 @@ if __platform__ == "ubuntu":
cmp_pkgrevno,
CompareHostReleases,
get_distrib_codename,
arch
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import ( # NOQA:F401

View File

@ -1,5 +1,6 @@
import subprocess
from charmhelpers.core.hookenv import cached
from charmhelpers.core.strutils import BasicStringComparator
@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
@cached
def arch():
"""Return the package architecture as a string.
:returns: the architecture
:rtype: str
:raises: subprocess.CalledProcessError if dpkg command fails
"""
return subprocess.check_output(
['dpkg', '--print-architecture']
).rstrip().decode('UTF-8')

View File

@ -28,7 +28,7 @@ from charmhelpers.core.hookenv import (
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
def create(sysctl_dict, sysctl_file, ignore=False):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a dict or YAML-formatted string of sysctl
@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file):
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
:param ignore: If True, ignore "unknown variable" errors.
:type ignore: bool
:returns: None
"""
if type(sysctl_dict) is not dict:
@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file):
for key, value in sysctl_dict_parsed.items():
fd.write("{}={}\n".format(key, value))
log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
log("Updating sysctl_file: {} values: {}".format(sysctl_file,
sysctl_dict_parsed),
level=DEBUG)
check_call(["sysctl", "-p", sysctl_file])
call = ["sysctl", "-p", sysctl_file]
if ignore:
call.append("-e")
check_call(call)

View File

@ -19,15 +19,14 @@ import re
import six
import time
import subprocess
from tempfile import NamedTemporaryFile
from charmhelpers.core.host import (
lsb_release
)
from charmhelpers.core.host import get_distrib_codename
from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
env_proxy_settings,
)
from charmhelpers.fetch import SourceConfigError, GPGKeyError
@ -303,12 +302,17 @@ def import_key(key):
"""Import an ASCII Armor key.
A Radix64 format keyid is also supported for backwards
compatibility, but should never be used; the key retrieval
mechanism is insecure and subject to man-in-the-middle attacks
voiding all signature checks using that key.
compatibility. In this case Ubuntu keyserver will be
queried for a key via HTTPS by its keyid. This method
is less preferable because https proxy servers may
require traffic decryption which is equivalent to a
man-in-the-middle attack (a proxy server impersonates
keyserver TLS certificates and has to be explicitly
trusted by the system).
:param keyid: The key in ASCII armor format,
including BEGIN and END markers.
:param key: A GPG key in ASCII armor format,
including BEGIN and END markers or a keyid.
:type key: (bytes, str)
:raises: GPGKeyError if the key could not be imported
"""
key = key.strip()
@ -319,35 +323,131 @@ def import_key(key):
log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
'-----END PGP PUBLIC KEY BLOCK-----' in key):
log("Importing ASCII Armor PGP key", level=DEBUG)
with NamedTemporaryFile() as keyfile:
with open(keyfile.name, 'w') as fd:
fd.write(key)
fd.write("\n")
cmd = ['apt-key', 'add', keyfile.name]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
error = "Error importing PGP key '{}'".format(key)
log(error)
raise GPGKeyError(error)
log("Writing provided PGP key in the binary format", level=DEBUG)
if six.PY3:
key_bytes = key.encode('utf-8')
else:
key_bytes = key
key_name = _get_keyid_by_gpg_key(key_bytes)
key_gpg = _dearmor_gpg_key(key_bytes)
_write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
else:
raise GPGKeyError("ASCII armor markers missing from GPG key")
else:
# We should only send things obviously not a keyid offsite
# via this unsecured protocol, as it may be a secret or part
# of one.
log("PGP key found (looks like Radix64 format)", level=WARNING)
log("INSECURLY importing PGP key from keyserver; "
log("SECURELY importing PGP key from keyserver; "
"full key not provided.", level=WARNING)
cmd = ['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
try:
_run_with_retries(cmd)
except subprocess.CalledProcessError:
error = "Error importing PGP key '{}'".format(key)
log(error)
raise GPGKeyError(error)
# as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
# to retrieve GPG keys. `apt-key adv` command is deprecated as is
# apt-key in general as noted in its manpage. See lp:1433761 for more
# history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
# gpg
key_asc = _get_key_by_keyid(key)
# write the key in GPG format so that apt-key list shows it
key_gpg = _dearmor_gpg_key(key_asc)
_write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
def _get_keyid_by_gpg_key(key_material):
"""Get a GPG key fingerprint by GPG key material.
Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
or binary GPG key material. Can be used, for example, to generate file
names for keys passed via charm options.
:param key_material: ASCII armor-encoded or binary GPG key material
:type key_material: bytes
:raises: GPGKeyError if invalid key material has been provided
:returns: A GPG key fingerprint
:rtype: str
"""
# Use the same gpg command for both Xenial and Bionic
cmd = 'gpg --with-colons --with-fingerprint'
ps = subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_material)
if six.PY3:
out = out.decode('utf-8')
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material provided')
# from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
def _get_key_by_keyid(keyid):
"""Get a key via HTTPS from the Ubuntu keyserver.
Different key ID formats are supported by SKS keyservers (the longer ones
are more secure, see "dead beef attack" and https://evil32.com/). Since
HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
impersonate keyserver.ubuntu.com and generate a certificate with
keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
certificate. If such proxy behavior is expected it is necessary to add the
CA certificate chain containing the intermediate CA of the SSLBump proxy to
every machine that this code runs on via ca-certs cloud-init directive (via
cloudinit-userdata model-config) or via other means (such as through a
custom charm option). Also note that DNS resolution for the hostname in a
URL is done at a proxy server - not at the client side.
8-digit (32 bit) key ID
https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
16-digit (64 bit) key ID
https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
40-digit key ID:
https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
:param keyid: An 8, 16 or 40 hex digit keyid to find a key for
:type keyid: (bytes, str)
:returns: A key material for the specified GPG key id
:rtype: (str, bytes)
:raises: subprocess.CalledProcessError
"""
# options=mr - machine-readable output (disables html wrappers)
keyserver_url = ('https://keyserver.ubuntu.com'
'/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
curl_cmd = ['curl', keyserver_url.format(keyid)]
# use proxy server settings in order to retrieve the key
return subprocess.check_output(curl_cmd,
env=env_proxy_settings(['https']))
def _dearmor_gpg_key(key_asc):
"""Converts a GPG key in the ASCII armor format to the binary format.
:param key_asc: A GPG key in ASCII armor format.
:type key_asc: (str, bytes)
:returns: A GPG key in binary format
:rtype: (str, bytes)
:raises: GPGKeyError
"""
ps = subprocess.Popen(['gpg', '--dearmor'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_asc)
# no need to decode output as it is binary (invalid utf-8), only error
if six.PY3:
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material. Check your network setup'
' (MTU, routing, DNS) and/or proxy server settings'
' as well as destination keyserver status.')
else:
return out
def _write_apt_gpg_keyfile(key_name, key_material):
"""Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
"""
with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
'wb') as keyf:
keyf.write(key_material)
def add_source(source, key=None, fail_invalid=False):
@ -442,13 +542,13 @@ def add_source(source, key=None, fail_invalid=False):
def _add_proposed():
"""Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
Uses get_distrib_codename to determine the correct stanza for
the deb line.
For intel architectures PROPOSED_POCKET is used for the release, but for
other architectures PROPOSED_PORTS_POCKET is used for the release.
"""
release = lsb_release()['DISTRIB_CODENAME']
release = get_distrib_codename()
arch = platform.machine()
if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@ -461,11 +561,16 @@ def _add_apt_repository(spec):
"""Add the spec using add_apt_repository
:param spec: the parameter to pass to add_apt_repository
:type spec: str
"""
if '{series}' in spec:
series = lsb_release()['DISTRIB_CODENAME']
series = get_distrib_codename()
spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec])
# software-properties package for bionic properly reacts to proxy settings
# passed as environment variables (See lp:1433761). This is not the case
# LTS and non-LTS releases below bionic.
_run_with_retries(['add-apt-repository', '--yes', spec],
cmd_env=env_proxy_settings(['https']))
def _add_cloud_pocket(pocket):
@ -534,7 +639,7 @@ def _verify_is_ubuntu_rel(release, os_release):
:raises: SourceConfigError if the release is not the same as the ubuntu
release.
"""
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
ubuntu_rel = get_distrib_codename()
if release != ubuntu_rel:
raise SourceConfigError(
'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'