Charmhelpers sync

This is required to get the updated broker request handler
containing the new rbd-mirroring-mode flag.

Change-Id: Ia4c1e9741ae8eab2e41d7ba4e4615475914c43e5
Marius Oprin 2020-11-24 18:05:15 +02:00
parent 5f87ef83e3
commit 4114828b93
8 changed files with 193 additions and 42 deletions
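
For context, a minimal sketch (not part of the commit) of how a consuming charm might exercise the new flag via the synced broker request helper. The pool name, mirroring mode and relation name are illustrative, and forwarding the flag through add_op_create_replicated_pool's keyword arguments is assumed:

from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    send_request_if_needed,
)

# Hypothetical example: ask the ceph broker for a replicated pool with RBD
# mirroring done per image rather than for the whole pool.
rq = CephBrokerRq()
rq.add_op_create_replicated_pool(
    name='cinder-ceph',             # illustrative pool name
    replica_count=3,
    rbd_mirroring_mode='image',     # new flag from this sync; default is 'pool'
)
send_request_if_needed(rq, relation='ceph')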

View File

@@ -396,7 +396,8 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
         if global_addrs:
             # Make sure any found global addresses are not temporary
             cmd = ['ip', 'addr', 'show', iface]
-            out = subprocess.check_output(cmd).decode('UTF-8')
+            out = subprocess.check_output(
+                cmd).decode('UTF-8', errors='replace')
             if dynamic_only:
                 key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
             else:
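
This hunk (and the matching ones in core/host.py further down) relaxes strict UTF-8 decoding of `ip` output to errors='replace', so a stray non-UTF-8 byte degrades to a replacement character instead of raising. A small illustrative sketch, independent of charmhelpers:

# Illustrative only: strict decoding raises on invalid UTF-8, while
# errors='replace' substitutes U+FFFD and parsing can continue.
raw = b'inet6 fe80::1/64 scope link \xff\n'

try:
    raw.decode('UTF-8')
except UnicodeDecodeError as exc:
    print('strict decode failed:', exc)

print(raw.decode('UTF-8', errors='replace'))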

View File

@@ -34,12 +34,14 @@ from charmhelpers.core.hookenv import (
     WARNING,
 )
 from charmhelpers.contrib.openstack.ip import (
-    ADMIN,
     resolve_address,
     get_vip_in_network,
-    INTERNAL,
-    PUBLIC,
-    ADDRESS_MAP)
+    ADDRESS_MAP,
+    get_default_api_bindings,
+)
+from charmhelpers.contrib.network.ip import (
+    get_relation_ip,
+)
 from charmhelpers.core.host import (
     mkdir,
@@ -113,44 +115,118 @@ class CertRequest(object):
         return req


-def get_certificate_request(json_encode=True):
-    """Generate a certificatee requests based on the network confioguration
+def get_certificate_request(json_encode=True, bindings=None):
+    """Generate a certificate requests based on the network configuration
+
+    :param json_encode: Encode request in JSON or not. Used for setting
+                        directly on a relation.
+    :type json_encode: boolean
+    :param bindings: List of bindings to check in addition to default api
+                     bindings.
+    :type bindings: list of strings
+    :returns: CertRequest request as dictionary or JSON string.
+    :rtype: Union[dict, json]
     """
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = set(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
     req = CertRequest(json_encode=json_encode)
     req.add_hostname_cn()
-    # Add os-hostname entries
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        net_config = config(ADDRESS_MAP[net_type]['override'])
+    _sans = get_certificate_sans()
+
+    # Handle specific hostnames per binding
+    for binding in bindings:
+        hostname_override = config(ADDRESS_MAP[binding]['override'])
         try:
-            net_addr = resolve_address(endpoint_type=net_type)
+            net_addr = resolve_address(endpoint_type=binding)
             ip = network_get_primary_address(
-                ADDRESS_MAP[net_type]['binding'])
+                ADDRESS_MAP[binding]['binding'])
             addresses = [net_addr, ip]
             vip = get_vip_in_network(resolve_network_cidr(ip))
             if vip:
                 addresses.append(vip)
-            if net_config:
+
+            # Add hostname certificate request
+            if hostname_override:
                 req.add_entry(
-                    net_type,
-                    net_config,
+                    binding,
+                    hostname_override,
                     addresses)
-            else:
-                # There is network address with no corresponding hostname.
-                # Add the ip to the hostname cert to allow for this.
-                req.add_hostname_cn_ip(addresses)
+            # Remove hostname specific addresses from _sans
+            for addr in addresses:
+                try:
+                    _sans.remove(addr)
+                except (ValueError, KeyError):
+                    pass
+
         except NoNetworkBinding:
             log("Skipping request for certificate for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
+                "local address found".format(binding), WARNING)
+    # Guarantee all SANs are covered
+    # These are network addresses with no corresponding hostname.
+    # Add the ips to the hostname cert to allow for this.
+    req.add_hostname_cn_ip(_sans)
     return req.get_request()


+def get_certificate_sans(bindings=None):
+    """Get all possible IP addresses for certificate SANs.
+    """
+    _sans = [unit_get('private-address')]
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = set(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
+
+    for binding in bindings:
+        # Check for config override
+        try:
+            net_config = config(ADDRESS_MAP[binding]['config'])
+        except KeyError:
+            # There is no configuration network for this binding name
+            net_config = None
+        # Using resolve_address is likely redundant. Keeping it here in
+        # case there is an edge case it handles.
+        net_addr = resolve_address(endpoint_type=binding)
+        ip = get_relation_ip(binding, cidr_network=net_config)
+        _sans = _sans + [net_addr, ip]
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            _sans.append(vip)
+    return set(_sans)
+
+
 def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
     """Create symlinks for SAN records

     :param ssl_dir: str Directory to create symlinks in
     :param custom_hostname_link: str Additional link to be created
     """
+    # This includes the hostname cert and any specific binding certs:
+    # admin, internal, public
+    req = get_certificate_request(json_encode=False)["cert_requests"]
+    # Specific certs
+    for cert_req in req.keys():
+        requested_cert = os.path.join(
+            ssl_dir,
+            'cert_{}'.format(cert_req))
+        requested_key = os.path.join(
+            ssl_dir,
+            'key_{}'.format(cert_req))
+        for addr in req[cert_req]['sans']:
+            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+            if os.path.isfile(requested_cert) and not os.path.isfile(cert):
+                os.symlink(requested_cert, cert)
+                os.symlink(requested_key, key)
+
+    # Handle custom hostnames
     hostname = get_hostname(unit_get('private-address'))
     hostname_cert = os.path.join(
         ssl_dir,
@@ -158,18 +234,6 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
     hostname_key = os.path.join(
         ssl_dir,
         'key_{}'.format(hostname))
-    # Add links to hostname cert, used if os-hostname vars not set
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        try:
-            addr = resolve_address(endpoint_type=net_type)
-            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
-            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
-            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
-                os.symlink(hostname_cert, cert)
-                os.symlink(hostname_key, key)
-        except NoNetworkBinding:
-            log("Skipping creating cert symlink for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
     if custom_hostname_link:
         custom_cert = os.path.join(
             ssl_dir,
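
A hedged usage sketch for the reworked certificate helpers (not taken from the sync): a charm wanting certificates for an extra binding on top of the default API bindings could pass it in; the 'data' binding name and the usual certificates-relation pattern below are assumptions.

from charmhelpers.contrib.openstack.cert_utils import get_certificate_request
from charmhelpers.core.hookenv import relation_ids, relation_set

# Publish a certificate request covering the default API bindings plus a
# hypothetical extra 'data' binding on the certificates relation.
for rid in relation_ids('certificates'):
    relation_set(
        relation_id=rid,
        relation_settings=get_certificate_request(bindings=['data']))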

View File

@@ -33,6 +33,7 @@ INTERNAL = 'int'
 ADMIN = 'admin'
 ACCESS = 'access'
+# TODO: reconcile 'int' vs 'internal' binding names
 ADDRESS_MAP = {
     PUBLIC: {
         'binding': 'public',
@@ -58,6 +59,14 @@ ADDRESS_MAP = {
         'fallback': 'private-address',
         'override': 'os-access-hostname',
     },
+    # Note (thedac) bridge to begin the reconciliation between 'int' vs
+    # 'internal' binding names
+    'internal': {
+        'binding': 'internal',
+        'config': 'os-internal-network',
+        'fallback': 'private-address',
+        'override': 'os-internal-hostname',
+    },
 }
@@ -195,3 +204,10 @@ def get_vip_in_network(network):
             if is_address_in_network(network, vip):
                 matching_vip = vip
     return matching_vip
+
+
+def get_default_api_bindings():
+    _default_bindings = []
+    for binding in [INTERNAL, ADMIN, PUBLIC]:
+        _default_bindings.append(ADDRESS_MAP[binding]['binding'])
+    return _default_bindings
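
Given the ADDRESS_MAP shown above, the new helper returns the binding names for the default endpoint types, and the added 'internal' entry lets those names be used directly as ADDRESS_MAP keys. A short sketch of the expected behaviour (values inferred from the map, so treat them as assumptions):

from charmhelpers.contrib.openstack.ip import (
    ADDRESS_MAP,
    get_default_api_bindings,
)

bindings = get_default_api_bindings()
# Expected: ['internal', 'admin', 'public'], the binding names rather than
# the 'int'/'admin'/'public' endpoint-type keys.
for binding in bindings:
    # The new 'internal' ADDRESS_MAP entry is what makes this lookup work
    # for every returned name.
    hostname_override = ADDRESS_MAP[binding]['override']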

View File

@@ -15,5 +15,6 @@ password = {{ admin_password }}
 {% endif -%}
 {% if region -%}
 os_region_name = {{ region }}
+region_name = {{ region }}
 {% endif -%}
 randomize_allocation_candidates = true

View File

@@ -18,6 +18,7 @@ from functools import wraps
 import subprocess
 import json
+import operator
 import os
 import sys
 import re
@@ -33,7 +34,7 @@ from charmhelpers import deprecate
 from charmhelpers.contrib.network import ip
-from charmhelpers.core import unitdata
+from charmhelpers.core import decorators, unitdata
 from charmhelpers.core.hookenv import (
     WORKLOAD_STATES,
@@ -230,7 +231,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('ussuri',
         ['2.24.0', '2.25.0']),
     ('victoria',
-        ['2.25.0']),
+        ['2.25.0', '2.26.0']),
 ])

 # >= Liberty version->codename mapping
@@ -1295,7 +1296,7 @@ def _check_listening_on_ports_list(ports):
     Returns a list of ports being listened to and a list of the
     booleans.

-    @param ports: LIST or port numbers.
+    @param ports: LIST of port numbers.
     @returns [(port_num, boolean), ...], [boolean]
     """
     ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
@@ -1564,6 +1565,21 @@ def manage_payload_services(action, services=None, charm_func=None):
     return success, messages


+def make_wait_for_ports_barrier(ports, retry_count=5):
+    """Make a function to wait for port shutdowns.
+
+    Create a function which closes over the provided ports. The function will
+    retry probing ports until they are closed or the retry count has been reached.
+
+    """
+    @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1)
+    def retry_port_check():
+        _, ports_states = _check_listening_on_ports_list(ports)
+        juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG")
+        return any(ports_states)
+    return retry_port_check
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1599,6 +1615,7 @@ def pause_unit(assess_status_func, services=None, ports=None,
         services=services,
         charm_func=charm_func)
     set_unit_paused()
+
     if assess_status_func:
         message = assess_status_func()
         if message:
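
A hedged usage sketch for the new barrier helper (this is not how the sync wires it up, and the service name and ports are assumptions): the returned callable re-probes the ports with a short backoff until none of them is listening or the retry budget is exhausted.

from charmhelpers.core.host import service_stop
from charmhelpers.contrib.openstack.utils import make_wait_for_ports_barrier

API_PORTS = [8774, 8775]  # assumed ports, for illustration only

service_stop('nova-api-os-compute')  # assumed service name
wait_for_ports_closed = make_wait_for_ports_barrier(API_PORTS, retry_count=5)
still_open = wait_for_ports_closed()  # False once every port stopped listening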

View File

@@ -268,6 +268,7 @@ class BasePool(object):
         'compression-max-blob-size': (int, None),
         'compression-max-blob-size-hdd': (int, None),
         'compression-max-blob-size-ssd': (int, None),
+        'rbd-mirroring-mode': (str, ('image', 'pool'))
     }

     def __init__(self, service, name=None, percent_data=None, app_name=None,
@@ -1767,6 +1768,7 @@ class CephBrokerRq(object):
                               max_bytes=None,
                               max_objects=None,
                               namespace=None,
+                              rbd_mirroring_mode='pool',
                               weight=None):
        """Build common part of a create pool operation.
@@ -1825,6 +1827,9 @@ class CephBrokerRq(object):
        :type max_objects: Optional[int]
        :param namespace: Group namespace
        :type namespace: Optional[str]
+        :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD
+                                   mirroring is enabled.
+        :type rbd_mirroring_mode: Optional[str]
        :param weight: The percentage of data that is expected to be contained
                       in the pool from the total available space on the OSDs.
                       Used to calculate number of Placement Groups to create
@@ -1849,6 +1854,7 @@ class CephBrokerRq(object):
            'max-bytes': max_bytes,
            'max-objects': max_objects,
            'group-namespace': namespace,
+            'rbd-mirroring-mode': rbd_mirroring_mode,
            'weight': weight,
        }

View File

@@ -53,3 +53,41 @@ def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
         return _retry_on_exception_inner_2

     return _retry_on_exception_inner_1
+
+
+def retry_on_predicate(num_retries, predicate_fun, base_delay=0):
+    """Retry based on return value
+
+    The return value of the decorated function is passed to the given predicate_fun. If the
+    result of the predicate is False, retry the decorated function up to num_retries times
+
+    An exponential backoff up to base_delay^num_retries seconds can be introduced by setting
+    base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay
+
+    :param num_retries: Max. number of retries to perform
+    :type num_retries: int
+    :param predicate_fun: Predicate function to determine if a retry is necessary
+    :type predicate_fun: callable
+    :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay)
+    :type base_delay: float
+    """
+    def _retry_on_pred_inner_1(f):
+        def _retry_on_pred_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            delay = base_delay
+            while True:
+                result = f(*args, **kwargs)
+                if predicate_fun(result) or retries <= 0:
+                    return result
+                delay *= multiplier
+                multiplier += 1
+                log("Result {}, retrying '{}' {} more times (delay={})".format(
+                    result, f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_pred_inner_2
+    return _retry_on_pred_inner_1
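
A short sketch of the new decorator in use (the sentinel path is an assumption): the wrapped function is re-run, with an increasing delay, while the predicate applied to its return value is false.

import os

from charmhelpers.core.decorators import retry_on_predicate


@retry_on_predicate(5, bool, base_delay=0.5)
def sentinel_present():
    # Re-tried up to 5 times while this returns a falsy value.
    return os.path.exists('/var/run/my-service.ready')  # assumed path


ready = sentinel_present()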

View File

@@ -19,6 +19,7 @@
 # Nick Moffitt <nick.moffitt@canonical.com>
 # Matthew Wedgwood <matthew.wedgwood@canonical.com>

+import errno
 import os
 import re
 import pwd
@@ -677,7 +678,7 @@ def check_hash(path, checksum, hash_type='md5'):
     :param str checksum: Value of the checksum used to validate the file.

     :param str hash_type: Hash algorithm used to generate `checksum`.
-        Can be any hash alrgorithm supported by :mod:`hashlib`,
+        Can be any hash algorithm supported by :mod:`hashlib`,
         such as md5, sha1, sha256, sha512, etc.

     :raises ChecksumError: If the file fails the checksum
@@ -825,7 +826,8 @@ def list_nics(nic_type=None):
     if nic_type:
         for int_type in int_types:
             cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = subprocess.check_output(
+                cmd).decode('UTF-8', errors='replace')
             ip_output = ip_output.split('\n')
             ip_output = (line for line in ip_output if line)
             for line in ip_output:
@@ -841,7 +843,8 @@ def list_nics(nic_type=None):
                         interfaces.append(iface)
     else:
         cmd = ['ip', 'a']
-        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = subprocess.check_output(
+            cmd).decode('UTF-8', errors='replace').split('\n')
         ip_output = (line.strip() for line in ip_output if line)

         key = re.compile(r'^[0-9]+:\s+(.+):')
@@ -865,7 +868,8 @@ def set_nic_mtu(nic, mtu):
 def get_nic_mtu(nic):
     """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+    ip_output = subprocess.check_output(
+        cmd).decode('UTF-8', errors='replace').split('\n')
     mtu = ""
     for line in ip_output:
         words = line.split()
@@ -877,7 +881,7 @@ def get_nic_mtu(nic):
 def get_nic_hwaddr(nic):
     """Return the Media Access Control (MAC) for a network interface."""
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8')
+    ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace')
     hwaddr = ""
     words = ip_output.split()
     if 'link/ether' in words:
@@ -889,7 +893,7 @@ def get_nic_hwaddr(nic):
 def chdir(directory):
     """Change the current working directory to a different directory for a code
     block and return the previous directory after the block exits. Useful to
-    run commands from a specificed directory.
+    run commands from a specified directory.

     :param str directory: The directory path to change to for this context.
     """
@@ -924,9 +928,13 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):
     for root, dirs, files in os.walk(path, followlinks=follow_links):
         for name in dirs + files:
             full = os.path.join(root, name)
-            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
-            if not broken_symlink:
+            try:
                 chown(full, uid, gid)
+            except (IOError, OSError) as e:
+                # Intended to ignore "file not found". Catching both to be
+                # compatible with both Python 2.7 and 3.x.
+                if e.errno == errno.ENOENT:
+                    pass


 def lchownr(path, owner, group):