Fix Zaza tests, pre-freeze 'make sync' and Python 3.8 support

Change-Id: If2ac13d703f2f7b6af19f5beba5ae898c64f8584
Closes-Bug: #1828424
Aurelien Lourot 2020-03-12 13:42:02 +01:00
parent 97830b9f93
commit f5eacbd0c9
22 changed files with 1533 additions and 85 deletions


@ -13,13 +13,17 @@
# limitations under the License.
import collections
import copy
import enum
import glob
import hashlib
import json
import math
import os
import re
import socket
import time
from base64 import b64decode
from subprocess import check_call, CalledProcessError
@ -50,7 +54,8 @@ from charmhelpers.core.hookenv import (
INFO,
ERROR,
status_set,
network_get_primary_address
network_get_primary_address,
WARNING,
)
from charmhelpers.core.sysctl import create as sysctl_create
@ -110,6 +115,13 @@ from charmhelpers.contrib.openstack.utils import (
)
from charmhelpers.core.unitdata import kv
try:
from sriov_netplan_shim import pci
except ImportError:
# The use of the function and contexts that require the pci module is
# optional.
pass
try:
import psutil
except ImportError:
@ -263,6 +275,12 @@ class SharedDBContext(OSContextGenerator):
'database_password': rdata.get(password_setting),
'database_type': 'mysql+pymysql'
}
# Port is being introduced with LP Bug #1876188
# but it is not currently required and may not be set in all
# cases, particularly in classic charms.
port = rdata.get('db_port')
if port:
ctxt['database_port'] = port
if CompareOpenStackReleases(rel) < 'queens':
ctxt['database_type'] = 'mysql'
if self.context_complete(ctxt):
@ -2396,3 +2414,734 @@ class DHCPAgentContext(OSContextGenerator):
return False
else:
return _config
EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac'])
def resolve_pci_from_mapping_config(config_key):
"""Resolve local PCI devices from MAC addresses in mapping config.
Note that this function keeps a record of mac->PCI address lookups
in the local unit db as the devices will disappear from the system
once bound.
:param config_key: Configuration option key to parse data from
:type config_key: str
:returns: PCI device address to Tuple(entity, mac) map
:rtype: collections.OrderedDict[str,Tuple[str,str]]
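Example (illustrative; the bridge name, MAC and PCI address below are
hypothetical)::
# with config 'data-port' set to 'br-ex:00:53:00:00:00:42' this could
# return:
# OrderedDict([('0000:03:00.0',
#               EntityMac('br-ex', '00:53:00:00:00:42'))])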
"""
devices = pci.PCINetDevices()
resolved_devices = collections.OrderedDict()
db = kv()
# Note that ``parse_data_port_mappings`` returns Dict regardless of input
for mac, entity in parse_data_port_mappings(config(config_key)).items():
pcidev = devices.get_device_from_mac(mac)
if pcidev:
# NOTE: store mac->pci allocation as post binding
# it disappears from PCIDevices.
db.set(mac, pcidev.pci_address)
db.flush()
pci_address = db.get(mac)
if pci_address:
resolved_devices[pci_address] = EntityMac(entity, mac)
return resolved_devices
class DPDKDeviceContext(OSContextGenerator):
def __init__(self, driver_key=None, bridges_key=None, bonds_key=None):
"""Initialize DPDKDeviceContext.
:param driver_key: Key to use when retrieving driver config.
:type driver_key: str
:param bridges_key: Key to use when retrieving bridge config.
:type bridges_key: str
:param bonds_key: Key to use when retrieving bonds config.
:type bonds_key: str
"""
self.driver_key = driver_key or 'dpdk-driver'
self.bridges_key = bridges_key or 'data-port'
self.bonds_key = bonds_key or 'dpdk-bond-mappings'
def __call__(self):
"""Populate context.
:returns: context
:rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]]
"""
driver = config(self.driver_key)
if driver is None:
return {}
# Resolve PCI devices for both directly used devices (_bridges)
# and devices for use in dpdk bonds (_bonds)
pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
return {'devices': pci_devices,
'driver': driver}
class OVSDPDKDeviceContext(OSContextGenerator):
def __init__(self, bridges_key=None, bonds_key=None):
"""Initialize OVSDPDKDeviceContext.
:param bridges_key: Key to use when retrieving bridge config.
:type bridges_key: str
:param bonds_key: Key to use when retrieving bonds config.
:type bonds_key: str
"""
self.bridges_key = bridges_key or 'data-port'
self.bonds_key = bonds_key or 'dpdk-bond-mappings'
@staticmethod
def _parse_cpu_list(cpulist):
"""Parses a linux cpulist for a numa node
:returns: list of cores
:rtype: List[int]
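Example::
_parse_cpu_list('0-2,4')  # -> [0, 1, 2, 4]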
"""
cores = []
ranges = cpulist.split(',')
for cpu_range in ranges:
if "-" in cpu_range:
cpu_min_max = cpu_range.split('-')
cores += range(int(cpu_min_max[0]),
int(cpu_min_max[1]) + 1)
else:
cores.append(int(cpu_range))
return cores
def _numa_node_cores(self):
"""Get map of numa node -> cpu core
:returns: map of numa node -> cpu core
:rtype: Dict[str,List[int]]
"""
nodes = {}
node_regex = '/sys/devices/system/node/node*'
for node in glob.glob(node_regex):
index = node.lstrip('/sys/devices/system/node/node')
with open(os.path.join(node, 'cpulist')) as cpulist:
nodes[index] = self._parse_cpu_list(cpulist.read().strip())
return nodes
def cpu_mask(self):
"""Get hex formatted CPU mask
The mask is based on using the first config:dpdk-socket-cores
cores of each NUMA node in the unit.
:returns: hex formatted CPU mask
:rtype: str
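Example (illustrative; assumes two NUMA nodes with cores [0, 1] and
[2, 3] and ``dpdk-socket-cores`` set to 1)::
mask = (1 << 0) | (1 << 2)  # first core of each node
format(mask, '#04x')  # -> '0x05'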
"""
num_cores = config('dpdk-socket-cores')
mask = 0
for cores in self._numa_node_cores().values():
for core in cores[:num_cores]:
mask = mask | 1 << core
return format(mask, '#04x')
def socket_memory(self):
"""Formatted list of socket memory configuration per NUMA node
:returns: socket memory configuration per NUMA node
:rtype: str
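Example (illustrative; assumes two NUMA nodes and
``dpdk-socket-memory`` set to 1024)::
socket_memory()  # -> '1024,1024'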
"""
sm_size = config('dpdk-socket-memory')
node_regex = '/sys/devices/system/node/node*'
mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
if mem_list:
return ','.join(mem_list)
else:
return str(sm_size)
def devices(self):
"""List of PCI devices for use by DPDK
:returns: List of PCI devices for use by DPDK
:rtype: collections.OrderedDict[str,str]
"""
pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
return pci_devices
def _formatted_whitelist(self, flag):
"""Flag formatted list of devices to whitelist
:param flag: flag format to use
:type flag: str
:rtype: str
"""
whitelist = []
for device in self.devices():
whitelist.append(flag.format(device=device))
return ' '.join(whitelist)
def device_whitelist(self):
"""Formatted list of devices to whitelist for dpdk
using the old style '-w' flag
:returns: devices to whitelist prefixed by '-w '
:rtype: str
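Example (illustrative PCI addresses)::
device_whitelist()  # -> '-w 0000:03:00.0 -w 0000:04:00.0'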
"""
return self._formatted_whitelist('-w {device}')
def pci_whitelist(self):
"""Formatted list of devices to whitelist for dpdk
using the new style '--pci-whitelist' flag
:returns: devices to whitelist prefixed by '--pci-whitelist '
:rtype: str
"""
return self._formatted_whitelist('--pci-whitelist {device}')
def __call__(self):
"""Populate context.
:returns: context
:rtype: Dict[str,Union[bool,str]]
"""
ctxt = {}
whitelist = self.device_whitelist()
if whitelist:
ctxt['dpdk_enabled'] = config('enable-dpdk')
ctxt['device_whitelist'] = self.device_whitelist()
ctxt['socket_memory'] = self.socket_memory()
ctxt['cpu_mask'] = self.cpu_mask()
return ctxt
class BridgePortInterfaceMap(object):
"""Build a map of bridge ports and interaces from charm configuration.
NOTE: the handling of this detail in the charm is pre-deprecated.
The long term goal is for network connectivity detail to be modelled in
the server provisioning layer (such as MAAS) which in turn will provide
a Netplan YAML description that will be used to drive Open vSwitch.
Until we get to that reality the charm will need to configure this
detail based on application level configuration options.
There is an established way of mapping interfaces to ports and bridges
in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we
will carry that forward.
The relationship between bridge, port and interface(s).
+--------+
| bridge |
+--------+
|
+----------------+
| port aka. bond |
+----------------+
| |
+-+ +-+
|i| |i|
|n| |n|
|t| |t|
|0| |N|
+-+ +-+
"""
class interface_type(enum.Enum):
"""Supported interface types.
Supported interface types can be found in the ``iface_types`` column
in the ``Open_vSwitch`` table on a running system.
"""
dpdk = 'dpdk'
internal = 'internal'
system = 'system'
def __str__(self):
"""Return string representation of value.
:returns: string representation of value.
:rtype: str
"""
return self.value
def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None,
global_mtu=None):
"""Initialize map.
:param bridges_key: Name of bridge:interface/port map config key
(default: 'data-port')
:type bridges_key: Optional[str]
:param bonds_key: Name of port-name:interface map config key
(default: 'dpdk-bond-mappings')
:type bonds_key: Optional[str]
:param enable_dpdk_key: Name of DPDK toggle config key
(default: 'enable-dpdk')
:type enable_dpdk_key: Optional[str]
:param global_mtu: Set an MTU on all interfaces at map initialization.
The default is to have Open vSwitch get this from the underlying
interface as set up by bare metal provisioning.
Note that you can augment the MTU on an individual interface basis
like this:
ifdatamap = bpi.get_ifdatamap(bridge, port)
ifdatamap = {
port: {
**ifdata,
**{'mtu-request': my_individual_mtu_map[port]},
}
for port, ifdata in ifdatamap.items()
}
:type global_mtu: Optional[int]
"""
bridges_key = bridges_key or 'data-port'
bonds_key = bonds_key or 'dpdk-bond-mappings'
enable_dpdk_key = enable_dpdk_key or 'enable-dpdk'
self._map = collections.defaultdict(
lambda: collections.defaultdict(dict))
self._ifname_mac_map = collections.defaultdict(list)
self._mac_ifname_map = {}
self._mac_pci_address_map = {}
# First we iterate over the list of physical interfaces visible to the
# system and update the interface-name-to-MAC and MAC-to-interface-name
# maps.
for ifname in list_nics():
if not is_phy_iface(ifname):
continue
mac = get_nic_hwaddr(ifname)
self._ifname_mac_map[ifname] = [mac]
self._mac_ifname_map[mac] = ifname
# In light of the pre-deprecation notice in the docstring of this
# class we will expose the ability to configure OVS bonds as a
# DPDK-only feature, but generally use the data structures internally.
if config(enable_dpdk_key):
# resolve PCI address of interfaces listed in the bridges and bonds
# charm configuration options. Note that for already bound
# interfaces the helper will retrieve MAC address from the unit
# KV store as the information is no longer available in sysfs.
_pci_bridge_mac = resolve_pci_from_mapping_config(
bridges_key)
_pci_bond_mac = resolve_pci_from_mapping_config(
bonds_key)
for pci_address, bridge_mac in _pci_bridge_mac.items():
if bridge_mac.mac in self._mac_ifname_map:
# if we already have the interface name in our map it is
# visible to the system and therefore not bound to DPDK
continue
ifname = 'dpdk-{}'.format(
hashlib.sha1(
pci_address.encode('UTF-8')).hexdigest()[:7])
self._ifname_mac_map[ifname] = [bridge_mac.mac]
self._mac_ifname_map[bridge_mac.mac] = ifname
self._mac_pci_address_map[bridge_mac.mac] = pci_address
for pci_address, bond_mac in _pci_bond_mac.items():
# for bonds we want to be able to get a list of macs from
# the bond name and also get at the interface name made up
# of the hash of the PCI address
ifname = 'dpdk-{}'.format(
hashlib.sha1(
pci_address.encode('UTF-8')).hexdigest()[:7])
self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac)
self._mac_ifname_map[bond_mac.mac] = ifname
self._mac_pci_address_map[bond_mac.mac] = pci_address
config_bridges = config(bridges_key) or ''
for bridge, ifname_or_mac in (
pair.split(':', 1)
for pair in config_bridges.split()):
if ':' in ifname_or_mac:
try:
ifname = self.ifname_from_mac(ifname_or_mac)
except KeyError:
# The interface is destined for a different unit in the
# deployment.
continue
macs = [ifname_or_mac]
else:
ifname = ifname_or_mac
macs = self.macs_from_ifname(ifname_or_mac)
portname = ifname
for mac in macs:
try:
pci_address = self.pci_address_from_mac(mac)
iftype = self.interface_type.dpdk
ifname = self.ifname_from_mac(mac)
except KeyError:
pci_address = None
iftype = self.interface_type.system
self.add_interface(
bridge, portname, ifname, iftype, pci_address, global_mtu)
def __getitem__(self, key):
"""Provide a Dict-like interface, get value of item.
:param key: Key to look up value from.
:type key: any
:returns: Value
:rtype: any
"""
return self._map.__getitem__(key)
def __iter__(self):
"""Provide a Dict-like interface, iterate over keys.
:returns: Iterator
:rtype: Iterator[any]
"""
return self._map.__iter__()
def __len__(self):
"""Provide a Dict-like interface, measure the length of internal map.
:returns: Length
:rtype: int
"""
return len(self._map)
def items(self):
"""Provide a Dict-like interface, iterate over items.
:returns: Key Value pairs
:rtype: Iterator[any, any]
"""
return self._map.items()
def keys(self):
"""Provide a Dict-like interface, iterate over keys.
:returns: Iterator
:rtype: Iterator[any]
"""
return self._map.keys()
def ifname_from_mac(self, mac):
"""
:returns: Name of interface
:rtype: str
:raises: KeyError
"""
return (get_bond_master(self._mac_ifname_map[mac]) or
self._mac_ifname_map[mac])
def macs_from_ifname(self, ifname):
"""
:returns: List of hardware address (MAC) of interface
:rtype: List[str]
:raises: KeyError
"""
return self._ifname_mac_map[ifname]
def pci_address_from_mac(self, mac):
"""
:param mac: Hardware address (MAC) of interface
:type mac: str
:returns: PCI address of device associated with mac
:rtype: str
:raises: KeyError
"""
return self._mac_pci_address_map[mac]
def add_interface(self, bridge, port, ifname, iftype,
pci_address, mtu_request):
"""Add an interface to the map.
:param bridge: Name of bridge on which the bond will be added
:type bridge: str
:param port: Name of port which will represent the bond on bridge
:type port: str
:param ifname: Name of interface that will make up the bonded port
:type ifname: str
:param iftype: Type of interface
:type iftype: BridgePortInterfaceMap.interface_type
:param pci_address: PCI address of interface
:type pci_address: Optional[str]
:param mtu_request: MTU to request for interface
:type mtu_request: Optional[int]
"""
self._map[bridge][port][ifname] = {
'type': str(iftype),
}
if pci_address:
self._map[bridge][port][ifname].update({
'pci-address': pci_address,
})
if mtu_request is not None:
self._map[bridge][port][ifname].update({
'mtu-request': str(mtu_request)
})
def get_ifdatamap(self, bridge, port):
"""Get structure suitable for charmhelpers.contrib.network.ovs helpers.
:param bridge: Name of bridge on which the port will be added
:type bridge: str
:param port: Name of port which will represent one or more interfaces
:type port: str
"""
for _bridge, _ports in self.items():
for _port, _interfaces in _ports.items():
if _bridge == bridge and _port == port:
ifdatamap = {}
for name, data in _interfaces.items():
ifdatamap.update({
name: {
'type': data['type'],
},
})
if data.get('mtu-request') is not None:
ifdatamap[name].update({
'mtu_request': data['mtu-request'],
})
if data.get('pci-address'):
ifdatamap[name].update({
'options': {
'dpdk-devargs': data['pci-address'],
},
})
return ifdatamap
class BondConfig(object):
"""Container and helpers for bond configuration options.
Data is put into a dictionary and a convenient config get interface is
provided.
"""
DEFAULT_LACP_CONFIG = {
'mode': 'balance-tcp',
'lacp': 'active',
'lacp-time': 'fast'
}
ALL_BONDS = 'ALL_BONDS'
BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp']
BOND_LACP = ['active', 'passive', 'off']
BOND_LACP_TIME = ['fast', 'slow']
def __init__(self, config_key=None):
"""Parse specified configuration option.
:param config_key: Configuration key to retrieve data from
(default: ``dpdk-bond-config``)
:type config_key: Optional[str]
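Entries take the form ``[bond]:[mode]:[lacp]:[lacp-time]`` with empty
fields falling back to the defaults. Example (illustrative)::
# config 'dpdk-bond-config' set to ':active-backup dpdk-bond1::off'
# yields:
# lacp_config['ALL_BONDS'] == {'mode': 'active-backup',
#                              'lacp': 'active', 'lacp-time': 'fast'}
# lacp_config['dpdk-bond1'] == {'mode': 'balance-tcp',
#                               'lacp': 'off', 'lacp-time': 'fast'}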
"""
self.config_key = config_key or 'dpdk-bond-config'
self.lacp_config = {
self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG)
}
lacp_config = config(self.config_key)
if lacp_config:
lacp_config_map = lacp_config.split()
for entry in lacp_config_map:
bond, entry = entry.partition(':')[0:3:2]
if not bond:
bond = self.ALL_BONDS
mode, entry = entry.partition(':')[0:3:2]
if not mode:
mode = self.DEFAULT_LACP_CONFIG['mode']
assert mode in self.BOND_MODES, \
"Bond mode {} is invalid".format(mode)
lacp, entry = entry.partition(':')[0:3:2]
if not lacp:
lacp = self.DEFAULT_LACP_CONFIG['lacp']
assert lacp in self.BOND_LACP, \
"Bond lacp {} is invalid".format(lacp)
lacp_time, entry = entry.partition(':')[0:3:2]
if not lacp_time:
lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time']
assert lacp_time in self.BOND_LACP_TIME, \
"Bond lacp-time {} is invalid".format(lacp_time)
self.lacp_config[bond] = {
'mode': mode,
'lacp': lacp,
'lacp-time': lacp_time
}
def get_bond_config(self, bond):
"""Get the LACP configuration for a bond
:param bond: the bond name
:return: a dictionary with the configuration of the bond
:rtype: Dict[str,str]
"""
return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS])
def get_ovs_portdata(self, bond):
"""Get structure suitable for charmhelpers.contrib.network.ovs helpers.
:param bond: the bond name
:return: a dictionary with the configuration of the bond
:rtype: Dict[str,Union[str,Dict[str,str]]]
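Example (with the default LACP configuration)::
get_ovs_portdata('dpdk-bond0')
# -> {'bond_mode': 'balance-tcp', 'lacp': 'active',
#     'other_config': {'lacp-time': 'fast'}}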
"""
bond_config = self.get_bond_config(bond)
return {
'bond_mode': bond_config['mode'],
'lacp': bond_config['lacp'],
'other_config': {
'lacp-time': bond_config['lacp-time'],
},
}
class SRIOVContext(OSContextGenerator):
"""Provide context for configuring SR-IOV devices."""
class sriov_config_mode(enum.Enum):
"""Mode in which SR-IOV is configured.
The configuration option identified by the ``numvfs_key`` parameter
is overloaded and defines in which mode the charm should interpret
the other SR-IOV-related configuration options.
"""
auto = 'auto'
blanket = 'blanket'
explicit = 'explicit'
def _determine_numvfs(self, device, sriov_numvfs):
"""Determine number of Virtual Functions (VFs) configured for device.
:param device: Object describing a PCI Network interface card (NIC).
:type device: sriov_netplan_shim.pci.PCINetDevice
:param sriov_numvfs: Number of VFs requested for blanket configuration.
:type sriov_numvfs: int
:returns: Number of VFs to configure for device
:rtype: Optional[int]
"""
def _get_capped_numvfs(requested):
"""Get a number of VFs that does not exceed individual card limits.
Depending on the make and model of NIC, the number of VFs supported
varies. Requesting more VFs than a card supports would be a fatal
error, so cap the requested number at the total number of VFs each
individual card supports.
:param requested: Number of VFs requested
:type requested: int
:returns: Number of VFs allowed
:rtype: int
"""
actual = min(int(requested), int(device.sriov_totalvfs))
if actual < int(requested):
log('Requested VFs ({}) too high for device {}. Falling back '
'to value supported by device: {}'
.format(requested, device.interface_name,
device.sriov_totalvfs),
level=WARNING)
return actual
if self._sriov_config_mode == self.sriov_config_mode.auto:
# auto-mode
#
# If device mapping configuration is present, return information
# on cards with mapping.
#
# If no device mapping configuration is present, return information
# for all cards.
#
# The maximum number of VFs supported by card will be used.
if (self._sriov_mapped_devices and
device.interface_name not in self._sriov_mapped_devices):
log('SR-IOV configured in auto mode: No device mapping for {}'
.format(device.interface_name),
level=DEBUG)
return
return _get_capped_numvfs(device.sriov_totalvfs)
elif self._sriov_config_mode == self.sriov_config_mode.blanket:
# blanket-mode
#
# User has specified a number of VFs that should apply to all
# cards with support for VFs.
return _get_capped_numvfs(sriov_numvfs)
elif self._sriov_config_mode == self.sriov_config_mode.explicit:
# explicit-mode
#
# User has given a list of interface names and associated number of
# VFs
if device.interface_name not in self._sriov_config_devices:
log('SR-IOV configured in explicit mode: No device:numvfs '
'pair for device {}, skipping.'
.format(device.interface_name),
level=DEBUG)
return
return _get_capped_numvfs(
self._sriov_config_devices[device.interface_name])
else:
raise RuntimeError('This should not be reached')
def __init__(self, numvfs_key=None, device_mappings_key=None):
"""Initialize map from PCI devices and configuration options.
:param numvfs_key: Config key for numvfs (default: 'sriov-numvfs')
:type numvfs_key: Optional[str]
:param device_mappings_key: Config key for device mappings
(default: 'sriov-device-mappings')
:type device_mappings_key: Optional[str]
:raises: RuntimeError
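Example ``sriov-numvfs`` values and the modes they select
(illustrative)::
# 'auto'             -> auto: all (or mapped) cards, max VFs per card
# '32'               -> blanket: 32 VFs on every SR-IOV capable card
# 'eth0:16 eth1:32'  -> explicit: per-device VF counts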
"""
numvfs_key = numvfs_key or 'sriov-numvfs'
device_mappings_key = device_mappings_key or 'sriov-device-mappings'
devices = pci.PCINetDevices()
charm_config = config()
sriov_numvfs = charm_config.get(numvfs_key) or ''
sriov_device_mappings = charm_config.get(device_mappings_key) or ''
# create list of devices from sriov_device_mappings config option
self._sriov_mapped_devices = [
pair.split(':', 1)[1]
for pair in sriov_device_mappings.split()
]
# create map of device:numvfs from sriov_numvfs config option
self._sriov_config_devices = {
ifname: numvfs for ifname, numvfs in (
pair.split(':', 1) for pair in sriov_numvfs.split()
if ':' in sriov_numvfs)
}
# determine configuration mode from contents of sriov_numvfs
if sriov_numvfs == 'auto':
self._sriov_config_mode = self.sriov_config_mode.auto
elif sriov_numvfs.isdigit():
self._sriov_config_mode = self.sriov_config_mode.blanket
elif ':' in sriov_numvfs:
self._sriov_config_mode = self.sriov_config_mode.explicit
else:
raise RuntimeError('Unable to determine mode of SR-IOV '
'configuration.')
self._map = {
device.interface_name: self._determine_numvfs(device, sriov_numvfs)
for device in devices.pci_devices
if device.sriov and
self._determine_numvfs(device, sriov_numvfs) is not None
}
def __call__(self):
"""Provide SR-IOV context.
:returns: Map interface name: min(configured, max) virtual functions.
Example:
{
'eth0': 16,
'eth1': 32,
'eth2': 64,
}
:rtype: Dict[str,int]
"""
return self._map


@ -17,7 +17,6 @@ import contextlib
import os
import six
import shutil
import sys
import yaml
import zipfile
@ -531,7 +530,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG)
if not os.path.exists(path):
ch_host.mkdir(path, owner=_user, group=_group, perms=0o775)
_scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir
_scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir
for direntry in _scanner(path):
# see if the path should be kept.
if direntry.path in keep_paths:
@ -560,23 +559,25 @@ def maybe_create_directory_for(path, user, group):
@contextlib.contextmanager
def _py2_scandir(path):
"""provide a py2 implementation of os.scandir if this module ever gets used
in a py2 charm (unlikely). uses os.listdir() to get the names in the path,
and then mocks the is_dir() function using os.path.isdir() to check for a
def _fallback_scandir(path):
"""Fallback os.scandir implementation.
Provide a fallback implementation of os.scandir if this module ever gets
used in a py2 or py34 charm. Uses os.listdir() to get the names in the
path, and then mocks the is_dir() function using os.path.isdir() to check
for a directory.
:param path: the path to list the directories for
:type path: str
:returns: Generator that provides _P27Direntry objects
:rtype: ContextManager[_P27Direntry]
:returns: Generator that provides _FBDirectory objects
:rtype: ContextManager[_FBDirectory]
"""
for f in os.listdir(path):
yield _P27Direntry(f)
yield _FBDirectory(f)
class _P27Direntry(object):
"""Mock a scandir Direntry object with enough to use in
class _FBDirectory(object):
"""Mock a scandir Directory object with enough to use in
clean_policyd_dir_for
"""


@ -13,7 +13,7 @@
# limitations under the License.
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from collections import OrderedDict, namedtuple
from functools import wraps
import subprocess
@ -36,15 +36,20 @@ from charmhelpers.contrib.network import ip
from charmhelpers.core import unitdata
from charmhelpers.core.hookenv import (
WORKLOAD_STATES,
action_fail,
action_set,
config,
expected_peer_units,
expected_related_units,
log as juju_log,
charm_dir,
INFO,
ERROR,
metadata,
related_units,
relation_get,
relation_id,
relation_ids,
relation_set,
status_set,
@ -53,6 +58,7 @@ from charmhelpers.core.hookenv import (
cached,
leader_set,
leader_get,
local_unit,
)
from charmhelpers.core.strutils import (
@ -108,6 +114,10 @@ from charmhelpers.contrib.openstack.policyd import (
POLICYD_CONFIG_NAME,
)
from charmhelpers.contrib.openstack.ha.utils import (
expect_ha,
)
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@ -278,7 +288,7 @@ PACKAGE_CODENAMES = {
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
('17', 'ussuri'),
('18', 'ussuri'),
]),
'ceilometer-common': OrderedDict([
('5', 'liberty'),
@ -326,7 +336,7 @@ PACKAGE_CODENAMES = {
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
('17', 'ussuri'),
('18', 'ussuri'),
]),
}
@ -555,9 +565,8 @@ def reset_os_release():
_os_rel = None
def os_release(package, base=None, reset_cache=False):
'''
Returns OpenStack release codename from a cached global.
def os_release(package, base=None, reset_cache=False, source_key=None):
"""Returns OpenStack release codename from a cached global.
If reset_cache then unset the cached os_release version and return the
freshly determined version.
@ -565,7 +574,20 @@ def os_release(package, base=None, reset_cache=False):
If the codename can not be determined from either an installed package or
the installation source, the earliest release supported by the charm should
be returned.
'''
:param package: Name of package to determine release from
:type package: str
:param base: Fallback codename if endeavours to determine from package fail
:type base: Optional[str]
:param reset_cache: Reset any cached codename value
:type reset_cache: bool
:param source_key: Name of source configuration option
(default: 'openstack-origin')
:type source_key: Optional[str]
:returns: OpenStack release codename
:rtype: str
"""
source_key = source_key or 'openstack-origin'
if not base:
base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']]
global _os_rel
@ -575,7 +597,7 @@ def os_release(package, base=None, reset_cache=False):
return _os_rel
_os_rel = (
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
get_os_codename_install_source(config(source_key)) or
base)
return _os_rel
@ -658,6 +680,93 @@ def config_value_changed(option):
return current != saved
def get_endpoint_key(service_name, relation_id, unit_name):
"""Return the key used to refer to an ep changed notification from a unit.
:param service_name: Service name eg nova, neutron, placement etc
:type service_name: str
:param relation_id: The id of the relation the unit is on.
:type relation_id: str
:param unit_name: The name of the unit publishing the notification.
:type unit_name: str
:returns: The key used to refer to an ep changed notification from a unit
:rtype: str
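Example::
get_endpoint_key('placement', 'identity-service:4', 'keystone/0')
# -> 'placement-identity-service_4-keystone_0'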
"""
return '{}-{}-{}'.format(
service_name,
relation_id.replace(':', '_'),
unit_name.replace('/', '_'))
def get_endpoint_notifications(service_names, rel_name='identity-service'):
"""Return all notifications for the given services.
:param service_names: List of service names.
:type service_names: List[str]
:param rel_name: Name of the relation to query
:type rel_name: str
:returns: A dict containing the source of the notification and its nonce.
:rtype: Dict[str, str]
"""
notifications = {}
for rid in relation_ids(rel_name):
for unit in related_units(relid=rid):
ep_changed_json = relation_get(
rid=rid,
unit=unit,
attribute='ep_changed')
if ep_changed_json:
ep_changed = json.loads(ep_changed_json)
for service in service_names:
if ep_changed.get(service):
key = get_endpoint_key(service, rid, unit)
notifications[key] = ep_changed[service]
return notifications
def endpoint_changed(service_name, rel_name='identity-service'):
"""Whether a new notification has been recieved for an endpoint.
:param service_name: Service name eg nova, neutron, placement etc
:type service_name: str
:param rel_name: Name of the relation to query
:type rel_name: str
:returns: Whether endpoint has changed
:rtype: bool
"""
changed = False
with unitdata.HookData()() as t:
db = t[0]
notifications = get_endpoint_notifications(
[service_name],
rel_name=rel_name)
for key, nonce in notifications.items():
if db.get(key) != nonce:
juju_log(('New endpoint change notification found: '
'{}={}').format(key, nonce),
'INFO')
changed = True
break
return changed
def save_endpoint_changed_triggers(service_names, rel_name='identity-service'):
"""Save the enpoint triggers in db so it can be tracked if they changed.
:param service_names: List of service name.
:type service_name: List
:param rel_name: Name of the relation to query
:type rel_name: str
"""
with unitdata.HookData()() as t:
db = t[0]
notifications = get_endpoint_notifications(
service_names,
rel_name=rel_name)
for key, nonce in notifications.items():
db.set(key, nonce)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
@ -1711,6 +1820,16 @@ def os_application_version_set(package):
application_version_set(application_version)
def os_application_status_set(check_function):
"""Run the supplied function and set the application status accordingly.
:param check_function: Function to run to get app states and messages.
:type check_function: function
"""
state, message = check_function()
status_set(state, message, application=True)
def enable_memcache(source=None, release=None, package=None):
"""Determine if memcache should be enabled on the local unit
@ -1947,3 +2066,287 @@ def is_db_maintenance_mode(relid=None):
'WARN')
pass
return True in notifications
@cached
def container_scoped_relations():
"""Get all the container scoped relations
:returns: List of relation names
:rtype: List
"""
md = metadata()
relations = []
for relation_type in ('provides', 'requires', 'peers'):
for relation in md.get(relation_type, []):
if md[relation_type][relation].get('scope') == 'container':
relations.append(relation)
return relations
def is_db_ready(use_current_context=False, rel_name=None):
"""Check remote database is ready to be used.
Database relations are expected to provide a list of 'allowed' units to
confirm that the database is ready for use by those units.
If the db relation has provided this information and the local unit is a
member, returns True, otherwise False.
:param use_current_context: Whether to limit checks to current hook
context.
:type use_current_context: bool
:param rel_name: Name of relation to check
:type rel_name: string
:returns: Whether remote db is ready.
:rtype: bool
:raises: Exception
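Example of the relation data evaluated (unit names are
illustrative)::
# allowed_units: 'keystone/0 keystone/1'
# a local unit named keystone/0 would be reported as ready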
"""
key = 'allowed_units'
rel_name = rel_name or 'shared-db'
this_unit = local_unit()
if use_current_context:
if relation_id() in relation_ids(rel_name):
rids_units = [(None, None)]
else:
raise Exception("use_current_context=True but not in {} "
"rel hook contexts (currently in {})."
.format(rel_name, relation_id()))
else:
rids_units = [(r_id, u)
for r_id in relation_ids(rel_name)
for u in related_units(r_id)]
for rid, unit in rids_units:
allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
if allowed_units and this_unit in allowed_units.split():
juju_log("This unit ({}) is in allowed unit list from {}".format(
this_unit,
unit), 'DEBUG')
return True
juju_log("This unit was not found in any allowed unit list")
return False
def is_expected_scale(peer_relation_name='cluster'):
"""Query juju goal-state to determine whether our peer- and dependency-
relations are at the expected scale.
Useful for deferring per unit per relation housekeeping work until we are
ready to complete it successfully and without unnecessary repetition.
Always returns True if version of juju used does not support goal-state.
:param peer_relation_name: Name of peer relation
:type peer_relation_name: str
:returns: True or False
:rtype: bool
"""
def _get_relation_id(rel_type):
return next((rid for rid in relation_ids(reltype=rel_type)), None)
Relation = namedtuple('Relation', 'rel_type rel_id')
peer_rid = _get_relation_id(peer_relation_name)
# Units with no peers should still have a peer relation.
if not peer_rid:
juju_log('Not at expected scale, no peer relation found', 'DEBUG')
return False
expected_relations = [
Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
if expect_ha():
expected_relations.append(
Relation(
rel_type='ha',
rel_id=_get_relation_id('ha')))
juju_log(
'Checking scale of {} relations'.format(
','.join([r.rel_type for r in expected_relations])),
'DEBUG')
try:
if (len(related_units(relid=peer_rid)) <
len(list(expected_peer_units()))):
return False
for rel in expected_relations:
if not rel.rel_id:
juju_log(
'Expected to find {} relation, but it is missing'.format(
rel.rel_type),
'DEBUG')
return False
# Goal state returns every unit even for container scoped
# relations but the charm only ever has a relation with
# the local unit.
if rel.rel_type in container_scoped_relations():
expected_count = 1
else:
expected_count = len(
list(expected_related_units(reltype=rel.rel_type)))
if len(related_units(relid=rel.rel_id)) < expected_count:
juju_log(
('Not at expected scale, not enough units on {} '
'relation'.format(rel.rel_type)),
'DEBUG')
return False
except NotImplementedError:
return True
juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
return True
def get_peer_key(unit_name):
"""Get the peer key for this unit.
The peer key is the key a unit uses to publish its status down the peer
relation.
:param unit_name: Name of unit
:type unit_name: string
:returns: Peer key for given unit
:rtype: string
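Example::
get_peer_key('nova-compute/0')  # -> 'unit-state-nova-compute-0'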
"""
return 'unit-state-{}'.format(unit_name.replace('/', '-'))
UNIT_READY = 'READY'
UNIT_NOTREADY = 'NOTREADY'
UNIT_UNKNOWN = 'UNKNOWN'
UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
def inform_peers_unit_state(state, relation_name='cluster'):
"""Inform peers of the state of this unit.
:param state: State of unit to publish
:type state: string
:param relation_name: Name of relation to publish state on
:type relation_name: string
"""
if state not in UNIT_STATES:
raise ValueError(
"Setting invalid state {} for unit".format(state))
for r_id in relation_ids(relation_name):
relation_set(relation_id=r_id,
relation_settings={
get_peer_key(local_unit()): state})
def get_peers_unit_state(relation_name='cluster'):
"""Get the state of all peers.
:param relation_name: Name of relation to check peers on.
:type relation_name: string
:returns: Unit states keyed on unit name.
:rtype: dict
:raises: ValueError
"""
r_ids = relation_ids(relation_name)
rids_units = [(r, u) for r in r_ids for u in related_units(r)]
unit_states = {}
for r_id, unit in rids_units:
settings = relation_get(unit=unit, rid=r_id)
unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
if unit_states[unit] not in UNIT_STATES:
raise ValueError(
"Unit in unknown state {}".format(unit_states[unit]))
return unit_states
def are_peers_ready(relation_name='cluster'):
"""Check if all peers are ready.
:param relation_name: Name of relation to check peers on.
:type relation_name: string
:returns: Whether all units are ready.
:rtype: bool
"""
unit_states = get_peers_unit_state(relation_name)
return all(v == UNIT_READY for v in unit_states.values())
def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
"""Inform peers if this unit is ready.
The check function should return a tuple (state, message). A state
of 'READY' indicates the unit is READY.
:param check_unit_ready_func: Function to run to check readiness
:type check_unit_ready_func: function
:param relation_name: Name of relation to check peers on.
:type relation_name: string
"""
unit_ready, msg = check_unit_ready_func()
if unit_ready:
state = UNIT_READY
else:
state = UNIT_NOTREADY
juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
inform_peers_unit_state(state, relation_name)
def check_api_unit_ready(check_db_ready=True):
"""Check if this unit is ready.
:param check_db_ready: Include checks of database readiness.
:type check_db_ready: bool
:returns: Whether unit state is ready and status message
:rtype: (bool, str)
"""
unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready)
return unit_state == WORKLOAD_STATES.ACTIVE, msg
def get_api_unit_status(check_db_ready=True):
"""Return a workload status and message for this unit.
:param check_db_ready: Include checks of database readiness.
:type check_db_ready: bool
:returns: Workload state and message
:rtype: (WORKLOAD_STATES, str)
"""
unit_state = WORKLOAD_STATES.ACTIVE
msg = 'Unit is ready'
if is_db_maintenance_mode():
unit_state = WORKLOAD_STATES.MAINTENANCE
msg = 'Database in maintenance mode.'
elif is_unit_paused_set():
unit_state = WORKLOAD_STATES.BLOCKED
msg = 'Unit paused.'
elif check_db_ready and not is_db_ready():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Allowed_units list provided but this unit not present'
elif not is_db_initialised():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Database not initialised'
elif not is_expected_scale():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Charm and its dependencies not yet at expected scale'
juju_log(msg, 'DEBUG')
return unit_state, msg
def check_api_application_ready():
"""Check if this application is ready.
:returns: Whether application state is ready and status message
:rtype: (bool, str)
"""
app_state, msg = get_api_application_status()
return app_state == WORKLOAD_STATES.ACTIVE, msg
def get_api_application_status():
"""Return a workload status and message for this application.
:returns: Workload state and message
:rtype: (WORKLOAD_STATES, str)
"""
app_state, msg = get_api_unit_status()
if app_state == WORKLOAD_STATES.ACTIVE:
if are_peers_ready():
return WORKLOAD_STATES.ACTIVE, 'Application Ready'
else:
return WORKLOAD_STATES.WAITING, 'Some units are not ready'
return app_state, msg


@ -37,7 +37,19 @@ class VaultKVContext(context.OSContextGenerator):
)
def __call__(self):
import hvac
try:
import hvac
except ImportError:
# BUG: #1862085 - if the relation is made to vault, but the
# 'encrypt' option is not set, then the charm fails with an
# import error. This catches that, logs a warning, and returns
# with an empty context.
hookenv.log("VaultKVContext: trying to use the hvac python module "
"but it's not available. Is the secrets-storage relation "
"made, but the encrypt option not set?",
level=hookenv.WARNING)
# return an empty context on hvac import error
return {}
ctxt = {}
# NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
db = unitdata.kv()
@ -128,9 +140,16 @@ def vault_relation_complete(backend=None):
:ptype backend: string
:returns: whether the relation to vault is complete
:rtype: bool"""
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
vault_kv()
return vault_kv.complete
try:
import hvac
except ImportError:
return False
try:
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
vault_kv()
return vault_kv.complete
except hvac.exceptions.InvalidRequest:
return False
# TODO: contrib a high level unwrap method to hvac that works
@ -144,7 +163,16 @@ def retrieve_secret_id(url, token):
:returns: secret_id to use for Vault Access
:rtype: str"""
import hvac
client = hvac.Client(url=url, token=token)
try:
# hvac 0.10.1 changed default adapter to JSONAdapter
client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request)
except AttributeError:
# hvac < 0.6.2 doesn't have adapter but uses the same response interface
client = hvac.Client(url=url, token=token)
else:
# hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
if not isinstance(client.adapter, hvac.adapters.Request):
client.adapter = hvac.adapters.Request(base_uri=url, token=token)
response = client._post('/v1/sys/wrapping/unwrap')
if response.status_code == 200:
data = response.json()


@ -22,6 +22,7 @@
# Adam Gandelman <adamg@ubuntu.com>
#
import collections
import errno
import hashlib
import math
@ -91,6 +92,89 @@ DEFAULT_PGS_PER_OSD_TARGET = 100
DEFAULT_POOL_WEIGHT = 10.0
LEGACY_PG_COUNT = 200
DEFAULT_MINIMUM_PGS = 2
AUTOSCALER_DEFAULT_PGS = 32
class OsdPostUpgradeError(Exception):
"""Error class for OSD post-upgrade operations."""
pass
class OSDSettingConflict(Exception):
"""Error class for conflicting osd setting requests."""
pass
class OSDSettingNotAllowed(Exception):
"""Error class for a disallowed setting."""
pass
OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed)
OSD_SETTING_WHITELIST = [
'osd heartbeat grace',
'osd heartbeat interval',
]
def _order_dict_by_key(rdict):
"""Convert a dictionary into an OrderedDict sorted by key.
:param rdict: Dictionary to be ordered.
:type rdict: dict
:returns: Ordered Dictionary.
:rtype: collections.OrderedDict
"""
return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
def get_osd_settings(relation_name):
"""Consolidate requested osd settings from all clients.
Consolidate requested osd settings from all clients. Check that the
requested setting is on the whitelist and it does not conflict with
any other requested settings.
:returns: Dictionary of settings
:rtype: dict
:raises: OSDSettingNotAllowed
:raises: OSDSettingConflict
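Example of the (whitelisted) relation data consolidated here
(illustrative)::
# osd-settings: '{"osd heartbeat grace": "20",
#                 "osd heartbeat interval": "5"}'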
"""
rel_ids = relation_ids(relation_name)
osd_settings = {}
for relid in rel_ids:
for unit in related_units(relid):
unit_settings = relation_get('osd-settings', unit, relid) or '{}'
unit_settings = json.loads(unit_settings)
for key, value in unit_settings.items():
if key not in OSD_SETTING_WHITELIST:
msg = 'Illegal setting "{}"'.format(key)
raise OSDSettingNotAllowed(msg)
if key in osd_settings:
if osd_settings[key] != unit_settings[key]:
msg = 'Conflicting settings for "{}"'.format(key)
raise OSDSettingConflict(msg)
else:
osd_settings[key] = value
return _order_dict_by_key(osd_settings)
def send_osd_settings():
"""Pass on requested OSD settings to osd units."""
try:
settings = get_osd_settings('client')
except OSD_SETTING_EXCEPTIONS as e:
# There is a problem with the settings, not passing them on. Update
# status will notify the user.
log(e, level=ERROR)
return
data = {
'osd-settings': json.dumps(settings, sort_keys=True)}
for relid in relation_ids('osd'):
relation_set(relation_id=relid,
relation_settings=data)
def validator(value, valid_type, valid_range=None):
@ -316,16 +400,28 @@ class ReplicatedPool(Pool):
def create(self):
if not pool_exists(self.service, self.name):
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
# Create it
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(self.pg_num)]
if nautilus_or_later:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
'--pg-num-min={}'.format(
min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
),
self.name, str(self.pg_num)
]
else:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(self.pg_num)
]
try:
check_call(cmd)
# Set the pool replica size
update_pool(client=self.service,
pool=self.name,
settings={'size': str(self.replicas)})
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
if nautilus_or_later:
# Ensure we set the expected pool ratio
update_pool(client=self.service,
@ -383,10 +479,24 @@ class ErasurePool(Pool):
k = int(erasure_profile['k'])
m = int(erasure_profile['m'])
pgs = self.get_pgs(k + m, self.percent_data)
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
# Create it
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(pgs), str(pgs),
'erasure', self.erasure_code_profile]
if nautilus_or_later:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
'--pg-num-min={}'.format(
min(AUTOSCALER_DEFAULT_PGS, pgs)
),
self.name, str(pgs), str(pgs),
'erasure', self.erasure_code_profile
]
else:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(pgs), str(pgs),
'erasure', self.erasure_code_profile
]
try:
check_call(cmd)
try:
@ -395,7 +505,6 @@ class ErasurePool(Pool):
name=self.app_name)
except CalledProcessError:
log('Could not set app name for pool {}'.format(self.name), level=WARNING)
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
if nautilus_or_later:
# Ensure we set the expected pool ratio
update_pool(client=self.service,
@ -1042,7 +1151,7 @@ def filesystem_mounted(fs):
def make_filesystem(blk_device, fstype='ext4', timeout=10):
"""Make a new filesystem on the specified block device."""
count = 0
e_noent = os.errno.ENOENT
e_noent = errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('Gave up waiting on block device %s' % blk_device,
@ -1635,5 +1744,67 @@ class CephConfContext(object):
continue
ceph_conf[key] = conf[key]
return ceph_conf
class CephOSDConfContext(CephConfContext):
"""Ceph config (ceph.conf) context.
Consolidates settings from config-flags via CephConfContext with
settings provided by the mons. The config-flag values are preserved in
conf['osd'], settings from the mons which do not clash with config-flag
settings are in conf['osd_from_client'] and finally settings which do
clash are in conf['osd_from_client_conflict']. Rather than silently drop
the conflicting settings they are provided in the context so they can be
rendered commented out to give some visibility to the admin.
"""
def __init__(self, permitted_sections=None):
super(CephOSDConfContext, self).__init__(
permitted_sections=permitted_sections)
try:
self.settings_from_mons = get_osd_settings('mon')
except OSDSettingConflict:
log(
"OSD settings from mons are inconsistent, ignoring them",
level=WARNING)
self.settings_from_mons = {}
def filter_osd_from_mon_settings(self):
"""Filter settings from client relation against config-flags.
:returns: A tuple (
config-flag values,
client settings which do not conflict with config-flag values,
client settings which conflict with config-flag values)
:rtype: (OrderedDict, OrderedDict, OrderedDict)
"""
ceph_conf = super(CephOSDConfContext, self).__call__()
conflicting_entries = {}
clear_entries = {}
for key, value in self.settings_from_mons.items():
if key in ceph_conf.get('osd', {}):
if ceph_conf['osd'][key] != value:
conflicting_entries[key] = value
else:
clear_entries[key] = value
clear_entries = _order_dict_by_key(clear_entries)
conflicting_entries = _order_dict_by_key(conflicting_entries)
return ceph_conf, clear_entries, conflicting_entries
def __call__(self):
"""Construct OSD config context.
Standard context with two additional special keys.
osd_from_client_conflict: client settings which conflict with
config-flag values
osd_from_client: settings which do not conflict with config-flag
values
:returns: OSD config context dict.
:rtype: dict
"""
conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
conf['osd_from_client_conflict'] = osd_conflict
conf['osd_from_client'] = osd_clear
return conf


@ -32,6 +32,10 @@ def loopback_devices():
/dev/loop0: [0807]:961814 (/tmp/my.img)
or:
/dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
@ -39,9 +43,9 @@ def loopback_devices():
output = check_output(cmd)
if six.PY3:
output = output.decode('utf-8')
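# NOTE: limiting the split below keeps backing files whose names
# contain spaces (and the ' (deleted)' suffix) in a single field.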
devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
return loopbacks


@ -21,6 +21,7 @@
from __future__ import print_function
import copy
from distutils.version import LooseVersion
from enum import Enum
from functools import wraps
from collections import namedtuple
import glob
@ -57,6 +58,14 @@ RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
'This may not be compatible with software you are '
'running in your shell.')
class WORKLOAD_STATES(Enum):
ACTIVE = 'active'
BLOCKED = 'blocked'
MAINTENANCE = 'maintenance'
WAITING = 'waiting'
cache = {}
@ -1088,22 +1097,33 @@ def function_tag():
return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
def status_set(workload_state, message):
def status_set(workload_state, message, application=False):
"""Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message unstead.
assume this is juju < 1.23 and juju-log the message instead.
workload_state -- valid juju workload state.
message -- status update message
workload_state -- valid juju workload state. str or WORKLOAD_STATES
message -- status update message
application -- Whether this is an application state set
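Example::
status_set(WORKLOAD_STATES.ACTIVE, 'Unit is ready')
status_set('blocked', 'Missing relation', application=True)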
"""
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
if workload_state not in valid_states:
raise ValueError(
'{!r} is not a valid workload state'.format(workload_state)
)
cmd = ['status-set', workload_state, message]
bad_state_msg = '{!r} is not a valid workload state'
if isinstance(workload_state, str):
try:
# Convert string to enum.
workload_state = WORKLOAD_STATES[workload_state.upper()]
except KeyError:
raise ValueError(bad_state_msg.format(workload_state))
if workload_state not in WORKLOAD_STATES:
raise ValueError(bad_state_msg.format(workload_state))
cmd = ['status-set']
if application:
cmd.append('--application')
cmd.extend([workload_state.value, message])
try:
ret = subprocess.call(cmd)
if ret == 0:
@ -1111,7 +1131,7 @@ def status_set(workload_state, message):
except OSError as e:
if e.errno != errno.ENOENT:
raise
log_message = 'status-set failed: {} {}'.format(workload_state,
log_message = 'status-set failed: {} {}'.format(workload_state.value,
message)
log(log_message, level='INFO')
@ -1526,13 +1546,13 @@ def env_proxy_settings(selected_settings=None):
"""Get proxy settings from process environment variables.
Get charm proxy settings from environment variables that correspond to
juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
see lp:1782236) in a format suitable for passing to an application that
reacts to proxy settings passed as environment variables. Some applications
support lowercase or uppercase notation (e.g. curl), some support only
lowercase (e.g. wget), there are also subjectively rare cases of only
uppercase notation support. no_proxy CIDR and wildcard support also varies
between runtimes and applications as there is no enforced standard.
juju-http-proxy, juju-https-proxy juju-no-proxy (available as of 2.4.2, see
lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
application that reacts to proxy settings passed as environment variables.
Some applications support lowercase or uppercase notation (e.g. curl), some
support only lowercase (e.g. wget), there are also subjectively rare cases
of only uppercase notation support. no_proxy CIDR and wildcard support also
varies between runtimes and applications as there is no enforced standard.
Some applications may connect to multiple destinations and expose config
options that would affect only proxy settings for a specific destination


@ -25,6 +25,7 @@ UBUNTU_RELEASES = (
'cosmic',
'disco',
'eoan',
'focal'
)


@ -17,14 +17,17 @@
import yaml
from subprocess import check_call
from subprocess import check_call, CalledProcessError
from charmhelpers.core.hookenv import (
log,
DEBUG,
ERROR,
WARNING,
)
from charmhelpers.core.host import is_container
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False):
if ignore:
call.append("-e")
check_call(call)
try:
check_call(call)
except CalledProcessError as e:
if is_container():
log("Error setting some sysctl keys in this container: {}".format(e.output),
level=WARNING)
else:
raise e


@ -1,4 +1,5 @@
import platform
import os
def get_platform():
@ -9,9 +10,13 @@ def get_platform():
This string is used to decide which platform module should be imported.
"""
# linux_distribution is deprecated and will be removed in Python 3.7
# Warings *not* disabled, as we certainly need to fix this.
tuple_platform = platform.linux_distribution()
current_platform = tuple_platform[0]
# Warnings *not* disabled, as we certainly need to fix this.
if hasattr(platform, 'linux_distribution'):
tuple_platform = platform.linux_distribution()
current_platform = tuple_platform[0]
else:
current_platform = _get_platform_from_fs()
if "Ubuntu" in current_platform:
return "ubuntu"
elif "CentOS" in current_platform:
@ -26,3 +31,16 @@ def get_platform():
else:
raise RuntimeError("This module is not supported on {}."
.format(current_platform))
def _get_platform_from_fs():
"""Get Platform from /etc/os-release."""
with open(os.path.join(os.sep, 'etc', 'os-release')) as fin:
content = dict(
line.split('=', 1)
for line in fin.read().splitlines()
if '=' in line
)
for k, v in content.items():
content[k] = v.strip('"')
return content["NAME"]


@ -1,6 +1,12 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
pbr>=1.8.0,<1.9.0
simplejson>=2.2.0
netifaces>=0.10.4


@ -1,11 +1,18 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
charm-tools>=2.4.4
coverage>=3.6
requests>=2.18.4
mock>=1.2
flake8>=2.2.4,<=2.4.1
stestr>=2.2.0
requests>=2.18.4
coverage>=4.5.2
pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack


@ -42,6 +42,8 @@ applications:
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
@ -56,4 +58,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -47,6 +47,7 @@ applications:
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:bionic-rocky
cinder:
charm: cs:~openstack-charmers-next/cinder
@ -63,4 +64,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -47,6 +47,7 @@ applications:
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:bionic-stein
cinder:
charm: cs:~openstack-charmers-next/cinder
@ -63,4 +64,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -46,6 +46,7 @@ applications:
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:bionic-train
cinder:
charm: cs:~openstack-charmers-next/cinder
@ -62,4 +63,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -47,6 +47,7 @@ applications:
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:trusty-mitaka
cinder:
charm: cs:~openstack-charmers-next/cinder
@ -63,4 +64,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -42,6 +42,8 @@ applications:
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
@ -56,4 +58,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -47,6 +47,7 @@ applications:
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-ocata
cinder:
charm: cs:~openstack-charmers-next/cinder
@ -63,4 +64,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -47,6 +47,7 @@ applications:
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-pike
cinder:
charm: cs:~openstack-charmers-next/cinder
@ -63,4 +64,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3


@ -47,6 +47,7 @@ applications:
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-queens
cinder:
charm: cs:~openstack-charmers-next/cinder
@ -63,4 +64,4 @@ applications:
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
ceph-osd-replication-count: 3

tox.ini

@ -1,20 +1,29 @@
# Classic charm: ./tox.ini
# Classic charm (with zaza): ./tox.ini
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos.
# within individual charm repos. See the 'global' dir contents for available
# choices of tox.ini for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
[tox]
envlist = pep8,py37
envlist = pep8,py3
skipsdist = True
# NOTE: Avoid build/test env pollution by not enabling sitepackages.
sitepackages = False
# NOTE: Avoid false positives by not skipping missing interpreters.
skip_missing_interpreters = False
[testenv]
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
CHARM_DIR={envdir}
AMULET_SETUP_TIMEOUT=5400
install_command =
pip install {opts} {packages}
commands = stestr run {posargs}
commands = stestr run --slowest {posargs}
whitelist_externals = juju
passenv = HOME TERM AMULET_* CS_API_* OS_*
passenv = HOME TERM CS_* OS_* TEST_*
deps = -r{toxinidir}/test-requirements.txt
[testenv:py35]
@ -32,6 +41,11 @@ basepython = python3.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py38]
basepython = python3.8
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt
@ -41,7 +55,7 @@ deps = -r{toxinidir}/requirements.txt
basepython = python3
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands = flake8 {posargs} hooks unit_tests tests actions lib
commands = flake8 {posargs} hooks unit_tests tests actions lib files
charm-proof
[testenv:cover]
@ -55,7 +69,7 @@ setenv =
PYTHON=coverage run
commands =
coverage erase
stestr run {posargs}
stestr run --slowest {posargs}
coverage combine
coverage html -d cover
coverage xml -o cover/coverage.xml
@ -76,6 +90,11 @@ omit =
basepython = python3
commands = {posargs}
[testenv:func-noop]
basepython = python3
commands =
functest-run-suite --help
[testenv:func]
basepython = python3
commands =
@ -98,4 +117,4 @@ commands =
[flake8]
ignore = E402,E226
exclude = */charmhelpers
exclude = */charmhelpers