Sync helpers for 20.05

Change-Id: I05439966556978d44eb72f695152e50548f77496
Aurelien Lourot 2020-05-18 14:38:09 +02:00
parent 9bee0c4589
commit b66f1bbb4a
5 changed files with 211 additions and 19 deletions

View File

@@ -54,7 +54,8 @@ from charmhelpers.core.hookenv import (
    INFO,
    ERROR,
    status_set,
    network_get_primary_address,
    WARNING,
)
from charmhelpers.core.sysctl import create as sysctl_create
@@ -2994,3 +2995,153 @@ class BondConfig(object):
                'lacp-time': bond_config['lacp-time'],
            },
        }


class SRIOVContext(OSContextGenerator):
    """Provide context for configuring SR-IOV devices."""

    class sriov_config_mode(enum.Enum):
        """Mode in which SR-IOV is configured.

        The configuration option identified by the ``numvfs_key`` parameter
        is overloaded and defines in which mode the charm should interpret
        the other SR-IOV-related configuration options.
        """
        auto = 'auto'
        blanket = 'blanket'
        explicit = 'explicit'

    def _determine_numvfs(self, device, sriov_numvfs):
        """Determine number of Virtual Functions (VFs) configured for device.

        :param device: Object describing a PCI Network interface card (NIC).
        :type device: sriov_netplan_shim.pci.PCINetDevice
        :param sriov_numvfs: Number of VFs requested for blanket configuration.
        :type sriov_numvfs: int
        :returns: Number of VFs to configure for device
        :rtype: Optional[int]
        """

        def _get_capped_numvfs(requested):
            """Get a number of VFs that does not exceed individual card limits.

            Depending on the make and model of NIC, the number of VFs
            supported varies. Requesting more VFs than a card supports
            would be a fatal error, so cap the requested number at the
            total number of VFs each individual card supports.

            :param requested: Number of VFs requested
            :type requested: int
            :returns: Number of VFs allowed
            :rtype: int
            """
            actual = min(int(requested), int(device.sriov_totalvfs))
            if actual < int(requested):
                log('Requested VFs ({}) too high for device {}. Falling back '
                    'to value supported by device: {}'
                    .format(requested, device.interface_name,
                            device.sriov_totalvfs),
                    level=WARNING)
            return actual

        if self._sriov_config_mode == self.sriov_config_mode.auto:
            # auto-mode
            #
            # If device mapping configuration is present, return information
            # on cards with mapping.
            #
            # If no device mapping configuration is present, return
            # information for all cards.
            #
            # The maximum number of VFs supported by card will be used.
            if (self._sriov_mapped_devices and
                    device.interface_name not in self._sriov_mapped_devices):
                log('SR-IOV configured in auto mode: No device mapping for {}'
                    .format(device.interface_name),
                    level=DEBUG)
                return
            return _get_capped_numvfs(device.sriov_totalvfs)
        elif self._sriov_config_mode == self.sriov_config_mode.blanket:
            # blanket-mode
            #
            # User has specified a number of VFs that should apply to all
            # cards with support for VFs.
            return _get_capped_numvfs(sriov_numvfs)
        elif self._sriov_config_mode == self.sriov_config_mode.explicit:
            # explicit-mode
            #
            # User has given a list of interface names and associated
            # number of VFs.
            if device.interface_name not in self._sriov_config_devices:
                log('SR-IOV configured in explicit mode: No device:numvfs '
                    'pair for device {}, skipping.'
                    .format(device.interface_name),
                    level=DEBUG)
                return
            return _get_capped_numvfs(
                self._sriov_config_devices[device.interface_name])
        else:
            raise RuntimeError('This should not be reached')

    def __init__(self, numvfs_key=None, device_mappings_key=None):
        """Initialize map from PCI devices and configuration options.

        :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs')
        :type numvfs_key: Optional[str]
        :param device_mappings_key: Config key for device mappings
                                    (default: 'sriov-device-mappings')
        :type device_mappings_key: Optional[str]
        :raises: RuntimeError
        """
        numvfs_key = numvfs_key or 'sriov-numvfs'
        device_mappings_key = device_mappings_key or 'sriov-device-mappings'
        devices = pci.PCINetDevices()
        charm_config = config()
        sriov_numvfs = charm_config.get(numvfs_key) or ''
        sriov_device_mappings = charm_config.get(device_mappings_key) or ''

        # create list of devices from sriov_device_mappings config option
        self._sriov_mapped_devices = [
            pair.split(':', 1)[1]
            for pair in sriov_device_mappings.split()
        ]

        # create map of device:numvfs from sriov_numvfs config option,
        # skipping any malformed token that lacks a ':' separator
        self._sriov_config_devices = {
            ifname: numvfs for ifname, numvfs in (
                pair.split(':', 1) for pair in sriov_numvfs.split()
                if ':' in pair)
        }

        # determine configuration mode from contents of sriov_numvfs
        if sriov_numvfs == 'auto':
            self._sriov_config_mode = self.sriov_config_mode.auto
        elif sriov_numvfs.isdigit():
            self._sriov_config_mode = self.sriov_config_mode.blanket
        elif ':' in sriov_numvfs:
            self._sriov_config_mode = self.sriov_config_mode.explicit
        else:
            raise RuntimeError('Unable to determine mode of SR-IOV '
                               'configuration.')

        self._map = {
            device.interface_name: self._determine_numvfs(device, sriov_numvfs)
            for device in devices.pci_devices
            if device.sriov and
            self._determine_numvfs(device, sriov_numvfs) is not None
        }

    def __call__(self):
        """Provide SR-IOV context.

        :returns: Map of interface name to number of VFs to configure,
                  i.e. min(configured, max supported). Example:
                  {
                      'eth0': 16,
                      'eth1': 32,
                      'eth2': 64,
                  }
        :rtype: Dict[str,int]
        """
        return self._map
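
For reviewers, here is a standalone sketch of how the three accepted formats of the numvfs config option select a mode and how requests are capped. The device data and helper names (totalvfs, cap, determine_mode) are made up for illustration and are not part of charmhelpers:

# Minimal standalone sketch of SRIOVContext's mode detection and VF capping.
# 'totalvfs' and 'cap' are hypothetical stand-ins; the real class reads card
# limits from sriov_netplan_shim.pci and values from charm config.
def determine_mode(sriov_numvfs):
    if sriov_numvfs == 'auto':
        return 'auto'      # use max supported VFs on every (mapped) device
    elif sriov_numvfs.isdigit():
        return 'blanket'   # apply one VF count to all SR-IOV devices
    elif ':' in sriov_numvfs:
        return 'explicit'  # per-device 'ifname:numvfs' pairs
    raise RuntimeError('Unable to determine mode of SR-IOV configuration.')

totalvfs = {'eth0': 16, 'eth1': 64}  # made-up per-card sriov_totalvfs limits

def cap(requested, ifname):
    return min(int(requested), totalvfs[ifname])  # never exceed card limit

assert determine_mode('auto') == 'auto'
assert determine_mode('32') == 'blanket'
assert determine_mode('eth0:8 eth1:32') == 'explicit'
assert cap(32, 'eth0') == 16  # blanket 32 capped to eth0's 16-VF limit
assert cap(32, 'eth1') == 32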

View File

@@ -92,6 +92,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100
DEFAULT_POOL_WEIGHT = 10.0
LEGACY_PG_COUNT = 200
DEFAULT_MINIMUM_PGS = 2
AUTOSCALER_DEFAULT_PGS = 32


class OsdPostUpgradeError(Exception):
@@ -399,16 +400,28 @@ class ReplicatedPool(Pool):
    def create(self):
        if not pool_exists(self.service, self.name):
            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
            # Create it
            if nautilus_or_later:
                cmd = [
                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
                    '--pg-num-min={}'.format(
                        min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
                    ),
                    self.name, str(self.pg_num)
                ]
            else:
                cmd = [
                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
                    self.name, str(self.pg_num)
                ]

            try:
                check_call(cmd)
                # Set the pool replica size
                update_pool(client=self.service,
                            pool=self.name,
                            settings={'size': str(self.replicas)})
                if nautilus_or_later:
                    # Ensure we set the expected pool ratio
                    update_pool(client=self.service,
@@ -466,10 +479,24 @@ class ErasurePool(Pool):
        k = int(erasure_profile['k'])
        m = int(erasure_profile['m'])
        pgs = self.get_pgs(k + m, self.percent_data)
        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
        # Create it
        if nautilus_or_later:
            cmd = [
                'ceph', '--id', self.service, 'osd', 'pool', 'create',
                '--pg-num-min={}'.format(
                    min(AUTOSCALER_DEFAULT_PGS, pgs)
                ),
                self.name, str(pgs), str(pgs),
                'erasure', self.erasure_code_profile
            ]
        else:
            cmd = [
                'ceph', '--id', self.service, 'osd', 'pool', 'create',
                self.name, str(pgs), str(pgs),
                'erasure', self.erasure_code_profile
            ]

        try:
            check_call(cmd)
            try:
@@ -478,7 +505,6 @@ class ErasurePool(Pool):
                             name=self.app_name)
            except CalledProcessError:
                log('Could not set app name for pool {}'.format(self.name),
                    level=WARNING)
            if nautilus_or_later:
                # Ensure we set the expected pool ratio
                update_pool(client=self.service,
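
The effect of the new flag is easiest to see on the assembled command line. Below is a minimal sketch under made-up pool names and PG counts; only AUTOSCALER_DEFAULT_PGS and the command shape come from the change above, the helper function is hypothetical:

# Standalone sketch of the --pg-num-min floor added for Nautilus and later.
AUTOSCALER_DEFAULT_PGS = 32

def replicated_create_cmd(service, name, pg_num, nautilus_or_later):
    if nautilus_or_later:
        # Floor for the PG autoscaler: never scale below min(32, pg_num)
        return ['ceph', '--id', service, 'osd', 'pool', 'create',
                '--pg-num-min={}'.format(min(AUTOSCALER_DEFAULT_PGS, pg_num)),
                name, str(pg_num)]
    # Pre-Nautilus releases do not understand the flag, so omit it
    return ['ceph', '--id', service, 'osd', 'pool', 'create',
            name, str(pg_num)]

assert '--pg-num-min=32' in replicated_create_cmd('admin', 'big', 256, True)
assert '--pg-num-min=8' in replicated_create_cmd('admin', 'small', 8, True)
assert all(not arg.startswith('--pg-num-min')
           for arg in replicated_create_cmd('admin', 'small', 8, False))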

View File

@@ -1,6 +1,12 @@
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
pbr>=1.8.0,<1.9.0
simplejson>=2.2.0
netifaces>=0.10.4

View File

@@ -1,14 +1,18 @@
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
charm-tools>=2.4.4
requests>=2.18.4
mock>=1.2
flake8>=2.2.4,<=2.4.1
stestr>=2.2.0
coverage>=4.5.2
pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack

View File

@@ -41,6 +41,11 @@ basepython = python3.7
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:py38]
basepython = python3.8
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt