Sync libraries & common files prior to freeze

* charm-helpers sync for classic charms
* charms.ceph sync for ceph charms
* rebuild for reactive charms
* sync tox.ini files as needed
* sync requirements.txt files to the standard set

Required additional fix:

* Also sync section-ceph-bluestore-compression template

Change-Id: I621c3a0e4e3594808b4c6ec298ed79aada44bae0
Alex Kavanagh 2020-09-26 18:27:01 +01:00
parent f001f5c3d7
commit 1adefd359b
8 changed files with 86 additions and 16 deletions

@@ -23,3 +23,4 @@ include:
- contrib.charmsupport
- contrib.hardening|inc=*
- contrib.openstack.policyd
- contrib.openstack.templates|inc=*/section-ceph-bluestore-compression

@@ -58,6 +58,7 @@ from charmhelpers.core.hookenv import (
status_set,
network_get_primary_address,
WARNING,
service_name,
)
from charmhelpers.core.sysctl import create as sysctl_create
@@ -810,6 +811,12 @@ class CephContext(OSContextGenerator):
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
if config('pool-type') and config('pool-type') == 'erasure-coded':
base_pool_name = config('rbd-pool') or config('rbd-pool-name')
if not base_pool_name:
base_pool_name = service_name()
ctxt['rbd_default_data_pool'] = base_pool_name
if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph')
@@ -3238,6 +3245,18 @@ class CephBlueStoreCompressionContext(OSContextGenerator):
"""
return self.op
def get_kwargs(self):
"""Get values for use as keyword arguments.
:returns: Context values with key suitable for use as kwargs to
CephBrokerRq add_op_create_*_pool methods.
:rtype: Dict[str,any]
"""
return {
k.replace('-', '_'): v
for k, v in self.op.items()
}
def validate(self):
"""Validate options.

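A note on the new helper: get_kwargs() simply converts the hyphenated keys
held in self.op into snake_case so the mapping can be unpacked straight into
the CephBrokerRq add_op_create_*_pool methods. A minimal sketch of the
transformation (the option keys below are illustrative, not the charm's
actual configuration names):

    # Hypothetical broker-op values such as the context might carry.
    op = {
        'compression-algorithm': 'lz4',
        'compression-mode': 'aggressive',
    }

    # Same expression as get_kwargs(): hyphens become underscores so the
    # values are usable as Python keyword arguments.
    kwargs = {k.replace('-', '_'): v for k, v in op.items()}
    print(kwargs)
    # {'compression_algorithm': 'lz4', 'compression_mode': 'aggressive'}
    # e.g. rq.add_op_create_replicated_pool(name='data', **kwargs)
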
@@ -41,6 +41,7 @@ from subprocess import (
)
from charmhelpers import deprecate
from charmhelpers.core.hookenv import (
application_name,
config,
service_name,
local_unit,
@@ -162,6 +163,17 @@ def get_osd_settings(relation_name):
return _order_dict_by_key(osd_settings)
def send_application_name(relid=None):
"""Send the application name down the relation.
:param relid: Relation id to set application name in.
:type relid: str
"""
relation_set(
relation_id=relid,
relation_settings={'application-name': application_name()})
def send_osd_settings():
"""Pass on requested OSD settings to osd units."""
try:
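
The new send_application_name() helper just publishes the Juju application
name on a relation via relation_set. A minimal usage sketch, assuming the
helper lands in charmhelpers.contrib.storage.linux.ceph as in upstream
charm-helpers and is called from a client charm's relation hook (the hook
name is illustrative):

    from charmhelpers.contrib.storage.linux.ceph import send_application_name

    def mon_relation_joined(relid=None):
        # Publishes {'application-name': <this charm's application name>}
        # so the remote application can key per-application settings off it.
        send_application_name(relid=relid)
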
@@ -1074,7 +1086,10 @@ def create_erasure_profile(service, profile_name,
erasure_plugin_technique=None):
"""Create a new erasure code profile if one does not already exist for it.
Updates the profile if it exists. Please refer to [0] for more details.
Profiles are considered immutable so will not be updated if the named
profile already exists.
Please refer to [0] for more details.
0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -1110,6 +1125,11 @@ def create_erasure_profile(service, profile_name,
:type erasure_plugin_technique: str
:return: None. Can raise CalledProcessError, ValueError or AssertionError
"""
if erasure_profile_exists(service, profile_name):
log('EC profile {} exists, skipping update'.format(profile_name),
level=WARNING)
return
plugin_techniques = {
'jerasure': [
'reed_sol_van',
@@ -1209,9 +1229,6 @@ def create_erasure_profile(service, profile_name,
if scalar_mds:
cmd.append('scalar-mds={}'.format(scalar_mds))
if erasure_profile_exists(service, profile_name):
cmd.append('--force')
check_call(cmd)

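With this sync create_erasure_profile() treats existing profiles as
immutable: if the named profile is already present it logs a warning and
returns instead of re-running the create command with --force. An
illustrative call sequence (it assumes a reachable Ceph cluster and a key
for the 'admin' user, so it is a sketch of the semantics rather than
something to run as-is):

    from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile

    # First call creates the profile.
    create_erasure_profile('admin', 'my-ec-profile',
                           erasure_plugin_name='jerasure',
                           data_chunks=4, coding_chunks=2)

    # Second call with different parameters: the profile already exists,
    # so it is left untouched and a WARNING is logged instead.
    create_erasure_profile('admin', 'my-ec-profile',
                           erasure_plugin_name='jerasure',
                           data_chunks=6, coding_chunks=3)
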
@@ -750,6 +750,7 @@ def handle_create_cephfs(request, service):
"""
cephfs_name = request.get('mds_name')
data_pool = request.get('data_pool')
extra_pools = request.get('extra_pools', [])
metadata_pool = request.get('metadata_pool')
# Check if the user params were provided
if not cephfs_name or not data_pool or not metadata_pool:
@@ -758,14 +759,12 @@ def handle_create_cephfs(request, service):
return {'exit-code': 1, 'stderr': msg}
# Sanity check that the required pools exist
if not pool_exists(service=service, name=data_pool):
msg = "CephFS data pool does not exist. Cannot create CephFS"
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
if not pool_exists(service=service, name=metadata_pool):
msg = "CephFS metadata pool does not exist. Cannot create CephFS"
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
for pool_name in [data_pool, metadata_pool] + extra_pools:
if not pool_exists(service=service, name=pool_name):
msg = "CephFS pool {} does not exist. Cannot create CephFS".format(
pool_name)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
if get_cephfs(service=service):
# CephFS new has already been called
@@ -786,6 +785,14 @@ def handle_create_cephfs(request, service):
else:
log(err.output, level=ERROR)
return {'exit-code': 1, 'stderr': err.output}
for pool_name in extra_pools:
cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name,
pool_name]
try:
check_output(cmd)
except CalledProcessError as err:
log(err.output, level=ERROR)
return {'exit-code': 1, 'stderr': err.output}
def handle_rgw_region_set(request, service):

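For context, handle_create_cephfs() is driven by a Ceph broker request; with
this change the op may carry an optional 'extra_pools' list, each entry of
which must already exist and is attached to the new filesystem with
"ceph fs add_data_pool". A sketch of the op (pool and filesystem names are
illustrative):

    # Illustrative broker op for handle_create_cephfs(); 'extra_pools'
    # defaults to [] when omitted.
    op = {
        'op': 'create-cephfs',
        'mds_name': 'myfs',
        'metadata_pool': 'myfs_metadata',
        'data_pool': 'myfs_data',
        'extra_pools': ['myfs_data_ec'],
    }
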
@@ -41,6 +41,7 @@ from charmhelpers.core.host import (
service_stop,
CompareHostReleases,
write_file,
is_container,
)
from charmhelpers.core.hookenv import (
cached,
@@ -54,8 +55,12 @@ from charmhelpers.core.hookenv import (
storage_list,
)
from charmhelpers.fetch import (
add_source,
apt_cache,
add_source, apt_install, apt_update
apt_install,
apt_purge,
apt_update,
filter_missing_packages
)
from charmhelpers.contrib.storage.linux.ceph import (
get_mon_map,
@@ -85,6 +90,9 @@ PACKAGES = ['ceph', 'gdisk',
'radosgw', 'xfsprogs',
'lvm2', 'parted', 'smartmontools']
REMOVE_PACKAGES = []
CHRONY_PACKAGE = 'chrony'
CEPH_KEY_MANAGER = 'ceph'
VAULT_KEY_MANAGER = 'vault'
KEY_MANAGERS = [
@@ -623,7 +631,7 @@ def _get_child_dirs(path):
OSError if an error occurs reading the directory listing
"""
if not os.path.exists(path):
raise ValueError('Specfied path "%s" does not exist' % path)
raise ValueError('Specified path "%s" does not exist' % path)
if not os.path.isdir(path):
raise ValueError('Specified path "%s" is not a directory' % path)
@@ -2209,6 +2217,9 @@ def upgrade_monitor(new_version, kick_function=None):
else:
service_stop('ceph-mon-all')
apt_install(packages=determine_packages(), fatal=True)
rm_packages = determine_packages_to_remove()
if rm_packages:
apt_purge(packages=rm_packages, fatal=True)
kick_function()
owner = ceph_user()
@@ -3252,6 +3263,19 @@ def determine_packages():
return packages
def determine_packages_to_remove():
"""Determines packages for removal
:returns: list of packages to be removed
"""
rm_packages = REMOVE_PACKAGES.copy()
if is_container():
install_list = filter_missing_packages(CHRONY_PACKAGE)
if not install_list:
rm_packages.append(CHRONY_PACKAGE)
return rm_packages
def bootstrap_manager():
hostname = socket.gethostname()
path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname)
@@ -3307,7 +3331,7 @@ def apply_osd_settings(settings):
present. Settings stop being applied on encountering an error.
:param settings: dict. Dictionary of settings to apply.
:returns: bool. True if commands ran succesfully.
:returns: bool. True if commands ran successfully.
:raises: OSDConfigSetError
"""
current_settings = {}

@@ -7,6 +7,7 @@
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
pbr>=1.8.0,<1.9.0
simplejson>=2.2.0
netifaces>=0.10.4

@@ -7,6 +7,7 @@
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
charm-tools>=2.4.4
requests>=2.18.4
mock>=1.2

@@ -116,5 +116,5 @@ commands =
functest-run-suite --keep-model --bundle {posargs}
[flake8]
ignore = E402,E226,W504
ignore = E402,E226,W503,W504
exclude = */charmhelpers
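
W503 and W504 are flake8's mutually exclusive warnings about wrapping an
expression around a binary operator (break before vs. break after), so
ignoring both allows either continuation style, for example:

    first_value, second_value = 1, 2

    # Break after the operator (W504 would flag this when enabled).
    total = (first_value +
             second_value)

    # Break before the operator (W503 would flag this when enabled).
    total = (first_value
             + second_value)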