Add 2023.2 Bobcat support

* sync charm-helpers to classic charms
* change openstack-origin/source default to quincy
* add mantic to metadata series
* align testing with bobcat
* add new bobcat bundles
* add bobcat bundles to tests.yaml
* add bobcat tests to osci.yaml
* update build-on and run-on bases
* drop kinetic

Change-Id: I7449eba63107b43525359fb92ae1a0ad9e648bab
Corey Bryant 2023-07-18 16:47:18 -04:00
parent 6a0b48e916
commit 986981c6f4
17 changed files with 180 additions and 94 deletions

View File

@ -33,9 +33,9 @@ bases:
- name: ubuntu
channel: "22.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "22.10"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "23.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "23.10"
architectures: [amd64, s390x, ppc64el, arm64]

View File

@ -5,7 +5,7 @@ options:
description: OSD debug level. Max is 20.
source:
type: string
default: yoga
default: quincy
description: |
Optional configuration to support use of additional sources such as:
.

View File

@ -221,6 +221,13 @@ def https():
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
# Local import to avoid circular dependency.
import charmhelpers.contrib.openstack.cert_utils as cert_utils
if (
cert_utils.get_certificate_request() and not
cert_utils.get_requests_for_local_unit("certificates")
):
return False
for r_id in relation_ids('certificates'):
for unit in relation_list(r_id):
ca = relation_get('ca', rid=r_id, unit=unit)

View File

@ -409,6 +409,9 @@ def get_requests_for_local_unit(relation_name=None):
relation_name = relation_name or 'certificates'
bundles = []
for rid in relation_ids(relation_name):
sent = relation_get(rid=rid, unit=local_unit())
legacy_keys = ['certificate_name', 'common_name']
is_legacy_request = set(sent).intersection(legacy_keys)
for unit in related_units(rid):
data = relation_get(rid=rid, unit=unit)
if data.get(raw_certs_key):
@ -416,6 +419,14 @@ def get_requests_for_local_unit(relation_name=None):
'ca': data['ca'],
'chain': data.get('chain'),
'certs': json.loads(data[raw_certs_key])})
elif is_legacy_request:
bundles.append({
'ca': data['ca'],
'chain': data.get('chain'),
'certs': {sent['common_name']:
{'cert': data.get(local_name + '.server.cert'),
'key': data.get(local_name + '.server.key')}}})
return bundles

View File

@ -1748,6 +1748,9 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
def __call__(self):
total_processes = _calculate_workers()
enable_wsgi_rotation = config('wsgi-rotation')
if enable_wsgi_rotation is None:
enable_wsgi_rotation = True
ctxt = {
"service_name": self.service_name,
"user": self.user,
@ -1761,6 +1764,7 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
"public_processes": int(math.ceil(self.public_process_weight *
total_processes)),
"threads": 1,
"wsgi_rotation": enable_wsgi_rotation,
}
return ctxt

View File

@ -160,6 +160,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2022.1', 'yoga'),
('2022.2', 'zed'),
('2023.1', 'antelope'),
('2023.2', 'bobcat'),
])
# The ugly duckling - must list releases oldest to newest
@ -957,7 +958,7 @@ def os_requires_version(ostack_release, pkg):
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
if CompareOpenStackReleases(os_release(pkg)) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
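
A quick standalone sketch of why the wrapper matters; the comparator below only mimics charm-helpers' CompareOpenStackReleases (which orders codenames by release series rather than alphabetically) and the release list is an illustrative subset:

# Illustrative subset, oldest to newest; not the real OPENSTACK_CODENAMES.
_ORDER = ['yoga', 'zed', 'antelope', 'bobcat']

def is_before(release, minimum):
    # Compare by position in the release series, not lexicographically.
    return _ORDER.index(release) < _ORDER.index(minimum)

print('bobcat' < 'yoga')            # True  -- plain string compare, wrong
print(is_before('bobcat', 'yoga'))  # False -- bobcat is newer than yoga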

View File

@ -28,7 +28,6 @@ import os
import shutil
import json
import time
import uuid
from subprocess import (
check_call,
@ -1677,6 +1676,10 @@ class CephBrokerRq(object):
The API is versioned and defaults to version 1.
"""
# The below hash is the result of running
# `hashlib.sha1('[]'.encode()).hexdigest()`
EMPTY_LIST_SHA = '97d170e1550eee4afc0af065b78cda302a97674c'
def __init__(self, api_version=1, request_id=None, raw_request_data=None):
"""Initialize CephBrokerRq object.
@ -1685,8 +1688,12 @@ class CephBrokerRq(object):
:param api_version: API version for request (default: 1).
:type api_version: Optional[int]
:param request_id: Unique identifier for request.
(default: string representation of generated UUID)
:param request_id: Unique identifier for request. The identifier will
be updated as ops are added or removed from the
broker request. This ensures that Ceph will
correctly process requests where operations are
added after the initial request is processed.
(default: sha1 of operations)
:type request_id: Optional[str]
:param raw_request_data: JSON-encoded string to build request from.
:type raw_request_data: Optional[str]
@ -1695,16 +1702,20 @@ class CephBrokerRq(object):
if raw_request_data:
request_data = json.loads(raw_request_data)
self.api_version = request_data['api-version']
self.request_id = request_data['request-id']
self.set_ops(request_data['ops'])
self.request_id = request_data['request-id']
else:
self.api_version = api_version
if request_id:
self.request_id = request_id
else:
self.request_id = str(uuid.uuid1())
self.request_id = CephBrokerRq.EMPTY_LIST_SHA
self.ops = []
def _hash_ops(self):
"""Return the sha1 of the requested Broker ops."""
return hashlib.sha1(json.dumps(self.ops, sort_keys=True).encode()).hexdigest()
def add_op(self, op):
"""Add an op if it is not already in the list.
@ -1713,6 +1724,7 @@ class CephBrokerRq(object):
"""
if op not in self.ops:
self.ops.append(op)
self.request_id = self._hash_ops()
def add_op_request_access_to_group(self, name, namespace=None,
permission=None, key_name=None,
@ -1991,6 +2003,7 @@ class CephBrokerRq(object):
to allow comparisons to ensure validity.
"""
self.ops = ops
self.request_id = self._hash_ops()
@property
def request(self):
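
A standalone sketch of the new deterministic request-id scheme (it mirrors _hash_ops() above; the op dict is illustrative only):

import hashlib
import json

def hash_ops(ops):
    # Same digest _hash_ops() computes: sha1 of the JSON-encoded ops list.
    return hashlib.sha1(json.dumps(ops, sort_keys=True).encode()).hexdigest()

# An empty request hashes to EMPTY_LIST_SHA.
assert hash_ops([]) == '97d170e1550eee4afc0af065b78cda302a97674c'

# Adding an op changes the id, but rebuilding the same ops always yields the
# same id, so a re-run hook reproduces the request-id instead of a fresh UUID.
ops = [{'op': 'create-pool', 'name': 'glance', 'replicas': 3}]
assert hash_ops(ops) != hash_ops([])
assert hash_ops(ops) == hash_ops(list(ops))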

View File

@ -32,6 +32,7 @@ UBUNTU_RELEASES = (
'jammy',
'kinetic',
'lunar',
'mantic',
)

View File

@ -238,6 +238,14 @@ CLOUD_ARCHIVE_POCKETS = {
'antelope/proposed': 'jammy-proposed/antelope',
'jammy-antelope/proposed': 'jammy-proposed/antelope',
'jammy-proposed/antelope': 'jammy-proposed/antelope',
# bobcat
'bobcat': 'jammy-updates/bobcat',
'jammy-bobcat': 'jammy-updates/bobcat',
'jammy-bobcat/updates': 'jammy-updates/bobcat',
'jammy-updates/bobcat': 'jammy-updates/bobcat',
'bobcat/proposed': 'jammy-proposed/bobcat',
'jammy-bobcat/proposed': 'jammy-proposed/bobcat',
'jammy-proposed/bobcat': 'jammy-proposed/bobcat',
# OVN
'focal-ovn-22.03': 'focal-updates/ovn-22.03',
@ -270,6 +278,7 @@ OPENSTACK_RELEASES = (
'yoga',
'zed',
'antelope',
'bobcat',
)
@ -298,6 +307,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('jammy', 'yoga'),
('kinetic', 'zed'),
('lunar', 'antelope'),
('mantic', 'bobcat'),
])
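
A standalone sketch of how a user-facing source value resolves through this table; the dict below copies two of the new bobcat entries, and the prefix handling is a simplification of charm-helpers' add_source logic, not the real parser:

# Subset of the CLOUD_ARCHIVE_POCKETS entries added above.
POCKETS = {
    'jammy-bobcat': 'jammy-updates/bobcat',
    'jammy-bobcat/proposed': 'jammy-proposed/bobcat',
}

def pocket_for(source):
    # e.g. a charm configured with source='cloud:jammy-bobcat'; strip the
    # 'cloud:' prefix before the lookup.
    return POCKETS[source.split(':', 1)[-1]]

print(pocket_for('cloud:jammy-bobcat'))  # jammy-updates/bobcat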
@ -591,7 +601,7 @@ def _get_key_by_keyid(keyid):
curl_cmd = ['curl', keyserver_url.format(keyid)]
# use proxy server settings in order to retrieve the key
return subprocess.check_output(curl_cmd,
env=env_proxy_settings(['https']))
env=env_proxy_settings(['https', 'no_proxy']))
def _dearmor_gpg_key(key_asc):

View File

@ -122,13 +122,12 @@ class Cache(object):
:raises: subprocess.CalledProcessError
"""
pkgs = {}
cmd = ['dpkg-query', '--list']
cmd = [
'dpkg-query', '--show',
'--showformat',
r'${db:Status-Abbrev}\t${Package}\t${Version}\t${Architecture}\t${binary:Summary}\n'
]
cmd.extend(packages)
if locale.getlocale() == (None, None):
# subprocess calls out to locale.getpreferredencoding(False) to
# determine encoding. Workaround for Trusty where the
# environment appears to not be set up correctly.
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
try:
output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
@ -140,24 +139,17 @@ class Cache(object):
if cp.returncode != 1:
raise
output = cp.output
headings = []
for line in output.splitlines():
if line.startswith('||/'):
headings = line.split()
headings.pop(0)
# only process lines for successfully installed packages
if not (line.startswith('ii ') or line.startswith('hi ')):
continue
elif (line.startswith('|') or line.startswith('+') or
line.startswith('dpkg-query:')):
continue
else:
data = line.split(None, 4)
status = data.pop(0)
if status not in ('ii', 'hi'):
continue
pkg = {}
pkg.update({k.lower(): v for k, v in zip(headings, data)})
if 'name' in pkg:
pkgs.update({pkg['name']: pkg})
status, name, version, arch, desc = line.split('\t', 4)
pkgs[name] = {
'name': name,
'version': version,
'architecture': arch,
'description': desc,
}
return pkgs
def _apt_cache_show(self, packages):
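
A standalone sketch of the new parsing path, using a made-up dpkg-query output line in the --showformat layout above (tab-separated status, package, version, architecture, summary):

# Hypothetical line; real output comes from the dpkg-query call above.
line = ('ii \tceph-common\t17.2.6-0ubuntu0.22.04.1\tamd64\t'
        'common utilities to mount and interact with a ceph storage cluster')
if line.startswith(('ii ', 'hi ')):  # installed or hold-installed only
    status, name, version, arch, desc = line.split('\t', 4)
    print({name: {'version': version, 'architecture': arch}})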

View File

@ -291,7 +291,8 @@ def pool_permission_list_for_service(service):
for prefix in prefixes:
permissions.append("allow {} object_prefix {}".format(permission,
prefix))
return ['mon', 'allow r, allow command "osd blacklist"',
return ['mon', ('allow r, allow command "osd blacklist"'
', allow command "osd blocklist"'),
'osd', ', '.join(permissions)]

View File

@ -681,15 +681,29 @@ def _get_osd_num_from_dirname(dirname):
return match.group('osd_id')
def get_crimson_osd_ids():
"""Return a set of the OSDs that are running with the Crimson backend."""
rv = set()
try:
out = subprocess.check_output(['pgrep', 'crimson-osd', '-a'])
for line in out.decode('utf8').splitlines():
rv.add(line.split()[-1])
except Exception:
pass
return rv
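
A standalone sketch of the parsing above, with a made-up 'pgrep -a' line; it assumes, as the helper does, that the OSD id is the final token of the crimson-osd command line:

# Hypothetical `pgrep crimson-osd -a` output (pid followed by command line).
sample = '31337 /usr/bin/crimson-osd --cluster ceph --id 2'
crimson_ids = {line.split()[-1] for line in sample.splitlines()}
print(crimson_ids)  # {'2'}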
def get_local_osd_ids():
"""This will list the /var/lib/ceph/osd/* directories and try
to split the ID off of the directory name and return it in
a list.
a list. Excludes crimson OSDs from the returned list.
:returns: list. A list of OSD identifiers
:raises: OSError if something goes wrong with listing the directory.
"""
osd_ids = []
crimson_osds = get_crimson_osd_ids()
osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd')
if os.path.exists(osd_path):
try:
@ -698,7 +712,8 @@ def get_local_osd_ids():
osd_id = osd_dir.split('-')[1] if '-' in osd_dir else ''
if (_is_int(osd_id) and
filesystem_mounted(os.path.join(
os.sep, osd_path, osd_dir))):
os.sep, osd_path, osd_dir)) and
osd_id not in crimson_osds):
osd_ids.append(osd_id)
except OSError:
raise
@ -1134,7 +1149,8 @@ def get_mds_bootstrap_key():
_default_caps = collections.OrderedDict([
('mon', ['allow r',
'allow command "osd blacklist"']),
'allow command "osd blacklist"',
'allow command "osd blocklist"']),
('osd', ['allow rwx']),
])
@ -1166,7 +1182,10 @@ osd_upgrade_caps = collections.OrderedDict([
])
rbd_mirror_caps = collections.OrderedDict([
('mon', ['profile rbd; allow r']),
('mon', ['allow profile rbd-mirror-peer',
'allow command "service dump"',
'allow command "service status"'
]),
('osd', ['profile rbd']),
('mgr', ['allow r']),
])
@ -1212,28 +1231,15 @@ def get_named_key(name, caps=None, pool_list=None):
:param caps: dict of cephx capabilities
:returns: Returns a cephx key
"""
key_name = 'client.{}'.format(name)
try:
# Does the key already exist?
output = str(subprocess.check_output(
[
'sudo',
'-u', ceph_user(),
'ceph',
'--name', 'mon.',
'--keyring',
'/var/lib/ceph/mon/ceph-{}/keyring'.format(
socket.gethostname()
),
'auth',
'get',
key_name,
]).decode('UTF-8')).strip()
return parse_key(output)
except subprocess.CalledProcessError:
# Couldn't get the key, time to create it!
log("Creating new key for {}".format(name), level=DEBUG)
caps = caps or _default_caps
key_name = 'client.{}'.format(name)
key = ceph_auth_get(key_name)
if key:
upgrade_key_caps(key_name, caps)
return key
log("Creating new key for {}".format(name), level=DEBUG)
cmd = [
"sudo",
"-u",
@ -1255,6 +1261,7 @@ def get_named_key(name, caps=None, pool_list=None):
pools = " ".join(['pool={0}'.format(i) for i in pool_list])
subcaps[0] = subcaps[0] + " " + pools
cmd.extend([subsystem, '; '.join(subcaps)])
ceph_auth_get.cache_clear()
log("Calling check_output: {}".format(cmd), level=DEBUG)
return parse_key(str(subprocess
@ -1263,6 +1270,30 @@ def get_named_key(name, caps=None, pool_list=None):
.strip()) # IGNORE:E1103
@functools.lru_cache()
def ceph_auth_get(key_name):
try:
# Does the key already exist?
output = str(subprocess.check_output(
[
'sudo',
'-u', ceph_user(),
'ceph',
'--name', 'mon.',
'--keyring',
'/var/lib/ceph/mon/ceph-{}/keyring'.format(
socket.gethostname()
),
'auth',
'get',
key_name,
]).decode('UTF-8')).strip()
return parse_key(output)
except subprocess.CalledProcessError:
# Couldn't get the key
pass
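
A standalone sketch of the caching pattern introduced here: key lookups are memoised with lru_cache and the cache is cleared after a new key is created (see ceph_auth_get.cache_clear() above), so the next lookup does not return a stale miss. The names below are illustrative, not the charm's API:

import functools

KEYS = {}  # stand-in for the cluster's auth database

@functools.lru_cache()
def lookup(key_name):
    # Stand-in for the memoised `ceph auth get` subprocess call.
    return KEYS.get(key_name)

assert lookup('client.glance') is None   # first miss is cached
KEYS['client.glance'] = 'AQID...'
assert lookup('client.glance') is None   # stale: the miss is still cached
lookup.cache_clear()                     # mirrors ceph_auth_get.cache_clear()
assert lookup('client.glance') == 'AQID...'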
def upgrade_key_caps(key, caps, pool_list=None):
"""Upgrade key to have capabilities caps"""
if not is_leader():
@ -2063,7 +2094,7 @@ def filesystem_mounted(fs):
def get_running_osds():
"""Returns a list of the pids of the current running OSD daemons"""
cmd = ['pgrep', 'ceph-osd']
cmd = ['pgrep', 'ceph-osd|crimson-osd']
try:
result = str(subprocess.check_output(cmd).decode('UTF-8'))
return result.split()
@ -2514,7 +2545,7 @@ class WatchDog(object):
:type timeout: int
"""
start_time = time.time()
while(not wait_f()):
while not wait_f():
now = time.time()
if now > start_time + timeout:
raise WatchDog.WatchDogTimeoutException()
@ -3215,6 +3246,9 @@ UCA_CODENAME_MAP = {
'wallaby': 'pacific',
'xena': 'pacific',
'yoga': 'quincy',
'zed': 'quincy',
'antelope': 'quincy',
'bobcat': 'quincy',
}
@ -3414,7 +3448,7 @@ def apply_osd_settings(settings):
set_cmd = base_cmd + ' set {key} {value}'
def _get_cli_key(key):
return(key.replace(' ', '_'))
return key.replace(' ', '_')
# Retrieve the current values to check keys are correct and to make this a
# noop if setting are already applied.
for osd_id in get_local_osd_ids():
@ -3453,6 +3487,9 @@ def enabled_manager_modules():
:rtype: List[str]
"""
cmd = ['ceph', 'mgr', 'module', 'ls']
quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0
if quincy_or_later:
cmd.append('--format=json')
try:
modules = subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError as e:

View File

@ -13,8 +13,8 @@ tags:
series:
- focal
- jammy
- kinetic
- lunar
- mantic
description: |
Ceph is a distributed storage and network file system designed to provide
excellent performance, reliability, and scalability.

View File

@ -4,7 +4,6 @@
- charm-unit-jobs-py310
- charm-xena-functional-jobs
- charm-yoga-functional-jobs
- charm-zed-functional-jobs
- charm-functional-jobs
vars:
needs_charm_build: true

View File

@ -1,5 +1,5 @@
variables:
openstack-origin: &openstack-origin distro
openstack-origin: &openstack-origin cloud:jammy-bobcat
series: jammy
@ -47,8 +47,6 @@ applications:
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
@ -79,13 +77,11 @@ applications:
- '6'
- '7'
- '8'
channel: quincy/edge
channel: latest/edge
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
options:
source: *openstack-origin
to:
- '9'
channel: latest/edge
@ -98,7 +94,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
channel: yoga/edge
channel: latest/edge
nova-compute:
charm: ch:nova-compute
@ -107,7 +103,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '11'
channel: yoga/edge
channel: latest/edge
glance:
expose: True
@ -117,7 +113,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '12'
channel: yoga/edge
channel: latest/edge
cinder:
expose: True
@ -129,11 +125,11 @@ applications:
glance-api-version: '2'
to:
- '13'
channel: yoga/edge
channel: latest/edge
cinder-ceph:
charm: ch:cinder-ceph
channel: yoga/edge
channel: latest/edge
nova-cloud-controller:
expose: True
@ -143,7 +139,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '14'
channel: yoga/edge
channel: latest/edge
placement:
charm: ch:placement
@ -152,7 +148,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '15'
channel: yoga/edge
channel: latest/edge
relations:
- - 'nova-compute:amqp'

View File

@ -1,17 +1,22 @@
variables:
openstack-origin: &openstack-origin cloud:jammy-zed
openstack-origin: &openstack-origin distro
# use infra (mysql, rabbit) from lts for stability
infra-series: &infra-series jammy
series: jammy
series: mantic
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
series: *infra-series
'1':
constraints: mem=3072M
series: *infra-series
'2':
constraints: mem=3072M
series: *infra-series
'3':
'4':
'5':
@ -19,30 +24,37 @@ machines:
'7':
'8':
'9':
series: *infra-series
'10':
series: *infra-series
'11':
series: *infra-series
'12':
series: *infra-series
'13':
series: *infra-series
'14':
series: *infra-series
'15':
series: *infra-series
applications:
keystone-mysql-router:
charm: ch:mysql-router
channel: 8.0/edge
channel: latest/edge
glance-mysql-router:
charm: ch:mysql-router
channel: 8.0/edge
channel: latest/edge
cinder-mysql-router:
charm: ch:mysql-router
channel: 8.0/edge
channel: latest/edge
nova-cloud-controller-mysql-router:
charm: ch:mysql-router
channel: 8.0/edge
channel: latest/edge
placement-mysql-router:
charm: ch:mysql-router
channel: 8.0/edge
channel: latest/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
@ -51,7 +63,7 @@ applications:
- '0'
- '1'
- '2'
channel: 8.0/edge
channel: latest/edge
ceph-osd:
charm: ../../ceph-osd.charm
@ -77,14 +89,14 @@ applications:
- '6'
- '7'
- '8'
channel: quincy/edge
channel: latest/edge
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
to:
- '9'
channel: 3.9/edge
channel: latest/edge
keystone:
expose: True
@ -94,7 +106,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
channel: zed/edge
channel: latest/edge
nova-compute:
charm: ch:nova-compute
@ -103,7 +115,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '11'
channel: zed/edge
channel: latest/edge
glance:
expose: True
@ -113,7 +125,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '12'
channel: zed/edge
channel: latest/edge
cinder:
expose: True
@ -125,11 +137,11 @@ applications:
glance-api-version: '2'
to:
- '13'
channel: zed/edge
channel: latest/edge
cinder-ceph:
charm: ch:cinder-ceph
channel: zed/edge
channel: latest/edge
nova-cloud-controller:
expose: True
@ -139,7 +151,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '14'
channel: zed/edge
channel: latest/edge
placement:
charm: ch:placement
@ -148,7 +160,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '15'
channel: zed/edge
channel: latest/edge
relations:
- - 'nova-compute:amqp'

View File

@ -4,6 +4,8 @@ gate_bundles:
- focal-xena
- focal-yoga
- jammy-yoga
- jammy-bobcat
- mantic-bobcat
smoke_bundles:
- focal-xena