Add 2023.2 Bobcat support

* sync charm-helpers to classic charms
* change openstack-origin/source default to quincy
* add mantic to metadata series
* align testing with bobcat
* add new bobcat bundles
* add bobcat bundles to tests.yaml
* add bobcat tests to osci.yaml
* update build-on and run-on bases
* drop kinetic
* update charmcraft_channel to 2.x/edge

Change-Id: If59e6c4db7688c0819da2b3feb0c7bda89de6780
This commit is contained in:
Corey Bryant 2023-07-18 16:47:18 -04:00
parent a2ffd3a497
commit 529bad96e0
13 changed files with 93 additions and 276 deletions

View File

@ -31,9 +31,9 @@ bases:
- name: ubuntu
channel: "22.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "22.10"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "23.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "23.10"
architectures: [amd64, s390x, ppc64el, arm64]

View File

@ -160,6 +160,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2022.1', 'yoga'),
('2022.2', 'zed'),
('2023.1', 'antelope'),
('2023.2', 'bobcat'),
])
# The ugly duckling - must list releases oldest to newest

View File

@ -32,6 +32,7 @@ UBUNTU_RELEASES = (
'jammy',
'kinetic',
'lunar',
'mantic',
)

View File

@ -238,6 +238,14 @@ CLOUD_ARCHIVE_POCKETS = {
'antelope/proposed': 'jammy-proposed/antelope',
'jammy-antelope/proposed': 'jammy-proposed/antelope',
'jammy-proposed/antelope': 'jammy-proposed/antelope',
# bobcat
'bobcat': 'jammy-updates/bobcat',
'jammy-bobcat': 'jammy-updates/bobcat',
'jammy-bobcat/updates': 'jammy-updates/bobcat',
'jammy-updates/bobcat': 'jammy-updates/bobcat',
'bobcat/proposed': 'jammy-proposed/bobcat',
'jammy-bobcat/proposed': 'jammy-proposed/bobcat',
'jammy-proposed/bobcat': 'jammy-proposed/bobcat',
# OVN
'focal-ovn-22.03': 'focal-updates/ovn-22.03',
@ -270,6 +278,7 @@ OPENSTACK_RELEASES = (
'yoga',
'zed',
'antelope',
'bobcat',
)
@ -298,6 +307,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('jammy', 'yoga'),
('kinetic', 'zed'),
('lunar', 'antelope'),
('mantic', 'bobcat'),
])

View File

@ -10,7 +10,7 @@ options:
Setting this to True will allow supporting services to log to syslog.
source:
type: string
default: antelope
default: quincy
description: |
Repository from which to install. May be one of the following:
distro (default), ppa:somecustom/ppa, a deb url sources entry,

View File

@ -681,24 +681,39 @@ def _get_osd_num_from_dirname(dirname):
return match.group('osd_id')
def get_crimson_osd_ids():
    """Return the set of OSD ids that are running with the Crimson backend.

    Scans running processes with ``pgrep crimson-osd -a`` and collects the
    last whitespace-separated token of each matching line.
    NOTE(review): assumes the OSD id is the final argument on the
    crimson-osd command line -- confirm against the service unit.

    :returns: set of OSD id strings; empty if none are running or the
              process lookup fails.
    """
    osd_ids = set()
    try:
        out = subprocess.check_output(['pgrep', 'crimson-osd', '-a'])
        for line in out.decode('utf8').splitlines():
            osd_ids.add(line.split()[-1])
    except (subprocess.CalledProcessError, OSError):
        # CalledProcessError: pgrep exits non-zero when nothing matches.
        # OSError: pgrep binary unavailable. Both mean "no Crimson OSDs";
        # anything else is a real bug and should propagate (the previous
        # bare `except Exception` hid those).
        pass
    return osd_ids
def get_local_osd_ids():
    """This will list the /var/lib/ceph/osd/* directories and try
    to split the ID off of the directory name and return it in
    a list. Excludes crimson OSD's from the returned list.

    :returns: list. A list of OSD identifiers
    :raises: OSError if something goes wrong with listing the directory.
    """
    osd_ids = []
    # Crimson-backed OSDs are managed separately and must not be
    # reported alongside the classic ceph-osd daemons.
    crimson_osds = get_crimson_osd_ids()
    osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd')
    if os.path.exists(osd_path):
        try:
            dirs = os.listdir(osd_path)
            for osd_dir in dirs:
                # Directory names look like 'ceph-<id>'; a name without
                # a '-' cannot carry an OSD id, so yield '' (never an int).
                osd_id = osd_dir.split('-')[1] if '-' in osd_dir else ''
                # Accept only numeric ids whose data dir is actually
                # mounted, and skip ids owned by Crimson OSDs.
                if (_is_int(osd_id) and
                        filesystem_mounted(os.path.join(
                            os.sep, osd_path, osd_dir)) and
                        osd_id not in crimson_osds):
                    osd_ids.append(osd_id)
        except OSError:
            # Deliberate re-raise: listing failures are surfaced to the
            # caller as documented above.
            raise
@ -1216,28 +1231,15 @@ def get_named_key(name, caps=None, pool_list=None):
:param caps: dict of cephx capabilities
:returns: Returns a cephx key
"""
key_name = 'client.{}'.format(name)
try:
# Does the key already exist?
output = str(subprocess.check_output(
[
'sudo',
'-u', ceph_user(),
'ceph',
'--name', 'mon.',
'--keyring',
'/var/lib/ceph/mon/ceph-{}/keyring'.format(
socket.gethostname()
),
'auth',
'get',
key_name,
]).decode('UTF-8')).strip()
return parse_key(output)
except subprocess.CalledProcessError:
# Couldn't get the key, time to create it!
log("Creating new key for {}".format(name), level=DEBUG)
caps = caps or _default_caps
key_name = 'client.{}'.format(name)
key = ceph_auth_get(key_name)
if key:
upgrade_key_caps(key_name, caps)
return key
log("Creating new key for {}".format(name), level=DEBUG)
cmd = [
"sudo",
"-u",
@ -1259,6 +1261,7 @@ def get_named_key(name, caps=None, pool_list=None):
pools = " ".join(['pool={0}'.format(i) for i in pool_list])
subcaps[0] = subcaps[0] + " " + pools
cmd.extend([subsystem, '; '.join(subcaps)])
ceph_auth_get.cache_clear()
log("Calling check_output: {}".format(cmd), level=DEBUG)
return parse_key(str(subprocess
@ -1267,6 +1270,30 @@ def get_named_key(name, caps=None, pool_list=None):
.strip()) # IGNORE:E1103
@functools.lru_cache()
def ceph_auth_get(key_name):
    """Look up an existing cephx key by name.

    Runs ``ceph auth get <key_name>`` as the ceph user, authenticating
    with the local monitor keyring. Results are memoised via
    ``lru_cache``; callers that mutate keys clear the cache explicitly.

    :param key_name: full key name, e.g. 'client.myservice'
    :returns: parsed key on success, None when the key does not exist
              (or the query fails).
    """
    keyring = '/var/lib/ceph/mon/ceph-{}/keyring'.format(
        socket.gethostname())
    cmd = [
        'sudo', '-u', ceph_user(),
        'ceph',
        '--name', 'mon.',
        '--keyring', keyring,
        'auth', 'get', key_name,
    ]
    try:
        raw = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # The key is absent; signal that with an (implicit-style) None.
        return None
    return parse_key(str(raw.decode('UTF-8')).strip())
def upgrade_key_caps(key, caps, pool_list=None):
"""Upgrade key to have capabilities caps"""
if not is_leader():
@ -2067,7 +2094,7 @@ def filesystem_mounted(fs):
def get_running_osds():
"""Returns a list of the pids of the current running OSD daemons"""
cmd = ['pgrep', 'ceph-osd']
cmd = ['pgrep', 'ceph-osd|crimson-osd']
try:
result = str(subprocess.check_output(cmd).decode('UTF-8'))
return result.split()
@ -2518,7 +2545,7 @@ class WatchDog(object):
:type timeout: int
"""
start_time = time.time()
while(not wait_f()):
while not wait_f():
now = time.time()
if now > start_time + timeout:
raise WatchDog.WatchDogTimeoutException()
@ -3219,6 +3246,9 @@ UCA_CODENAME_MAP = {
'wallaby': 'pacific',
'xena': 'pacific',
'yoga': 'quincy',
'zed': 'quincy',
'antelope': 'quincy',
'bobcat': 'quincy',
}
@ -3418,7 +3448,7 @@ def apply_osd_settings(settings):
set_cmd = base_cmd + ' set {key} {value}'
def _get_cli_key(key):
return(key.replace(' ', '_'))
return key.replace(' ', '_')
# Retrieve the current values to check keys are correct and to make this a
# noop if setting are already applied.
for osd_id in get_local_osd_ids():

View File

@ -11,8 +11,8 @@ tags:
- misc
series:
- jammy
- kinetic
- lunar
- mantic
extra-bindings:
public:
cluster:

View File

@ -3,20 +3,19 @@
- charm-unit-jobs-py38
- charm-unit-jobs-py39
- charm-unit-jobs-py310
- charm-zed-functional-jobs
- charm-functional-jobs
check:
jobs:
- jammy-yoga-ec
- kinetic-zed-ec:
voting: false
- lunar-antelope-ec:
voting: false
- mantic-bobcat-ec:
voting: false
vars:
needs_charm_build: true
charm_build_name: ceph-proxy
build_type: charmcraft
charmcraft_channel: 2.0/stable
charmcraft_channel: 2.x/edge
- job:
name: jammy-yoga-ec
parent: func-target
@ -27,13 +26,6 @@
soft: true
vars:
tox_extra_args: '-- erasure-coded:jammy-yoga-ec'
- job:
name: kinetic-zed-ec
parent: func-target
dependencies:
- jammy-yoga-ec
vars:
tox_extra_args: -- erasure-coded:kinetic-zed-ec
- job:
name: lunar-antelope-ec
parent: func-target
@ -41,3 +33,10 @@
- jammy-yoga-ec
vars:
tox_extra_args: -- erasure-coded:lunar-antelope-ec
- job:
name: mantic-bobcat-ec
parent: func-target
dependencies:
- jammy-yoga-ec
vars:
tox_extra_args: -- erasure-coded:mantic-bobcat-ec

View File

@ -1,5 +1,5 @@
variables:
openstack-origin: &openstack-origin cloud:jammy-zed
openstack-origin: &openstack-origin cloud:jammy-bobcat
series: jammy

View File

@ -1,224 +0,0 @@
variables:
openstack-origin: &openstack-origin cloud:jammy-zed
series: jammy
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
'12':
'13':
'14':
'15':
'16':
'17':
'18':
applications:
cinder-mysql-router:
charm: ch:mysql-router
channel: latest/edge
glance-mysql-router:
charm: ch:mysql-router
channel: latest/edge
keystone-mysql-router:
charm: ch:mysql-router
channel: latest/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
num_units: 3
to:
- '0'
- '1'
- '2'
channel: latest/edge
ceph-mon:
charm: ch:ceph-mon
num_units: 3
options:
expected-osd-count: 3
source: *openstack-origin
to:
- '3'
- '4'
- '5'
channel: latest/edge
ceph-osd:
charm: ch:ceph-osd
num_units: 6
storage:
osd-devices: 10G
options:
source: *openstack-origin
to:
- '6'
- '7'
- '8'
- '16'
- '17'
- '18'
channel: latest/edge
ceph-proxy:
charm: ../../ceph-proxy.charm
num_units: 1
options:
source: *openstack-origin
to:
- '9'
ceph-radosgw:
charm: ch:ceph-radosgw
num_units: 1
options:
source: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
to:
- '10'
channel: latest/edge
cinder:
charm: ch:cinder
num_units: 1
options:
openstack-origin: *openstack-origin
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
to:
- '11'
channel: latest/edge
cinder-ceph:
charm: ch:cinder-ceph
options:
restrict-ceph-pools: True
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ec-profile-plugin: lrc
ec-profile-locality: 3
channel: latest/edge
keystone:
charm: ch:keystone
num_units: 1
options:
openstack-origin: *openstack-origin
admin-password: openstack
constraints: mem=1024
to:
- '12'
channel: latest/edge
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
constraints: mem=1024
to:
- '13'
channel: latest/edge
glance:
charm: ch:glance
num_units: 1
options:
openstack-origin: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ec-profile-plugin: jerasure
to:
- '14'
channel: latest/edge
nova-compute:
charm: ch:nova-compute
num_units: 1
options:
openstack-origin: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ec-profile-plugin: isa
libvirt-image-backend: rbd
to:
- '15'
channel: latest/edge
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'cinder-mysql-router:shared-db'
- - 'cinder-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:ceph'
- 'ceph-proxy:client'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-compute:ceph'
- 'ceph-proxy:client'

View File

@ -1,7 +1,7 @@
variables:
openstack-origin: &openstack-origin distro
series: kinetic
series: mantic
comment:
- 'machines section to decide order of deployment. database sooner = faster'

View File

@ -1,7 +1,7 @@
variables:
openstack-origin: &openstack-origin distro
series: kinetic
series: mantic
comment:
- 'machines section to decide order of deployment. database sooner = faster'

View File

@ -19,14 +19,14 @@ gate_bundles:
dev_bundles:
- jammy-yoga
- erasure-coded: jammy-yoga-ec
- jammy-zed
- lunar-antelope
- erasure-coded: jammy-zed-ec
- mantic-bobcat
- erasure-coded: lunar-antelope-ec
- kinetic-zed
- erasure-coded: mantic-bobcat-ec
- jammy-antelope
- erasure-coded: kinetic-zed-ec
- jammy-bobcat
- erasure-coded: jammy-antelope-ec
- erasure-coded: jammy-bobcat-ec
smoke_bundles:
- jammy-yoga
@ -59,7 +59,7 @@ target_deploy_status:
tests_options:
force_deploy:
- kinetic-zed
- jammy-antelope
- kinetic-zed-ec
- jammy-bobcat
- jammy-antelope-ec
- jammy-bobcat-ec