Updates for jammy enablement

- charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs]
- Refresh tox targets
- Drop impish bundles and OSCI testing
- Add jammy metadata
- Default source is yoga
- Resync charmhelpers and charms.ceph

Change-Id: Ib62d7f882f22146419dfe920045b73452f9af2cb
Billy Olsen 2022-04-05 11:52:25 -07:00 committed by James Page
parent b6bcec8072
commit d72e8db254
11 changed files with 197 additions and 276 deletions


@@ -21,7 +21,16 @@ parts:
- README.md
bases:
- name: ubuntu
channel: "20.04"
architectures:
- amd64
- build-on:
- name: ubuntu
channel: "20.04"
architectures:
- amd64
run-on:
- name: ubuntu
channel: "20.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "22.04"
architectures: [amd64, s390x, ppc64el, arm64]


@@ -10,7 +10,7 @@ options:
If set to True, supporting services will log to syslog.
source:
type: string
default:
default: yoga
description: |
Optional configuration to support use of additional sources such as:
.
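
With yoga now shipped as the default, a fresh deployment gets the yoga package source without any configuration; the option only needs to be set when a different source is wanted. A minimal, hypothetical bundle snippet overriding it (the cloud:focal-yoga value and unit count are illustrative, not part of this change):

applications:
  ceph-mon:
    charm: ch:ceph-mon
    num_units: 3
    options:
      source: cloud:focal-yoga  # any value the source option accepts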


@@ -118,12 +118,7 @@ from charmhelpers.contrib.openstack.utils import (
)
from charmhelpers.core.unitdata import kv
try:
from sriov_netplan_shim import pci
except ImportError:
# The use of the function and contexts that require the pci module is
# optional.
pass
from charmhelpers.contrib.hardware import pci
try:
import psutil
@@ -426,6 +421,9 @@ class IdentityServiceContext(OSContextGenerator):
('password', ctxt.get('admin_password', '')),
('signing_dir', ctxt.get('signing_dir', '')),))
if ctxt.get('service_type'):
c.update((('service_type', ctxt.get('service_type')),))
return c
def __call__(self):
@@ -468,6 +466,9 @@ class IdentityServiceContext(OSContextGenerator):
'internal_protocol': int_protocol,
'api_version': api_version})
if rdata.get('service_type'):
ctxt['service_type'] = rdata.get('service_type')
if float(api_version) > 2:
ctxt.update({
'admin_domain_name': rdata.get('service_domain'),
@@ -539,6 +540,9 @@ class IdentityCredentialsContext(IdentityServiceContext):
'api_version': api_version
})
if rdata.get('service_type'):
ctxt['service_type'] = rdata.get('service_type')
if float(api_version) > 2:
ctxt.update({'admin_domain_name':
rdata.get('domain')})
@@ -3120,7 +3124,7 @@ class SRIOVContext(OSContextGenerator):
"""Determine number of Virtual Functions (VFs) configured for device.
:param device: Object describing a PCI Network interface card (NIC).
:type device: sriov_netplan_shim.pci.PCINetDevice
:type device: contrib.hardware.pci.PCINetDevice
:param sriov_numvfs: Number of VFs requested for blanket configuration.
:type sriov_numvfs: int
:returns: Number of VFs to configure for device


@@ -0,0 +1,128 @@
#!/usr/bin/python3
# Copyright 2014-2022 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks for services with deferred restarts.
This Nagios check will parse /var/lib/policy-rc.d/
to find any restarts that are currently deferred.
"""
import argparse
import glob
import sys
import yaml
DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d'
def get_deferred_events():
"""Return a list of deferred events dicts from policy-rc.d files.
Events are read from DEFERRED_EVENTS_DIR and are of the form:
{
action: restart,
policy_requestor_name: rabbitmq-server,
policy_requestor_type: charm,
reason: 'Pkg update',
service: rabbitmq-server,
time: 1614328743
}
:raises OSError: Raised in case of a system error while reading a policy file
:raises yaml.YAMLError: Raised if parsing a policy file fails
:returns: List of deferred event dictionaries
:rtype: list
"""
deferred_events_files = glob.glob(
'{}/*.deferred'.format(DEFERRED_EVENTS_DIR))
deferred_events = []
for event_file in deferred_events_files:
with open(event_file, 'r') as f:
event = yaml.safe_load(f)
deferred_events.append(event)
return deferred_events
def get_deferred_restart_services(application=None):
"""Returns a list of services with deferred restarts.
:param str application: Name of the application that blocked the service restart.
If application is None, all services with deferred restarts
are returned. Services which are blocked by a non-charm
requestor are always returned.
:raises OSError: Raised in case of a system error while reading a policy file
:raises yaml.YAMLError: Raised if parsing a policy file fails
:returns: List of services with deferred restarts belonging to application.
:rtype: list
"""
deferred_restart_events = filter(
lambda e: e['action'] == 'restart', get_deferred_events())
deferred_restart_services = set()
for restart_event in deferred_restart_events:
if application:
if (
restart_event['policy_requestor_type'] != 'charm' or
restart_event['policy_requestor_type'] == 'charm' and
restart_event['policy_requestor_name'] == application
):
deferred_restart_services.add(restart_event['service'])
else:
deferred_restart_services.add(restart_event['service'])
return list(deferred_restart_services)
def main():
"""Check for services with deferred restarts."""
parser = argparse.ArgumentParser(
description='Check for services with deferred restarts')
parser.add_argument(
'--application', help='Check services belonging to this application only')
args = parser.parse_args()
services = set(get_deferred_restart_services(args.application))
if len(services) == 0:
print('OK: No deferred service restarts.')
sys.exit(0)
else:
print(
'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services)))
sys.exit(1)
if __name__ == '__main__':
try:
main()
except OSError as e:
print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror))
sys.exit(1)
except yaml.YAMLError as e:
print('CRITICAL: Failed to parse a policy file: {}'.format(str(e)))
sys.exit(1)
except Exception as e:
print('CRITICAL: An unknown error occurred: {}'.format(str(e)))
sys.exit(1)
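
For reference, a hypothetical deferred-event file that this check would pick up could look like the following; the filename and values are illustrative, and the keys mirror the docstring above:

# e.g. /var/lib/policy-rc.d/charm-ceph-mon.deferred
action: restart
policy_requestor_name: ceph-mon
policy_requestor_type: charm
reason: 'Pkg update'
service: ceph-mon
time: 1614328743

With such a file in place the script exits 1 and reports CRITICAL for the ceph-mon service, whether or not --application ceph-mon is passed.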


@@ -114,6 +114,33 @@ def service_stop(service_name, **kwargs):
return service('stop', service_name, **kwargs)
def service_enable(service_name, **kwargs):
"""Enable a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be enabled. The following
example enables the ceph-osd service for instance id=4:
service_enable('ceph-osd', id=4)
:param service_name: the name of the service to enable
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
return service('enable', service_name, **kwargs)
def service_restart(service_name, **kwargs):
"""Restart a system service.
@@ -134,7 +161,7 @@ def service_restart(service_name, **kwargs):
:param service_name: the name of the service to restart
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""


@@ -11,7 +11,7 @@ tags:
- misc
series:
- focal
- impish
- jammy
peers:
mon:
interface: ceph


@@ -1,7 +1,7 @@
- project:
templates:
- charm-unit-jobs-py38
- charm-unit-jobs-py39
- charm-unit-jobs-py310
- charm-xena-functional-jobs
- charm-yoga-functional-jobs
vars:


@@ -11,6 +11,13 @@ pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but
cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35.
setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
# NOTE: newer versions of cryptography require a Rust compiler to build,
# see
# * https://github.com/openstack-charmers/zaza/issues/421
# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html
#
cryptography<3.4
requests>=2.18.4
stestr>=2.2.0


@@ -1,252 +0,0 @@
variables:
openstack-origin: &openstack-origin distro
series: impish
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
'12':
'13':
'14':
'15':
'16':
series: focal
applications:
keystone-mysql-router:
charm: ch:mysql-router
channel: 8.0.19/edge
glance-mysql-router:
charm: ch:mysql-router
channel: 8.0.19/edge
cinder-mysql-router:
charm: ch:mysql-router
channel: 8.0.19/edge
nova-cloud-controller-mysql-router:
charm: ch:mysql-router
channel: 8.0.19/edge
placement-mysql-router:
charm: ch:mysql-router
channel: 8.0.19/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
channel: 8.0.19/edge
ceph-osd:
charm: ch:ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
source: *openstack-origin
osd-devices: '/dev/test-non-existent'
to:
- '3'
- '4'
- '5'
channel: quincy/edge
ceph-mon:
charm: ../../ceph-mon.charm
num_units: 3
options:
source: *openstack-origin
monitor-count: '3'
to:
- '6'
- '7'
- '8'
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
options:
source: *openstack-origin
to:
- '9'
channel: 3.9/edge
keystone:
expose: True
charm: ch:keystone
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '10'
channel: yoga/edge
nova-compute:
charm: ch:nova-compute
num_units: 1
options:
openstack-origin: *openstack-origin
libvirt-image-backend: rbd
to:
- '11'
channel: yoga/edge
glance:
expose: True
charm: ch:glance
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '12'
channel: yoga/edge
cinder:
expose: True
charm: ch:cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: *openstack-origin
to:
- '13'
channel: yoga/edge
cinder-ceph:
charm: ch:cinder-ceph
channel: yoga/edge
nova-cloud-controller:
expose: True
charm: ch:nova-cloud-controller
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '14'
channel: yoga/edge
placement:
charm: ch:placement
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '15'
channel: yoga/edge
prometheus2:
# Pin prometheus2 charm version Bug #1891942
charm: cs:prometheus2-18
num_units: 1
series: focal
to:
- '16'
relations:
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-compute:image-service'
- 'glance:image-service'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:amqp'
- 'rabbitmq-server:amqp'
- - 'glance:ceph'
- 'ceph-mon:client'
- - 'cinder:shared-db'
- 'cinder-mysql-router:shared-db'
- - 'cinder-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:image-service'
- 'glance:image-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-mon:client'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-cloud-controller:shared-db'
- 'nova-cloud-controller-mysql-router:shared-db'
- - 'nova-cloud-controller-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-cloud-controller:identity-service'
- 'keystone:identity-service'
- - 'nova-cloud-controller:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:cloud-compute'
- 'nova-compute:cloud-compute'
- - 'nova-cloud-controller:image-service'
- 'glance:image-service'
- - 'placement:shared-db'
- 'placement-mysql-router:shared-db'
- - 'placement-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'placement'
- 'keystone'
- - 'placement'
- 'nova-cloud-controller'
- - 'ceph-mon:prometheus'
- 'prometheus2:target'


@@ -2,14 +2,13 @@ charm_name: ceph-mon
gate_bundles:
- focal-xena
- impish-xena
- focal-yoga
dev_bundles:
- focal-yoga
- jammy-yoga
smoke_bundles:
- focal-xena
- focal-yoga
configure:
- zaza.openstack.charm_tests.glance.setup.add_lts_image
@@ -24,5 +23,4 @@ tests:
tests_options:
force_deploy:
- impish-xena
- jammy-yoga

tox.ini

@@ -51,11 +51,6 @@ commands =
charmcraft -v build
{toxinidir}/rename.sh
[testenv:py35]
basepython = python3.5
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py36]
basepython = python3.6
deps = -r{toxinidir}/requirements.txt
@@ -76,6 +71,11 @@ basepython = python3.9
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py310]
basepython = python3.10
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt