Add xena bundles

- add non-voting focal-xena bundle
- add non-voting impish-xena bundle
- sync charm-helpers to pull in recent upstream changes
- update tox/pip.sh to ensure setuptools<50.0.0

Change-Id: If511b7fee8cf676b6ba7017aa60fe916ac9a26d9
Alex Kavanagh 2021-09-21 14:11:16 +01:00
parent af32b3186d
commit cb580a0b91
21 changed files with 944 additions and 2112 deletions


@@ -1,13 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -1,387 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OPENSTACK_RELEASES_PAIRS
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the ~openstack-charmers
base_charms = {
'mysql': ['trusty'],
'mongodb': ['trusty'],
'nrpe': ['trusty', 'xenial'],
}
for svc in other_services:
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if svc['name'] in base_charms:
# NOTE: not all charms have support for all series we
# want/need to test against, so fix to most recent
# that each base charm supports
target_series = self.series
if self.series not in base_charms[svc['name']]:
target_series = base_charms[svc['name']][-1]
svc['location'] = 'cs:{}/{}'.format(target_series,
svc['name'])
elif self.stable:
svc['location'] = 'cs:{}/{}'.format(self.series,
svc['name'])
else:
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
self.series,
svc['name']
)
return other_services
def _add_services(self, this_service, other_services, use_source=None,
no_origin=None):
"""Add services to the deployment and optionally set
openstack-origin/source.
:param this_service dict: Service dictionary describing the service
whose amulet tests are being run
:param other_services dict: List of service dictionaries describing
the services needed to support the target
service
:param use_source list: List of services which use the 'source' config
option rather than 'openstack-origin'
:param no_origin list: List of services which do not support setting
the Cloud Archive.
Service Dict:
{
'name': str charm-name,
'units': int number of units,
'constraints': dict of juju constraints,
'location': str location of charm,
}
e.g.
this_service = {
'name': 'openvswitch-odl',
'constraints': {'mem': '8G'},
}
other_services = [
{
'name': 'nova-compute',
'units': 2,
'constraints': {'mem': '4G'},
'location': 'cs:~bob/xenial/nova-compute'
},
{
'name': 'mysql',
'constraints': {'mem': '2G'},
},
{'neutron-api-odl'}]
use_source = ['mysql']
no_origin = ['neutron-api-odl']
"""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
use_source = use_source or []
no_origin = no_origin or []
# Charms which should use the source config option
use_source = list(set(
use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw', 'ceph-mon',
'ceph-proxy', 'percona-cluster', 'lxd']))
# Charms which cannot use openstack-origin, i.e. many subordinates
no_origin = list(set(
no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
'nrpe', 'openvswitch-odl', 'neutron-api-odl',
'odl-controller', 'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt',
'ceilometer-agent']))
if self.openstack:
for svc in services:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjunction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjunction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
if not timeout:
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
self.log.info('Waiting for extended status on units for {}s...'
''.format(timeout))
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
# Check for idleness
self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
setattr(self, os_pair, i)
releases = {
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('xenial', None): self.xenial_mitaka,
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
('xenial', 'cloud:xenial-queens'): self.xenial_queens,
('yakkety', None): self.yakkety_newton,
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
('bionic', None): self.bionic_queens,
('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
('bionic', 'cloud:bionic-stein'): self.bionic_stein,
('bionic', 'cloud:bionic-train'): self.bionic_train,
('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri,
('cosmic', None): self.cosmic_rocky,
('disco', None): self.disco_stein,
('eoan', None): self.eoan_train,
('focal', None): self.focal_ussuri,
('focal', 'cloud:focal-victoria'): self.focal_victoria,
('groovy', None): self.groovy_victoria,
}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('trusty', 'icehouse'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
def get_percona_service_entry(self, memory_constraint=None):
"""Return a amulet service entry for percona cluster.
:param memory_constraint: Override the default memory constraint
in the service entry.
:type memory_constraint: str
:returns: Amulet service entry.
:rtype: dict
"""
memory_constraint = memory_constraint or '3072M'
svc_entry = {
'name': 'percona-cluster',
'constraints': {'mem': memory_constraint}}
if self._get_openstack_release() <= self.trusty_mitaka:
svc_entry['location'] = 'cs:trusty/percona-cluster'
return svc_entry
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() == self.trusty_icehouse:
# Icehouse
pools = [
'data',
'metadata',
'rbd',
'cinder-ceph',
'glance'
]
elif (self.trusty_kilo <= self._get_openstack_release() <=
self.zesty_ocata):
# Kilo through Ocata
pools = [
'rbd',
'cinder-ceph',
'glance'
]
else:
# Pike and later
pools = [
'cinder-ceph',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools

File diff suppressed because it is too large.


@@ -25,7 +25,10 @@ import socket
import time
from base64 import b64decode
from subprocess import check_call, CalledProcessError
from subprocess import (
check_call,
check_output,
CalledProcessError)
import six
@@ -453,18 +456,24 @@ class IdentityServiceContext(OSContextGenerator):
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
int_host = rdata.get('internal_host')
int_host = format_ipv6_addr(int_host) or int_host
svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
int_protocol = rdata.get('internal_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'),
'service_host': serv_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'internal_host': int_host,
'internal_port': rdata.get('internal_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'internal_protocol': int_protocol,
'api_version': api_version})
if float(api_version) > 2:
@@ -1781,6 +1790,10 @@ class NeutronAPIContext(OSContextGenerator):
'rel_key': 'enable-port-forwarding',
'default': False,
},
'enable_fwaas': {
'rel_key': 'enable-fwaas',
'default': False,
},
'global_physnet_mtu': {
'rel_key': 'global-physnet-mtu',
'default': 1500,
@@ -1815,6 +1828,11 @@ class NeutronAPIContext(OSContextGenerator):
if ctxt['enable_port_forwarding']:
l3_extension_plugins.append('port_forwarding')
if ctxt['enable_fwaas']:
l3_extension_plugins.append('fwaas_v2')
if ctxt['enable_nfg_logging']:
l3_extension_plugins.append('fwaas_v2_log')
ctxt['l3_extension_plugins'] = l3_extension_plugins
return ctxt
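
For illustration, here is a minimal standalone sketch (hypothetical flag values, mirroring the logic above) of how the new enable-fwaas key feeds the l3_extension_plugins list:

# Sketch only: hypothetical relation flag values.
ctxt = {'enable_port_forwarding': False,
        'enable_fwaas': True,        # new 'enable-fwaas' relation key
        'enable_nfg_logging': True}
l3_extension_plugins = []
if ctxt['enable_port_forwarding']:
    l3_extension_plugins.append('port_forwarding')
if ctxt['enable_fwaas']:
    l3_extension_plugins.append('fwaas_v2')
if ctxt['enable_nfg_logging']:
    l3_extension_plugins.append('fwaas_v2_log')
assert l3_extension_plugins == ['fwaas_v2', 'fwaas_v2_log']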
@@ -2379,6 +2397,12 @@ class DHCPAgentContext(OSContextGenerator):
ctxt['enable_metadata_network'] = True
ctxt['enable_isolated_metadata'] = True
ctxt['append_ovs_config'] = False
cmp_release = CompareOpenStackReleases(
os_release('neutron-common', base='icehouse'))
if cmp_release >= 'queens' and config('enable-dpdk'):
ctxt['append_ovs_config'] = True
return ctxt
@staticmethod
@@ -2570,22 +2594,48 @@ class OVSDPDKDeviceContext(OSContextGenerator):
:returns: hex formatted CPU mask
:rtype: str
"""
num_cores = config('dpdk-socket-cores')
mask = 0
return self.cpu_masks()['dpdk_lcore_mask']
def cpu_masks(self):
"""Get hex formatted CPU masks
The mask is based on using the first config:dpdk-socket-cores
cores of each NUMA node in the unit, followed by the
next config:pmd-socket-cores
:returns: Dict of hex formatted CPU masks
:rtype: Dict[str, str]
"""
num_lcores = config('dpdk-socket-cores')
pmd_cores = config('pmd-socket-cores')
lcore_mask = 0
pmd_mask = 0
for cores in self._numa_node_cores().values():
for core in cores[:num_cores]:
mask = mask | 1 << core
return format(mask, '#04x')
for core in cores[:num_lcores]:
lcore_mask = lcore_mask | 1 << core
for core in cores[num_lcores:][:pmd_cores]:
pmd_mask = pmd_mask | 1 << core
return {
'pmd_cpu_mask': format(pmd_mask, '#04x'),
'dpdk_lcore_mask': format(lcore_mask, '#04x')}
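
A minimal standalone sketch of the mask arithmetic above, assuming a hypothetical two-node NUMA layout and config values:

# Hypothetical layout/config: 4 cores per NUMA node,
# dpdk-socket-cores=1, pmd-socket-cores=2.
numa_node_cores = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
num_lcores, pmd_cores = 1, 2
lcore_mask = pmd_mask = 0
for cores in numa_node_cores.values():
    for core in cores[:num_lcores]:              # first N cores -> lcore mask
        lcore_mask |= 1 << core
    for core in cores[num_lcores:][:pmd_cores]:  # next M cores -> PMD mask
        pmd_mask |= 1 << core
print(format(lcore_mask, '#04x'))  # 0x11 (cores 0 and 4)
print(format(pmd_mask, '#04x'))    # 0x66 (cores 1, 2, 5 and 6)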
def socket_memory(self):
"""Formatted list of socket memory configuration per NUMA node
"""Formatted list of socket memory configuration per socket.
:returns: socket memory configuration per NUMA node
:returns: socket memory configuration per socket.
:rtype: str
"""
lscpu_out = check_output(
['lscpu', '-p=socket']).decode('UTF-8').strip()
sockets = set()
for line in lscpu_out.split('\n'):
try:
sockets.add(int(line))
except ValueError:
# lscpu output is headed by comments so ignore them.
pass
sm_size = config('dpdk-socket-memory')
node_regex = '/sys/devices/system/node/node*'
mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
mem_list = [str(sm_size) for _ in sockets]
if mem_list:
return ','.join(mem_list)
else:


@@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release,
restart_handler()
@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead")
@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
"""This function is designed to be called from the config changed hook.


@@ -106,6 +106,8 @@ from charmhelpers.fetch import (
filter_installed_packages,
filter_missing_packages,
ubuntu_apt_pkg as apt,
OPENSTACK_RELEASES,
UBUNTU_OPENSTACK_RELEASE,
)
from charmhelpers.fetch.snap import (
@@ -132,54 +134,9 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
])
OPENSTACK_CODENAMES = OrderedDict([
# NOTE(lourot): 'yyyy.i' doesn't actually map to any real version
# number. This just means the i-th version of the year yyyy.
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
@@ -200,6 +157,8 @@ OPENSTACK_CODENAMES = OrderedDict([
('2020.1', 'ussuri'),
('2020.2', 'victoria'),
('2021.1', 'wallaby'),
('2021.2', 'xena'),
('2022.1', 'yoga'),
])
# The ugly duckling - must list releases oldest to newest


@@ -28,6 +28,7 @@ UBUNTU_RELEASES = (
'focal',
'groovy',
'hirsute',
'impish',
)


@@ -18,8 +18,11 @@
import six
import re
TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'}
FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'}
def bool_from_string(value):
def bool_from_string(value, truthy_strings=TRUTHY_STRINGS,
                     falsey_strings=FALSEY_STRINGS, assume_false=False):
"""Interpret string value as boolean.
Returns True if value translates to True otherwise False.
@@ -32,9 +35,9 @@ def bool_from_string(value):
value = value.strip().lower()
if value in ['y', 'yes', 'true', 't', 'on']:
if value in truthy_strings:
return True
elif value in ['n', 'no', 'false', 'f', 'off']:
elif value in falsey_strings or assume_false:
return False
msg = "Unable to interpret string value '%s' as boolean" % (value)


@@ -106,6 +106,8 @@ if __platform__ == "ubuntu":
apt_pkg = fetch.ubuntu_apt_pkg
get_apt_dpkg_env = fetch.get_apt_dpkg_env
get_installed_version = fetch.get_installed_version
OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES
UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE
elif __platform__ == "centos":
yum_search = fetch.yum_search


@@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None):
"""Create an isolated Python environment."""
if six.PY2:
apt_install('python-virtualenv')
extra_flags = []
else:
apt_install('python3-virtualenv')
apt_install(['python3-virtualenv', 'virtualenv'])
extra_flags = ['--python=python3']
if path:
venv_path = path
@@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None):
venv_path = os.path.join(charm_dir(), 'venv')
if not os.path.exists(venv_path):
subprocess.check_call(['virtualenv', venv_path])
subprocess.check_call(['virtualenv', venv_path] + extra_flags)
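
A brief note on what the change does, sketched as comments (not literal charm code):

# Effective behaviour of the change above on Python 3 (sketch):
#   apt_install(['python3-virtualenv', 'virtualenv'])
#   subprocess.check_call(['virtualenv', venv_path, '--python=python3'])
# Installing 'virtualenv' explicitly and passing --python=python3 ensures
# a python3 venv even where the entry point would default to python2.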


@@ -208,12 +208,79 @@ CLOUD_ARCHIVE_POCKETS = {
'wallaby/proposed': 'focal-proposed/wallaby',
'focal-wallaby/proposed': 'focal-proposed/wallaby',
'focal-proposed/wallaby': 'focal-proposed/wallaby',
# Xena
'xena': 'focal-updates/xena',
'focal-xena': 'focal-updates/xena',
'focal-xena/updates': 'focal-updates/xena',
'focal-updates/xena': 'focal-updates/xena',
'xena/proposed': 'focal-proposed/xena',
'focal-xena/proposed': 'focal-proposed/xena',
'focal-proposed/xena': 'focal-proposed/xena',
# Yoga
'yoga': 'focal-updates/yoga',
'focal-yoga': 'focal-updates/yoga',
'focal-yoga/updates': 'focal-updates/yoga',
'focal-updates/yoga': 'focal-updates/yoga',
'yoga/proposed': 'focal-proposed/yoga',
'focal-yoga/proposed': 'focal-proposed/yoga',
'focal-proposed/yoga': 'focal-proposed/yoga',
}
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
'xena',
'yoga',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
('impish', 'xena'),
])
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times.
CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times.
def filter_installed_packages(packages):
@@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False):
will be used. If staging is NOT used then the cloud archive [3] will be
added, and the 'ubuntu-cloud-keyring' package will be added for the
current distro.
'<openstack-version>': translate to cloud:<release> based on the current
distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri' or
'distro').
'<openstack-version>/proposed': as above, but for the proposed pocket.
Otherwise the source is not recognised and this is logged to the juju log.
However, no error is raised, unless fail_invalid is True.
@@ -600,6 +671,12 @@ def add_source(source, key=None, fail_invalid=False):
@raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
valid pocket in CLOUD_ARCHIVE_POCKETS
"""
# extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
# the list in contrib.openstack.utils as it might not be included in
# classic charms and would break everything. Having OpenStack specific
# code in this file is a bit of an antipattern, anyway.
os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES))
_mapping = OrderedDict([
(r"^distro$", lambda: None), # This is a NOP
(r"^(?:proposed|distro-proposed)$", _add_proposed),
@@ -609,6 +686,9 @@ def add_source(source, key=None, fail_invalid=False):
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
(r"^{}\/proposed$".format(os_versions_regex),
_add_bare_openstack_proposed),
(r"^{}$".format(os_versions_regex), _add_bare_openstack),
])
if source is None:
source = ''
@@ -662,7 +742,8 @@ def _add_apt_repository(spec):
series = get_distrib_codename()
spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec],
cmd_env=env_proxy_settings(['https', 'http']))
cmd_env=env_proxy_settings(['https', 'http', 'no_proxy'])
)
def _add_cloud_pocket(pocket):
@@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release):
'version ({})'.format(release, os_release, ubuntu_rel))
def _add_bare_openstack(openstack_release):
"""Add cloud or distro based on the release given.
The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
or 'distro' depending on whether the ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
# TODO(ajkavanagh) - surely this means we should be removing cloud archives
# if they exist?
__add_bare_helper(openstack_release, "{}-{}", lambda: None)
def _add_bare_openstack_proposed(openstack_release):
"""Add cloud of distro but with proposed.
The spec given is, say, 'ussuri' but this could apply
cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
__add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed)
def __add_bare_helper(openstack_release, pocket_format, final_function):
"""Helper for _add_bare_openstack[_proposed]
The bulk of the work between the two functions is exactly the same except
for the pocket format and the function that is run if it's the distro
version.
:param openstack_release: the OpenStack codename. e.g. ussuri
:type openstack_release: str
:param pocket_format: the pocket formatter string to construct a pocket str
from the openstack_release and the current ubuntu version.
:type pocket_format: str
:param final_function: the function to call if it is the distro version.
:type final_function: Callable
:raises SourceConfigError on error
"""
ubuntu_version = get_distrib_codename()
possible_pocket = pocket_format.format(ubuntu_version, openstack_release)
if possible_pocket in CLOUD_ARCHIVE_POCKETS:
_add_cloud_pocket(possible_pocket)
return
# Otherwise it's almost certainly the distro version; verify that it
# exists.
try:
assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release
except KeyError:
raise SourceConfigError(
"Invalid ubuntu version {} isn't known to this library"
.format(ubuntu_version))
except AssertionError:
raise SourceConfigError(
'Invalid OpenStack release specified: {} for ubuntu version {}'
.format(openstack_release, ubuntu_version))
final_function()
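
To make the new mapping concrete, here is a hedged walk-through of hypothetical calls on a focal unit, following the helpers above:

add_source('xena')
# 'xena' matches the bare-release regex -> _add_bare_openstack('xena');
# 'focal-xena' is in CLOUD_ARCHIVE_POCKETS, so the focal-updates/xena
# cloud archive pocket is configured.
add_source('xena/proposed')
# -> _add_bare_openstack_proposed('xena'); 'focal-xena/proposed' is in
# CLOUD_ARCHIVE_POCKETS, so the proposed pocket is configured instead.
add_source('ussuri')
# 'focal-ussuri' is NOT a cloud archive pocket (focal ships ussuri
# natively); the helper verifies UBUNTU_OPENSTACK_RELEASE['focal'] ==
# 'ussuri' and falls through to the distro no-op.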
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None, quiet=False):
"""Run a command and retry until success or max_retries is reached.


@@ -264,7 +264,7 @@ def version_compare(a, b):
else:
raise RuntimeError('Unable to compare "{}" and "{}", according to '
'our logic they are neither greater, equal nor '
'less than each other.')
'less than each other.'.format(a, b))
class PkgVersion():


@@ -28,6 +28,9 @@ def get_platform():
elif "elementary" in current_platform:
# ElementaryOS fails to run tests locally without this.
return "ubuntu"
elif "Pop!_OS" in current_platform:
# Pop!_OS also fails to run tests locally without this.
return "ubuntu"
else:
raise RuntimeError("This module is not supported on {}."
.format(current_platform))


@@ -79,9 +79,9 @@ class Crushmap(object):
stdin=crush.stdout)
.decode('UTF-8'))
except CalledProcessError as e:
log("Error occured while loading and decompiling CRUSH map:"
log("Error occurred while loading and decompiling CRUSH map:"
"{}".format(e), ERROR)
raise "Failed to read CRUSH map"
raise
def ensure_bucket_is_present(self, bucket_name):
if bucket_name not in [bucket.name for bucket in self.buckets()]:
@@ -111,7 +111,7 @@ class Crushmap(object):
return ceph_output
except CalledProcessError as e:
log("save error: {}".format(e))
raise "Failed to save CRUSH map."
raise
def build_crushmap(self):
"""Modifies the current CRUSH map to include the new buckets"""


@@ -14,6 +14,7 @@
import collections
import glob
import itertools
import json
import os
import pyudev
@@ -24,6 +25,7 @@ import subprocess
import sys
import time
import uuid
import functools
from contextlib import contextmanager
from datetime import datetime
@@ -501,30 +503,33 @@ def ceph_user():
class CrushLocation(object):
def __init__(self,
name,
identifier,
host,
rack,
row,
datacenter,
chassis,
root):
self.name = name
def __init__(self, identifier, name, osd="", host="", chassis="",
rack="", row="", pdu="", pod="", room="",
datacenter="", zone="", region="", root=""):
self.identifier = identifier
self.name = name
self.osd = osd
self.host = host
self.chassis = chassis
self.rack = rack
self.row = row
self.pdu = pdu
self.pod = pod
self.room = room
self.datacenter = datacenter
self.chassis = chassis
self.zone = zone
self.region = region
self.root = root
def __str__(self):
return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \
"chassis :{} root: {}".format(self.name, self.identifier,
self.host, self.rack, self.row,
self.datacenter, self.chassis,
self.root)
return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \
"row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \
"region: {} root: {}".format(self.name, self.identifier,
self.osd, self.host, self.chassis,
self.rack, self.row, self.pdu,
self.pod, self.room,
self.datacenter, self.zone,
self.region, self.root)
def __eq__(self, other):
return not self.name < other.name and not other.name < self.name
@@ -571,10 +576,53 @@ def get_osd_weight(osd_id):
raise
def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type):
"""Get all nodes of the desired type, with all their attributes.
These attributes can be direct or inherited from ancestors.
"""
attribute_dict = {node['type']: node['name']}
if node['type'] == lookup_type:
attribute_dict['name'] = node['name']
attribute_dict['identifier'] = node['id']
return [attribute_dict]
elif not node.get('children'):
return [attribute_dict]
else:
descendant_attribute_dicts = [
_filter_nodes_and_set_attributes(node_lookup_map[node_id],
node_lookup_map, lookup_type)
for node_id in node.get('children', [])
]
return [dict(attribute_dict, **descendant_attribute_dict)
for descendant_attribute_dict
in itertools.chain.from_iterable(descendant_attribute_dicts)]
def _flatten_roots(nodes, lookup_type='host'):
"""Get a flattened list of nodes of the desired type.
:param nodes: list of nodes defined as a dictionary of attributes and
children
:type nodes: List[Dict[int, Any]]
:param lookup_type: type of searched node
:type lookup_type: str
:returns: flattened list of nodes
:rtype: List[Dict[str, Any]]
"""
lookup_map = {node['id']: node for node in nodes}
root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map,
lookup_type)
for node in nodes if node['type'] == 'root']
# get a flattened list of roots.
return list(itertools.chain.from_iterable(root_attributes_dicts))
def get_osd_tree(service):
"""Returns the current osd map in JSON.
:returns: List.
:rtype: List[CrushLocation]
:raises: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
"""
@@ -585,35 +633,14 @@ def get_osd_tree(service):
.decode('UTF-8'))
try:
json_tree = json.loads(tree)
crush_list = []
# Make sure children are present in the json
if not json_tree['nodes']:
return None
host_nodes = [
node for node in json_tree['nodes']
if node['type'] == 'host'
]
for host in host_nodes:
crush_list.append(
CrushLocation(
name=host.get('name'),
identifier=host['id'],
host=host.get('host'),
rack=host.get('rack'),
row=host.get('row'),
datacenter=host.get('datacenter'),
chassis=host.get('chassis'),
root=host.get('root')
)
)
return crush_list
roots = _flatten_roots(json_tree["nodes"])
return [CrushLocation(**host) for host in roots]
except ValueError as v:
log("Unable to parse ceph tree json: {}. Error: {}".format(
tree, v))
raise
except subprocess.CalledProcessError as e:
log("ceph osd tree command failed with message: {}".format(
e))
log("ceph osd tree command failed with message: {}".format(e))
raise
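
A tiny worked example of the new flattening helpers, using a hypothetical single-root 'ceph osd tree' JSON fragment:

nodes = [
    {'id': -1, 'name': 'default', 'type': 'root', 'children': [-2]},
    {'id': -2, 'name': 'rack1', 'type': 'rack', 'children': [-3]},
    {'id': -3, 'name': 'node-a', 'type': 'host', 'children': [0]},
    {'id': 0, 'name': 'osd.0', 'type': 'osd', 'children': []},
]
print(_flatten_roots(nodes))
# [{'root': 'default', 'rack': 'rack1', 'host': 'node-a',
#   'name': 'node-a', 'identifier': -3}]
# Each dict feeds straight into CrushLocation(**host), so the rack (and
# any other ancestor bucket types) are now carried through to the result.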
@@ -669,7 +696,9 @@ def get_local_osd_ids():
dirs = os.listdir(osd_path)
for osd_dir in dirs:
osd_id = osd_dir.split('-')[1]
if _is_int(osd_id):
if (_is_int(osd_id) and
filesystem_mounted(os.path.join(
os.sep, osd_path, osd_dir))):
osd_ids.append(osd_id)
except OSError:
raise
@@ -3271,13 +3300,14 @@ def determine_packages():
def determine_packages_to_remove():
"""Determines packages for removal
Note: if in a container, then the CHRONY_PACKAGE is removed.
:returns: list of packages to be removed
:rtype: List[str]
"""
rm_packages = REMOVE_PACKAGES.copy()
if is_container():
install_list = filter_missing_packages(CHRONY_PACKAGE)
if not install_list:
rm_packages.append(CHRONY_PACKAGE)
rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE]))
return rm_packages
@@ -3376,3 +3406,132 @@ def apply_osd_settings(settings):
level=ERROR)
raise OSDConfigSetError
return True
def enabled_manager_modules():
"""Return a list of enabled manager modules.
:rtype: List[str]
"""
cmd = ['ceph', 'mgr', 'module', 'ls']
try:
modules = subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError as e:
log("Failed to list ceph modules: {}".format(e), WARNING)
return []
modules = json.loads(modules)
return modules['enabled_modules']
def is_mgr_module_enabled(module):
"""Is a given manager module enabled.
:param module: The module name to check
:type module: str
:returns: Whether the named module is enabled
:rtype: bool
"""
return module in enabled_manager_modules()
is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard')
def mgr_enable_module(module):
"""Enable a Ceph Manager Module.
:param module: The module name to enable
:type module: str
:raises: subprocess.CalledProcessError
"""
if not is_mgr_module_enabled(module):
subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module])
return True
return False
mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard')
def mgr_disable_module(module):
"""Enable a Ceph Manager Module.
:param module: The module name to enable
:type module: str
:raises: subprocess.CalledProcessError
"""
if is_mgr_module_enabled(module):
subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module])
return True
return False
mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
def ceph_config_set(name, value, who):
"""Set a ceph config option
:param name: key to set
:type name: str
:param value: value corresponding to key
:type value: str
:param who: Config area the key is associated with (e.g. 'dashboard')
:type who: str
:raises: subprocess.CalledProcessError
"""
subprocess.check_call(['ceph', 'config', 'set', who, name, value])
mgr_config_set = functools.partial(ceph_config_set, who='mgr')
def ceph_config_get(name, who):
"""Retrieve the value of a ceph config option
:param name: key to lookup
:type name: str
:param who: Config area the key is associated with (e.g. 'dashboard')
:type who: str
:returns: Value associated with key
:rtype: str
:raises: subprocess.CalledProcessError
"""
return subprocess.check_output(
['ceph', 'config', 'get', who, name]).decode('UTF-8')
mgr_config_get = functools.partial(ceph_config_get, who='mgr')
def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None):
"""Set SSL dashboard config option.
:param path: Path to file
:type path: str
:param artifact_name: Option name for setting the artifact
:type artifact_name: str
:param hostname: If hostname is set artifact will only be associated with
the dashboard on that host.
:type hostname: str
:raises: subprocess.CalledProcessError
"""
cmd = ['ceph', 'dashboard', artifact_name]
if hostname:
cmd.append(hostname)
cmd.extend(['-i', path])
log(cmd, level=DEBUG)
subprocess.check_call(cmd)
dashboard_set_ssl_certificate = functools.partial(
_dashboard_set_ssl_artifact,
artifact_name='set-ssl-certificate')
dashboard_set_ssl_certificate_key = functools.partial(
_dashboard_set_ssl_artifact,
artifact_name='set-ssl-certificate-key')
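
A hedged usage sketch of the new manager-module and dashboard helpers (they shell out to the real 'ceph' CLI, so this is only meaningful on a deployed mon/mgr unit; the paths, hostname and config key are hypothetical):

if not is_dashboard_enabled():
    mgr_enable_dashboard()   # runs 'ceph mgr module enable dashboard'
dashboard_set_ssl_certificate('/etc/ceph/dashboard.crt')
dashboard_set_ssl_certificate_key('/etc/ceph/dashboard.key',
                                  hostname='mon-0')  # per-host key
ssl_state = mgr_config_get('mgr/dashboard/ssl')  # hypothetical key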


@@ -6,6 +6,10 @@
jobs:
- focal-ussuri-ec-ceph-mon
- bionic-train-with-fsid
- focal-xena:
voting: false
- impish-xena:
voting: false
- job:
name: focal-ussuri-ec-ceph-mon

pip.sh (new executable file, 18 lines)

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
#
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of tox.ini for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# setuptools 58.0 dropped the support for use_2to3=true which is needed to
# install blessings (an indirect dependency of charm-tools).
#
# More details on the behavior of tox and virtualenv creation can be found at
# https://github.com/tox-dev/tox/issues/448
#
# This script is a wrapper to force the use of the pinned versions early in
# the process, when the virtualenv is created and upgraded, before installing
# the dependencies declared in the target.
pip install 'pip<20.3' 'setuptools<50.0.0'
pip "$@"


@@ -0,0 +1,235 @@
variables:
openstack-origin: &openstack-origin cloud:focal-xena
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
'12':
'13':
'14':
'15':
'16':
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
cinder-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
nova-cloud-controller-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
placement-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
source: *openstack-origin
osd-devices: '/dev/test-non-existent'
to:
- '3'
- '4'
- '5'
ceph-mon:
charm: ../../../ceph-mon
num_units: 3
options:
source: *openstack-origin
monitor-count: '3'
to:
- '6'
- '7'
- '8'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: *openstack-origin
to:
- '9'
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '10'
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: *openstack-origin
libvirt-image-backend: rbd
to:
- '11'
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '12'
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: *openstack-origin
to:
- '13'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '14'
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '15'
prometheus2:
# Pin prometheus2 charm version Bug #1891942
charm: cs:prometheus2-18
num_units: 1
to:
- '16'
relations:
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-compute:image-service'
- 'glance:image-service'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:amqp'
- 'rabbitmq-server:amqp'
- - 'glance:ceph'
- 'ceph-mon:client'
- - 'cinder:shared-db'
- 'cinder-mysql-router:shared-db'
- - 'cinder-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:image-service'
- 'glance:image-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-mon:client'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-cloud-controller:shared-db'
- 'nova-cloud-controller-mysql-router:shared-db'
- - 'nova-cloud-controller-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-cloud-controller:identity-service'
- 'keystone:identity-service'
- - 'nova-cloud-controller:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:cloud-compute'
- 'nova-compute:cloud-compute'
- - 'nova-cloud-controller:image-service'
- 'glance:image-service'
- - 'placement:shared-db'
- 'placement-mysql-router:shared-db'
- - 'placement-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'placement'
- 'keystone'
- - 'placement'
- 'nova-cloud-controller'
- - 'ceph-mon:prometheus'
- 'prometheus2:target'


@@ -0,0 +1,237 @@
variables:
openstack-origin: &openstack-origin distro
series: impish
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
'12':
'13':
'14':
'15':
'16':
series: focal
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
cinder-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
nova-cloud-controller-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
placement-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
source: *openstack-origin
osd-devices: '/dev/test-non-existent'
to:
- '3'
- '4'
- '5'
ceph-mon:
charm: ../../../ceph-mon
num_units: 3
options:
source: *openstack-origin
monitor-count: '3'
to:
- '6'
- '7'
- '8'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: *openstack-origin
to:
- '9'
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '10'
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: *openstack-origin
libvirt-image-backend: rbd
to:
- '11'
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '12'
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: *openstack-origin
to:
- '13'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '14'
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '15'
prometheus2:
# Pin prometheus2 charm version Bug #1891942
charm: cs:prometheus2-18
num_units: 1
series: focal
to:
- '16'
relations:
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-compute:image-service'
- 'glance:image-service'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:amqp'
- 'rabbitmq-server:amqp'
- - 'glance:ceph'
- 'ceph-mon:client'
- - 'cinder:shared-db'
- 'cinder-mysql-router:shared-db'
- - 'cinder-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:image-service'
- 'glance:image-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-mon:client'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-cloud-controller:shared-db'
- 'nova-cloud-controller-mysql-router:shared-db'
- - 'nova-cloud-controller-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-cloud-controller:identity-service'
- 'keystone:identity-service'
- - 'nova-cloud-controller:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:cloud-compute'
- 'nova-compute:cloud-compute'
- - 'nova-cloud-controller:image-service'
- 'glance:image-service'
- - 'placement:shared-db'
- 'placement-mysql-router:shared-db'
- - 'placement-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'placement'
- 'keystone'
- - 'placement'
- 'nova-cloud-controller'
- - 'ceph-mon:prometheus'
- 'prometheus2:target'


@@ -2,6 +2,7 @@ charm_name: ceph-mon
gate_bundles:
- groovy-victoria
- focal-xena
- focal-wallaby
- focal-victoria
- focal-ussuri-ec
@@ -20,6 +21,7 @@ dev_bundles:
- xenial-queens
- bionic-rocky
- hirsute-wallaby
- impish-xena
smoke_bundles:
- bionic-train
@@ -38,5 +40,6 @@ tests:
tests_options:
force_deploy:
- trusty-mitaka
- hirsute-wallaby
- groovy-victoria
- hirsute-wallaby
- impish-xena


@@ -22,8 +22,11 @@ skip_missing_interpreters = False
# * It is also necessary to pin virtualenv as a newer virtualenv would still
# lead to fetching the latest pip in the func* tox targets, see
# https://stackoverflow.com/a/38133283
requires = pip < 20.3
virtualenv < 20.0
requires =
pip < 20.3
virtualenv < 20.0
setuptools < 50.0.0
# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
minversion = 3.18.0
@@ -32,7 +35,7 @@ setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
CHARM_DIR={envdir}
install_command =
pip install {opts} {packages}
{toxinidir}/pip.sh install {opts} {packages}
commands = stestr run --slowest {posargs}
allowlist_externals = juju
passenv = HOME TERM CS_* OS_* TEST_*
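
Taken together with pip.sh above, the pins mean each tox environment is effectively bootstrapped as (sketch):

pip install 'pip<20.3' 'setuptools<50.0.0'   # pip.sh pins first
pip install {opts} {packages}                # then the requested install

so the venv never picks up setuptools 58.0+, which dropped the use_2to3 support that blessings (an indirect charm-tools dependency) still needs.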