Add yoga bundles and release-tool syncs

* charm-helpers sync for classic charms
* sync from release-tools
* switch to release-specific zosci functional tests
* run focal-ussuri as smoke tests
* remove trusty, xenial, and groovy metadata/tests
* drop py35 and add py39
* charms.ceph sync

Change-Id: I2dda45edafeee8173a9fcb174f3dc18718d664e3
This commit is contained in:
Corey Bryant 2021-10-29 17:00:39 -04:00
parent 77c75f62f9
commit 05da8ca6ae
19 changed files with 482 additions and 542 deletions

View File

@ -1413,7 +1413,8 @@ def incomplete_relation_data(configs, required_interfaces):
for i in incomplete_relations}
def do_action_openstack_upgrade(package, upgrade_callback, configs):
def do_action_openstack_upgrade(package, upgrade_callback, configs,
force_upgrade=False):
"""Perform action-managed OpenStack upgrade.
Upgrades packages to the configured openstack-origin version and sets
@ -1427,12 +1428,13 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
@param package: package name for determining if upgrade available
@param upgrade_callback: function callback to charm's upgrade function
@param configs: templating object derived from OSConfigRenderer class
@param force_upgrade: perform dist-upgrade regardless of new openstack
@return: True if upgrade successful; False if upgrade failed or skipped
"""
ret = False
if openstack_upgrade_available(package):
if openstack_upgrade_available(package) or force_upgrade:
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
@ -2599,6 +2601,23 @@ def get_subordinate_release_packages(os_release, package_type='deb'):
return SubordinatePackages(install, purge)
def get_subordinate_services():
    """Iterate over subordinate relations and get service information.

    In a similar fashion as with get_subordinate_release_packages(),
    principal charms can retrieve a list of services advertised by their
    subordinate charms.  This is useful to know about subordinate services
    when pausing, resuming or upgrading a principal unit.

    :returns: Name of all services advertised by all subordinates
    :rtype: Set[str]
    """
    # Each subordinate publishes a JSON-encoded list on the 'services'
    # relation key; missing/empty values decode to an empty list.
    collected = set()
    for payload in container_scoped_relation_get('services'):
        collected.update(json.loads(payload or '[]'))
    return collected
os_restart_on_change = partial(
pausable_restart_on_change,
can_restart_now_f=deferred_events.check_and_record_restart_request,

View File

@ -294,7 +294,6 @@ class BasePool(object):
# NOTE: Do not perform initialization steps that require live data from
# a running cluster here. The *Pool classes may be used for validation.
self.service = service
self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
self.op = op or {}
if op:
@ -341,7 +340,8 @@ class BasePool(object):
Do not add calls for a specific pool type here, those should go into
one of the pool specific classes.
"""
if self.nautilus_or_later:
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
if nautilus_or_later:
# Ensure we set the expected pool ratio
update_pool(
client=self.service,
@ -660,8 +660,9 @@ class ReplicatedPool(BasePool):
else:
self.pg_num = self.get_pgs(self.replicas, self.percent_data)
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
# Create it
if self.nautilus_or_later:
if nautilus_or_later:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
'--pg-num-min={}'.format(
@ -745,9 +746,9 @@ class ErasurePool(BasePool):
k = int(erasure_profile['k'])
m = int(erasure_profile['m'])
pgs = self.get_pgs(k + m, self.percent_data)
self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
# Create it
if self.nautilus_or_later:
if nautilus_or_later:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
'--pg-num-min={}'.format(

View File

@ -29,6 +29,7 @@ UBUNTU_RELEASES = (
'groovy',
'hirsute',
'impish',
'jammy',
)

View File

@ -275,6 +275,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('groovy', 'victoria'),
('hirsute', 'wallaby'),
('impish', 'xena'),
('jammy', 'yoga'),
])

View File

@ -3169,6 +3169,8 @@ UPGRADE_PATHS = collections.OrderedDict([
('luminous', 'mimic'),
('mimic', 'nautilus'),
('nautilus', 'octopus'),
('octopus', 'pacific'),
('pacific', 'quincy'),
])
# Map UCA codenames to ceph codenames
@ -3186,6 +3188,10 @@ UCA_CODENAME_MAP = {
'stein': 'mimic',
'train': 'nautilus',
'ussuri': 'octopus',
'victoria': 'octopus',
'wallaby': 'pacific',
'xena': 'pacific',
'yoga': 'quincy',
}

View File

@ -10,7 +10,6 @@ tags:
- file-servers
- misc
series:
- xenial
- bionic
- focal
- groovy

View File

@ -1,31 +1,26 @@
- project:
templates:
- charm-unit-jobs
- charm-yoga-unit-jobs
- charm-yoga-functional-jobs
- charm-xena-functional-jobs
- charm-wallaby-functional-jobs
- charm-victoria-functional-jobs
- charm-ussuri-functional-jobs
- charm-stein-functional-jobs
- charm-queens-functional-jobs
check:
jobs:
- bionic-queens # luminous
- bionic-stein
- bionic-train
- bionic-ussuri
- focal-ussuri
- focal-ussuri-ec
- focal-victoria
- focal-victoria-ec
- focal-wallaby
- focal-wallaby-ec
- focal-xena:
- focal-xena-ec
- focal-yoga-ec:
voting: false
- focal-wallaby-ec:
voting: false
- groovy-victoria
- groovy-victoria-ec
- hirsute-wallaby
- hirsute-wallaby-ec
- impish-xena:
voting: false
- impish-xena-ec:
voting: false
- hirsute-wallaby-ec
- jammy-yoga-ec:
voting: false
- job:
name: focal-ussuri-ec
parent: func-target
@ -46,11 +41,17 @@
vars:
tox_extra_args: erasure-coded:focal-wallaby-ec
- job:
name: groovy-victoria-ec
name: focal-xena-ec
parent: func-target
dependencies: *smoke-jobs
vars:
tox_extra_args: erasure-coded:groovy-victoria-ec
tox_extra_args: erasure-coded:focal-xena-ec
- job:
name: focal-yoga-ec
parent: func-target
dependencies: *smoke-jobs
vars:
tox_extra_args: erasure-coded:focal-yoga-ec
- job:
name: hirsute-wallaby-ec
parent: func-target
@ -63,3 +64,9 @@
dependencies: *smoke-jobs
vars:
tox_extra_args: erasure-coded:impish-xena-ec
- job:
name: jammy-yoga-ec
parent: func-target
dependencies: *smoke-jobs
vars:
tox_extra_args: erasure-coded:jammy-yoga-ec

View File

@ -7,6 +7,8 @@
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here.
cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35.
setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
requests>=2.18.4

View File

@ -0,0 +1,215 @@
variables:
openstack-origin: &openstack-origin cloud:focal-yoga
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
'12':
'13':
'14':
'15':
'16':
'17':
'18':
applications:
cinder-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
expected-osd-count: 3
source: *openstack-origin
to:
- '3'
- '4'
- '5'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 6
storage:
osd-devices: 10G
options:
source: *openstack-origin
to:
- '6'
- '7'
- '8'
- '16'
- '17'
- '18'
ceph-proxy:
charm: ceph-proxy
num_units: 1
options:
source: *openstack-origin
to:
- '9'
ceph-radosgw:
charm: cs:~openstack-charmers-next/ceph-radosgw
num_units: 1
options:
source: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
to:
- '10'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
openstack-origin: *openstack-origin
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
to:
- '11'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
restrict-ceph-pools: True
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ec-profile-plugin: lrc
ec-profile-locality: 3
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
admin-password: openstack
constraints: mem=1024
to:
- '12'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
constraints: mem=1024
options:
source: *openstack-origin
to:
- '13'
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ec-profile-plugin: jerasure
to:
- '14'
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ec-profile-plugin: isa
libvirt-image-backend: rbd
to:
- '15'
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'cinder-mysql-router:shared-db'
- - 'cinder-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:ceph'
- 'ceph-proxy:client'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-compute:ceph'
- 'ceph-proxy:client'

View File

@ -0,0 +1,186 @@
variables:
openstack-origin: &openstack-origin cloud:focal-yoga
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
'12':
'13':
'14':
'15':
applications:
cinder-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
expected-osd-count: 3
source: *openstack-origin
to:
- '3'
- '4'
- '5'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 10G
options:
source: *openstack-origin
to:
- '6'
- '7'
- '8'
ceph-proxy:
charm: ceph-proxy
num_units: 1
options:
source: *openstack-origin
to:
- '9'
ceph-radosgw:
charm: cs:~openstack-charmers-next/ceph-radosgw
num_units: 1
options:
source: *openstack-origin
to:
- '10'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
openstack-origin: *openstack-origin
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
to:
- '11'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
restrict-ceph-pools: True
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
admin-password: openstack
constraints: mem=1024
to:
- '12'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
constraints: mem=1024
options:
source: *openstack-origin
to:
- '13'
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '14'
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '15'
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'cinder-mysql-router:shared-db'
- - 'cinder-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'

View File

@ -1,7 +1,7 @@
variables:
openstack-origin: &openstack-origin distro
series: groovy
series: jammy
comment:
- 'machines section to decide order of deployment. database sooner = faster'

View File

@ -1,7 +1,7 @@
variables:
openstack-origin: &openstack-origin distro
series: groovy
series: jammy
comment:
- 'machines section to decide order of deployment. database sooner = faster'

View File

@ -1,115 +0,0 @@
series: trusty
applications:
ceph-mon:
charm: 'cs:~openstack-charmers-next/ceph-mon'
num_units: 3
options:
expected-osd-count: 3
source: cloud:trusty-mitaka
ceph-osd:
charm: 'cs:~openstack-charmers-next/ceph-osd'
num_units: 3
storage:
osd-devices: 10G
options:
source: cloud:trusty-mitaka
ceph-proxy:
charm: 'ceph-proxy'
num_units: 1
options:
source: cloud:trusty-mitaka
ceph-radosgw:
charm: 'cs:~openstack-charmers-next/ceph-radosgw'
num_units: 1
options:
source: trusty-mitaka
cinder:
charm: 'cs:~openstack-charmers-next/cinder'
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
cinder-ceph:
charm: 'cs:~openstack-charmers-next/cinder-ceph'
options:
restrict-ceph-pools: True
keystone:
charm: 'cs:~openstack-charmers-next/keystone'
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
constraints: mem=1024
percona-cluster:
charm: 'cs:trusty/percona-cluster'
num_units: 1
options:
source: cloud:trusty-mitaka
dataset-size: 50%
max-connections: 1000
innodb-buffer-pool-size: 256M
root-password: ChangeMe123
sst-password: ChangeMe123
constraints: mem=4096
rabbitmq-server:
charm: 'cs:~openstack-charmers-next/rabbitmq-server'
num_units: 1
constraints: mem=1024
options:
source: cloud:trusty-mitaka
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'percona-cluster:shared-db'
- - 'keystone:shared-db'
- 'percona-cluster:shared-db'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'percona-cluster:shared-db'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'glance:image-service'
- 'nova-cloud-controller:image-service'
- - 'keystone:identity-service'
- 'nova-cloud-controller:identity-service'
- - 'nova-compute:cloud-compute'
- 'nova-cloud-controller:cloud-compute'
- - 'percona-cluster:shared-db'
- 'nova-cloud-controller:shared-db'
- - 'rabbitmq-server:amqp'
- 'nova-cloud-controller:amqp'

View File

@ -1,85 +0,0 @@
series: xenial
applications:
ceph-mon:
charm: 'cs:~openstack-charmers-next/ceph-mon'
num_units: 3
options:
expected-osd-count: 3
ceph-osd:
charm: 'cs:~openstack-charmers-next/ceph-osd'
num_units: 3
storage:
osd-devices: 10G
options:
ceph-proxy:
charm: 'ceph-proxy'
num_units: 1
options:
ceph-radosgw:
charm: 'cs:~openstack-charmers-next/ceph-radosgw'
num_units: 1
options:
cinder:
charm: 'cs:~openstack-charmers-next/cinder'
num_units: 1
options:
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
cinder-ceph:
charm: 'cs:~openstack-charmers-next/cinder-ceph'
options:
restrict-ceph-pools: True
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
keystone:
charm: 'cs:~openstack-charmers-next/keystone'
num_units: 1
constraints: mem=1024
percona-cluster:
charm: 'cs:~openstack-charmers-next/percona-cluster'
num_units: 1
options:
dataset-size: 50%
max-connections: 1000
innodb-buffer-pool-size: 256M
root-password: ChangeMe123
sst-password: ChangeMe123
constraints: mem=4096
rabbitmq-server:
charm: 'cs:~openstack-charmers-next/rabbitmq-server'
num_units: 1
constraints: mem=1024
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'percona-cluster:shared-db'
- - 'keystone:shared-db'
- 'percona-cluster:shared-db'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'percona-cluster:shared-db'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'

View File

@ -1,99 +0,0 @@
series: xenial
applications:
ceph-mon:
charm: 'cs:~openstack-charmers-next/ceph-mon'
num_units: 3
options:
expected-osd-count: 3
source: cloud:xenial-ocata
ceph-osd:
charm: 'cs:~openstack-charmers-next/ceph-osd'
num_units: 3
storage:
osd-devices: 10G
options:
source: cloud:xenial-ocata
ceph-proxy:
charm: 'ceph-proxy'
num_units: 1
options:
source: cloud:xenial-ocata
ceph-radosgw:
charm: 'cs:~openstack-charmers-next/ceph-radosgw'
num_units: 1
options:
source: xenial-ocata
cinder:
charm: 'cs:~openstack-charmers-next/cinder'
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
cinder-ceph:
charm: 'cs:~openstack-charmers-next/cinder-ceph'
options:
restrict-ceph-pools: True
keystone:
charm: 'cs:~openstack-charmers-next/keystone'
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
constraints: mem=1024
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
percona-cluster:
charm: 'cs:~openstack-charmers-next/percona-cluster'
num_units: 1
options:
source: cloud:xenial-ocata
dataset-size: 50%
max-connections: 1000
innodb-buffer-pool-size: 256M
root-password: ChangeMe123
sst-password: ChangeMe123
constraints: mem=4096
rabbitmq-server:
charm: 'cs:~openstack-charmers-next/rabbitmq-server'
num_units: 1
constraints: mem=1024
options:
source: cloud:xenial-ocata
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'percona-cluster:shared-db'
- - 'keystone:shared-db'
- 'percona-cluster:shared-db'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'percona-cluster:shared-db'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'

View File

@ -1,99 +0,0 @@
series: bionic
applications:
ceph-mon:
charm: 'cs:~openstack-charmers-next/ceph-mon'
num_units: 3
options:
expected-osd-count: 3
source: cloud:xenial-pike
ceph-osd:
charm: 'cs:~openstack-charmers-next/ceph-osd'
num_units: 3
storage:
osd-devices: 10G
options:
source: cloud:xenial-pike
ceph-proxy:
charm: 'ceph-proxy'
num_units: 1
options:
source: cloud:xenial-pike
ceph-radosgw:
charm: 'cs:~openstack-charmers-next/ceph-radosgw'
num_units: 1
options:
source: xenial-pike
cinder:
charm: 'cs:~openstack-charmers-next/cinder'
num_units: 1
options:
openstack-origin: cloud:xenial-pike
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
cinder-ceph:
charm: 'cs:~openstack-charmers-next/cinder-ceph'
options:
restrict-ceph-pools: True
keystone:
charm: 'cs:~openstack-charmers-next/keystone'
num_units: 1
options:
openstack-origin: cloud:xenial-pike
constraints: mem=1024
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-pike
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-pike
percona-cluster:
charm: 'cs:~openstack-charmers-next/percona-cluster'
num_units: 1
options:
source: cloud:xenial-pike
dataset-size: 50%
max-connections: 1000
innodb-buffer-pool-size: 256M
root-password: ChangeMe123
sst-password: ChangeMe123
constraints: mem=4096
rabbitmq-server:
charm: 'cs:~openstack-charmers-next/rabbitmq-server'
num_units: 1
constraints: mem=1024
options:
source: cloud:xenial-pike
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'percona-cluster:shared-db'
- - 'keystone:shared-db'
- 'percona-cluster:shared-db'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'percona-cluster:shared-db'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'

View File

@ -1,99 +0,0 @@
series: xenial
applications:
ceph-mon:
charm: 'cs:~openstack-charmers-next/ceph-mon'
num_units: 3
options:
expected-osd-count: 3
source: cloud:xenial-queens
ceph-osd:
charm: 'cs:~openstack-charmers-next/ceph-osd'
num_units: 3
storage:
osd-devices: 10G
options:
source: cloud:xenial-queens
ceph-proxy:
charm: 'ceph-proxy'
num_units: 1
options:
source: cloud:xenial-queens
ceph-radosgw:
charm: 'cs:~openstack-charmers-next/ceph-radosgw'
num_units: 1
options:
source: cloud:xenial-queens
cinder:
charm: 'cs:~openstack-charmers-next/cinder'
num_units: 1
options:
openstack-origin: cloud:xenial-queens
block-device: ""
ephemeral-unmount: ""
glance-api-version: 2
overwrite: "false"
constraints: mem=2048
cinder-ceph:
charm: 'cs:~openstack-charmers-next/cinder-ceph'
options:
restrict-ceph-pools: True
keystone:
charm: 'cs:~openstack-charmers-next/keystone'
num_units: 1
options:
openstack-origin: cloud:xenial-queens
constraints: mem=1024
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-queens
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-queens
percona-cluster:
charm: 'cs:~openstack-charmers-next/percona-cluster'
num_units: 1
options:
source: cloud:xenial-queens
dataset-size: 50%
max-connections: 1000
innodb-buffer-pool-size: 256M
root-password: ChangeMe123
sst-password: ChangeMe123
constraints: mem=4096
rabbitmq-server:
charm: 'cs:~openstack-charmers-next/rabbitmq-server'
num_units: 1
constraints: mem=1024
options:
source: cloud:xenial-queens
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-proxy:radosgw'
- 'ceph-radosgw:mon'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'cinder:shared-db'
- 'percona-cluster:shared-db'
- - 'keystone:shared-db'
- 'percona-cluster:shared-db'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder-ceph:storage-backend'
- 'cinder:storage-backend'
- - 'cinder-ceph:ceph'
- 'ceph-proxy:client'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'percona-cluster:shared-db'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'

View File

@ -12,10 +12,8 @@ tests:
- zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
gate_bundles:
- xenial-mitaka # jewel
- bionic-queens # luminous
- bionic-queens
- bionic-stein
- bionic-train
- bionic-ussuri
- focal-ussuri
- erasure-coded: focal-ussuri-ec
@ -25,24 +23,19 @@ gate_bundles:
- erasure-coded: focal-wallaby-ec
- focal-xena
- erasure-coded: focal-xena-ec
- groovy-victoria
- erasure-coded: groovy-victoria-ec
dev_bundles:
# Icehouse
- trusty-icehouse
# Jewel
- trusty-mitaka
- xenial-ocata
# Pike
- xenial-pike
- xenial-queens # luminous
- bionic-rocky # mimic
- hirsute-wallaby
- erasure-coded: hirsute-wallaby-ec
- impish-xena
- erasure-coded: impish-xena-ec
dev_bundles:
- bionic-rocky # mimic
- bionic-train
- focal-yoga
- erasure-coded: focal-yoga-ec
- jammy-yoga
- erasure-coded: jammy-yoga-ec
smoke_bundles:
- focal-ussuri
@ -75,3 +68,5 @@ tests_options:
- hirsute-wallaby-ec
- impish-xena
- impish-xena-ec
- jammy-yoga
- jammy-yoga-ec

View File

@ -61,6 +61,11 @@ basepython = python3.8
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py39]
basepython = python3.9
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt