Updates for 20.08 cycle start for groovy and libs

- Adds groovy to the series in the metadata
- Classic charms: sync charm-helpers.
- Classic ceph-based charms: also sync charms.ceph
- Reactive charms: trigger a rebuild (see the rebuild-file sketch below)
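For the reactive charms, the rebuild is conventionally triggered by changing the contents of the charm's rebuild file; any content change (typically a fresh UUID) is enough for CI to rebuild the charm. A minimal sketch, assuming the conventional file name (the reactive-charm side is not shown in this commit):

# Sketch: bump the rebuild marker file used by reactive charm CI.
# The file name 'rebuild' and the UUID convention are assumptions based
# on the usual openstack-charmers layout, not taken from this commit.
import uuid

with open('rebuild', 'w') as f:
    f.write('# This file is used to trigger a rebuild.\n')
    f.write(str(uuid.uuid4()) + '\n')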

Change-Id: I523f4cbd22f65faa0959943324101a114fa16ae8
Alex Kavanagh 2020-06-02 14:22:19 +01:00
parent bcdd74ed92
commit 385b65439b
6 changed files with 59 additions and 32 deletions


@@ -1,5 +1,5 @@
 #!/usr/bin/make
-PYTHON := /usr/bin/env python
+PYTHON := /usr/bin/env python3
 lint:
 	@tox -e pep8


@@ -225,7 +225,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('train',
         ['2.22.0', '2.23.0']),
     ('ussuri',
-        ['2.24.0']),
+        ['2.24.0', '2.25.0']),
 ])
 # >= Liberty version->codename mapping
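This table is what lets charm-helpers map an installed swift package version back to an OpenStack release name; without the new 2.25.0 entry, the later ussuri point release could not be resolved. A minimal sketch of that lookup over a simplified table (an illustration, not the charm-helpers implementation):

from collections import OrderedDict

# Simplified version->codename table in the shape of SWIFT_CODENAMES.
SWIFT_CODENAMES = OrderedDict([
    ('train', ['2.22.0', '2.23.0']),
    ('ussuri', ['2.24.0', '2.25.0']),
])

def codename_for_swift_version(version):
    # Return the release whose version list contains `version`.
    for codename, versions in SWIFT_CODENAMES.items():
        if version in versions:
            return codename
    raise KeyError('unknown swift version: {}'.format(version))

print(codename_for_swift_version('2.25.0'))  # ussuri; KeyError before this change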


@@ -248,7 +248,7 @@ def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
     @param relation_id: the id of the relation to store the data on. Defaults
                         to the current relation.
     @param peer_store_fatal: Set to True, the function will raise an exception
-        should the peer sotrage not be avialable."""
+        should the peer storage not be available."""
     relation_settings = relation_settings if relation_settings else {}
     relation_set(relation_id=relation_id,
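A usage sketch based only on the parameters visible in this hunk; the import path is the conventional charm-helpers location and is an assumption:

# Store settings on the current relation and mirror them into the
# 'cluster' peer relation; with peer_store_fatal=True an exception is
# raised should the peer storage not be available yet.
from charmhelpers.contrib.peerstorage import peer_store_and_set

peer_store_and_set(relation_id=None,
                   peer_relation_name='cluster',
                   peer_store_fatal=True,
                   relation_settings={'shared-secret': 'example'})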


@@ -92,6 +92,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100
 DEFAULT_POOL_WEIGHT = 10.0
 LEGACY_PG_COUNT = 200
 DEFAULT_MINIMUM_PGS = 2
+AUTOSCALER_DEFAULT_PGS = 32


 class OsdPostUpgradeError(Exception):
@@ -399,16 +400,28 @@ class ReplicatedPool(Pool):
     def create(self):
         if not pool_exists(self.service, self.name):
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             # Create it
-            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-                   self.name, str(self.pg_num)]
+            if nautilus_or_later:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    '--pg-num-min={}'.format(
+                        min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
+                    ),
+                    self.name, str(self.pg_num)
+                ]
+            else:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    self.name, str(self.pg_num)
+                ]
             try:
                 check_call(cmd)
                 # Set the pool replica size
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
-                nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
                 if nautilus_or_later:
                     # Ensure we set the expected pool ratio
                     update_pool(client=self.service,
@@ -419,13 +432,13 @@ class ReplicatedPool(Pool):
                                 pool=self.name,
                                 name=self.app_name)
                 except CalledProcessError:
-                    log('Could not set app name for pool {}'.format(self.name, level=WARNING))
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
                 if 'pg_autoscaler' in enabled_manager_modules():
                     try:
                         enable_pg_autoscale(self.service, self.name)
                     except CalledProcessError as e:
                         log('Could not configure auto scaling for pool {}: {}'.format(
                             self.name, e, level=WARNING))
             except CalledProcessError:
                 raise
@@ -466,10 +479,24 @@ class ErasurePool(Pool):
         k = int(erasure_profile['k'])
         m = int(erasure_profile['m'])
         pgs = self.get_pgs(k + m, self.percent_data)
+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         # Create it
-        cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-               self.name, str(pgs), str(pgs),
-               'erasure', self.erasure_code_profile]
+        if nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, pgs)
+                ),
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
         try:
             check_call(cmd)
             try:
@@ -477,8 +504,7 @@ class ErasurePool(Pool):
                             pool=self.name,
                             name=self.app_name)
             except CalledProcessError:
-                log('Could not set app name for pool {}'.format(self.name, level=WARNING))
-            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+                log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             if nautilus_or_later:
                 # Ensure we set the expected pool ratio
                 update_pool(client=self.service,
@@ -489,7 +515,7 @@ class ErasurePool(Pool):
                     enable_pg_autoscale(self.service, self.name)
                 except CalledProcessError as e:
                     log('Could not configure auto scaling for pool {}: {}'.format(
-                        self.name, e, level=WARNING))
+                        self.name, e), level=WARNING)
         except CalledProcessError:
             raise
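Both pool classes gain the same gate: on Nautilus or later, pass --pg-num-min so the pg_autoscaler cannot shrink the pool below a sensible floor, clamped so it never exceeds the requested pg count. A standalone sketch of the command construction (simplified from the diff, not the charms.ceph API):

AUTOSCALER_DEFAULT_PGS = 32

def pool_create_cmd(service, name, pg_num, nautilus_or_later):
    # On Nautilus+ add --pg-num-min, clamped so that a pool asking for
    # fewer than 32 PGs does not get a minimum larger than its pg_num.
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create']
    if nautilus_or_later:
        cmd.append('--pg-num-min={}'.format(min(AUTOSCALER_DEFAULT_PGS, pg_num)))
    return cmd + [name, str(pg_num)]

print(pool_create_cmd('admin', 'small-pool', 8, True))
# ['ceph', '--id', 'admin', 'osd', 'pool', 'create', '--pg-num-min=8',
#  'small-pool', '8']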


@@ -69,7 +69,7 @@ def _snap_exec(commands):
                           .format(SNAP_NO_LOCK_RETRY_COUNT))
             return_code = e.returncode
             log('Snap failed to acquire lock, trying again in {} seconds.'
-                .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN'))
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
             sleep(SNAP_NO_LOCK_RETRY_DELAY)
     return return_code
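This hunk, like the two log() fixes in the ceph file, corrects the same misplaced parenthesis: level was being passed to str.format(), which silently ignores unused keyword arguments, so the message was logged at the default level instead of the intended one. A minimal demonstration with a stub log() (not the charm-helpers one):

def log(message, level='INFO'):
    # Stub standing in for charm-helpers' log(), for demonstration only.
    print('[{}] {}'.format(level, message))

delay = 10
# Buggy: str.format() swallows the unused level kwarg; logged at INFO.
log('Snap failed to acquire lock, trying again in {} seconds.'
    .format(delay, level='WARN'))
# Fixed: level reaches log() itself; logged at WARN.
log('Snap failed to acquire lock, trying again in {} seconds.'
    .format(delay), level='WARN')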


@@ -2,25 +2,26 @@ name: nova-cloud-controller
 summary: OpenStack Compute - Nova cloud controller service
 maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
 description: |
-  OpenStack is a reliable cloud infrastructure. Its mission is to produce
-  the ubiquitous cloud computing platform that will meet the needs of public
-  and private cloud providers regardless of size, by being simple to implement
-  and massively scalable.
-  .
-  OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In
-  addition to its "native" API (the OpenStack API), it also supports the Amazon
-  EC2 API.
-  .
-  This charm provides the cloud controller service for OpenStack Nova and includes
-  nova-scheduler, nova-api and nova-conductor services.
+  OpenStack is a reliable cloud infrastructure. Its mission is to produce
+  the ubiquitous cloud computing platform that will meet the needs of public
+  and private cloud providers regardless of size, by being simple to implement
+  and massively scalable.
+  .
+  OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In
+  addition to its "native" API (the OpenStack API), it also supports the Amazon
+  EC2 API.
+  .
+  This charm provides the cloud controller service for OpenStack Nova and includes
+  nova-scheduler, nova-api and nova-conductor services.
 tags:
-- openstack
+- openstack
 series:
-- xenial
-- bionic
-- eoan
-- focal
-- trusty
+- xenial
+- bionic
+- eoan
+- focal
+- trusty
+- groovy
 extra-bindings:
   public:
   admin:
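A quick check that a charm's metadata now includes the new series; a sketch using PyYAML, assuming it is run from the charm root:

import yaml

# Load the charm metadata and confirm groovy made it into the series
# list; the assertion and the relative path are illustrative.
with open('metadata.yaml') as f:
    metadata = yaml.safe_load(f)

assert 'groovy' in metadata['series'], 'groovy missing from series'
print('series:', metadata['series'])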