Centralize pool mirror eligibility decision making

Pool mirror eligibility checks are currently implemented as generator
expressions repeated wherever they are needed.

Centralize this in one function.

Change-Id: I6e151621cefb4fe2c6339c9b16cd31c01b95e6f2
This commit is contained in:
Frode Nordahl 2019-03-30 18:25:58 +01:00 committed by Frode Nordahl
parent 87677bf23d
commit 0770640158
5 changed files with 45 additions and 28 deletions

View File

@ -44,8 +44,7 @@ def rbd_mirror_action(args):
action_name = os.path.basename(args[0])
with charms_openstack.charm.provide_charm_instance() as charm:
ceph_local = reactive.endpoint_from_name('ceph-local')
pools = (pool for pool, attrs in ceph_local.pools.items()
if 'rbd' in attrs['applications'])
pools = charm.eligible_pools(ceph_local.pools)
result = {}
cmd = ['rbd', '--id', charm.ceph_id, 'mirror', 'pool', action_name]
if ch_core.hookenv.action_get('force'):

View File

@ -59,6 +59,20 @@ class CephRBDMirrorCharm(charms_openstack.plugins.CephCharm):
}
super().__init__(**kwargs)
def eligible_pools(self, pools):
    """Filter out pools that are not eligible for mirroring.

    :param pools: Dictionary with detailed pool information as provided
                  over the ``ceph-rbd-mirror`` interface provided by the
                  ``ceph-mon`` charm.
    :type pools: dict
    :returns: Dictionary with detailed pool information for pools eligible
              for mirroring.
    :rtype: dict
    """
    eligible = {}
    for name, details in pools.items():
        # A pool qualifies for mirroring when the ``rbd`` application
        # has been enabled on it.
        if 'rbd' in details['applications']:
            eligible[name] = details
    return eligible
def custom_assess_status_check(self):
"""Provide mirrored pool statistics through juju status."""
if (reactive.is_flag_set('config.rendered') and
@ -66,8 +80,7 @@ class CephRBDMirrorCharm(charms_openstack.plugins.CephCharm):
reactive.is_flag_set('ceph-remote.available')):
endpoint = reactive.endpoint_from_flag('ceph-local.available')
stats = self.mirror_pools_summary(
(pool for pool, attrs in endpoint.pools.items()
if 'rbd' in attrs['applications']))
self.eligible_pools(endpoint.pools))
ch_core.hookenv.log('mirror_pools_summary = "{}"'
.format(stats),
level=ch_core.hookenv.DEBUG)

View File

@ -110,27 +110,26 @@ def configure_pools():
local = reactive.endpoint_from_flag('ceph-local.available')
remote = reactive.endpoint_from_flag('ceph-remote.available')
with charm.provide_charm_instance() as charm_instance:
for pool, attrs in local.pools.items():
if 'rbd' in attrs['applications']:
if not (charm_instance.mirror_pool_enabled(pool) and
charm_instance.mirror_pool_has_peers(pool)):
charm_instance.mirror_pool_enable(pool)
pg_num = attrs['parameters'].get('pg_num', None)
max_bytes = attrs['quota'].get('max_bytes', None)
max_objects = attrs['quota'].get('max_objects', None)
if 'erasure_code_profile' in attrs['parameters']:
ec_profile = attrs['parameters'].get(
'erasure_code_profile', None)
remote.create_erasure_pool(pool,
erasure_profile=ec_profile,
pg_num=pg_num,
app_name='rbd',
max_bytes=max_bytes,
max_objects=max_objects)
else:
size = attrs['parameters'].get('size', None)
remote.create_replicated_pool(pool, replicas=size,
pg_num=pg_num,
app_name='rbd',
max_bytes=max_bytes,
max_objects=max_objects)
for pool, attrs in charm_instance.eligible_pools(local.pools).items():
if not (charm_instance.mirror_pool_enabled(pool) and
charm_instance.mirror_pool_has_peers(pool)):
charm_instance.mirror_pool_enable(pool)
pg_num = attrs['parameters'].get('pg_num', None)
max_bytes = attrs['quota'].get('max_bytes', None)
max_objects = attrs['quota'].get('max_objects', None)
if 'erasure_code_profile' in attrs['parameters']:
ec_profile = attrs['parameters'].get(
'erasure_code_profile', None)
remote.create_erasure_pool(pool,
erasure_profile=ec_profile,
pg_num=pg_num,
app_name='rbd',
max_bytes=max_bytes,
max_objects=max_objects)
else:
size = attrs['parameters'].get('size', None)
remote.create_replicated_pool(pool, replicas=size,
pg_num=pg_num,
app_name='rbd',
max_bytes=max_bytes,
max_objects=max_objects)

View File

@ -46,11 +46,13 @@ class TestCephRBDMirrorActions(test_utils.PatchHelper):
{'apool': {'applications': {'rbd': {}}},
'bpool': {'applications': {'rbd': {}}}})
self.endpoint_from_name.return_value = endpoint
self.crm_charm.eligible_pools.return_value = endpoint.pools
self.crm_charm.ceph_id = 'acephid'
self.action_get.return_value = False
self.check_output.return_value = 'Promoted 0 mirrored images\n'
actions.rbd_mirror_action(['promote'])
self.endpoint_from_name.assert_called_once_with('ceph-local')
self.crm_charm.eligible_pools.assert_called_once_with(endpoint.pools)
self.action_get.assert_has_calls([
mock.call('force'),
mock.call('verbose'),

View File

@ -180,12 +180,15 @@ class TestCephRBDMirrorHandlers(test_utils.PatchHelper):
endpoint_remote.endpoint_name = 'ceph-remote'
self.endpoint_from_flag.side_effect = [endpoint_local,
endpoint_remote]
self.crm_charm.eligible_pools.return_value = endpoint_local.pools
self.crm_charm.mirror_pool_enabled.return_value = False
handlers.configure_pools()
self.endpoint_from_flag.assert_has_calls([
mock.call('ceph-local.available'),
mock.call('ceph-remote.available'),
])
self.crm_charm.eligible_pools.assert_called_once_with(
endpoint_local.pools)
self.crm_charm.mirror_pool_enabled.assert_called_once_with(
'cinder-ceph')
self.crm_charm.mirror_pool_enable.assert_called_once_with(
@ -216,6 +219,7 @@ class TestCephRBDMirrorHandlers(test_utils.PatchHelper):
self.endpoint_from_flag.side_effect = [endpoint_local,
endpoint_remote]
endpoint_remote.create_replicated_pool.reset_mock()
self.crm_charm.eligible_pools.return_value = endpoint_local.pools
handlers.configure_pools()
endpoint_remote.create_erasure_pool.assert_called_once_with(
'cinder-ceph', erasure_profile='prof', pg_num=42, app_name='rbd',