Handle additional pools for CephFS

If ceph-fs requests that additional pools be added to the CephFS
share being created, then add them once the share is created.

This allows ceph-fs to specify replicated pools for the metadata and
default data pools, but then to extend the fs with EC pools.

Change-Id: I80b7a5cc87d7d53bb55d4d65999a0f9b3cdcb77d
This commit is contained in:
Liam Young 2020-09-10 14:34:58 +00:00
parent af0eac506d
commit 648cac3ba8
2 changed files with 52 additions and 8 deletions

View File

@ -750,6 +750,7 @@ def handle_create_cephfs(request, service):
"""
cephfs_name = request.get('mds_name')
data_pool = request.get('data_pool')
extra_pools = request.get('extra_pools', [])
metadata_pool = request.get('metadata_pool')
# Check if the user params were provided
if not cephfs_name or not data_pool or not metadata_pool:
@ -758,14 +759,12 @@ def handle_create_cephfs(request, service):
return {'exit-code': 1, 'stderr': msg}
# Sanity check that the required pools exist
if not pool_exists(service=service, name=data_pool):
msg = "CephFS data pool does not exist. Cannot create CephFS"
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
if not pool_exists(service=service, name=metadata_pool):
msg = "CephFS metadata pool does not exist. Cannot create CephFS"
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
for pool_name in [data_pool, metadata_pool] + extra_pools:
if not pool_exists(service=service, name=pool_name):
msg = "CephFS pool {} does not exist. Cannot create CephFS".format(
pool_name)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
if get_cephfs(service=service):
# CephFS new has already been called
@ -786,6 +785,14 @@ def handle_create_cephfs(request, service):
else:
log(err.output, level=ERROR)
return {'exit-code': 1, 'stderr': err.output}
for pool_name in extra_pools:
cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name,
pool_name]
try:
check_output(cmd)
except CalledProcessError as err:
log(err.output, level=ERROR)
return {'exit-code': 1, 'stderr': err.output}
def handle_rgw_region_set(request, service):

View File

@ -494,6 +494,43 @@ class CephBrokerTestCase(unittest.TestCase):
self.assertEqual(json.loads(rc)['exit-code'], 0)
self.assertEqual(json.loads(rc)['request-id'], '1ef5aede')
@patch.object(charms_ceph.broker, 'get_cephfs')
@patch.object(charms_ceph.broker, 'check_output')
@patch.object(charms_ceph.broker, 'pool_exists')
@patch.object(charms_ceph.broker, 'log')
def test_process_requests_create_cephfs_ec(self,
                                           mock_log,
                                           mock_pool_exists,
                                           check_output,
                                           get_cephfs):
    """A create-cephfs op with 'extra_pools' checks every pool for
    existence and attaches each extra pool via 'fs add_data_pool'."""
    # No pre-existing filesystem, and every pool lookup succeeds.
    get_cephfs.return_value = []
    mock_pool_exists.return_value = True
    op = {
        'op': 'create-cephfs',
        'mds_name': 'foo',
        'extra_pools': ['ec_pool'],
        'data_pool': 'data',
        'metadata_pool': 'metadata',
    }
    reqs = json.dumps({'api-version': 1,
                       'request-id': '1ef5aede',
                       'ops': [op]})
    rc = charms_ceph.broker.process_requests(reqs)
    # The extra pool must be existence-checked alongside the two
    # mandatory pools (order is not significant).
    expected_exists = [
        call(service='admin', name='data'),
        call(service='admin', name='ec_pool'),
        call(service='admin', name='metadata'),
    ]
    mock_pool_exists.assert_has_calls(expected_exists, any_order=True)
    # 'fs new' first, then one 'fs add_data_pool' per extra pool.
    check_output.assert_has_calls([
        call(['ceph', '--id', 'admin', 'fs', 'new', 'foo', 'metadata',
              'data']),
        call(['ceph', '--id', 'admin', 'fs', 'add_data_pool', 'foo',
              'ec_pool']),
    ])
    result = json.loads(rc)
    self.assertEqual(result['exit-code'], 0)
    self.assertEqual(result['request-id'], '1ef5aede')
@patch.object(charms_ceph.broker, 'check_output')
@patch.object(charms_ceph.broker, 'get_osd_weight')
@patch.object(charms_ceph.broker, 'log')