Add BlueStore Compression support

Sync in updates from charm-helpers and charms.ceph.

Remove unit tests that belong to charms.ceph.

Depends-On: Ibec4e3221387199adbc1a920e130975d7b25343c
Change-Id: I153c22efb952fc38c5e3d36eed5d85c953e695f7
Frode Nordahl 2020-08-03 10:52:42 +02:00
parent c0113217bf
commit 210493277e
3 changed files with 37 additions and 103 deletions
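
Note: the synced charm-helpers code extends the broker request API so that consumer charms can ask for BlueStore compression on the pools they create. Below is a minimal sketch of such a request; the compression_* keyword names are assumptions mirroring Ceph's pool options and should be verified against the synced charm-helpers revision.

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    # Build a broker request for a replicated pool with BlueStore
    # compression enabled; extra keywords are forwarded into the op dict.
    rq = CephBrokerRq()
    rq.add_op_create_replicated_pool(
        name='cinder-ceph',
        replica_count=3,
        weight=40,
        compression_algorithm='zstd',   # assumed: lz4, snappy, zlib or zstd
        compression_mode='aggressive',  # assumed: none, passive, aggressive, force
    )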


@@ -705,12 +705,12 @@ class ErasurePool(BasePool):
             # from different handling of this in the `charms.ceph` library.
             self.erasure_code_profile = op.get('erasure-profile',
                                                'default-canonical')
             self.allow_ec_overwrites = op.get('allow-ec-overwrites')
         else:
             # We keep the class default when initialized from keyword arguments
             # to not break the API for any other consumers.
             self.erasure_code_profile = erasure_code_profile or 'default'
-        self.allow_ec_overwrites = allow_ec_overwrites
+            self.allow_ec_overwrites = allow_ec_overwrites
 
     def _create(self):
         # Try to find the erasure profile information in order to properly
@@ -1972,12 +1972,14 @@ class CephBrokerRq(object):
                            'request-id': self.request_id})
 
     def _ops_equal(self, other):
+        keys_to_compare = [
+            'replicas', 'name', 'op', 'pg_num', 'group-permission',
+            'object-prefix-permissions',
+        ]
+        keys_to_compare += list(self._partial_build_common_op_create().keys())
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in [
-                        'replicas', 'name', 'op', 'pg_num', 'weight',
-                        'group', 'group-namespace', 'group-permission',
-                        'object-prefix-permissions']:
+                for key in keys_to_compare:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:

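The `_ops_equal()` change above derives the comparison keys from `_partial_build_common_op_create()`, so any key added to the common op builder (such as the new compression settings) automatically participates in request equality checks. A self-contained illustration of the pattern, with a hypothetical stand-in for the real builder:

    # Hypothetical stand-in for CephBrokerRq._partial_build_common_op_create().
    def _common_op(app_name=None, weight=None, compression_algorithm=None):
        return {'app-name': app_name,
                'weight': weight,
                'compression-algorithm': compression_algorithm}

    keys_to_compare = ['replicas', 'name', 'op', 'pg_num']
    keys_to_compare += list(_common_op().keys())

    # Two ops differing only in a derived key now compare as unequal.
    op_a = {'op': 'create-pool', 'name': 'foo', 'compression-algorithm': 'zstd'}
    op_b = {'op': 'create-pool', 'name': 'foo', 'compression-algorithm': 'lz4'}
    assert any(op_a.get(key) != op_b.get(key) for key in keys_to_compare)

The remaining hunks update the broker module synced from charms.ceph and its unit tests accordingly.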

@@ -16,6 +16,7 @@ import collections
 import json
 import os
 
+from subprocess import check_call, check_output, CalledProcessError
 from tempfile import NamedTemporaryFile
 
 from charms_ceph.utils import (
@@ -41,18 +42,16 @@ from charmhelpers.contrib.storage.linux.ceph import (
     pool_set,
     remove_pool_snapshot,
     rename_pool,
-    set_pool_quota,
     snapshot_pool,
     validator,
     ErasurePool,
-    Pool,
+    BasePool,
     ReplicatedPool,
 )
 
 # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/
 # This should do a decent job of preventing people from passing in bad values.
 # It will give a useful error message
-from subprocess import check_call, check_output, CalledProcessError
 
 POOL_KEYS = {
     # "Ceph Key Name": [Python type, [Valid Range]]
@@ -405,23 +404,11 @@ def handle_erasure_pool(request, service):
     """
     pool_name = request.get('name')
    erasure_profile = request.get('erasure-profile')
-    max_bytes = request.get('max-bytes')
-    max_objects = request.get('max-objects')
-    weight = request.get('weight')
     group_name = request.get('group')
-    allow_ec_overwrites = request.get('allow-ec-overwrites')
 
     if erasure_profile is None:
         erasure_profile = "default-canonical"
 
-    app_name = request.get('app-name')
-
-    # Check for missing params
-    if pool_name is None:
-        msg = "Missing parameter. name is required for the pool"
-        log(msg, level=ERROR)
-        return {'exit-code': 1, 'stderr': msg}
-
     if group_name:
         group_namespace = request.get('group-namespace')
         # Add the pool to the group named "group_name"
@@ -437,21 +424,22 @@ def handle_erasure_pool(request, service):
         log(msg, level=ERROR)
         return {'exit-code': 1, 'stderr': msg}
 
-    pool = ErasurePool(service=service, name=pool_name,
-                       erasure_code_profile=erasure_profile,
-                       percent_data=weight,
-                       app_name=app_name,
-                       allow_ec_overwrites=allow_ec_overwrites)
+    try:
+        pool = ErasurePool(service=service,
+                           op=request)
+    except KeyError:
+        msg = "Missing parameter."
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
     # Ok make the erasure pool
     if not pool_exists(service=service, name=pool_name):
         log("Creating pool '{}' (erasure_profile={})"
             .format(pool.name, erasure_profile), level=INFO)
         pool.create()
 
-    # Set a quota if requested
-    if max_bytes or max_objects:
-        set_pool_quota(service=service, pool_name=pool_name,
-                       max_bytes=max_bytes, max_objects=max_objects)
+    # Set/update properties that are allowed to change after pool creation.
+    pool.update()
 
 
 def handle_replicated_pool(request, service):
@@ -462,26 +450,19 @@ def handle_replicated_pool(request, service):
     :returns: dict. exit-code and reason if not 0.
     """
     pool_name = request.get('name')
-    replicas = request.get('replicas')
-    max_bytes = request.get('max-bytes')
-    max_objects = request.get('max-objects')
-    weight = request.get('weight')
     group_name = request.get('group')
 
     # Optional params
+    # NOTE: Check this against the handling in the Pool classes, reconcile and
+    # remove.
     pg_num = request.get('pg_num')
+    replicas = request.get('replicas')
     if pg_num:
         # Cap pg_num to max allowed just in case.
         osds = get_osds(service)
         if osds:
             pg_num = min(pg_num, (len(osds) * 100 // replicas))
-
-    app_name = request.get('app-name')
-
-    # Check for missing params
-    if pool_name is None or replicas is None:
-        msg = "Missing parameter. name and replicas are required"
-        log(msg, level=ERROR)
-        return {'exit-code': 1, 'stderr': msg}
+        request.update({'pg_num': pg_num})
 
     if group_name:
         group_namespace = request.get('group-namespace')
@@ -490,18 +471,14 @@ def handle_replicated_pool(request, service):
                           group=group_name,
                           namespace=group_namespace)
 
-    kwargs = {}
-    if pg_num:
-        kwargs['pg_num'] = pg_num
-    if weight:
-        kwargs['percent_data'] = weight
-    if replicas:
-        kwargs['replicas'] = replicas
-    if app_name:
-        kwargs['app_name'] = app_name
-
-    pool = ReplicatedPool(service=service,
-                          name=pool_name, **kwargs)
+    try:
+        pool = ReplicatedPool(service=service,
+                              op=request)
+    except KeyError:
+        msg = "Missing parameter."
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
     if not pool_exists(service=service, name=pool_name):
         log("Creating pool '{}' (replicas={})".format(pool.name, replicas),
             level=INFO)
@@ -510,10 +487,8 @@ def handle_replicated_pool(request, service):
         log("Pool '{}' already exists - skipping create".format(pool.name),
             level=DEBUG)
 
-    # Set a quota if requested
-    if max_bytes or max_objects:
-        set_pool_quota(service=service, pool_name=pool_name,
-                       max_bytes=max_bytes, max_objects=max_objects)
+    # Set/update properties that are allowed to change after pool creation.
+    pool.update()
 
 
 def handle_create_cache_tier(request, service):
@@ -540,7 +515,7 @@ def handle_create_cache_tier(request, service):
         log(msg, level=ERROR)
         return {'exit-code': 1, 'stderr': msg}
 
-    p = Pool(service=service, name=storage_pool)
+    p = BasePool(service=service, name=storage_pool)
     p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)
@@ -561,7 +536,7 @@ def handle_remove_cache_tier(request, service):
         log(msg, level=ERROR)
         return {'exit-code': 1, 'stderr': msg}
 
-    pool = Pool(name=storage_pool, service=service)
+    pool = BasePool(name=storage_pool, service=service)
     pool.remove_cache_tier(cache_pool=cache_pool)

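The broker hunks above drop per-handler parameter checks and the explicit `set_pool_quota` calls: constructing the pool object from the op dict raises KeyError for any missing required parameter, and `pool.update()` applies everything that may change after creation (quotas and, with this sync, compression settings). A minimal sketch of that create/update split, not the library implementation:

    # Hypothetical sketch: required keys fail fast at construction (mapped
    # to a "Missing parameter." reply), while mutable options such as
    # quotas are applied in update() after the pool exists.
    class PoolSketch:
        def __init__(self, op):
            self.name = op['name']  # required key; missing -> KeyError
            self.quota = {key: op[key] for key in ('max-bytes', 'max-objects')
                          if op.get(key) is not None}

        def create(self):
            print('would run: ceph osd pool create', self.name)

        def update(self):
            if self.quota:
                print('would run: ceph osd pool set-quota', self.name, self.quota)

    PoolSketch({'name': 'foo', 'max-bytes': 2 ** 30}).update()

The final hunks remove the unit tests that now belong to charms.ceph and re-point the cache-tier tests at BasePool.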

@@ -16,7 +16,6 @@ import json
 import unittest
 
 from unittest.mock import (
-    call,
     patch,
 )
@@ -53,48 +52,6 @@ class TestCephOps(unittest.TestCase):
             erasure_plugin_technique=None)
         self.assertEqual(json.loads(rc), {'exit-code': 0})
 
-    @patch.object(broker, 'pool_exists')
-    @patch.object(broker, 'ReplicatedPool')
-    @patch.object(broker, 'log', lambda *args, **kwargs: None)
-    def test_process_requests_create_replicated_pool(self,
-                                                     mock_replicated_pool,
-                                                     mock_pool_exists):
-        mock_pool_exists.return_value = False
-        reqs = json.dumps({'api-version': 1,
-                           'ops': [{
-                               'op': 'create-pool',
-                               'pool-type': 'replicated',
-                               'name': 'foo',
-                               'replicas': 3
-                           }]})
-        rc = broker.process_requests(reqs)
-        mock_pool_exists.assert_called_with(service='admin', name='foo')
-        calls = [call(name=u'foo', service='admin', replicas=3)]
-        mock_replicated_pool.assert_has_calls(calls)
-        self.assertEqual(json.loads(rc), {'exit-code': 0})
-
-    @patch.object(broker, 'pool_exists')
-    @patch.object(broker, 'ReplicatedPool')
-    @patch.object(broker, 'log', lambda *args, **kwargs: None)
-    def test_process_requests_replicated_pool_weight(self,
-                                                     mock_replicated_pool,
-                                                     mock_pool_exists):
-        mock_pool_exists.return_value = False
-        reqs = json.dumps({'api-version': 1,
-                           'ops': [{
-                               'op': 'create-pool',
-                               'pool-type': 'replicated',
-                               'name': 'foo',
-                               'weight': 40.0,
-                               'replicas': 3
-                           }]})
-        rc = broker.process_requests(reqs)
-        mock_pool_exists.assert_called_with(service='admin', name='foo')
-        calls = [call(name=u'foo', service='admin', replicas=3,
-                      percent_data=40.0)]
-        mock_replicated_pool.assert_has_calls(calls)
-        self.assertEqual(json.loads(rc), {'exit-code': 0})
-
     @patch.object(broker, 'delete_pool')
     @patch.object(broker, 'log', lambda *args, **kwargs: None)
     def test_process_requests_delete_pool(self,
@@ -135,7 +92,7 @@ class TestCephOps(unittest.TestCase):
     @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno')
     @patch.object(broker, 'pool_exists')
-    @patch.object(broker.Pool, 'add_cache_tier')
+    @patch.object(broker.BasePool, 'add_cache_tier')
     @patch.object(broker, 'log', lambda *args, **kwargs: None)
     def test_process_requests_create_cache_tier(self, mock_pool,
                                                 mock_pool_exists,
@@ -160,7 +117,7 @@ class TestCephOps(unittest.TestCase):
     @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno')
     @patch.object(broker, 'pool_exists')
-    @patch.object(broker.Pool, 'remove_cache_tier')
+    @patch.object(broker.BasePool, 'remove_cache_tier')
     @patch.object(broker, 'log', lambda *args, **kwargs: None)
     def test_process_requests_remove_cache_tier(self, mock_pool,
                                                 mock_pool_exists,