Updates for improved EC support

Sync charmhelpers and charms.openstack to pick up changes for
improved Erasure Coded pool support.

Update the action code for EC profile creation to support the
extended options and other charmhelpers changes.

Depends-On: I2547933964849f7af1c623b2fbc014fb332839ef
Change-Id: Iec4de19f7b39f0b08158d96c5cc1561b40aefa10
James Page 2020-07-27 14:56:09 +01:00
parent e60d30630f
commit 4fd788d3a2
7 changed files with 1175 additions and 460 deletions
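
For reference, the new options surface as parameters on the charm's existing actions; assuming this is the ceph-mon charm, they could be exercised roughly as below (the unit name and all parameter values are illustrative, not taken from this change):

    # Illustrative only: driving the updated actions from a Juju client.
    # Unit name and parameter values are assumptions for the example.
    import subprocess

    subprocess.check_call([
        'juju', 'run-action', '--wait', 'ceph-mon/0', 'create-erasure-profile',
        'name=clay-profile', 'plugin=clay',
        'data-chunks=4', 'coding-chunks=2', 'helper-chunks=5',
        'scalar-mds=jerasure', 'failure-domain=host',
    ])

    subprocess.check_call([
        'juju', 'run-action', '--wait', 'ceph-mon/0', 'create-pool',
        'name=mypool', 'pool-type=erasure-coded',
        'erasure-profile-name=clay-profile', 'allow-ec-overwrites=true',
    ])
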


@@ -55,6 +55,7 @@ create-pool:
enum:
- replicated
- erasure
- erasure-coded
description: "The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the objects or erasure to get a kind of generalized RAID5 capability."
replicas:
type: integer
@@ -68,6 +69,9 @@ create-pool:
type: integer
default: 10
description: "The percentage of data that is expected to be contained in the pool for the specific OSD set. Default value is to assume 10% of the data is for this pool, which is a relatively low % of the data but allows for the pg_num to be increased."
allow-ec-overwrites:
type: boolean
description: "Permit overwrites for erasure coded pool types."
required:
- name
additionalProperties: false
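
The new allow-ec-overwrites flag maps to the Ceph pool property of the same name, which has to be enabled before RBD or CephFS can write to an erasure coded data pool. A minimal sketch of the underlying operation (pool name is illustrative; the charm itself goes through charmhelpers rather than shelling out like this):

    # Sketch: what enabling EC overwrites amounts to at the Ceph level.
    import subprocess

    pool = 'mypool'  # illustrative
    subprocess.check_call(
        ['ceph', 'osd', 'pool', 'set', pool, 'allow_ec_overwrites', 'true'])
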
@@ -111,7 +115,6 @@ create-erasure-profile:
description: "The name of the profile"
failure-domain:
type: string
default: host
enum:
- chassis
- datacenter
@@ -133,6 +136,7 @@ create-erasure-profile:
- isa
- lrc
- shec
- clay
description: "The erasure plugin to use for this profile. See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details"
data-chunks:
type: integer
@@ -144,10 +148,35 @@ create-erasure-profile:
description: "The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions. If there are 2 coding chunks, it means 2 OSDs can be out without losing data."
locality-chunks:
type: integer
description: "Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 two groups of three are created. Each set can be recovered without reading chunks from another set."
description: "LRC plugin - Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 two groups of three are created. Each set can be recovered without reading chunks from another set."
crush-locality:
type: string
enum:
- chassis
- datacenter
- host
- osd
- pdu
- pod
- rack
- region
- room
- root
- row
description: "LRC plugin - The type of CRUSH bucket in which each set of chunks defined by locality-chunks will be stored."
durability-estimator:
type: integer
description: "The number of parity chunks each of which includes each data chunk in its calculation range. The number is used as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data."
description: "SHEC plugin - the number of parity chunks each of which includes each data chunk in its calculation range. The number is used as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data."
helper-chunks:
type: integer
description: "CLAY plugin - number of OSDs requests to send data during recovery of a single chunk."
scalar-mds:
type: string
enum:
- jerasure
- isa
- shec
description: "CLAY plugin - specifies the plugin that is used as a building block in the layered construction."
device-class:
type: string
enum:
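
Taken together, the extended options map onto key=value pairs of Ceph's 'osd erasure-code-profile set' command; a hedged sketch of the equivalent invocations (profile names and values are illustrative, and the charm drives this through charmhelpers rather than the CLI):

    # Illustrative mapping of the new parameters onto Ceph profile keys.
    import subprocess

    def set_profile(name, **keys):
        # ceph osd erasure-code-profile set <name> key=value ...
        args = ['ceph', 'osd', 'erasure-code-profile', 'set', name]
        args += ['{}={}'.format(k, v) for k, v in keys.items()]
        subprocess.check_call(args)

    # LRC: locality-chunks -> l, crush-locality -> crush-locality
    set_profile('lrc-profile', plugin='lrc', k=4, m=2, l=3,
                **{'crush-locality': 'rack', 'crush-failure-domain': 'host'})

    # CLAY: helper-chunks -> d, scalar-mds -> scalar_mds
    set_profile('clay-profile', plugin='clay', k=4, m=2, d=5,
                scalar_mds='jerasure')

    # SHEC: durability-estimator -> c
    set_profile('shec-profile', plugin='shec', k=4, m=3, c=2)
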


@@ -28,6 +28,8 @@ def make_erasure_profile():
plugin = action_get("plugin")
failure_domain = action_get("failure-domain")
device_class = action_get("device-class")
k = action_get("data-chunks")
m = action_get("coding-chunks")
# jerasure requires k+m
# isa requires k+m
@@ -35,8 +37,6 @@ def make_erasure_profile():
# shec requires k+m+c
if plugin == "jerasure":
k = action_get("data-chunks")
m = action_get("coding-chunks")
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
@@ -50,8 +50,6 @@ def make_erasure_profile():
action_fail("Create erasure profile failed with "
"message: {}".format(str(e)))
elif plugin == "isa":
k = action_get("data-chunks")
m = action_get("coding-chunks")
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
@@ -64,10 +62,9 @@ def make_erasure_profile():
log(e)
action_fail("Create erasure profile failed with "
"message: {}".format(str(e)))
elif plugin == "local":
k = action_get("data-chunks")
m = action_get("coding-chunks")
elif plugin == "lrc":
l = action_get("locality-chunks")
crush_locality = action_get('crush-locality')
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
@@ -75,6 +72,7 @@ def make_erasure_profile():
data_chunks=k,
coding_chunks=m,
locality=l,
crush_locality=crush_locality,
failure_domain=failure_domain,
device_class=device_class)
except CalledProcessError as e:
@@ -82,8 +80,6 @@ def make_erasure_profile():
action_fail("Create erasure profile failed with "
"message: {}".format(str(e)))
elif plugin == "shec":
k = action_get("data-chunks")
m = action_get("coding-chunks")
c = action_get("durability-estimator")
try:
create_erasure_profile(service='admin',
@@ -98,10 +94,27 @@ def make_erasure_profile():
log(e)
action_fail("Create erasure profile failed with "
"message: {}".format(str(e)))
elif plugin == "clay":
d = action_get("helper-chunks")
scalar_mds = action_get('scalar-mds')
try:
create_erasure_profile(service='admin',
erasure_plugin_name=plugin,
profile_name=name,
data_chunks=k,
coding_chunks=m,
helper_chunks=d,
scalar_mds=scalar_mds,
failure_domain=failure_domain,
device_class=device_class)
except CalledProcessError as e:
log(e)
action_fail("Create erasure profile failed with "
"message: {}".format(str(e)))
else:
# Unknown erasure plugin
action_fail("Unknown erasure-plugin type of {}. "
"Only jerasure, isa, local or shec is "
"Only jerasure, isa, lrc, shec or clay is "
"allowed".format(plugin))


@@ -38,13 +38,15 @@ def create_pool():
)
replicated_pool.create()
elif pool_type == "erasure":
elif pool_type in ("erasure", "erasure-coded"):
crush_profile_name = action_get("erasure-profile-name")
allow_ec_overwrites = action_get("allow-ec-overwrites")
erasure_pool = ErasurePool(name=pool_name,
erasure_code_profile=crush_profile_name,
service='admin',
app_name=app_name,
percent_data=float(percent_data),
allow_ec_overwrites=allow_ec_overwrites,
)
erasure_pool.create()
else:
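
The erasure branch now threads the new flag straight through to charmhelpers' ErasurePool; the effective construction looks roughly like this (pool and profile names, app_name and percent_data are illustrative):

    # Sketch of the EC pool the action now creates.
    from charmhelpers.contrib.storage.linux.ceph import ErasurePool

    erasure_pool = ErasurePool(name='mypool',
                               erasure_code_profile='clay-profile',
                               service='admin',
                               app_name='rbd',
                               percent_data=10.0,
                               allow_ec_overwrites=True)
    erasure_pool.create()
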


@@ -29,6 +29,8 @@ from subprocess import check_call, CalledProcessError
import six
import charmhelpers.contrib.storage.linux.ceph as ch_ceph
from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
_config_ini as config_ini
)
@@ -56,6 +58,7 @@ from charmhelpers.core.hookenv import (
status_set,
network_get_primary_address,
WARNING,
service_name,
)
from charmhelpers.core.sysctl import create as sysctl_create
@@ -808,6 +811,12 @@ class CephContext(OSContextGenerator):
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
if config('pool-type') and config('pool-type') == 'erasure-coded':
base_pool_name = config('rbd-pool') or config('rbd-pool-name')
if not base_pool_name:
base_pool_name = service_name()
ctxt['rbd_default_data_pool'] = base_pool_name
if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph')
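
The rbd_default_data_pool value exposed here lets consuming ceph.conf templates point RBD clients at the erasure coded data pool while metadata stays in the replicated pool named here; a sketch of the derivation and, in a comment, the kind of line a template would typically render (the template side is an assumption, not part of this diff):

    # Sketch: deriving the data pool name as the context above does.
    from charmhelpers.core.hookenv import config, service_name

    ctxt = {}
    if config('pool-type') == 'erasure-coded':
        base_pool_name = (config('rbd-pool') or
                          config('rbd-pool-name') or
                          service_name())
        ctxt['rbd_default_data_pool'] = base_pool_name
        # a ceph.conf template would typically emit something like:
        #   rbd default data pool = <base_pool_name>
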
@@ -3175,3 +3184,78 @@ class SRIOVContext(OSContextGenerator):
:rtype: Dict[str,int]
"""
return self._map
class CephBlueStoreCompressionContext(OSContextGenerator):
"""Ceph BlueStore compression options."""
# Tuple with Tuples that map configuration option name to CephBrokerRq op
# property name
options = (
('bluestore-compression-algorithm',
'compression-algorithm'),
('bluestore-compression-mode',
'compression-mode'),
('bluestore-compression-required-ratio',
'compression-required-ratio'),
('bluestore-compression-min-blob-size',
'compression-min-blob-size'),
('bluestore-compression-min-blob-size-hdd',
'compression-min-blob-size-hdd'),
('bluestore-compression-min-blob-size-ssd',
'compression-min-blob-size-ssd'),
('bluestore-compression-max-blob-size',
'compression-max-blob-size'),
('bluestore-compression-max-blob-size-hdd',
'compression-max-blob-size-hdd'),
('bluestore-compression-max-blob-size-ssd',
'compression-max-blob-size-ssd'),
)
def __init__(self):
"""Initialize context by loading values from charm config.
We keep two maps, one suitable for use with CephBrokerRq's and one
suitable for template generation.
"""
charm_config = config()
# CephBrokerRq op map
self.op = {}
# Context exposed for template generation
self.ctxt = {}
for config_key, op_key in self.options:
value = charm_config.get(config_key)
self.ctxt.update({config_key.replace('-', '_'): value})
self.op.update({op_key: value})
def __call__(self):
"""Get context.
:returns: Context
:rtype: Dict[str,any]
"""
return self.ctxt
def get_op(self):
"""Get values for use in CephBrokerRq op.
:returns: Context values with CephBrokerRq op property name as key.
:rtype: Dict[str,any]
"""
return self.op
def validate(self):
"""Validate options.
:raises: AssertionError
"""
# We slip in a dummy name on class instantiation to allow validation of
# the other options. It will not affect further use.
#
# NOTE: once we retire Python 3.5 we can fold this into an in-line
# dictionary comprehension in the call to the initializer.
dummy_op = {'name': 'dummy-name'}
dummy_op.update(self.op)
pool = ch_ceph.BasePool('dummy-service', op=dummy_op)
pool.validate()
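
A short usage sketch for the new context as a charm might consume it; the surrounding handling and the broker op layout are assumptions, while validate() and get_op() are the methods defined above:

    # Illustrative consumption inside a charm hook.
    from charmhelpers.contrib.openstack.context import (
        CephBlueStoreCompressionContext,
    )

    compression = CephBlueStoreCompressionContext()
    try:
        # Raises AssertionError when the configured combination is invalid.
        compression.validate()
    except AssertionError as exc:
        raise SystemExit('invalid bluestore compression config: '
                         '{}'.format(exc))

    # Fold the compression settings into a Ceph broker create-pool op.
    op = {'op': 'create-pool', 'name': 'mypool', 'pool-type': 'replicated'}
    op.update(compression.get_op())
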

File diff suppressed because it is too large.


@@ -155,25 +155,47 @@ def handle_create_erasure_profile(request, service):
:param service: The ceph client to run the command under.
:returns: dict. exit-code and reason if not 0
"""
# "local" | "shec" or it defaults to "jerasure"
# "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure"
erasure_type = request.get('erasure-type')
# "host" | "rack" or it defaults to "host" # Any valid Ceph bucket
# dependent on erasure coding type
erasure_technique = request.get('erasure-technique')
# "host" | "rack" | ...
failure_domain = request.get('failure-domain')
name = request.get('name')
# Binary Distribution Matrix (BDM) parameters
bdm_k = request.get('k')
bdm_m = request.get('m')
# LRC parameters
bdm_l = request.get('l')
crush_locality = request.get('crush-locality')
# SHEC parameters
bdm_c = request.get('c')
# CLAY parameters
bdm_d = request.get('d')
scalar_mds = request.get('scalar-mds')
# Device Class
device_class = request.get('device-class')
if failure_domain not in CEPH_BUCKET_TYPES:
if failure_domain and failure_domain not in CEPH_BUCKET_TYPES:
msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
log(msg, level=ERROR)
return {'exit-code': 1, 'stderr': msg}
create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
profile_name=name, failure_domain=failure_domain,
data_chunks=bdm_k, coding_chunks=bdm_m,
locality=bdm_l)
create_erasure_profile(service=service,
erasure_plugin_name=erasure_type,
profile_name=name,
failure_domain=failure_domain,
data_chunks=bdm_k,
coding_chunks=bdm_m,
locality=bdm_l,
durability_estimator=bdm_d,
helper_chunks=bdm_c,
scalar_mds=scalar_mds,
crush_locality=crush_locality,
device_class=device_class,
erasure_plugin_technique=erasure_technique)
return {'exit-code': 0}
def handle_add_permissions_to_key(request, service):
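
For context, the handler above is dispatched from a Ceph broker request; an illustrative op carrying the extended keys (values are made up, key names are the request.get() keys above):

    # Illustrative broker request exercising the extended profile options.
    import json

    request = json.dumps({
        'api-version': 1,
        'ops': [{
            'op': 'create-erasure-profile',
            'name': 'clay-profile',
            'erasure-type': 'clay',
            'failure-domain': 'host',
            'k': 4,
            'm': 2,
            'd': 5,                   # CLAY helper chunks
            'scalar-mds': 'jerasure',
            'device-class': 'ssd',
        }],
    })
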
@@ -387,6 +409,7 @@ def handle_erasure_pool(request, service):
max_objects = request.get('max-objects')
weight = request.get('weight')
group_name = request.get('group')
allow_ec_overwrites = request.get('allow-ec-overwrites')
if erasure_profile is None:
erasure_profile = "default-canonical"
@@ -416,7 +439,9 @@ def handle_erasure_pool(request, service):
pool = ErasurePool(service=service, name=pool_name,
erasure_code_profile=erasure_profile,
percent_data=weight, app_name=app_name)
percent_data=weight,
app_name=app_name,
allow_ec_overwrites=allow_ec_overwrites)
# Ok make the erasure pool
if not pool_exists(service=service, name=pool_name):
log("Creating pool '{}' (erasure_profile={})"


@@ -38,13 +38,19 @@ class TestCephOps(unittest.TestCase):
'm': 2,
}]})
rc = broker.process_requests(req)
mock_create_erasure.assert_called_with(service='admin',
profile_name='foo',
coding_chunks=2,
data_chunks=3,
locality=None,
failure_domain='rack',
erasure_plugin_name='jerasure')
mock_create_erasure.assert_called_with(
service='admin',
erasure_plugin_name='jerasure',
profile_name='foo',
failure_domain='rack',
data_chunks=3, coding_chunks=2,
locality=None,
durability_estimator=None,
helper_chunks=None,
scalar_mds=None,
crush_locality=None,
device_class=None,
erasure_plugin_technique=None)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(broker, 'pool_exists')
@@ -103,14 +109,17 @@ class TestCephOps(unittest.TestCase):
mock_delete_pool.assert_called_with(service='admin', name='foo')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno')
@patch.object(broker, 'pool_exists')
@patch.object(broker.ErasurePool, 'create')
@patch.object(broker, 'erasure_profile_exists')
@patch.object(broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_create_erasure_pool(self, mock_profile_exists,
mock_erasure_pool,
mock_pool_exists):
mock_pool_exists,
mock_cmp_pkgrevno):
mock_pool_exists.return_value = False
mock_cmp_pkgrevno.return_value = 1
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-pool',
@@ -124,12 +133,15 @@ class TestCephOps(unittest.TestCase):
mock_erasure_pool.assert_called_with()
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno')
@patch.object(broker, 'pool_exists')
@patch.object(broker.Pool, 'add_cache_tier')
@patch.object(broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_create_cache_tier(self, mock_pool,
mock_pool_exists):
mock_pool_exists,
mock_cmp_pkgrevno):
mock_pool_exists.return_value = True
mock_cmp_pkgrevno.return_value = 1
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-cache-tier',
@@ -139,28 +151,32 @@ class TestCephOps(unittest.TestCase):
'erasure-profile': 'default'
}]})
rc = broker.process_requests(reqs)
self.assertEqual(json.loads(rc), {'exit-code': 0})
mock_pool_exists.assert_any_call(service='admin', name='foo')
mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')
mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno')
@patch.object(broker, 'pool_exists')
@patch.object(broker.Pool, 'remove_cache_tier')
@patch.object(broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_remove_cache_tier(self, mock_pool,
mock_pool_exists):
mock_pool_exists,
mock_cmp_pkgrevno):
mock_pool_exists.return_value = True
mock_cmp_pkgrevno.return_value = 1
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'remove-cache-tier',
'hot-pool': 'foo-ssd',
}]})
rc = broker.process_requests(reqs)
self.assertEqual(json.loads(rc), {'exit-code': 0})
mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')
mock_pool.assert_called_with(cache_pool='foo-ssd')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(broker, 'snapshot_pool')
@patch.object(broker, 'log', lambda *args, **kwargs: None)
@@ -173,10 +189,11 @@ class TestCephOps(unittest.TestCase):
}]})
mock_snapshot_pool.return_value = {'exit-code': 0}
rc = broker.process_requests(reqs)
self.assertEqual(json.loads(rc), {'exit-code': 0})
mock_snapshot_pool.assert_called_with(service='admin',
pool_name='foo',
snapshot_name='foo-snap1')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(broker, 'rename_pool')
@patch.object(broker, 'log', lambda *args, **kwargs: None)