Fix pool names in RadosGW charm
The latest Ceph versions forbid pool names that start with a dot. Since the RadosGW charm makes extensive use of such pool names, this patchset fixes that issue. In addition, the Ceph libraries are synced, since they were outdated.

Change-Id: I50112480bb3669de08ee85a9bf9a594b379e9ec3
parent 7afb253cf6
commit fd4497f8dc
@@ -1,7 +1,7 @@
 pause:
   description: Pause the ceph-radosgw unit.
 resume:
-  descrpition: Resume the ceph-radosgw unit.
+  description: Resume the ceph-radosgw unit.
 promote:
   description: Promote the zone associated with the local units to master/default (multi-site).
 readonly:
@@ -79,6 +79,10 @@ def import_radosgw_key(key, name=None):
     return False


+def normalize_pool_name(pool):
+    return pool[1:] if pool.startswith('.') else pool
+
+
 def get_create_rgw_pools_rq(prefix=None):
     """Pre-create RGW pools so that they have the correct settings.

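For reference, a standalone sketch of the new helper's behaviour (illustration only; the charm's own copy above is authoritative):

    # Standalone copy of the helper for illustration.
    def normalize_pool_name(pool):
        # Recent Ceph releases reject pool names that begin with a dot,
        # so the charm strips a single leading dot before pool creation.
        return pool[1:] if pool.startswith('.') else pool

    assert normalize_pool_name('.rgw.root') == 'rgw.root'
    assert normalize_pool_name('default.rgw.control') == 'default.rgw.control'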
@@ -101,6 +105,8 @@ def get_create_rgw_pools_rq(prefix=None):
         w = weights.get(pool, 0.10)
         if prefix:
             pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
+
+        pool = normalize_pool_name(pool)
         if pg_num > 0:
             rq.add_op_create_pool(name=pool, replica_count=replicas,
                                   pg_num=pg_num, group='objects',
@@ -162,7 +168,7 @@ def get_create_rgw_pools_rq(prefix=None):
             # the function arguments. Until then we need to build the dict
             # prior to the function call.
             kwargs = {
-                'name': pool,
+                'name': normalize_pool_name(pool),
                 'erasure_profile': profile_name,
                 'weight': bucket_weight,
                 'group': "objects",
@@ -178,7 +184,7 @@ def get_create_rgw_pools_rq(prefix=None):
             # the function arguments. Until then we need to build the dict
             # prior to the function call.
             kwargs = {
-                'name': pool,
+                'name': normalize_pool_name(pool),
                 'replica_count': replicas,
                 'weight': bucket_weight,
                 'group': 'objects',
@@ -209,7 +215,8 @@ def get_create_rgw_pools_rq(prefix=None):
     for pool in light:
         _add_light_pool(rq, pool, pg_num, prefix)

-    _add_light_pool(rq, '.rgw.root', pg_num)
+    # RadosGW creates this pool automatically from Quincy on.
+    # _add_light_pool(rq, '.rgw.root', pg_num)

     if config('restrict-ceph-pools'):
         rq.add_op_request_access_to_group(name="objects",
@@ -414,18 +414,27 @@ def get_requests_for_local_unit(relation_name=None):
         is_legacy_request = set(sent).intersection(legacy_keys)
         for unit in related_units(rid):
             data = relation_get(rid=rid, unit=unit)
-            if data.get(raw_certs_key):
-                bundles.append({
-                    'ca': data['ca'],
-                    'chain': data.get('chain'),
-                    'certs': json.loads(data[raw_certs_key])})
-            elif is_legacy_request:
-                bundles.append({
-                    'ca': data['ca'],
-                    'chain': data.get('chain'),
-                    'certs': {sent['common_name']:
-                              {'cert': data.get(local_name + '.server.cert'),
-                               'key': data.get(local_name + '.server.key')}}})
+            # Note: Bug#2028683 - data may not be available if the certificates
+            # relation hasn't been populated by the providing charm. If no 'ca'
+            # in the data then don't attempt the bundle at all.
+            if data.get('ca'):
+                if data.get(raw_certs_key):
+                    bundles.append({
+                        'ca': data['ca'],
+                        'chain': data.get('chain'),
+                        'certs': json.loads(data[raw_certs_key])
+                    })
+                elif is_legacy_request:
+                    bundles.append({
+                        'ca': data['ca'],
+                        'chain': data.get('chain'),
+                        'certs': {
+                            sent['common_name']: {
+                                'cert': data.get(local_name + '.server.cert'),
+                                'key': data.get(local_name + '.server.key')
+                            }
+                        }
+                    })

     return bundles

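The rewritten branch nests the bundle construction under a check for 'ca', per Bug#2028683: relation data may still be empty when the providing charm has not published certificates yet. A minimal sketch of the guard (data shapes assumed; not the full charmhelpers function):

    # Sketch: an unpopulated relation must yield no bundle and no KeyError.
    def collect_bundles(units_data, raw_certs_key='processed_requests'):
        bundles = []
        for data in units_data:
            if not data.get('ca'):
                continue  # certificates relation not yet populated; skip
            bundles.append({'ca': data['ca'], 'chain': data.get('chain')})
        return bundles

    assert collect_bundles([{}]) == []
    assert collect_bundles([{'ca': 'PEM'}]) == [{'ca': 'PEM', 'chain': None}]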
@@ -151,6 +151,7 @@ import contextlib
 import datetime
 import itertools
 import json
+import logging
 import os
 import pprint
 import sqlite3
@@ -521,6 +522,41 @@ _KV = None

 def kv():
     global _KV
+
+    # If we are running unit tests, it is useful to go into memory-backed KV store to
+    # avoid concurrency issues when running multiple tests. This is not a
+    # problem when juju is running normally.
+
+    env_var = os.environ.get("CHARM_HELPERS_TESTMODE", "auto").lower()
+    if env_var not in ["auto", "no", "yes"]:
+        logging.warning(f"Unknown value for CHARM_HELPERS_TESTMODE '{env_var}', assuming 'no'")
+        env_var = "no"
+
+    if env_var == "no":
+        in_memory_db = False
+    elif env_var == "yes":
+        in_memory_db = True
+    elif env_var == "auto":
+        # If UNIT_STATE_DB is set, respect this request
+        if "UNIT_STATE_DB" in os.environ:
+            in_memory_db = False
+        # Autodetect normal juju execution by looking for juju variables
+        elif "JUJU_CHARM_DIR" in os.environ or "JUJU_UNIT_NAME" in os.environ:
+            in_memory_db = False
+        else:
+            # We are probably running in unit test mode
+            logging.warning("Auto-detected unit test environment for KV store.")
+            in_memory_db = True
+    else:
+        # Help the linter realise that in_memory_db is always set
+        raise Exception("Cannot reach this line")
+
     if _KV is None:
-        _KV = Storage()
+        if in_memory_db:
+            _KV = Storage(":memory:")
+        else:
+            _KV = Storage()
+    else:
+        if in_memory_db and _KV.db_path != ":memory:":
+            logging.warning("Running with in_memory_db and KV is not set to :memory:")
     return _KV
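The synced kv() lets test suites opt into the in-memory backend explicitly. A usage sketch (assumes charmhelpers is importable and that the variable is exported before the first kv() call, since the Storage instance is cached in _KV):

    import os

    # Select the sqlite ':memory:' backend for this process.
    os.environ['CHARM_HELPERS_TESTMODE'] = 'yes'

    from charmhelpers.core.unitdata import kv

    db = kv()  # backed by Storage(':memory:') per the branch above
    db.set('rgw-pool-prefix', 'zone1')
    assert db.get('rgw-pool-prefix') == 'zone1'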
@@ -1223,6 +1223,11 @@ def get_upgrade_key():
     return get_named_key('upgrade-osd', _upgrade_caps)


+def is_internal_client(name):
+    keys = ('osd-upgrade', 'osd-removal', 'admin', 'rbd-mirror', 'mds')
+    return any(name.startswith(key) for key in keys)
+
+
 def get_named_key(name, caps=None, pool_list=None):
     """Retrieve a specific named cephx key.

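is_internal_client is a plain prefix test; the following hunk uses it so that only Ceph-internal key names get their capabilities upgraded automatically. A quick standalone check of the semantics (illustration only):

    # Standalone copy of the predicate for illustration.
    def is_internal_client(name):
        keys = ('osd-upgrade', 'osd-removal', 'admin', 'rbd-mirror', 'mds')
        return any(name.startswith(key) for key in keys)

    assert is_internal_client('osd-upgrade')            # internal client
    assert is_internal_client('rbd-mirror.host1')       # prefix match suffices
    assert not is_internal_client('radosgw.gateway')    # external key untouched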
@@ -1236,7 +1241,8 @@ def get_named_key(name, caps=None, pool_list=None):

     key = ceph_auth_get(key_name)
     if key:
-        upgrade_key_caps(key_name, caps)
+        if is_internal_client(name):
+            upgrade_key_caps(key_name, caps)
         return key

     log("Creating new key for {}".format(name), level=DEBUG)
@@ -107,9 +107,6 @@ class CephRadosGWCephTests(CharmTestCase):
             call('us-east.rgw.buckets.index', replica_count=3, pg_num=10,
                  weight=None, group='objects', namespace=None, app_name='rgw',
                  max_bytes=None, max_objects=None),
-            call('.rgw.root', replica_count=3, pg_num=10, weight=None,
-                 group='objects', namespace=None, app_name='rgw',
-                 max_bytes=None, max_objects=None),
         ])

         # confirm operation with bluestore compression
@@ -163,9 +160,6 @@ class CephRadosGWCephTests(CharmTestCase):
             call('us-east.rgw.buckets.index', replica_count=3, pg_num=10,
                  weight=None, group='objects', namespace=None, app_name='rgw',
                  max_bytes=None, max_objects=None),
-            call('.rgw.root', replica_count=3, pg_num=10, weight=None,
-                 group='objects', namespace=None, app_name='rgw',
-                 max_bytes=None, max_objects=None),
         ])

     @patch.object(utils.context, 'CephBlueStoreCompressionContext')
@@ -228,9 +222,6 @@ class CephRadosGWCephTests(CharmTestCase):
             call('default.rgw.buckets.index', replica_count=3, pg_num=None,
                  weight=3.0, group='objects', namespace=None, app_name='rgw',
                  max_bytes=None, max_objects=None),
-            call('.rgw.root', replica_count=3, pg_num=None, weight=0.1,
-                 group='objects', namespace=None, app_name='rgw',
-                 max_bytes=None, max_objects=None),
         ])
         mock_request_access.assert_called_with(key_name='radosgw.gateway',
                                                name='objects',
@@ -287,9 +278,6 @@ class CephRadosGWCephTests(CharmTestCase):
             call('default.rgw.buckets.index', replica_count=3, pg_num=None,
                  weight=3.0, group='objects', namespace=None, app_name='rgw',
                  max_bytes=None, max_objects=None),
-            call('.rgw.root', replica_count=3, pg_num=None, weight=0.1,
-                 group='objects', namespace=None, app_name='rgw',
-                 max_bytes=None, max_objects=None),
         ])

     @patch.object(utils.context, 'CephBlueStoreCompressionContext')
@@ -365,9 +353,7 @@ class CephRadosGWCephTests(CharmTestCase):
             call(weight=3.00, replica_count=3,
                  name='default.rgw.buckets.index',
                  group='objects', app_name='rgw'),
-            call(weight=0.10, replica_count=3, name='.rgw.root',
-                 group='objects', app_name='rgw')],
-        )
+        ])
         mock_request_access.assert_called_with(key_name='radosgw.gateway',
                                                name='objects',
                                                permission='rwx')