Add BlueStore Compression support

Ceph BlueStore Compression is a post-deploy configurable option,
so allowing updates to the broker request is required. Drop the code
that gates the sending of the pool broker request; the original issue
has been fixed in the interface code and it is now safe to call it
multiple times.

Fold Erasure Coding test into regular bundles from Mimic and up
to allow testing both EC and BlueStore Compression at the same
time without test bundle explosion.

Unpin flake8

Change-Id: I9b529e61a8832a62f4db12cab8f352d468c8a3ad
This commit is contained in:
Frode Nordahl 2020-10-06 13:40:18 +02:00
parent 9c9b3f0aa5
commit 8b2b48f0ea
No known key found for this signature in database
GPG Key ID: 6A5D59A3BA48373F
12 changed files with 169 additions and 261 deletions

View File

@@ -192,3 +192,69 @@ options:
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default:
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default:
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default:
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default:
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default:
description: |
Chunks larger than this are broken into smaller blobs no larger than
bluestore compression max blob size before being compressed on pools
requested by this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.

View File

@@ -13,6 +13,9 @@
# limitations under the License.
from charms import reactive
import charmhelpers.core as ch_core
from charmhelpers.core.hookenv import (
service_name,
config)
@@ -50,7 +53,6 @@ def config_changed():
cephfs_charm.assess_status()
@reactive.when_not('ceph.create_pool.req.sent')
@reactive.when('ceph-mds.connected')
def storage_ceph_connected(ceph):
ceph_mds = reactive.endpoint_from_flag('ceph-mds.connected')
@@ -78,6 +80,18 @@ def storage_ceph_connected(ceph):
weight = weight - metadata_weight
extra_pools = []
bluestore_compression = None
with charm.provide_charm_instance() as cephfs_charm:
# TODO: move this whole method into the charm class and add to the
# common pool creation logic in charms.openstack. For now we reuse
# the common bluestore compression wrapper here.
try:
bluestore_compression = cephfs_charm._get_bluestore_compression()
except ValueError as e:
ch_core.hookenv.log('Invalid value(s) provided for Ceph BlueStore '
'compression: "{}"'
.format(str(e)))
if config('pool-type') == 'erasure-coded':
# General EC plugin config
plugin = config('ec-profile-plugin')
@@ -115,18 +129,34 @@ def storage_ceph_connected(ceph):
# Create EC data pool
ec_pool_name = 'ec_{}'.format(pool_name)
ceph_mds.create_erasure_pool(
name=ec_pool_name,
erasure_profile=profile_name,
weight=ec_pool_weight,
app_name=ceph_mds.ceph_pool_app_name,
allow_ec_overwrites=True
)
ceph_mds.create_replicated_pool(
name=pool_name,
weight=weight,
app_name=ceph_mds.ceph_pool_app_name
)
# NOTE(fnordahl): once we deprecate Python 3.5 support we can do
# the unpacking of the BlueStore compression arguments as part of
# the function arguments. Until then we need to build the dict
# prior to the function call.
kwargs = {
'name': ec_pool_name,
'erasure_profile': profile_name,
'weight': ec_pool_weight,
'app_name': ceph_mds.ceph_pool_app_name,
'allow_ec_overwrites': True,
}
if bluestore_compression:
kwargs.update(bluestore_compression)
ceph_mds.create_erasure_pool(**kwargs)
# NOTE(fnordahl): once we deprecate Python 3.5 support we can do
# the unpacking of the BlueStore compression arguments as part of
# the function arguments. Until then we need to build the dict
# prior to the function call.
kwargs = {
'name': pool_name,
'weight': weight,
'app_name': ceph_mds.ceph_pool_app_name,
}
if bluestore_compression:
kwargs.update(bluestore_compression)
ceph_mds.create_replicated_pool(**kwargs)
ceph_mds.create_replicated_pool(
name=metadata_pool_name,
weight=metadata_weight,
@@ -134,15 +164,22 @@ def storage_ceph_connected(ceph):
)
extra_pools = [ec_pool_name]
else:
ceph_mds.create_replicated_pool(
name=pool_name,
replicas=replicas,
weight=weight,
app_name=ceph_mds.ceph_pool_app_name)
# NOTE(fnordahl): once we deprecate Python 3.5 support we can do
# the unpacking of the BlueStore compression arguments as part of
# the function arguments. Until then we need to build the dict
# prior to the function call.
kwargs = {
'name': pool_name,
'replicas': replicas,
'weight': weight,
'app_name': ceph_mds.ceph_pool_app_name,
}
if bluestore_compression:
kwargs.update(bluestore_compression)
ceph_mds.create_replicated_pool(**kwargs)
ceph_mds.create_replicated_pool(
name=metadata_pool_name,
replicas=replicas,
weight=metadata_weight,
app_name=ceph_mds.ceph_pool_app_name)
ceph_mds.request_cephfs(service, extra_pools=extra_pools)
reactive.set_state('ceph.create_pool.req.sent')

View File

@@ -6,9 +6,12 @@ applications:
num_units: 1
options:
source: cloud:bionic-rocky
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:
@@ -124,4 +127,4 @@ relations:
- - 'neutron-openvswitch:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:quantum-network-service'
- 'neutron-gateway:quantum-network-service'
- 'neutron-gateway:quantum-network-service'

View File

@@ -8,9 +8,12 @@ applications:
num_units: 1
options:
source: *source
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:

View File

@@ -6,9 +6,12 @@ applications:
num_units: 1
options:
source: cloud:bionic-train
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:

View File

@@ -5,9 +5,12 @@ applications:
num_units: 1
options:
source: cloud:bionic-ussuri
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:

View File

@@ -1,222 +0,0 @@
variables:
openstack-origin: &openstack-origin distro
series: &series focal
machines:
'0':
constraints: "mem=3072M"
'1':
constraints: "mem=3072M"
'2':
constraints: "mem=3072M"
'3':
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
nova-cloud-controller-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
placement-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
neutron-api-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
ceph-fs:
charm: ceph-fs
num_units: 1
options:
source: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: *openstack-origin
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
source: *openstack-origin
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: *openstack-origin
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: *openstack-origin
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
network-manager: Neutron
openstack-origin: *openstack-origin
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 2
constraints: mem=8G
options:
config-flags: default_ephemeral_format=ext4
enable-live-migration: true
enable-resize: true
migration-auth-type: ssh
openstack-origin: *openstack-origin
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
openstack-origin: *openstack-origin
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
manage-neutron-plugin-legacy-mode: true
neutron-plugin: ovs
flat-network-providers: physnet1
neutron-security-groups: true
openstack-origin: *openstack-origin
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
bridge-mappings: physnet1:br-ex
openstack-origin: *openstack-origin
relations:
- - 'ceph-mon:mds'
- 'ceph-fs:ceph-mds'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-compute:image-service'
- 'glance:image-service'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:amqp'
- 'rabbitmq-server:amqp'
- - 'glance:ceph'
- 'ceph-mon:client'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-cloud-controller:shared-db'
- 'nova-cloud-controller-mysql-router:shared-db'
- - 'nova-cloud-controller-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-cloud-controller:identity-service'
- 'keystone:identity-service'
- - 'nova-cloud-controller:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:cloud-compute'
- 'nova-compute:cloud-compute'
- - 'nova-cloud-controller:image-service'
- 'glance:image-service'
- - 'placement:shared-db'
- 'placement-mysql-router:shared-db'
- - 'placement-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'placement'
- 'keystone'
- - 'placement'
- 'nova-cloud-controller'
- - 'neutron-api:shared-db'
- 'neutron-api-mysql-router:shared-db'
- - 'neutron-api-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'neutron-api:amqp'
- 'rabbitmq-server:amqp'
- - 'neutron-api:neutron-api'
- 'nova-cloud-controller:neutron-api'
- - 'neutron-api:neutron-plugin-api'
- 'neutron-gateway:neutron-plugin-api'
- - 'neutron-api:identity-service'
- 'keystone:identity-service'
- - 'nova-compute:neutron-plugin'
- 'neutron-openvswitch:neutron-plugin'
- - 'neutron-gateway:amqp'
- 'rabbitmq-server:amqp'
- - 'neutron-openvswitch:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:quantum-network-service'
- 'neutron-gateway:quantum-network-service'

View File

@@ -41,12 +41,15 @@ applications:
num_units: 1
options:
source: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:

View File

@@ -41,12 +41,15 @@ applications:
num_units: 1
options:
source: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:

View File

@@ -41,12 +41,15 @@ applications:
num_units: 1
options:
source: *openstack-origin
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
num_units: 6
storage:
osd-devices: 'cinder,10G'
options:

View File

@@ -1,12 +1,11 @@
charm_name: ceph-fs
gate_bundles:
- focal-ussuri-ec
- focal-victoria
- focal-ussuri
- bionic-ussuri
- bionic-train
- bionic-stein
- bionic-rocky
- bluestore-compression: focal-victoria
- bluestore-compression: focal-ussuri
- bluestore-compression: bionic-ussuri
- bluestore-compression: bionic-train
- bluestore-compression: bionic-stein
- bluestore-compression: bionic-rocky
- bionic-queens
- xenial-queens
# Xenial-pike is missing because of
@@ -14,18 +13,28 @@ gate_bundles:
- xenial-ocata
- xenial-mitaka
smoke_bundles:
- bionic-stein
- bluestore-compression: bionic-stein
dev_bundles:
- groovy-victoria
- bluestore-compression: groovy-victoria
configure:
- zaza.openstack.charm_tests.glance.setup.add_lts_image
- zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
- zaza.openstack.charm_tests.nova.setup.create_flavors
- zaza.openstack.charm_tests.nova.setup.manage_ssh_key
- zaza.openstack.charm_tests.keystone.setup.add_demo_user
- bluestore-compression:
- zaza.openstack.charm_tests.glance.setup.add_lts_image
- zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
- zaza.openstack.charm_tests.nova.setup.create_flavors
- zaza.openstack.charm_tests.nova.setup.manage_ssh_key
- zaza.openstack.charm_tests.keystone.setup.add_demo_user
tests:
- zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests
- zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest
- bluestore-compression:
- zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests
- zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest
- zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
tests_options:
force_deploy:
- groovy-victoria

View File

@@ -35,9 +35,6 @@ class TestRegisteredHooks(test_utils.TestRegisteredHooks):
'config_changed': ('ceph-mds.pools.available',),
'storage_ceph_connected': ('ceph-mds.connected',),
},
'when_not': {
'storage_ceph_connected': ('ceph.create_pool.req.sent',),
},
'when_none': {
'config_changed': ('charm.paused',
'run-default-update-status',),