diff --git a/src/config.yaml b/src/config.yaml
index f8488c4..675c9b6 100644
--- a/src/config.yaml
+++ b/src/config.yaml
@@ -192,3 +192,69 @@ options:
       Device class from CRUSH map to use for placement groups for erasure
       profile - valid values: ssd, hdd or nvme (or leave unset to not use a
       device class).
+  bluestore-compression-algorithm:
+    type: string
+    default:
+    description: |
+      Compressor to use (if any) for pools requested by this charm.
+      .
+      NOTE: The ceph-osd charm sets a global default for this value (defaults
+      to 'lz4' unless configured by the end user) which will be used unless
+      specified for individual pools.
+  bluestore-compression-mode:
+    type: string
+    default:
+    description: |
+      Policy for using compression on pools requested by this charm.
+      .
+      'none' means never use compression.
+      'passive' means use compression when clients hint that data is
+      compressible.
+      'aggressive' means use compression unless clients hint that
+      data is not compressible.
+      'force' means use compression under all circumstances even if the
+      clients hint that the data is not compressible.
+  bluestore-compression-required-ratio:
+    type: float
+    default:
+    description: |
+      The ratio of the size of the data chunk after compression relative to
+      the original size must be at least this small in order to store the
+      compressed version on pools requested by this charm.
+  bluestore-compression-min-blob-size:
+    type: int
+    default:
+    description: |
+      Chunks smaller than this are never compressed on pools requested by
+      this charm.
+  bluestore-compression-min-blob-size-hdd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression min blob size for rotational media on
+      pools requested by this charm.
+  bluestore-compression-min-blob-size-ssd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression min blob size for solid state media on
+      pools requested by this charm.
+  bluestore-compression-max-blob-size:
+    type: int
+    default:
+    description: |
+      Chunks larger than this are broken into blobs of at most 'bluestore
+      compression max blob size' before being compressed on pools requested
+      by this charm.
+  bluestore-compression-max-blob-size-hdd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression max blob size for rotational media on
+      pools requested by this charm.
+  bluestore-compression-max-blob-size-ssd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression max blob size for solid state media on
+      pools requested by this charm.
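All nine bluestore-compression-* options above default to unset, so only values the operator actually provides should reach Ceph. As a rough illustration of how such options can be folded into the pool-creation keyword arguments used further down in this patch, consider the following self-contained sketch; the helper and the kwarg names on the right are assumptions for illustration, not code from this change:

    # Hypothetical mapping from charm option names to pool-op kwargs;
    # the right-hand names are assumed, not taken from this patch.
    OPTION_TO_KWARG = {
        'bluestore-compression-algorithm': 'compression_algorithm',
        'bluestore-compression-mode': 'compression_mode',
        'bluestore-compression-required-ratio': 'compression_required_ratio',
        'bluestore-compression-min-blob-size': 'compression_min_blob_size',
        'bluestore-compression-min-blob-size-hdd':
            'compression_min_blob_size_hdd',
        'bluestore-compression-min-blob-size-ssd':
            'compression_min_blob_size_ssd',
        'bluestore-compression-max-blob-size': 'compression_max_blob_size',
        'bluestore-compression-max-blob-size-hdd':
            'compression_max_blob_size_hdd',
        'bluestore-compression-max-blob-size-ssd':
            'compression_max_blob_size_ssd',
    }

    def compression_kwargs(options):
        """Return kwargs for only the options the operator actually set."""
        return {kwarg: options[opt]
                for opt, kwarg in OPTION_TO_KWARG.items()
                if options.get(opt) is not None}

    # Unset options are filtered out, matching the unset defaults above:
    assert compression_kwargs(
        {'bluestore-compression-mode': 'aggressive'}) == {
            'compression_mode': 'aggressive'}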
diff --git a/src/reactive/ceph_fs.py b/src/reactive/ceph_fs.py
index fcda705..a9bbe94 100644
--- a/src/reactive/ceph_fs.py
+++ b/src/reactive/ceph_fs.py
@@ -13,6 +13,9 @@
 # limitations under the License.
 
 from charms import reactive
+
+import charmhelpers.core as ch_core
+
 from charmhelpers.core.hookenv import (
     service_name,
     config)
@@ -50,7 +53,6 @@ def config_changed():
         cephfs_charm.assess_status()
 
 
-@reactive.when_not('ceph.create_pool.req.sent')
 @reactive.when('ceph-mds.connected')
 def storage_ceph_connected(ceph):
     ceph_mds = reactive.endpoint_from_flag('ceph-mds.connected')
@@ -78,6 +80,18 @@ def storage_ceph_connected(ceph):
         weight = weight - metadata_weight
         extra_pools = []
 
+    bluestore_compression = None
+    with charm.provide_charm_instance() as cephfs_charm:
+        # TODO: move this whole method into the charm class and add to the
+        # common pool creation logic in charms.openstack. For now we reuse
+        # the common bluestore compression wrapper here.
+        try:
+            bluestore_compression = cephfs_charm._get_bluestore_compression()
+        except ValueError as e:
+            ch_core.hookenv.log('Invalid value(s) provided for Ceph BlueStore '
+                                'compression: "{}"'
+                                .format(str(e)))
+
     if config('pool-type') == 'erasure-coded':
         # General EC plugin config
         plugin = config('ec-profile-plugin')
@@ -115,18 +129,34 @@ def storage_ceph_connected(ceph):
 
         # Create EC data pool
         ec_pool_name = 'ec_{}'.format(pool_name)
-        ceph_mds.create_erasure_pool(
-            name=ec_pool_name,
-            erasure_profile=profile_name,
-            weight=ec_pool_weight,
-            app_name=ceph_mds.ceph_pool_app_name,
-            allow_ec_overwrites=True
-        )
-        ceph_mds.create_replicated_pool(
-            name=pool_name,
-            weight=weight,
-            app_name=ceph_mds.ceph_pool_app_name
-        )
+
+        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
+        # the unpacking of the BlueStore compression arguments as part of
+        # the function arguments. Until then we need to build the dict
+        # prior to the function call.
+        kwargs = {
+            'name': ec_pool_name,
+            'erasure_profile': profile_name,
+            'weight': ec_pool_weight,
+            'app_name': ceph_mds.ceph_pool_app_name,
+            'allow_ec_overwrites': True,
+        }
+        if bluestore_compression:
+            kwargs.update(bluestore_compression)
+        ceph_mds.create_erasure_pool(**kwargs)
+
+        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
+        # the unpacking of the BlueStore compression arguments as part of
+        # the function arguments. Until then we need to build the dict
+        # prior to the function call.
+        kwargs = {
+            'name': pool_name,
+            'weight': weight,
+            'app_name': ceph_mds.ceph_pool_app_name,
+        }
+        if bluestore_compression:
+            kwargs.update(bluestore_compression)
+        ceph_mds.create_replicated_pool(**kwargs)
         ceph_mds.create_replicated_pool(
             name=metadata_pool_name,
             weight=metadata_weight,
@@ -134,15 +164,22 @@ def storage_ceph_connected(ceph):
         )
         extra_pools = [ec_pool_name]
     else:
-        ceph_mds.create_replicated_pool(
-            name=pool_name,
-            replicas=replicas,
-            weight=weight,
-            app_name=ceph_mds.ceph_pool_app_name)
+        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
+        # the unpacking of the BlueStore compression arguments as part of
+        # the function arguments. Until then we need to build the dict
+        # prior to the function call.
+        kwargs = {
+            'name': pool_name,
+            'replicas': replicas,
+            'weight': weight,
+            'app_name': ceph_mds.ceph_pool_app_name,
+        }
+        if bluestore_compression:
+            kwargs.update(bluestore_compression)
+        ceph_mds.create_replicated_pool(**kwargs)
         ceph_mds.create_replicated_pool(
             name=metadata_pool_name,
             replicas=replicas,
             weight=metadata_weight,
             app_name=ceph_mds.ceph_pool_app_name)
     ceph_mds.request_cephfs(service, extra_pools=extra_pools)
-    reactive.set_state('ceph.create_pool.req.sent')
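The repeated NOTE(fnordahl) comments describe the intended end state: once the dict-building workaround is no longer needed, the compression arguments can be unpacked directly at the call site. One possible shape of that simplification, kept self-contained with a stub standing in for the ceph-mds endpoint method:

    def create_replicated_pool(name, weight, app_name, **extra):
        # Stand-in for the endpoint method so the sketch runs on its own;
        # it just shows what the endpoint would receive.
        print(name, weight, app_name, extra)

    bluestore_compression = {'compression_mode': 'aggressive'}  # may be None

    # `**(x or {})` degrades to "no extra kwargs" when compression is unset.
    create_replicated_pool(
        name='cephfs_data', weight=40, app_name='cephfs',
        **(bluestore_compression or {}))

Note also that dropping the 'ceph.create_pool.req.sent' guard and the matching set_state() call means storage_ceph_connected() re-runs on later hook invocations, so changed compression settings are re-sent to the broker instead of the pool request being issued exactly once.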
diff --git a/src/tests/bundles/bionic-rocky.yaml b/src/tests/bundles/bionic-rocky.yaml
index 90c488c..222a1ae 100644
--- a/src/tests/bundles/bionic-rocky.yaml
+++ b/src/tests/bundles/bionic-rocky.yaml
@@ -6,9 +6,12 @@ applications:
     num_units: 1
     options:
       source: cloud:bionic-rocky
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
+    num_units: 6
     storage:
       osd-devices: 'cinder,10G'
     options:
@@ -124,4 +127,4 @@ relations:
 - - 'neutron-openvswitch:amqp'
   - 'rabbitmq-server:amqp'
 - - 'nova-cloud-controller:quantum-network-service'
-  - 'neutron-gateway:quantum-network-service'
\ No newline at end of file
+  - 'neutron-gateway:quantum-network-service'
diff --git a/src/tests/bundles/bionic-stein.yaml b/src/tests/bundles/bionic-stein.yaml
index 3b05d7c..2e59c83 100644
--- a/src/tests/bundles/bionic-stein.yaml
+++ b/src/tests/bundles/bionic-stein.yaml
@@ -8,9 +8,12 @@ applications:
     num_units: 1
     options:
       source: *source
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
+    num_units: 6
     storage:
       osd-devices: 'cinder,10G'
     options:
diff --git a/src/tests/bundles/bionic-train.yaml b/src/tests/bundles/bionic-train.yaml
index da7feee..3dfe9e6 100644
--- a/src/tests/bundles/bionic-train.yaml
+++ b/src/tests/bundles/bionic-train.yaml
@@ -6,9 +6,12 @@ applications:
     num_units: 1
     options:
       source: cloud:bionic-train
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
+    num_units: 6
     storage:
       osd-devices: 'cinder,10G'
     options:
diff --git a/src/tests/bundles/bionic-ussuri.yaml b/src/tests/bundles/bionic-ussuri.yaml
index 5eada6c..b479d66 100644
--- a/src/tests/bundles/bionic-ussuri.yaml
+++ b/src/tests/bundles/bionic-ussuri.yaml
@@ -5,9 +5,12 @@ applications:
     num_units: 1
     options:
       source: cloud:bionic-ussuri
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
+    num_units: 6
     storage:
       osd-devices: 'cinder,10G'
     options:
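The bundles grow from three to six ceph-osd units because the erasure-coded pool requested with ec-profile-k: 4 and ec-profile-m: 2 writes k + m shards and, under Ceph's default host failure domain, each shard must land on a separate OSD host:

    # k data chunks plus m coding chunks gives the minimum OSD host count.
    k, m = 4, 2
    assert k + m == 6  # hence num_units: 6 in each bundle above and below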
diff --git a/src/tests/bundles/focal-ussuri-ec.yaml b/src/tests/bundles/focal-ussuri-ec.yaml
deleted file mode 100644
index 39d9fed..0000000
--- a/src/tests/bundles/focal-ussuri-ec.yaml
+++ /dev/null
@@ -1,222 +0,0 @@
-variables:
-  openstack-origin: &openstack-origin distro
-
-series: &series focal
-
-machines:
-  '0':
-    constraints: "mem=3072M"
-  '1':
-    constraints: "mem=3072M"
-  '2':
-    constraints: "mem=3072M"
-  '3':
-
-
-applications:
-
-  keystone-mysql-router:
-    charm: cs:~openstack-charmers-next/mysql-router
-  glance-mysql-router:
-    charm: cs:~openstack-charmers-next/mysql-router
-  nova-cloud-controller-mysql-router:
-    charm: cs:~openstack-charmers-next/mysql-router
-  placement-mysql-router:
-    charm: cs:~openstack-charmers-next/mysql-router
-  neutron-api-mysql-router:
-    charm: cs:~openstack-charmers-next/mysql-router
-
-  mysql-innodb-cluster:
-    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
-    num_units: 3
-    options:
-      source: *openstack-origin
-    to:
-      - '0'
-      - '1'
-      - '2'
-
-  ceph-fs:
-    charm: ceph-fs
-    num_units: 1
-    options:
-      source: *openstack-origin
-      pool-type: erasure-coded
-      ec-profile-k: 4
-      ec-profile-m: 2
-    to:
-      - '3'
-
-  ceph-osd:
-    charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 6
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      osd-devices: '/dev/test-non-existent'
-      source: *openstack-origin
-
-  ceph-mon:
-    charm: cs:~openstack-charmers-next/ceph-mon
-    num_units: 3
-    options:
-      monitor-count: '3'
-      source: *openstack-origin
-
-  rabbitmq-server:
-    charm: cs:~openstack-charmers-next/rabbitmq-server
-    num_units: 1
-    options:
-      source: *openstack-origin
-
-  keystone:
-    expose: True
-    charm: cs:~openstack-charmers-next/keystone
-    num_units: 1
-    options:
-      openstack-origin: *openstack-origin
-
-  glance:
-    expose: True
-    charm: cs:~openstack-charmers-next/glance
-    num_units: 1
-    options:
-      openstack-origin: *openstack-origin
-
-  nova-cloud-controller:
-    expose: True
-    charm: cs:~openstack-charmers-next/nova-cloud-controller
-    num_units: 1
-    options:
-      network-manager: Neutron
-      openstack-origin: *openstack-origin
-
-  nova-compute:
-    charm: cs:~openstack-charmers-next/nova-compute
-    num_units: 2
-    constraints: mem=8G
-    options:
-      config-flags: default_ephemeral_format=ext4
-      enable-live-migration: true
-      enable-resize: true
-      migration-auth-type: ssh
-      openstack-origin: *openstack-origin
-
-  placement:
-    charm: cs:~openstack-charmers-next/placement
-    num_units: 1
-    options:
-      openstack-origin: *openstack-origin
-
-  neutron-api:
-    charm: cs:~openstack-charmers-next/neutron-api
-    num_units: 1
-    options:
-      manage-neutron-plugin-legacy-mode: true
-      neutron-plugin: ovs
-      flat-network-providers: physnet1
-      neutron-security-groups: true
-      openstack-origin: *openstack-origin
-
-  neutron-openvswitch:
-    charm: cs:~openstack-charmers-next/neutron-openvswitch
-
-  neutron-gateway:
-    charm: cs:~openstack-charmers-next/neutron-gateway
-    num_units: 1
-    options:
-      bridge-mappings: physnet1:br-ex
-      openstack-origin: *openstack-origin
-
-relations:
-
-  - - 'ceph-mon:mds'
-    - 'ceph-fs:ceph-mds'
-
-  - - 'nova-compute:amqp'
-    - 'rabbitmq-server:amqp'
-
-  - - 'nova-compute:image-service'
-    - 'glance:image-service'
-
-  - - 'nova-compute:ceph'
-    - 'ceph-mon:client'
-
-  - - 'keystone:shared-db'
-    - 'keystone-mysql-router:shared-db'
-  - - 'keystone-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'glance:shared-db'
-    - 'glance-mysql-router:shared-db'
-  - - 'glance-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'glance:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'glance:amqp'
-    - 'rabbitmq-server:amqp'
-
-  - - 'glance:ceph'
-    - 'ceph-mon:client'
-
-  - - 'ceph-osd:mon'
-    - 'ceph-mon:osd'
-
-  - - 'nova-cloud-controller:shared-db'
-    - 'nova-cloud-controller-mysql-router:shared-db'
-  - - 'nova-cloud-controller-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'nova-cloud-controller:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'nova-cloud-controller:amqp'
-    - 'rabbitmq-server:amqp'
-
-  - - 'nova-cloud-controller:cloud-compute'
-    - 'nova-compute:cloud-compute'
-
-  - - 'nova-cloud-controller:image-service'
-    - 'glance:image-service'
-
-  - - 'placement:shared-db'
-    - 'placement-mysql-router:shared-db'
-  - - 'placement-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'placement'
-    - 'keystone'
-
-  - - 'placement'
-    - 'nova-cloud-controller'
-
-  - - 'neutron-api:shared-db'
-    - 'neutron-api-mysql-router:shared-db'
-  - - 'neutron-api-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'neutron-api:amqp'
-    - 'rabbitmq-server:amqp'
-
-  - - 'neutron-api:neutron-api'
-    - 'nova-cloud-controller:neutron-api'
-
-  - - 'neutron-api:neutron-plugin-api'
-    - 'neutron-gateway:neutron-plugin-api'
-
-  - - 'neutron-api:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'nova-compute:neutron-plugin'
-    - 'neutron-openvswitch:neutron-plugin'
-
-  - - 'neutron-gateway:amqp'
-    - 'rabbitmq-server:amqp'
-
-  - - 'neutron-openvswitch:amqp'
-    - 'rabbitmq-server:amqp'
-
-  - - 'nova-cloud-controller:quantum-network-service'
-    - 'neutron-gateway:quantum-network-service'
diff --git a/src/tests/bundles/focal-ussuri.yaml b/src/tests/bundles/focal-ussuri.yaml
index 7f348f1..39d9fed 100644
--- a/src/tests/bundles/focal-ussuri.yaml
+++ b/src/tests/bundles/focal-ussuri.yaml
@@ -41,12 +41,15 @@ applications:
     num_units: 1
     options:
       source: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
     to:
       - '3'
 
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
+    num_units: 6
     storage:
       osd-devices: 'cinder,10G'
     options:
diff --git a/src/tests/bundles/focal-victoria.yaml b/src/tests/bundles/focal-victoria.yaml
index d9309b3..b23a8d5 100644
--- a/src/tests/bundles/focal-victoria.yaml
+++ b/src/tests/bundles/focal-victoria.yaml
@@ -41,12 +41,15 @@ applications:
     num_units: 1
     options:
       source: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
     to:
       - '3'
 
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
+    num_units: 6
     storage:
       osd-devices: 'cinder,10G'
     options:
diff --git a/src/tests/bundles/groovy-victoria.yaml b/src/tests/bundles/groovy-victoria.yaml
index 4cd78fd..bdc91e1 100644
--- a/src/tests/bundles/groovy-victoria.yaml
+++ b/src/tests/bundles/groovy-victoria.yaml
@@ -41,12 +41,15 @@ applications:
     num_units: 1
     options:
       source: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
     to:
       - '3'
 
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
+    num_units: 6
     storage:
       osd-devices: 'cinder,10G'
     options:
diff --git a/src/tests/tests.yaml b/src/tests/tests.yaml
index dd58950..9a379ea 100644
--- a/src/tests/tests.yaml
+++ b/src/tests/tests.yaml
@@ -1,12 +1,11 @@
 charm_name: ceph-fs
 gate_bundles:
-  - focal-ussuri-ec
-  - focal-victoria
-  - focal-ussuri
-  - bionic-ussuri
-  - bionic-train
-  - bionic-stein
-  - bionic-rocky
+  - bluestore-compression: focal-victoria
+  - bluestore-compression: focal-ussuri
+  - bluestore-compression: bionic-ussuri
+  - bluestore-compression: bionic-train
+  - bluestore-compression: bionic-stein
+  - bluestore-compression: bionic-rocky
   - bionic-queens
   - xenial-queens
 # Xenial-pike is missing because of
@@ -14,18 +13,28 @@ gate_bundles:
   - xenial-ocata
   - xenial-mitaka
 smoke_bundles:
-  - bionic-stein
+  - bluestore-compression: bionic-stein
 dev_bundles:
-  - groovy-victoria
+  - bluestore-compression: groovy-victoria
 configure:
   - zaza.openstack.charm_tests.glance.setup.add_lts_image
   - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
   - zaza.openstack.charm_tests.nova.setup.create_flavors
   - zaza.openstack.charm_tests.nova.setup.manage_ssh_key
   - zaza.openstack.charm_tests.keystone.setup.add_demo_user
+  - bluestore-compression:
+    - zaza.openstack.charm_tests.glance.setup.add_lts_image
+    - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
+    - zaza.openstack.charm_tests.nova.setup.create_flavors
+    - zaza.openstack.charm_tests.nova.setup.manage_ssh_key
+    - zaza.openstack.charm_tests.keystone.setup.add_demo_user
 tests:
   - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests
   - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest
+  - bluestore-compression:
+    - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests
+    - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest
+    - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
 tests_options:
   force_deploy:
     - groovy-victoria
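In tests.yaml, a mapping entry such as '- bluestore-compression: focal-victoria' deploys that bundle under the 'bluestore-compression' model alias, and the alias-keyed blocks in the configure and tests sections then run for those models in place of the unkeyed defaults. A simplified sketch of that dispatch; this illustrates the behaviour, not zaza's actual implementation:

    def steps_for(alias, spec):
        """Pick alias-specific steps, falling back to the plain entries."""
        for item in spec:
            if isinstance(item, dict) and alias in item:
                return item[alias]
        return [item for item in spec if isinstance(item, str)]

    tests = [
        'zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests',
        {'bluestore-compression': [
            'zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests',
            'zaza.openstack.charm_tests.ceph.tests.'
            'BlueStoreCompressionCharmOperation',
        ]},
    ]
    assert steps_for('bluestore-compression', tests)[-1].endswith(
        'BlueStoreCompressionCharmOperation')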
diff --git a/unit_tests/test_reactive_ceph_fs.py b/unit_tests/test_reactive_ceph_fs.py
index 1499959..8b9be2a 100644
--- a/unit_tests/test_reactive_ceph_fs.py
+++ b/unit_tests/test_reactive_ceph_fs.py
@@ -35,9 +35,6 @@ class TestRegisteredHooks(test_utils.TestRegisteredHooks):
                 'config_changed': ('ceph-mds.pools.available',),
                 'storage_ceph_connected': ('ceph-mds.connected',),
             },
-            'when_not': {
-                'storage_ceph_connected': ('ceph.create_pool.req.sent',),
-            },
             'when_none': {
                 'config_changed': ('charm.paused',
                                    'run-default-update-status',),
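With the flag guard removed in src/reactive/ceph_fs.py, the new BlueStoreCompressionCharmOperation test can toggle the options on a live model and expect them to reach the pools. Outside the test suite the same path can be exercised with zaza's model helpers; a sketch, assuming a deployed model with a 'ceph-fs' application:

    import zaza.model

    # Change the compression policy at runtime; the charm should pick the
    # new value up on the next hook run and re-issue its pool request.
    zaza.model.set_application_config(
        'ceph-fs', {'bluestore-compression-mode': 'force'})
    zaza.model.block_until_all_units_idle()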