From b04b5f3b5f00f67eef2b76fbb5b64aba2b80719d Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Wed, 5 Sep 2018 14:31:15 +0100
Subject: [PATCH] Add support for rbd_exclusive_cinder_pool

As of the Queens release, cinder supports this config option which, if
enabled, stops cinder from querying all volumes in a pool every time it
does a delete, in order to get accurate pool usage stats. The problem
is that this querying causes many non-fatal race conditions and slows
down deletes to the point where the rpc thread pool fills up, blocking
further requests. Our charms do not configure a shared pool by default
and we are not aware of anyone doing this in the field, so this patch
enables the option by default.

Change-Id: I5377e2886a6e206d30bd7dc38a7e43a085aa524c
Closes-Bug: 1789828
---
 hooks/cinder_contexts.py           | 28 ++++++++++++----------------
 tests/basic_deployment.py          |  4 ++++
 unit_tests/test_cinder_contexts.py | 25 +++++++++++++++++++++++++
 3 files changed, 41 insertions(+), 16 deletions(-)

diff --git a/hooks/cinder_contexts.py b/hooks/cinder_contexts.py
index cd8c999..31130b8 100644
--- a/hooks/cinder_contexts.py
+++ b/hooks/cinder_contexts.py
@@ -50,19 +50,15 @@ class CephSubordinateContext(OSContextGenerator):
             volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
         else:
             volume_driver = 'cinder.volume.driver.RBDDriver'
-        return {
-            "cinder": {
-                "/etc/cinder/cinder.conf": {
-                    "sections": {
-                        service: [
-                            ('volume_backend_name', service),
-                            ('volume_driver', volume_driver),
-                            ('rbd_pool', service),
-                            ('rbd_user', service),
-                            ('rbd_secret_uuid', leader_get('secret-uuid')),
-                            ('rbd_ceph_conf', ceph_config_file()),
-                        ]
-                    }
-                }
-            }
-        }
+
+        section = {service: [('volume_backend_name', service),
+                             ('volume_driver', volume_driver),
+                             ('rbd_pool', service),
+                             ('rbd_user', service),
+                             ('rbd_secret_uuid', leader_get('secret-uuid')),
+                             ('rbd_ceph_conf', ceph_config_file())]}
+
+        if CompareOpenStackReleases(os_codename) >= "queens":
+            section[service].append(('rbd_exclusive_cinder_pool', True))
+
+        return {'cinder': {'/etc/cinder/cinder.conf': {'sections': section}}}
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 93138a4..f733be2 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -358,6 +358,10 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
             }
         }
 
+        if self._get_openstack_release() >= self.xenial_queens:
+            section = sub_dict['cinder']["/etc/cinder/cinder.conf"]["sections"]
+            section["cinder-ceph"].append(('rbd_exclusive_cinder_pool', True))
+
         expected = {
             'subordinate_configuration': json.dumps(sub_dict),
             'private-address': u.valid_ip,
diff --git a/unit_tests/test_cinder_contexts.py b/unit_tests/test_cinder_contexts.py
index b6bae55..bf7c97b 100644
--- a/unit_tests/test_cinder_contexts.py
+++ b/unit_tests/test_cinder_contexts.py
@@ -84,3 +84,28 @@ class TestCinderContext(CharmTestCase):
                 }
             }
         }})
+
+    def test_ceph_related_queens(self):
+        self.is_relation_made.return_value = True
+        self.get_os_codename_package.return_value = "queens"
+        service = 'mycinder'
+        self.service_name.return_value = service
+        self.assertEqual(
+            contexts.CephSubordinateContext()(),
+            {"cinder": {
+                "/etc/cinder/cinder.conf": {
+                    "sections": {
+                        service: [
+                            ('volume_backend_name', service),
+                            ('volume_driver',
+                             'cinder.volume.drivers.rbd.RBDDriver'),
+                            ('rbd_pool', service),
+                            ('rbd_user', service),
+                            ('rbd_secret_uuid', 'libvirt-uuid'),
+                            ('rbd_ceph_conf',
+                             '/var/lib/charm/mycinder/ceph.conf'),
+                            ('rbd_exclusive_cinder_pool', True)
+                        ]
+                    }
+                }
+            }})
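
For illustration, below is a minimal, self-contained Python sketch of what
the patched context produces. It is not the charm's actual module: the
RELEASES list and the render_ini helper are stand-ins for charmhelpers'
CompareOpenStackReleases and for the charm's template rendering, which this
patch does not touch.

# Illustrative sketch only; approximates the patched CephSubordinateContext
# logic outside the charm. RELEASES and render_ini are assumptions standing
# in for charmhelpers' CompareOpenStackReleases and the real template layer.

RELEASES = ['mitaka', 'newton', 'ocata', 'pike', 'queens', 'rocky']


def build_section(service, os_codename, secret_uuid, ceph_conf):
    """Build the per-backend cinder.conf section as (key, value) pairs."""
    section = {service: [
        ('volume_backend_name', service),
        ('volume_driver', 'cinder.volume.drivers.rbd.RBDDriver'),
        ('rbd_pool', service),
        ('rbd_user', service),
        ('rbd_secret_uuid', secret_uuid),
        ('rbd_ceph_conf', ceph_conf),
    ]}
    # Queens and later understand rbd_exclusive_cinder_pool; enabling it
    # skips the per-delete volume enumeration that exhausts the rpc pool.
    if RELEASES.index(os_codename) >= RELEASES.index('queens'):
        section[service].append(('rbd_exclusive_cinder_pool', True))
    return section


def render_ini(section):
    """Render the sections mapping as cinder.conf-style INI text."""
    lines = []
    for name, options in section.items():
        lines.append('[%s]' % name)
        lines.extend('%s = %s' % (k, v) for k, v in options)
    return '\n'.join(lines)


print(render_ini(build_section('cinder-ceph', 'queens', 'libvirt-uuid',
                               '/var/lib/charm/cinder-ceph/ceph.conf')))

Run against the 'queens' codename, this prints a [cinder-ceph] section ending
in rbd_exclusive_cinder_pool = True, the same shape the new
test_ceph_related_queens unit test asserts.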