From d9873fd22cf1d5081344bde58b324cfb84a5c693 Mon Sep 17 00:00:00 2001
From: Nobuto Murata
Date: Tue, 30 Mar 2021 15:08:59 +0900
Subject: [PATCH] Add support for a common volume_backend_name

Previously, we didn't have control over volume_backend_name other than
using the default application name in the Juju model. A common backend
name shared by multiple backends with the same characteristics is
useful because those backends can be treated as a single virtual
backend associated with a single volume type.

Change-Id: I4b57f7979837d21a1b116007f3da707ee154792b
Closes-Bug: #1884511
---
 config.yaml                        | 10 ++++++++++
 hooks/cinder_contexts.py           |  8 ++++++--
 unit_tests/test_cinder_contexts.py | 26 ++++++++++++++++++++++++++
 3 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/config.yaml b/config.yaml
index aa0d6da..ce1469e 100644
--- a/config.yaml
+++ b/config.yaml
@@ -24,6 +24,16 @@ options:
       created for the pool. The number of placement groups for a pool
       can only be increased, never decreased - so it is important to
       identify the percent of data that will likely reside in the pool.
+  volume-backend-name:
+    default:
+    type: string
+    description: |
+      Volume backend name for the backend. The default value is the
+      application name in the Juju model, e.g. "cinder-ceph-mybackend"
+      if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
+      A common backend name can be set to multiple backends with the
+      same characteristics so that they can be treated as a single
+      virtual backend associated with a single volume type.
   backend-availability-zone:
     default:
     type: string
diff --git a/hooks/cinder_contexts.py b/hooks/cinder_contexts.py
index 6919119..ead0e9c 100644
--- a/hooks/cinder_contexts.py
+++ b/hooks/cinder_contexts.py
@@ -73,6 +73,11 @@ class CephSubordinateContext(OSContextGenerator):
         else:
             volume_driver = 'cinder.volume.driver.RBDDriver'
 
+        if config('volume-backend-name'):
+            volume_backend_name = config('volume-backend-name')
+        else:
+            volume_backend_name = service
+
         if config('pool-type') == 'erasure-coded':
             pool_name = (
                 config('ec-rbd-metadata-pool') or
@@ -81,8 +86,7 @@
             )
         else:
             pool_name = config('rbd-pool-name') or service
-
-        section = {service: [('volume_backend_name', service),
+        section = {service: [('volume_backend_name', volume_backend_name),
                              ('volume_driver', volume_driver),
                              ('rbd_pool', pool_name),
                              ('rbd_user', service),
diff --git a/unit_tests/test_cinder_contexts.py b/unit_tests/test_cinder_contexts.py
index e35bba4..fd95b06 100644
--- a/unit_tests/test_cinder_contexts.py
+++ b/unit_tests/test_cinder_contexts.py
@@ -118,6 +118,32 @@ class TestCinderContext(CharmTestCase):
                 }
             }})
 
+    def test_ceph_explicit_volume_backend_name(self):
+        self.test_config.set('volume-backend-name', 'special-backend-name')
+        self.is_relation_made.return_value = True
+        self.get_os_codename_package.return_value = "mitaka"
+        service = 'mycinder'
+        self.service_name.return_value = service
+        self.assertEqual(
+            contexts.CephSubordinateContext()(),
+            {"cinder": {
+                "/etc/cinder/cinder.conf": {
+                    "sections": {
+                        service: [
+                            ('volume_backend_name', 'special-backend-name'),
+                            ('volume_driver',
+                             'cinder.volume.drivers.rbd.RBDDriver'),
+                            ('rbd_pool', service),
+                            ('rbd_user', service),
+                            ('rbd_secret_uuid', 'libvirt-uuid'),
+                            ('rbd_ceph_conf',
+                             '/var/lib/charm/mycinder/ceph.conf'),
+                            ('report_discard_supported', True)
+                        ]
+                    }
+                }
+            }})
+
     def test_ceph_related_erasure_coded(self):
         self.is_relation_made.return_value = True
         self.get_os_codename_package.return_value = "queens"
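
Usage sketch (illustrative only; the application names "cinder-ceph-az1" and
"cinder-ceph-az2", the backend name "common-rbd", and the volume type name are
placeholders, and the relations to cinder and ceph-mon are omitted for
brevity): two backends deployed with the same volume-backend-name can be
treated as a single virtual backend behind one volume type.

    juju deploy cinder-ceph cinder-ceph-az1 --config volume-backend-name=common-rbd
    juju deploy cinder-ceph cinder-ceph-az2 --config volume-backend-name=common-rbd

With that configuration, the subordinate context above renders sections
roughly like the following into cinder.conf (only a subset of keys shown):

    [cinder-ceph-az1]
    volume_backend_name = common-rbd
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_pool = cinder-ceph-az1

    [cinder-ceph-az2]
    volume_backend_name = common-rbd
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_pool = cinder-ceph-az2

A volume type can then target the shared backend name:

    openstack volume type create common-rbd
    openstack volume type set --property volume_backend_name=common-rbd common-rbd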