Move backends into their own sections

Using the DEFAULT section to configure drivers is not supported since
Ocata. This change moves each backend into its own config section and
lists the backends in enabled_backends.
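For illustration, a minimal sketch of the resulting cinder.conf layout:

Before (Newton and earlier), drivers are configured in [DEFAULT]:

    [DEFAULT]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes

From Ocata onwards each backend gets its own section:

    [DEFAULT]
    enabled_backends = LVM

    [LVM]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes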

Note: Using sectional config causes the os-vol-host-attr:host
volume attribute to change, which invalidates existing volumes.
A subsequent change is needed to add an action for renaming
the host attribute of existing volumes *1

*1 https://docs.openstack.org/admin-guide/blockstorage-multi-backend.html#enable-multiple-storage-back-ends
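Per the linked guide, the rename can be done with cinder-manage,
roughly (placeholder host values):

    cinder-manage volume update_host \
        --currenthost <host> --newhost <host>@LVM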

Partial-Bug: #1665272
Change-Id: I22c3e74b0874c051bee89e4609088facf95b4664
Liam Young 2017-02-16 12:13:03 +00:00
parent 806d9a1cba
commit 8641e81498
7 changed files with 210 additions and 14 deletions


@@ -38,6 +38,14 @@ from charmhelpers.contrib.hahelpers.cluster import (
)
def enable_lvm():
"""Check whether the LVM backend should be configured
@returns boolean - If LVM should be enabled"""
block_device = config('block-device') or 'none'
return block_device.lower() != 'none'
class ImageServiceContext(OSContextGenerator):
interfaces = ['image-service']
@@ -63,7 +71,7 @@ class CephContext(OSContextGenerator):
else:
volume_driver = 'cinder.volume.driver.RBDDriver'
return {
'volume_driver': volume_driver,
'ceph_volume_driver': volume_driver,
# ensure_ceph_pool() creates pool based on service name.
'rbd_pool': service,
'rbd_user': service,
@@ -117,8 +125,16 @@ class StorageBackendContext(OSContextGenerator):
unit, rid)
if backend_name:
backends.append(backend_name)
# Ocata onwards all backends must be in their own sectional config
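# (OpenStack release codenames sort alphabetically, so a plain
# string comparison against 'ocata' works here)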
if os_release('cinder-common') >= "ocata":
if relation_ids('ceph'):
backends.append('CEPH')
if enable_lvm():
backends.append('LVM')
if len(backends) > 0:
return {'backends': ",".join(backends)}
return {
'active_backends': backends,
'backends': ",".join(backends)}
else:
return {}
@@ -185,6 +201,36 @@ class RegionContext(OSContextGenerator):
return {}
class SectionalConfigContext(OSContextGenerator):
"""Using DEFAULT config section to configure backends cannot be used
with Ocata+. In this case each backend needs its own section
@returns dict - Context dictating if sectional config needs to be used
"""
def __call__(self):
return {
'sectional_default_config': os_release('cinder-common') >= "ocata"
}
class LVMContext(OSContextGenerator):
"""Context describing the configuration of the LVM backend
@returns dict - Context describing LVM config
"""
def __call__(self):
ctxt = {}
if enable_lvm():
ctxt = {
'volumes_dir': '/var/lib/cinder/volumes',
'volume_name_template': 'volume-%s',
'volume_group': config('volume-group'),
'volume_driver': 'cinder.volume.drivers.lvm.LVMVolumeDriver',
'volume_backend_name': 'LVM'}
return ctxt
class VolumeUsageAuditContext(OSContextGenerator):
"""This context provides the configuration directive
volume_usage_audit_period and also creates a crontab entry


@@ -237,7 +237,9 @@ BASE_RESOURCE_MAP = OrderedDict([
cinder_contexts.RegionContext(),
context.InternalEndpointContext(),
cinder_contexts.VolumeUsageAuditContext(),
context.MemcacheContext()],
context.MemcacheContext(),
cinder_contexts.SectionalConfigContext(),
cinder_contexts.LVMContext()],
'services': ['cinder-api', 'cinder-volume', 'cinder-scheduler',
'haproxy']
}),


@@ -0,0 +1,72 @@
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
verbose = {{ verbose }}
debug = {{ debug }}
use_syslog = {{ use_syslog }}
auth_strategy = keystone
state_path = /var/lib/cinder
osapi_volume_workers = {{ workers }}
{% if rabbitmq_host or rabbitmq_hosts -%}
notification_driver = cinder.openstack.common.notifier.rpc_notifier
control_exchange = cinder
{% endif -%}
{% if use_internal_endpoints -%}
swift_catalog_info = object-store:swift:internalURL
keystone_catalog_info = identity:Identity Service:internalURL
glance_catalog_info = image:glance:internalURL
nova_catalog_info = compute:Compute Service:internalURL
{% endif %}
osapi_volume_listen = {{ bind_host }}
{% if osapi_volume_listen_port -%}
osapi_volume_listen_port = {{ osapi_volume_listen_port }}
{% endif -%}
{% if glance_api_servers -%}
glance_api_servers = {{ glance_api_servers }}
{% endif -%}
{% if glance_api_version -%}
glance_api_version = {{ glance_api_version }}
{% endif -%}
{% if region -%}
os_region_name = {{ region }}
{% endif -%}
{% if user_config_flags -%}
{% for key, value in user_config_flags.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}
volume_usage_audit_period = {{ volume_usage_audit_period }}
{% include "parts/backends" %}
{% include "section-keystone-authtoken-mitaka" %}
{% include "parts/section-database" %}
{% include "section-rabbitmq-oslo" %}
[oslo_concurrency]
lock_path = /var/lock/cinder
[keymgr]
# XXX: hack to work around http://pad.lv/1516085
# will be superseded by SRU to cinder package
encryption_auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3
[oslo_messaging_notifications]
# cinder.openstack.common.* is a pre-Icehouse option.
# Check change-id: I90dff1b5c2a7dd2943cfa7ff25bb63c08eb7986d
driver = messagingv2


@@ -15,4 +15,22 @@ enabled_backends = {{ backends }}
{{ key }} = {{ value }}
{% endfor %}
{% endif %}
{%- endfor %}
{% if sectional_default_config -%}
{% if volume_driver -%}
[LVM]
volumes_dir = {{ volumes_dir }}
volume_name_template = {{ volume_name_template }}
volume_group = {{ volume_group }}
volume_driver = {{ volume_driver }}
volume_backend_name = {{ volume_backend_name }}
{% endif -%}
{% if rbd_pool -%}
[CEPH]
rbd_pool = {{ rbd_pool }}
host = {{ host }}
rbd_user = {{ rbd_user }}
volume_driver = {{ ceph_volume_driver }}
{% endif %}
{% endif %}
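With a ceph relation present, the CEPH branch above renders roughly as
follows (an illustrative sketch; pool, user and host default to the
service name, e.g. 'cinder'):

    [CEPH]
    rbd_pool = cinder
    host = cinder
    rbd_user = cinder
    volume_driver = cinder.volume.drivers.rbd.RBDDriver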


@@ -567,9 +567,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
'debug': 'False',
'verbose': 'False',
'iscsi_helper': 'tgtadm',
'volume_group': 'cinder-volumes',
'auth_strategy': 'keystone',
'volumes_dir': '/var/lib/cinder/volumes'
},
'keystone_authtoken': {
'admin_user': rel_ks_ci['service_username'],
@@ -579,7 +577,17 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
'signing_dir': '/var/cache/cinder'
}
}
if self._get_openstack_release() < self.xenial_ocata:
expected['DEFAULT']['volume_group'] = 'cinder-volumes'
expected['DEFAULT']['volumes_dir'] = '/var/lib/cinder/volumes'
else:
expected['DEFAULT']['enabled_backends'] = 'LVM'
expected['LVM'] = {
'volume_group': 'cinder-volumes',
'volumes_dir': '/var/lib/cinder/volumes',
'volume_name_template': 'volume-%s',
'volume_driver': 'cinder.volume.drivers.lvm.LVMVolumeDriver',
'volume_backend_name': 'LVM'}
expected_rmq = {
'rabbit_userid': 'cinder',
'rabbit_virtual_host': 'openstack',

tests/gate-basic-xenial-ocata Normal file → Executable file


@@ -39,6 +39,15 @@ class TestCinderContext(CharmTestCase):
def setUp(self):
super(TestCinderContext, self).setUp(contexts, TO_PATCH)
def test_enable_lvm_disabled(self):
for v in [None, 'None', 'none']:
self.config.return_value = v
self.assertFalse(contexts.enable_lvm())
def test_enable_lvm_enabled(self):
self.config.return_value = '/dev/sdd'
self.assertTrue(contexts.enable_lvm())
def test_glance_not_related(self):
self.relation_ids.return_value = []
self.assertEquals(contexts.ImageServiceContext()(), {})
@@ -66,7 +75,7 @@ class TestCinderContext(CharmTestCase):
self.service_name.return_value = service
self.assertEquals(
contexts.CephContext()(),
{'volume_driver': 'cinder.volume.driver.RBDDriver',
{'ceph_volume_driver': 'cinder.volume.driver.RBDDriver',
'rbd_pool': service,
'rbd_user': service,
'host': service})
@@ -78,7 +87,7 @@ class TestCinderContext(CharmTestCase):
self.service_name.return_value = service
self.assertEquals(
contexts.CephContext()(),
{'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
{'ceph_volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
'rbd_pool': service,
'rbd_user': service,
'host': service})
@@ -89,23 +98,35 @@ class TestCinderContext(CharmTestCase):
self.assertEquals(contexts.ApacheSSLContext()(), {})
def test_storage_backend_no_backends(self):
self.config.return_value = None
self.relation_ids.return_value = []
self.assertEquals(contexts.StorageBackendContext()(), {})
def test_storage_backend_single_backend(self):
self.relation_ids.return_value = ['cinder-ceph:0']
rel_dict = {
'storage-backend': ['cinder-ceph:0'],
'ceph': []}
self.config.return_value = None
self.relation_ids.side_effect = lambda x: rel_dict[x]
self.related_units.return_value = ['cinder-ceph/0']
self.relation_get.return_value = 'cinder-ceph'
self.assertEquals(contexts.StorageBackendContext()(),
{'backends': 'cinder-ceph'})
{'backends': 'cinder-ceph',
'active_backends': ['cinder-ceph']})
def test_storage_backend_multi_backend(self):
self.relation_ids.return_value = ['cinder-ceph:0', 'cinder-vmware:0']
self.config.return_value = None
rel_dict = {
'storage-backend': ['cinder-ceph:0', 'cinder-vmware:0'],
'ceph': []}
self.relation_ids.side_effect = lambda x: rel_dict[x]
self.related_units.side_effect = [['cinder-ceph/0'],
['cinder-vmware/0']]
self.relation_get.side_effect = ['cinder-ceph', 'cinder-vmware']
self.assertEquals(contexts.StorageBackendContext()(),
{'backends': 'cinder-ceph,cinder-vmware'})
self.assertEquals(
contexts.StorageBackendContext()(),
{'backends': 'cinder-ceph,cinder-vmware',
'active_backends': ['cinder-ceph', 'cinder-vmware']})
mod_ch_context = 'charmhelpers.contrib.openstack.context'
@@ -359,6 +380,35 @@ class TestCinderContext(CharmTestCase):
ctxt = contexts.RegionContext()()
self.assertEqual('two', ctxt['region'])
def test_sectional_config_context_ocata(self):
self.os_release.return_value = 'ocata'
ctxt = contexts.SectionalConfigContext()()
self.assertTrue(ctxt['sectional_default_config'])
def test_sectional_config_context_newton(self):
self.os_release.return_value = 'newton'
ctxt = contexts.SectionalConfigContext()()
self.assertFalse(ctxt['sectional_default_config'])
@patch.object(contexts, 'enable_lvm')
def test_lvm_context_disabled(self, enable_lvm):
enable_lvm.return_value = False
ctxt = contexts.LVMContext()()
self.assertEqual(ctxt, {})
@patch.object(contexts, 'enable_lvm')
def test_lvm_context_enabled(self, enable_lvm):
enable_lvm.return_value = True
self.config.return_value = 'cinder-vol1'
ctxt = contexts.LVMContext()()
expect = {
'volume_backend_name': 'LVM',
'volume_driver': 'cinder.volume.drivers.lvm.LVMVolumeDriver',
'volume_group': 'cinder-vol1',
'volume_name_template': 'volume-%s',
'volumes_dir': '/var/lib/cinder/volumes'}
self.assertEqual(ctxt, expect)
@patch('__builtin__.open')
def test_volume_usage_audit_context(self, _open):
self.config.return_value = 'month'