Fix support for cinder ceph rbd on Ocata

As of Ocata, the ceph key used to access a specific Cinder
Ceph backend must match the name of the key used by cinder,
with an appropriate secret configured for libvirt use with
the cephx key used by the cinder-ceph charm.

Add support for the new ceph-access relation to allow
nova-compute units to communicate with multiple ceph
backends using different cephx keys and user names.

The lead cinder-ceph unit will generate a UUID for use in
the cinder configuration file, and for use by the remote
nova-compute units when configuring libvirt secrets,
ensuring that both ends of the integration match up.

The side effect of this change is that nova-compute will
have a key for use with its own ephemeral backend ceph
access, and a key for each cinder ceph backend configured
in the deployment.

Change-Id: I974ecb39132feddfffabd6dcef401e91b5548d05
Closes-Bug: 1671422
(cherry picked from commit 62613456e7)
This commit is contained in:
James Page 2017-03-09 12:59:06 +00:00
parent 92f3f36109
commit fcd1afbe8b
10 changed files with 145 additions and 7 deletions

View File

@ -5,5 +5,6 @@
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/cinder-ceph/hooks</path>
<path>/cinder-ceph/unit_tests</path>
<path>/cinder-ceph/tests</path>
</pydev_pathproperty>
</pydev_project>

View File

@ -0,0 +1 @@
cinder_hooks.py

View File

@ -0,0 +1 @@
cinder_hooks.py

View File

@ -15,6 +15,7 @@
from charmhelpers.core.hookenv import (
service_name,
is_relation_made,
leader_get,
)
from charmhelpers.contrib.openstack.context import (
@ -48,6 +49,7 @@ class CephSubordinateContext(OSContextGenerator):
('volume_driver', volume_driver),
('rbd_pool', service),
('rbd_user', service),
('rbd_secret_uuid', leader_get('secret-uuid')),
]
}
}

View File

@ -17,6 +17,7 @@
import os
import sys
import json
import uuid
from cinder_utils import (
register_configs,
@ -27,6 +28,7 @@ from cinder_utils import (
VERSION_PACKAGE,
)
from cinder_contexts import CephSubordinateContext
from charmhelpers.contrib.openstack.context import CephContext
from charmhelpers.core.hookenv import (
Hooks,
@ -37,6 +39,9 @@ from charmhelpers.core.hookenv import (
relation_ids,
status_set,
log,
leader_get,
leader_set,
is_leader,
)
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.host import (
@ -131,6 +136,10 @@ def ceph_broken():
@hooks.hook('config-changed')
@restart_on_change(restart_map())
def write_and_restart():
    """Handle config-changed.

    Seeds the libvirt secret UUID in leader storage (leader only, once),
    then re-runs ceph relation handling so any cephx permission
    restriction changes are applied.
    """
    # Only the leader may seed the shared secret, and only if it has
    # not been seeded already.
    if is_leader() and not leader_get('secret-uuid'):
        leader_set({'secret-uuid': str(uuid.uuid4())})
    # Re-exercise the ceph relation for configuration-driven changes.
    ceph_changed()
@ -168,6 +177,40 @@ def upgrade_charm():
storage_backend(rid)
@hooks.hook('leader-settings-changed')
def leader_settings_changed():
    """Re-execute relation handlers that consume leader data.

    The lead unit seeds the libvirt secret UUID into leader storage;
    when leader settings change, re-fire the relations that publish
    data derived from it, in the same order as before.
    """
    for handler, relation_name in ((ceph_access_joined, 'ceph-access'),
                                   (storage_backend, 'storage-backend')):
        for rid in relation_ids(relation_name):
            handler(rid)
@hooks.hook('ceph-access-relation-joined')
def ceph_access_joined(relation_id=None):
    """Publish the cephx key and libvirt secret UUID on ceph-access.

    Defers (returns without setting relation data) until the ceph
    relation is complete and the leader has seeded the shared secret
    UUID in leader storage.

    :param relation_id: specific relation id to set data on, or None
                        for the current relation context.
    """
    if 'ceph' not in CONFIGS.complete_contexts():
        log('Deferring key provision until ceph relation complete')
        return
    if not leader_get('secret-uuid'):
        if not is_leader():
            log('Deferring key provision until leader seeds libvirt uuid')
            return
        # Leader seeds the UUID on first use.
        leader_set({'secret-uuid': str(uuid.uuid4())})
    # NOTE: the cephx key is retrieved from the ceph relation via the
    # standard charmhelpers context.
    ceph_keys = CephContext()()
    relation_set(
        relation_id=relation_id,
        relation_settings={
            'key': ceph_keys.get('key'),
            'secret-uuid': leader_get('secret-uuid'),
        }
    )
if __name__ == '__main__':
try:
hooks.execute(sys.argv)

View File

@ -0,0 +1 @@
cinder_hooks.py

View File

@ -19,6 +19,8 @@ provides:
storage-backend:
interface: cinder-backend
scope: container
ceph-access:
interface: cinder-ceph-key
requires:
juju-info:
interface: juju-info

View File

@ -391,14 +391,27 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
'cinder:storage-backend relation data...')
unit = self.cinder_ceph_sentry
relation = ['storage-backend', 'cinder:storage-backend']
backend_uuid, _ = unit.run('leader-get secret-uuid')
sub = ('{"cinder": {"/etc/cinder/cinder.conf": {"sections": '
'{"cinder-ceph": [["volume_backend_name", "cinder-ceph"], '
'["volume_driver", "cinder.volume.drivers.rbd.RBDDriver"], '
'["rbd_pool", "cinder-ceph"], ["rbd_user", "cinder-ceph"]]}}}}')
sub_dict = {
"cinder": {
"/etc/cinder/cinder.conf": {
"sections": {
"cinder-ceph": [
["volume_backend_name", "cinder-ceph"],
["volume_driver",
"cinder.volume.drivers.rbd.RBDDriver"],
["rbd_pool", "cinder-ceph"],
["rbd_user", "cinder-ceph"],
["rbd_secret_uuid", backend_uuid],
]
}
}
}
}
expected = {
'subordinate_configuration': sub,
'subordinate_configuration': json.dumps(sub_dict),
'private-address': u.valid_ip,
'backend_name': 'cinder-ceph'
}

View File

@ -21,7 +21,8 @@ from test_utils import (
TO_PATCH = [
'is_relation_made',
'service_name',
'get_os_codename_package'
'get_os_codename_package',
'leader_get',
]
@ -29,6 +30,7 @@ class TestCinderContext(CharmTestCase):
def setUp(self):
super(TestCinderContext, self).setUp(contexts, TO_PATCH)
self.leader_get.return_value = 'libvirt-uuid'
def test_ceph_not_related(self):
self.is_relation_made.return_value = False
@ -50,6 +52,7 @@ class TestCinderContext(CharmTestCase):
'cinder.volume.driver.RBDDriver'),
('rbd_pool', service),
('rbd_user', service),
('rbd_secret_uuid', 'libvirt-uuid'),
]
}
}
@ -71,6 +74,7 @@ class TestCinderContext(CharmTestCase):
'cinder.volume.drivers.rbd.RBDDriver'),
('rbd_pool', service),
('rbd_user', service),
('rbd_secret_uuid', 'libvirt-uuid'),
]
}
}

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock, patch, call
from mock import MagicMock, patch, call, ANY
import json
import cinder_utils as utils
@ -42,6 +42,9 @@ TO_PATCH = [
'service_name',
'service_restart',
'log',
'leader_get',
'leader_set',
'is_leader',
# charmhelpers.core.host
'apt_install',
'apt_update',
@ -195,3 +198,70 @@ class TestCinderHooks(CharmTestCase):
subordinate_configuration=json.dumps({'test': 1}),
stateless=True,
)
@patch.object(hooks, 'ceph_access_joined')
@patch.object(hooks, 'storage_backend')
def test_leader_settings_changed(self,
                                 mock_storage_backend,
                                 mock_ceph_access_joined):
    """leader-settings-changed re-fires dependent relation handlers."""
    # relation_ids is consulted for ceph-access first, then for
    # storage-backend; supply one relation id per lookup.
    self.relation_ids.side_effect = [['ceph-access:1'],
                                     ['storage-backend:23']]
    hooks.leader_settings_changed()
    mock_ceph_access_joined.assert_called_with('ceph-access:1')
    mock_storage_backend.assert_called_with('storage-backend:23')
@patch.object(hooks, 'CONFIGS')
def test_ceph_access_joined_no_ceph(self,
                                    mock_configs):
    """No relation data is set while the ceph context is incomplete."""
    mock_configs.complete_contexts.return_value = []
    hooks.ceph_access_joined()
    self.relation_set.assert_not_called()
@patch.object(hooks, 'CONFIGS')
def test_ceph_access_joined_follower_unseeded(self,
                                              mock_configs):
    """A follower defers until the leader seeds the secret UUID."""
    mock_configs.complete_contexts.return_value = ['ceph']
    self.is_leader.return_value = False
    # Leader storage has no secret-uuid yet.
    self.leader_get.return_value = None
    hooks.ceph_access_joined()
    self.relation_set.assert_not_called()
@patch.object(hooks, 'CephContext')
@patch.object(hooks, 'CONFIGS')
def test_ceph_access_joined_leader(self,
                                   mock_configs,
                                   mock_ceph_context):
    """The leader seeds a fresh secret UUID and publishes key data."""
    mock_configs.complete_contexts.return_value = ['ceph']
    self.is_leader.return_value = True
    # First leader_get finds no UUID; the second call (after
    # leader_set) returns the newly seeded value.
    self.leader_get.side_effect = [None, 'newuuid']
    ctxt = MagicMock()
    ctxt.return_value = {'key': 'mykey'}
    mock_ceph_context.return_value = ctxt
    hooks.ceph_access_joined()
    self.leader_get.assert_called_with('secret-uuid')
    self.leader_set.assert_called_with({'secret-uuid': ANY})
    self.relation_set.assert_called_with(
        relation_id=None,
        relation_settings={'key': 'mykey',
                           'secret-uuid': 'newuuid'}
    )
@patch.object(hooks, 'CephContext')
@patch.object(hooks, 'CONFIGS')
def test_ceph_access_joined_follower_seeded(self,
                                            mock_configs,
                                            mock_ceph_context):
    """A follower publishes key data once the UUID has been seeded."""
    mock_configs.complete_contexts.return_value = ['ceph']
    self.is_leader.return_value = False
    # The leader has already seeded the secret UUID.
    self.leader_get.return_value = 'newuuid'
    ctxt = MagicMock()
    ctxt.return_value = {'key': 'mykey'}
    mock_ceph_context.return_value = ctxt
    hooks.ceph_access_joined()
    self.leader_get.assert_called_with('secret-uuid')
    # Followers never write leader storage.
    self.leader_set.assert_not_called()
    self.relation_set.assert_called_with(
        relation_id=None,
        relation_settings={'key': 'mykey',
                           'secret-uuid': 'newuuid'}
    )