Make usernames predictable for multi-endpoint services

When generating a username associated with a charm that registers
multiple endpoints, the username was derived from the keys of an
unordered dict, making the username liable to change. This patch sorts
the keys and makes the username stable.

Change-Id: I0f857d7c2d5c4abf4843bc3fe1a9848164048fe2
Closes-Bug: #1739409
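
For illustration (not part of the change), a minimal Python sketch of
the failure mode and of the fix applied below; the settings mapping and
service names are hypothetical stand-ins for the relation data that
add_service_to_keystone processes:

    # Hypothetical relation settings; dict iteration order is not
    # guaranteed and may vary across Python versions and processes.
    settings = {'nova_service': 'nova',
                'ec2_service': 'ec2',
                's3_service': 's3'}
    services = [v for k, v in settings.items() if k.endswith('_service')]
    unstable = '_'.join(services)        # e.g. 'nova_ec2_s3' or 's3_nova_ec2'
    stable = '_'.join(sorted(services))  # always 'ec2_nova_s3'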
Liam Young, 2017-12-20 13:45:24 +00:00
commit ee6db34c16 (parent e1ac46f342)
3 changed files with 46 additions and 3 deletions


@@ -2126,7 +2126,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
             https_cns.append(urlparse.urlparse(ep['public_url']).hostname)
             https_cns.append(urlparse.urlparse(ep['admin_url']).hostname)
-        service_username = '_'.join(services)
+        service_username = '_'.join(sorted(services))
         # If an admin username prefix is provided, ensure all services use it.
         prefix = config('service-admin-prefix')


@@ -426,7 +426,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         if self._get_openstack_release() < self.xenial_pike:
             cinder_user = 'cinder_cinderv2'
         else:
-            cinder_user = 'cinderv3_cinderv2'
+            cinder_user = 'cinderv2_cinderv3'
         base = [
             {'name': 'demoUser',
              'enabled': True,
@@ -713,7 +713,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'service_host': u.valid_ip
         }
         if self._get_openstack_release() >= self.xenial_pike:
-            expected['service_username'] = 'cinderv3_cinderv2'
+            expected['service_username'] = 'cinderv2_cinderv3'
         for unit in self.keystone_sentries:
             ret = u.validate_relation_data(unit, relation, expected)
             if ret:


@@ -430,6 +430,49 @@ class TestKeystoneUtils(CharmTestCase):
             adminurl='10.0.0.2',
             internalurl='192.168.1.2')
 
+    @patch.object(utils, 'get_requested_roles')
+    @patch.object(utils, 'create_service_credentials')
+    @patch.object(utils, 'leader_get')
+    @patch('charmhelpers.contrib.openstack.ip.config')
+    @patch.object(utils, 'ensure_valid_service')
+    @patch.object(utils, 'add_endpoint')
+    @patch.object(utils, 'get_manager')
+    def test_add_service_to_keystone_multi_endpoints_bug_1739409(
+            self, KeystoneManager, add_endpoint, ensure_valid_service,
+            ip_config, leader_get, create_service_credentials,
+            get_requested_roles):
+        relation_id = 'identity-service:8'
+        remote_unit = 'nova-cloud-controller/0'
+        get_requested_roles.return_value = 'role1'
+        self.relation_get.return_value = {
+            'ec2_admin_url': 'http://10.5.0.16:8773/services/Cloud',
+            'ec2_internal_url': 'http://10.5.0.16:8773/services/Cloud',
+            'ec2_public_url': 'http://10.5.0.16:8773/services/Cloud',
+            'ec2_region': 'RegionOne',
+            'ec2_service': 'ec2',
+            'nova_admin_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
+            'nova_internal_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
+            'nova_public_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
+            'nova_region': 'RegionOne',
+            'nova_service': 'nova',
+            'private-address': '10.5.0.16',
+            's3_admin_url': 'http://10.5.0.16:3333',
+            's3_internal_url': 'http://10.5.0.16:3333',
+            's3_public_url': 'http://10.5.0.16:3333',
+            's3_region': 'RegionOne',
+            's3_service': 's3'}
+        self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
+        KeystoneManager.resolve_tenant_id.return_value = 'tenant_id'
+        leader_get.return_value = None
+        utils.add_service_to_keystone(
+            relation_id=relation_id,
+            remote_unit=remote_unit)
+        create_service_credentials.assert_called_once_with(
+            'ec2_nova_s3',
+            new_roles='role1')
+
     @patch.object(utils, 'set_service_password')
     @patch.object(utils, 'get_service_password')
     @patch.object(utils, 'user_exists')