diff --git a/actions/actions.py b/actions/actions.py index 37cdb251..083d262a 100755 --- a/actions/actions.py +++ b/actions/actions.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -17,9 +17,21 @@ import sys import os +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from charmhelpers.core.hookenv import action_fail -from hooks.keystone_utils import ( +from keystone_utils import ( pause_unit_helper, resume_unit_helper, register_configs, @@ -52,7 +64,7 @@ def main(args): try: action = ACTIONS[action_name] except KeyError: - return "Action %s undefined" % action_name + return "Action {} undefined".format(action_name) else: try: action(args) diff --git a/actions/charmhelpers b/actions/charmhelpers deleted file mode 120000 index 702de734..00000000 --- a/actions/charmhelpers +++ /dev/null @@ -1 +0,0 @@ -../charmhelpers \ No newline at end of file diff --git a/actions/hooks b/actions/hooks deleted file mode 120000 index f631275e..00000000 --- a/actions/hooks +++ /dev/null @@ -1 +0,0 @@ -../hooks \ No newline at end of file diff --git a/actions/openstack_upgrade.py b/actions/openstack_upgrade.py index fde3284c..6c04ac1f 100755 --- a/actions/openstack_upgrade.py +++ b/actions/openstack_upgrade.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -17,7 +17,17 @@ import os import sys -sys.path.append('hooks/') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) from charmhelpers.contrib.openstack.utils import ( do_action_openstack_upgrade, diff --git a/hooks/charmhelpers b/hooks/charmhelpers deleted file mode 120000 index 702de734..00000000 --- a/hooks/charmhelpers +++ /dev/null @@ -1 +0,0 @@ -../charmhelpers \ No newline at end of file diff --git a/hooks/install b/hooks/install index 29ff6894..50b8cad9 100755 --- a/hooks/install +++ b/hooks/install @@ -11,7 +11,7 @@ check_and_install() { fi } -PYTHON="python" +PYTHON="python3" for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 429169c9..25f7cfbf 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -16,8 +16,20 @@ import hashlib import json +import os import sys +_path = os.path.dirname(os.path.realpath(__file__)) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + + +_add_path(_root) + from subprocess import check_call from charmhelpers.core import unitdata @@ -408,9 +420,9 @@ def identity_changed(relation_id=None, remote_unit=None): # We base the decision to notify on whether these parameters have # changed (if csum is unchanged from previous notify, relation will # not fire). 
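+            # NOTE: under Python 3 hashlib's update() only accepts bytes,
+            # hence the explicit .encode('utf-8') on the endpoint values
+            # below.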
- csum.update(settings.get('public_url', None)) - csum.update(settings.get('admin_url', None)) - csum.update(settings.get('internal_url', None)) + csum.update(settings.get('public_url', None).encode('utf-8')) + csum.update(settings.get('admin_url', None).encode('utf-8')) + csum.update(settings.get('internal_url', None).encode('utf-8')) notifications['%s-endpoint-changed' % (service)] = csum.hexdigest() else: # Each unit needs to set the db information otherwise if the unit @@ -480,7 +492,7 @@ def cluster_changed(): # NOTE(jamespage) re-echo passwords for peer storage echo_whitelist = ['_passwd', 'identity-service:'] - log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG) + log("Peer echo whitelist: {}".format(echo_whitelist), level=DEBUG) peer_echo(includes=echo_whitelist, force=True) update_all_identity_relation_units() @@ -560,8 +572,9 @@ def ha_joined(relation_id=None): if vip not in resource_params[vip_key]: vip_key = '{}_{}'.format(vip_key, vip_params) else: - log("Resource '%s' (vip='%s') already exists in " - "vip group - skipping" % (vip_key, vip), WARNING) + log("Resource '{0}' (vip='{1}') already exists in " + "vip group - skipping" + .format(vip_key, vip), WARNING) continue vip_group.append(vip_key) diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 4b843468..70f443bb 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -18,10 +18,10 @@ import json import os import shutil import subprocess +import tempfile import time -import urlparse +import urllib.parse import uuid -import sys from itertools import chain from collections import OrderedDict @@ -71,6 +71,7 @@ from charmhelpers.core.decorators import ( ) from charmhelpers.core.hookenv import ( + atexit, config, is_leader, leader_get, @@ -112,6 +113,7 @@ from charmhelpers.contrib.peerstorage import ( import keystone_context +import uds_comms as uds TEMPLATES = 'templates/' @@ -124,7 +126,7 @@ BASE_PACKAGES = [ 'python-keystoneclient', 'python-mysqldb', 'python-psycopg2', - 'python-six', + 'python3-six', 'pwgen', 'uuid', ] @@ -132,7 +134,7 @@ BASE_PACKAGES = [ BASE_PACKAGES_SNAP = [ 'haproxy', 'openssl', - 'python-six', + 'python3-six', 'pwgen', 'uuid', ] @@ -431,7 +433,7 @@ def filter_null(settings, null='__null__'): so that the value is actually unset. 
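+
+    For example (illustrative): filter_null({'a': '__null__', 'b': 'x'})
+    returns {'a': None, 'b': 'x'}.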
""" filtered = {} - for k, v in settings.iteritems(): + for k, v in settings.items(): if v == null: filtered[k] = None else: @@ -558,14 +560,14 @@ def register_configs(): release = os_release('keystone') configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) - for cfg, rscs in resource_map().iteritems(): + for cfg, rscs in resource_map().items(): configs.register(cfg, rscs['contexts']) return configs def restart_map(): return OrderedDict([(cfg, v['services']) - for cfg, v in resource_map().iteritems() + for cfg, v in resource_map().items() if v['services']]) @@ -577,7 +579,7 @@ def services(): def determine_ports(): """Assemble a list of API ports for services we are managing""" ports = [config('admin-port'), config('service-port')] - return list(set(ports)) + return sorted(list(set(ports))) def api_port(service): @@ -779,29 +781,31 @@ def delete_service_entry(service_name, service_type): manager = get_manager() service_id = manager.resolve_service_id(service_name, service_type) if service_id: - manager.api.services.delete(service_id) - log("Deleted service entry '%s'" % service_name, level=DEBUG) + manager.delete_service_by_id(service_id) + log("Deleted service entry '{}'".format(service_name), level=DEBUG) def create_service_entry(service_name, service_type, service_desc, owner=None): """ Add a new service entry to keystone if one does not already exist """ manager = get_manager() - for service in [s._info for s in manager.api.services.list()]: + for service in manager.list_services(): if service['name'] == service_name: - log("Service entry for '%s' already exists." % service_name, + log("Service entry for '{}' already exists.".format(service_name), level=DEBUG) return - manager.api.services.create(service_name, - service_type, - description=service_desc) - log("Created new service entry '%s'" % service_name, level=DEBUG) + manager.create_service(service_name, service_type, + description=service_desc) + + log("Created new service entry '{}'".format(service_name), level=DEBUG) def create_endpoint_template(region, service, publicurl, adminurl, internalurl): manager = get_manager() - if manager.api_version == 2: + # this needs to be a round-trip to the manager.py script to discover what + # the "current" api_version might be, as it can't just be asserted. + if manager.resolved_api_version() == 2: create_endpoint_template_v2(manager, region, service, publicurl, adminurl, internalurl) else: @@ -814,7 +818,7 @@ def create_endpoint_template_v2(manager, region, service, publicurl, adminurl, """ Create a new endpoint template for service if one does not already exist matching name *and* region """ service_id = manager.resolve_service_id(service) - for ep in [e._info for e in manager.api.endpoints.list()]: + for ep in manager.list_endpoints(): if ep['service_id'] == service_id and ep['region'] == region: log("Endpoint template already exists for '%s' in '%s'" % (service, region)) @@ -829,15 +833,15 @@ def create_endpoint_template_v2(manager, region, service, publicurl, adminurl, else: # delete endpoint and recreate if endpoint urls need updating. 
log("Updating endpoint template with new endpoint urls.") - manager.api.endpoints.delete(ep['id']) + manager.delete_endpoint_by_id(ep['id']) manager.create_endpoints(region=region, service_id=service_id, publicurl=publicurl, adminurl=adminurl, internalurl=internalurl) - log("Created new endpoint template for '%s' in '%s'" % (region, service), - level=DEBUG) + log("Created new endpoint template for '{}' in '{}'" + .format(region, service), level=DEBUG) def create_endpoint_template_v3(manager, region, service, publicurl, adminurl, @@ -862,11 +866,11 @@ def create_endpoint_template_v3(manager, region, service, publicurl, adminurl, region ) if ep_deleted or not ep_exists: - manager.api.endpoints.create( - service_id, - endpoints[ep_type], + manager.create_endpoint_by_type( + region=region, + service_id=service_id, interface=ep_type, - region=region + endpoint=endpoints[ep_type], ) @@ -878,11 +882,11 @@ def create_tenant(name, domain): manager.create_tenant(tenant_name=name, domain=domain, description='Created by Juju') - log("Created new tenant '%s' in domain '%s'" % (name, domain), + log("Created new tenant '{}' in domain '{}'".format(name, domain), level=DEBUG) return - log("Tenant '%s' already exists." % name, level=DEBUG) + log("Tenant '{}' already exists.".format(name), level=DEBUG) def create_or_show_domain(name): @@ -890,88 +894,209 @@ def create_or_show_domain(name): manager = get_manager() domain_id = manager.resolve_domain_id(name) if domain_id: - log("Domain '%s' already exists." % name, level=DEBUG) + log("Domain '{}' already exists.".format(name), level=DEBUG) else: manager.create_domain(domain_name=name, description='Created by Juju') - log("Created new domain: %s" % name, level=DEBUG) + log("Created new domain: {}".format(name), level=DEBUG) domain_id = manager.resolve_domain_id(name) return domain_id def user_exists(name, domain=None): manager = get_manager() - domain_id = None - if domain: - domain_id = manager.resolve_domain_id(domain) - if not domain_id: - error_out('Could not resolve domain_id for {} when checking if ' - ' user {} exists'.format(domain, name)) - if manager.resolve_user_id(name, user_domain=domain): - if manager.api_version == 2: - users = manager.api.users.list() - else: - users = manager.api.users.list(domain=domain_id) - for user in users: - if user.name.lower() == name.lower(): - # In v3 Domains are seperate user namespaces so need to check - # that the domain matched if provided - if domain: - if domain_id == user.domain_id: - return True - else: - return True - - return False + return manager.user_exists(name, domain=domain) def create_user(name, password, tenant=None, domain=None): """Creates a user if it doesn't already exist, as a member of tenant""" manager = get_manager() if user_exists(name, domain=domain): - log("A user named '%s' already exists in domain '%s'" % (name, domain), - level=DEBUG) + log("A user named '{}' already exists in domain '{}'" + .format(name, domain), level=DEBUG) return tenant_id = None if tenant: tenant_id = manager.resolve_tenant_id(tenant, domain=domain) if not tenant_id: - error_out("Could not resolve tenant_id for tenant '%s' in domain " - "'%s'" % (tenant, domain)) + error_out("Could not resolve tenant_id for tenant '{}' in domain " + "'{}'".format(tenant, domain)) domain_id = None if domain: domain_id = manager.resolve_domain_id(domain) if not domain_id: - error_out('Could not resolve domain_id for domain %s when creating' - ' user %s' % (domain, name)) + error_out('Could not resolve domain_id for domain {} when 
creating' + ' user {}'.format(domain, name)) manager.create_user(name=name, password=password, email='juju@localhost', tenant_id=tenant_id, domain_id=domain_id) - log("Created new user '%s' tenant: '%s' domain: '%s'" % (name, tenant_id, - domain_id), level=DEBUG) + log("Created new user '{}' tenant: '{}' domain: '{}'" + .format(name, tenant_id, domain_id), level=DEBUG) def get_manager(api_version=None): - """Return a keystonemanager for the correct API version""" - set_python_path() - from manager import get_keystone_manager - return get_keystone_manager(get_local_endpoint(), get_admin_token(), - api_version) + return KeystoneManagerProxy(api_version=api_version) + + +class KeystoneManagerProxy(object): + + def __init__(self, api_version=None, path=None): + self._path = path or [] + self.api_version = api_version + + def __getattribute__(self, attr): + if attr in ['__class__', '_path', 'api_version']: + return super().__getattribute__(attr) + return self.__class__(api_version=self.api_version, + path=self._path + [attr]) + + def __call__(self, *args, **kwargs): + # Following line retained commented-out for future debugging + # print("Called: {} ({}, {})".format(self._path, args, kwargs)) + return _proxy_manager_call(self._path, self.api_version, args, kwargs) + + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + + +def _proxy_manager_call(path, api_version, args, kwargs): + package = dict(path=path, + api_version=api_version, + api_local_endpoint=get_local_endpoint(), + admin_token=get_admin_token(), + args=args, + kwargs=kwargs) + serialized = json.dumps(package, **JSON_ENCODE_OPTIONS) + server = _get_server_instance() + try: + server.send(serialized) + # wait for the reply + result_str = server.receive() + result = json.loads(result_str) + if 'error' in result: + s = ("The call within manager.py failed with the error: '{}'. " + "The call was: path={}, args={}, kwargs={}, api_version={}" + .format(result['error'], path, args, kwargs, api_version)) + log(s, level=ERROR) + raise RuntimeError(s) + return json.loads(result_str)['result'] + except RuntimeError as e: + raise e + except Exception as e: + s = ("Decoding the result from the call to manager.py resulted in " + "error '{}' (command: path={}, args={}, kwargs={}" + .format(str(e), path, args, kwargs)) + log(s, level=ERROR) + raise RuntimeError(s) + + +# singleton to ensure that there's only one manager instance. +_the_manager_instance = None + + +def _get_server_instance(): + """Get a SockServer instance and run up the manager to connect to it. + Ensure that the manager.py is running and is ready to receive messages (i.e + do the handshake. Check that it is still running, and if not, start it + again. In that instance, restart the SockServer + """ + global _the_manager_instance + if _the_manager_instance is None: + _the_manager_instance = ManagerServer() + return _the_manager_instance.server + + +class ManagerServer(): + """This is a singleton server that launches and kills the manager.py script + that is used to allow 'calling' into Keystone when it is in a completely + different process. The object handles kill/quiting the manager.py script + when this keystone charm exits using the atexit charmhelpers `atexit()` + command to do the cleanup. + + The server() method also ensures that the manager.py script is still + running, and if not, relaunches it. This is to try to make the using the + manager.py methods as transparent, and speedy, as possible. 
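+
+    Illustrative flow of a proxied call (the service name below is only an
+    example):
+
+        manager = get_manager()
+        service_id = manager.resolve_service_id('glance', 'image')
+
+    The attribute access on the returned KeystoneManagerProxy records the
+    call path, _proxy_manager_call() JSON-encodes it and sends it over the
+    unix domain socket owned by this server, and the 'result' field of the
+    reply from manager.py is handed back to the caller.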
+ """ + + def __init__(self): + self.pvar = None + self._server = None + self.socket_file = os.path.join(tempfile.gettempdir(), "keystone-uds") + atexit(lambda: self.clean_up()) + + @property + def server(self): + self._ensure_running() + return self._server + + def _ensure_running(self): + if self.pvar is None or self.pvar.poll() is not None: + if self._server is not None: + self._server.close() + self._server = uds.UDSServer(self.socket_file) + self._launch_manager() + self._server.wait_for_connection() + + def _launch_manager(self): + script = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'manager.py')) + # need to set the environment variable PYTHONPATH to include the + # payload's directory for the manager.py to find the various keystone + # clients + env = os.environ + _python_path = determine_python_path() + if _python_path: + if _python_path not in os.environ.get('PYTHONPATH', ''): + env['PYTHONPATH'] = ':'.join( + os.environ.get('PYTHONPATH', '').split(':') + + [_python_path]) + # also ensure that the python executable is available if snap + # installed. + if snap_install_requested(): + _bin_path = os.path.join(SNAP_BASE_DIR, 'usr/bin') + if _bin_path not in os.environ.get('PATH', ''): + env['PATH'] = ':'.join( + os.environ.get('PATH', '').split(':') + + [_bin_path]) + # launch the process and return immediately + self.pvar = subprocess.Popen([script, self.socket_file], + env=env, close_fds=True) + + def clean_up(self): + if self.pvar is not None and self.pvar.poll() is None: + self._server.send("QUIT") + try: + self.pvar.wait(timeout=10) + except subprocess.TimeoutExpired: + self.pvar.kill() + self.pvar = None + if self._server is not None: + self._server.close() + self._server = None + try: + os.remove(self.socket_file) + except OSError: + pass def create_role(name, user=None, tenant=None, domain=None): """Creates a role if it doesn't already exist. 
grants role to user""" manager = get_manager() if not manager.resolve_role_id(name): - manager.api.roles.create(name=name) - log("Created new role '%s'" % name, level=DEBUG) + manager.create_role(name=name) + log("Created new role '{}'".format(name), level=DEBUG) else: - log("A role named '%s' already exists" % name, level=DEBUG) + log("A role named '{}' already exists".format(name), level=DEBUG) if not user and not tenant: return @@ -1010,8 +1135,8 @@ def grant_role(user, role, tenant=None, domain=None, user_domain=None, if tenant: tenant_id = manager.resolve_tenant_id(tenant, domain=project_domain) if not tenant_id: - error_out("Could not resolve tenant_id for tenant '%s' in domain " - "'%s'" % (tenant, domain)) + error_out("Could not resolve tenant_id for tenant '{}' in domain " + "'{}'".format(tenant, domain)) domain_id = None if domain: @@ -1021,7 +1146,7 @@ def grant_role(user, role, tenant=None, domain=None, user_domain=None, cur_roles = manager.roles_for_user(user_id, tenant_id=tenant_id, domain_id=domain_id) - if not cur_roles or role_id not in [r.id for r in cur_roles]: + if not cur_roles or role_id not in [r['id'] for r in cur_roles]: manager.add_user_role(user=user_id, role=role_id, tenant=tenant_id, @@ -1043,7 +1168,7 @@ def grant_role(user, role, tenant=None, domain=None, user_domain=None, def store_data(backing_file, data): with open(backing_file, 'w+') as fd: - fd.writelines("%s\n" % data) + fd.writelines("{}\n".format(data)) def get_admin_passwd(user=None): @@ -1058,7 +1183,7 @@ def get_admin_passwd(user=None): log("Generating new passwd for user: %s" % config("admin-user")) cmd = ['pwgen', '-c', '16', '1'] - passwd = str(subprocess.check_output(cmd)).strip() + passwd = str(subprocess.check_output(cmd).decode('UTF-8')).strip() return passwd @@ -1088,31 +1213,11 @@ def get_api_version(): return api_version -def set_python_path(): - """ Set the Python path to include snap installed python libraries - - The charm itself requires access to the python client. When installed as a - snap the client libraries are in /snap/$SNAP/common/lib/python2.7. This - function sets the python path to allow clients to be imported from snap - installs. - """ - if snap_install_requested(): - sys.path.append(determine_python_path()) - - def ensure_initial_admin(config): # Allow retry on fail since leader may not be ready yet. # NOTE(hopem): ks client may not be installed at module import time so we # use this wrapped approach instead. - set_python_path() - try: - from keystoneclient.apiclient.exceptions import InternalServerError - except: - # Backwards-compatibility for earlier versions of keystoneclient (< I) - from keystoneclient.exceptions import (ClientException as - InternalServerError) - - @retry_on_exception(3, base_delay=3, exc_type=InternalServerError) + @retry_on_exception(3, base_delay=3, exc_type=RuntimeError) def _ensure_initial_admin(config): """Ensures the minimum admin stuff exists in whatever database we're using. 
@@ -1193,9 +1298,9 @@ def endpoint_url(ip, port, suffix=None): if is_ipv6(ip): ip = "[{}]".format(ip) if suffix: - ep = "%s://%s:%s/%s" % (proto, ip, port, suffix) + ep = "{}://{}:{}/{}".format(proto, ip, port, suffix) else: - ep = "%s://%s:%s" % (proto, ip, port) + ep = "{}://{}:{}".format(proto, ip, port) return ep @@ -1212,15 +1317,14 @@ def create_keystone_endpoint(public_ip, service_port, def update_user_password(username, password, domain): manager = get_manager() - log("Updating password for user '%s'" % username) + log("Updating password for user '{}'".format(username)) user_id = manager.resolve_user_id(username, user_domain=domain) if user_id is None: - error_out("Could not resolve user id for '%s'" % username) + error_out("Could not resolve user id for '{}'".format(username)) manager.update_password(user=user_id, password=password) - log("Successfully updated password for user '%s'" % - username) + log("Successfully updated password for user '{}'".format(username)) def load_stored_passwords(path=SERVICE_PASSWD_PATH): @@ -1250,7 +1354,7 @@ def _migrate_service_passwords(): if is_leader() and os.path.exists(SERVICE_PASSWD_PATH): log('Migrating on-disk stored passwords to leader storage') creds = load_stored_passwords() - for k, v in creds.iteritems(): + for k, v in creds.items(): leader_set({"{}_passwd".format(k): v}) os.unlink(SERVICE_PASSWD_PATH) @@ -1273,18 +1377,6 @@ def is_password_changed(username, passwd): return (_passwd is None or passwd != _passwd) -def relation_list(rid): - cmd = [ - 'relation-list', - '-r', rid, - ] - result = str(subprocess.check_output(cmd)).split() - if result == "": - return None - else: - return result - - def create_user_credentials(user, passwd_get_callback, passwd_set_callback, tenant=None, new_roles=None, grants=None, domain=None): @@ -1298,9 +1390,9 @@ def create_user_credentials(user, passwd_get_callback, passwd_set_callback, level=INFO) return - log("Creating service credentials for '%s'" % user, level=DEBUG) + log("Creating service credentials for '{}'".format(user), level=DEBUG) if user_exists(user, domain=domain): - log("User '%s' already exists" % (user), level=DEBUG) + log("User '{}' already exists".format(user), level=DEBUG) # NOTE(dosaboy): see LP #1648677 if is_password_changed(user, passwd): update_user_password(user, passwd, domain) @@ -1315,13 +1407,13 @@ def create_user_credentials(user, passwd_get_callback, passwd_set_callback, grant_role(user, role, tenant=tenant, user_domain=domain, project_domain=domain) else: - log("No role grants requested for user '%s'" % (user), level=DEBUG) + log("No role grants requested for user '{}'".format(user), level=DEBUG) if new_roles: # Allow the remote service to request creation of any additional roles. # Currently used by Swift and Ceilometer. 
for role in new_roles: - log("Creating requested role '%s'" % role, level=DEBUG) + log("Creating requested role '{}'".format(role), level=DEBUG) create_role(role, user=user, tenant=tenant, domain=domain) return passwd @@ -1344,22 +1436,25 @@ def create_service_credentials(user, new_roles=None): if not tenant: raise Exception("No service tenant provided in config") - domain = None - if get_api_version() > 2: - domain = DEFAULT_DOMAIN - passwd = create_user_credentials(user, get_service_password, - set_service_password, - tenant=tenant, new_roles=new_roles, - grants=[config('admin-role')], - domain=domain) - if get_api_version() > 2: - # Create account in SERVICE_DOMAIN as well using same password - domain = SERVICE_DOMAIN + if get_api_version() < 3: passwd = create_user_credentials(user, get_service_password, set_service_password, tenant=tenant, new_roles=new_roles, grants=[config('admin-role')], - domain=domain) + domain=None) + else: + # api version 3 or above + create_user_credentials(user, get_service_password, + set_service_password, + tenant=tenant, new_roles=new_roles, + grants=[config('admin-role')], + domain=DEFAULT_DOMAIN) + # Create account in SERVICE_DOMAIN as well using same password + passwd = create_user_credentials(user, get_service_password, + set_service_password, + tenant=tenant, new_roles=new_roles, + grants=[config('admin-role')], + domain=SERVICE_DOMAIN) return passwd @@ -1367,15 +1462,14 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): manager = get_manager() settings = relation_get(rid=relation_id, unit=remote_unit) # the minimum settings needed per endpoint - single = set(['service', 'region', 'public_url', 'admin_url', - 'internal_url']) + single = {'service', 'region', 'public_url', 'admin_url', 'internal_url'} https_cns = [] protocol = get_protocol() if single.issubset(settings): # other end of relation advertised only one endpoint - if 'None' in settings.itervalues(): + if 'None' in settings.values(): # Some backend services advertise no endpoint but require a # hook execution to update auth strategy. relation_data = {} @@ -1395,7 +1489,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): # Allow the remote service to request creation of any additional # roles. Currently used by Horizon for role in get_requested_roles(settings): - log("Creating requested role: %s" % role) + log("Creating requested role: {}".format(role)) create_role(role) peer_store_and_set(relation_id=relation_id, **relation_data) @@ -1413,14 +1507,16 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): service_username = settings['service'] prefix = config('service-admin-prefix') if prefix: - service_username = "%s%s" % (prefix, service_username) + service_username = "{}{}".format(prefix, service_username) # NOTE(jamespage) internal IP for backwards compat for SSL certs - internal_cn = urlparse.urlparse(settings['internal_url']).hostname + internal_cn = (urllib.parse + .urlparse(settings['internal_url']).hostname) https_cns.append(internal_cn) - public_cn = urlparse.urlparse(settings['public_url']).hostname + public_cn = urllib.parse.urlparse(settings['public_url']).hostname https_cns.append(public_cn) - https_cns.append(urlparse.urlparse(settings['admin_url']).hostname) + https_cns.append( + urllib.parse.urlparse(settings['admin_url']).hostname) else: # assemble multiple endpoints from relation data. 
service name # should be prepended to setting name, ie: @@ -1438,8 +1534,8 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): # 'public_url': $foo # } # } - endpoints = {} - for k, v in settings.iteritems(): + endpoints = OrderedDict() # for Python3 we need a consistent order + for k, v in settings.items(): ep = k.split('_')[0] x = '_'.join(k.split('_')[1:]) if ep not in endpoints: @@ -1461,19 +1557,22 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): services.append(ep['service']) # NOTE(jamespage) internal IP for backwards compat for # SSL certs - internal_cn = urlparse.urlparse(ep['internal_url']).hostname + internal_cn = (urllib.parse + .urlparse(ep['internal_url']).hostname) https_cns.append(internal_cn) - https_cns.append(urlparse.urlparse(ep['public_url']).hostname) - https_cns.append(urlparse.urlparse(ep['admin_url']).hostname) + https_cns.append( + urllib.parse.urlparse(ep['public_url']).hostname) + https_cns.append( + urllib.parse.urlparse(ep['admin_url']).hostname) service_username = '_'.join(sorted(services)) # If an admin username prefix is provided, ensure all services use it. prefix = config('service-admin-prefix') if service_username and prefix: - service_username = "%s%s" % (prefix, service_username) + service_username = "{}{}".format(prefix, service_username) - if 'None' in settings.itervalues(): + if 'None' in settings.values(): return if not service_username: @@ -1599,7 +1698,7 @@ def get_protocol(): def ensure_valid_service(service): if service not in valid_services.keys(): - log("Invalid service requested: '%s'" % service) + log("Invalid service requested: '{}'".format(service)) relation_set(admin_token=-1) return @@ -1686,14 +1785,14 @@ def send_notifications(data, force=False): for rid in rel_ids: rs = relation_get(unit=local_unit(), rid=rid) if rs: - keys += rs.keys() + keys += list(rs.keys()) # Don't bother checking if we have already identified a diff if diff: continue # Work out if this notification changes anything - for k, v in data.iteritems(): + for k, v in data.items(): if rs.get(k, None) != v: diff = True break @@ -1707,14 +1806,14 @@ def send_notifications(data, force=False): _notifications = {k: None for k in set(keys)} # Set new values - for k, v in data.iteritems(): + for k, v in data.items(): _notifications[k] = v if force: _notifications['trigger'] = str(uuid.uuid4()) # Broadcast - log("Sending identity-service notifications (trigger=%s)" % (force), + log("Sending identity-service notifications (trigger={})".format(force), level=DEBUG) for rid in rel_ids: relation_set(relation_id=rid, relation_settings=_notifications) @@ -1736,16 +1835,16 @@ def is_db_ready(use_current_context=False, db_rel=None): if use_current_context: if not any([relation_id() in relation_ids(r) for r in db_rels]): - raise Exception("use_current_context=True but not in one of %s " - "rel hook contexts (currently in %s)." % - (', '.join(db_rels), relation_id())) + raise Exception("use_current_context=True but not in one of {} " + "rel hook contexts (currently in {})." 
+ .format(', '.join(db_rels), relation_id())) allowed_units = relation_get(attribute=key) if allowed_units and local_unit() in allowed_units.split(): return True # We are in shared-db rel but don't yet have permissions - log("%s does not yet have db permissions" % (local_unit()), + log("{} does not yet have db permissions".format(local_unit()), level=DEBUG) return False else: diff --git a/hooks/manager.py b/hooks/manager.py old mode 100644 new mode 100755 index 489018e4..0223bcbf --- a/hooks/manager.py +++ b/hooks/manager.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python2 # # Copyright 2016 Canonical Ltd # @@ -14,11 +14,60 @@ # See the License for the specific language governing permissions and # limitations under the License. +# NOTE(tinwood): This file needs to remain Python2 as it uses keystoneclient +# from the payload software to do it's work. + +from __future__ import print_function + +import json +import os +import stat +import sys +import time + from keystoneclient.v2_0 import client from keystoneclient.v3 import client as keystoneclient_v3 from keystoneclient.auth import token_endpoint from keystoneclient import session, exceptions -from charmhelpers.core.decorators import retry_on_exception + +import uds_comms as uds + + +_usage = """This file is called from the keystone_utils.py file to implement +various keystone calls and functions. It is called with one parameter which is +the path to a Unix Domain Socket file. + +The messages passed to the this process from the keystone_utils.py includes the +following keys: + +{ + 'path': The api path on the keystone manager object. + 'api_version': the keystone API version to use. + 'api_local_endpoint': the local endpoint to connect to. + 'admin_token': the admin token to use with keystone. + 'args': the non-keyword argument to supply to the keystone manager call. + 'kwargs': any keyword args to supply to the keystone manager call. +} + +The result of the call, or an error, is returned as a json encoded result in +the same file that sent the arguments. + +{ + 'result': + 'error': -1: + # see if the end of the message is available + for i, b in enumerate(self.buffer): + if i > self.found_start + 1 and b == END_CHAR: + # found the end + start = self.found_start + 1 + self.message = (base64 + .b64decode(self.buffer[start:i]) + .decode('UTF-8')) + self.buffer = self.buffer[i + 1:] + self.found_start = -1 + return self.message + return None + + def receive(self, _callable): + """Continuously calls the param _callable() until it returns None or a + full message is received. + + If the message is already in the buffer, then it grabs it and doesn't + call the _callable(). + + _callable() should return bytes until it wants receive() to terminate, + when it should return None. receive() also returns when a message is + complete. + + receive() will return a decoded UTF-8 string when a complete message is + received. + + Any left over bytes are retained in the Codec object, and further calls + to receive() will consume these first. + + :param _callable: A function that returns None or bytes + :type _callable: Callable() + :returns: None or a UTF-8 decoded string + :rtype: Option[None, str] + """ + # first see if the message is already in the buffer? 
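+        # (calling _add() with an empty chunk just scans the bytes already
+        # held in self.buffer for a complete '%...$' frame, without reading
+        # anything new from the channel)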
+ message = self._add(b'') + if message: + return message + while True: + # receive the data in chunks + data = _callable() + if data: + message = self._add(data) + if message: + return message + else: + break + return None + + def encode(self, message): + """Encode a message for sending on a channel with inconsistent + buffering (e.g. like a unix domain socket. + + Encodes the message by UTF-8, then base64 and finally adds '%' and '$' + to the start and end of the message. This is so the message can be + recovered by searching through a receiving buffer. + + :param message: The string that needs encoding. + :type message: str + :returns: the encoded message + :rtype: bytes + """ + buffer = base64.b64encode(message.encode('UTF-8')) + return b"%" + buffer + b"$" + + +# client and socket classes for the channel +# +# The Client connects to the server, and performs a READY handshake as part of +# the connect(). The server has to respond 'OK'. Once this is done the client +# and server are synchronised. Note that it is a one-to-one, synchronised +# connection with client and server exchanging messages. The theory is that +# the server initiates the Server, to bind to the socket, launches the script +# and then waits for the connection. There is no race as the client will wait +# until the servers calls wait_for_connection() which can be after the client +# has connected to the socket. +# +# The server then sends a "QUIT" to the client to get it to clean up and exit +# (but this is outside of the protocol in the Client() and Server() classes + +class UDSException(Exception): + """Used to gather up all exceptions and return a single one so that the + client/server can error out on comms failures. + """ + + +class UDSClient(): + """Unix Domain Socket Client class. + + Provides a synchronised message/receive client for connecting to the + equivalent UDSServer() running in a different process. + + The client/server is backwards, as the UDSClient() is expecting to receive + a message, which its user will then reply with a result. i.e. the Client + is implemented in a process that expects to get commands from the server. + This is so that the server can launch a child script, communicate with it, + and then terminate it when finished. + + Example use: + + client = Client(server_address) + client.connect() + + message = client.receive() + if message == "DONE": + client.close() + return + client.send("OK") + # etc. + """ + + BUFFER_SIZE = 256 + + def __init__(self, socket_path): + """Initialise the Client. + + :param socket_path: the file to use as a Unix Domain Socket + :type socket_path: str + :raises: UDSException on Error + """ + self.socket_path = socket_path + try: + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + except Exception as e: + raise UDSException(str(e)) + self.codec = Codec() + + def connect(self): + """Attempt to connect to the other side. + When the connection is made, automatically calls _ready() to indicate + that the client is ready as part of the handshake. When connect() + completes the user should call receive() to receive the first message + from the server. 
+ + :raises: UDSException on Error + """ + try: + self.sock.connect(self.socket_path) + self._ready() + except Exception as e: + raise UDSException(str(e)) + + def _ready(self): + """Internal method to provide a handshake to the server""" + self.sock.sendall(self.codec.encode("READY")) + message = self.receive() + if message != "OK": + raise RuntimeError("Handshake failed") + + def receive(self): + """Receives a message from the Server() in the other process on the + other end of the UDS. Uses the Codec() class to ensure that the + messages are properly received and sent. + + :returns: the string send by the Server.send() methdod. + :rtype: str + :raises: UDSException on Error + """ + try: + return self.codec.receive( + lambda: self.sock.recv(self.BUFFER_SIZE)) + except Exception as e: + raise UDSException(str(e)) + + def send(self, buffer): + """Send a message to the Server() in the other process. + + :param buffer: the string to send + :type buffer: str + :raises: UDSException on Error + """ + try: + self.sock.sendall(self.codec.encode(buffer)) + except Exception as e: + raise UDSException(str(e)) + + def close(self): + """Close the socket -- good housekeeping, so should do it at the end of + the process. + :raises: UDSException on Error + """ + try: + self.sock.close() + except Exception as e: + raise UDSException(str(e)) + + +class UDSServer(): + """The Server (or listening) end of the Unix Domain Socket chat protocol. + Uses Codec() to encode and decode messages on the channel. + + The Server listens for a connection, performs a handshake, and then is in + control of the conversation. The user of Server() should then send a + message and wait for a reponse. It's up to the client to disconnect, so an + protocol level message should be used (e.g. QUIT) that the user of Client() + will use to close the connection. + + Example use: + + server = Server(server_address) + input("Press enter to continue ....") + server.wait_for_connection() + try: + # send some data + server.send(data) + # and await the reply + message = server.receive() + finally: + # clean up + server.send("DONE") + message = server.receive() + server.close() + """ + + BUFFER_SIZE = 256 + + def __init__(self, socket_path): + """Initialise the listener on the UDS. This binds to the socket and + ensures that a client can connect. The conversation doesn't get + started until the wait_for_connection() method is called. + + The server can initialse the Server, then ask the client to connect, + and then at any point later call wait_for_connection() to get the + conversation going. + + :param socket_path: the filename for the UDS. + :type socket_path: str + :raises: UDSException on Error + """ + self.socket_path = socket_path + self.sock = None + # Make sure the socket does not already exist + try: + os.unlink(socket_path) + except OSError: + if os.path.exists(socket_path): + raise + try: + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + # ensure the socket is created with 600 permissions + _mask = os.umask(0o177) + self.sock.bind(socket_path) + os.umask(_mask) + self.sock.listen(1) + except Exception as e: + raise UDSException(str(e)) + self.codec = Codec() + + def wait_for_connection(self): + """Blocking method to wait for a connection from the client. + + Performs the handshake to ensure that both ends are in sync. 
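+        (The handshake itself is minimal: the client sends "READY" and this
+        server replies "OK"; see _handshake() and UDSClient._ready().)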
+ :raises: UDSException on Error + """ + try: + self.connection, self.client_address = self.sock.accept() + self._handshake() + except Exception as e: + raise UDSException(str(e)) + + def _handshake(self): + """Internal method to sync up the client and server""" + while True: + message = self.receive() + if message == 'READY': + self.send('OK') + break + + def receive(self): + """Receives a message from the Client() in the other process on the + other end of the UDS. Uses the Codec() class to ensure that the + messages are properly received and sent. + + :returns: the string send by the Client.send() methdod. + :rtype: str + :raises: UDSException on Error + """ + try: + return self.codec.receive( + lambda: self.connection.recv(self.BUFFER_SIZE)) + except Exception as e: + raise UDSException(str(e)) + + def send(self, buffer): + """Send a message to the Client() in the other process. + + :param buffer: the string to send + :type buffer: str + :raises: UDSException on Error + """ + try: + self.connection.sendall(self.codec.encode(buffer)) + except Exception as e: + raise UDSException(str(e)) + + def close(self): + """Close the socket -- good housekeeping, so should do it at the end of + the process. + :raises: UDSException on Error + """ + try: + self.connection.close() + except Exception as e: + raise UDSException(str(e)) diff --git a/scripts/fernet_rotate_and_sync.py b/scripts/fernet_rotate_and_sync.py index 7fe814d1..c948c16e 100755 --- a/scripts/fernet_rotate_and_sync.py +++ b/scripts/fernet_rotate_and_sync.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2018 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +20,11 @@ import time dir_path = os.path.dirname(os.path.realpath(__file__)) hooks_path = os.path.abspath(os.path.join(dir_path, "..", "hooks")) +root_path = os.path.abspath(os.path.join(dir_path, "..")) -if hooks_path not in sys.path: - sys.path.append(hooks_path) +for p in [hooks_path, root_path]: + if p not in sys.path: + sys.path.append(p) # now we can import charm related items import charmhelpers.core.hookenv diff --git a/templates/icehouse/keystone.conf b/templates/icehouse/keystone.conf index a1468405..330afef2 100644 --- a/templates/icehouse/keystone.conf +++ b/templates/icehouse/keystone.conf @@ -81,7 +81,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} diff --git a/templates/kilo/keystone.conf b/templates/kilo/keystone.conf index 6621334f..08f6ea6a 100644 --- a/templates/kilo/keystone.conf +++ b/templates/kilo/keystone.conf @@ -92,7 +92,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} diff --git a/templates/mitaka/keystone.conf b/templates/mitaka/keystone.conf index 8d97144d..9a837a3f 100644 --- a/templates/mitaka/keystone.conf +++ b/templates/mitaka/keystone.conf @@ -80,7 +80,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} diff --git a/templates/ocata/keystone.conf b/templates/ocata/keystone.conf index 
a0d335bc..c84dc9d8 100644 --- a/templates/ocata/keystone.conf +++ b/templates/ocata/keystone.conf @@ -89,7 +89,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} diff --git a/tox.ini b/tox.ini index d806de1f..6ae2b448 100644 --- a/tox.ini +++ b/tox.ini @@ -2,8 +2,9 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. [tox] -envlist = pep8,py27 +envlist = pep8,py27,py35,py36 skipsdist = True +skip_missing_interpreters = True [testenv] setenv = VIRTUAL_ENV={envdir} @@ -21,6 +22,7 @@ deps = -r{toxinidir}/test-requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +commands = /bin/true [testenv:py35] basepython = python3.5 @@ -33,7 +35,7 @@ deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt [testenv:pep8] -basepython = python2.7 +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests actions lib diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index 7ab7abaf..c7dc9d98 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -12,8 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import sys -sys.path.append('actions/') -sys.path.append('hooks/') -sys.path.append('scripts/') +_path = os.path.dirname(os.path.realpath(__file__)) +_actions = os.path.abspath(os.path.join(_path, '../actions')) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_charmhelpers = os.path.abspath(os.path.join(_path, '../charmhelpers')) +_unit_tests = os.path.abspath(os.path.join(_path, '../unit_tests')) +_scripts = os.path.abspath(os.path.join(_path, '../scripts')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_actions) +_add_path(_hooks) +_add_path(_charmhelpers) +_add_path(_unit_tests) +_add_path(_scripts) diff --git a/unit_tests/test_actions.py b/unit_tests/test_actions.py index a8083899..61e9ce77 100644 --- a/unit_tests/test_actions.py +++ b/unit_tests/test_actions.py @@ -17,9 +17,9 @@ from mock import patch from test_utils import CharmTestCase -with patch('actions.hooks.charmhelpers.contrib.openstack.utils.' +with patch('charmhelpers.contrib.openstack.utils.' 
'snap_install_requested') as snap_install_requested, \ - patch('actions.hooks.keystone_utils.register_configs') as configs: + patch('keystone_utils.register_configs') as configs: snap_install_requested.return_value = False configs.return_value = 'test-config' import actions.actions diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py index 2ce82aed..b0c10a33 100644 --- a/unit_tests/test_actions_openstack_upgrade.py +++ b/unit_tests/test_actions_openstack_upgrade.py @@ -20,7 +20,7 @@ os.environ['JUJU_UNIT_NAME'] = 'keystone' with patch('charmhelpers.contrib.openstack.utils' '.snap_install_requested') as snap_install_requested: snap_install_requested.return_value = False - import openstack_upgrade + import openstack_upgrade as openstack_upgrade from test_utils import ( CharmTestCase diff --git a/unit_tests/test_keystone_contexts.py b/unit_tests/test_keystone_contexts.py index 372a3750..8a78329a 100644 --- a/unit_tests/test_keystone_contexts.py +++ b/unit_tests/test_keystone_contexts.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections +import importlib import os from mock import patch, MagicMock @@ -20,6 +22,7 @@ with patch('charmhelpers.contrib.openstack.' snap_install_requested.return_value = False import keystone_utils # noqa import keystone_context as context + importlib.reload(keystone_utils) from test_utils import ( CharmTestCase @@ -99,7 +102,7 @@ class TestKeystoneContexts(CharmTestCase): @patch('charmhelpers.contrib.openstack.context.relation_get') @patch('charmhelpers.contrib.openstack.context.log') @patch('charmhelpers.contrib.openstack.context.kv') - @patch('__builtin__.open') + @patch('builtins.open') def test_haproxy_context_service_enabled( self, mock_open, mock_kv, mock_log, mock_relation_get, mock_related_units, mock_unit_get, mock_relation_ids, mock_config, @@ -122,27 +125,35 @@ class TestKeystoneContexts(CharmTestCase): ctxt = context.HAProxyContext() self.maxDiff = None - self.assertEqual( - ctxt(), - {'listen_ports': {'admin_port': '12', - 'public_port': '12'}, - 'local_host': '127.0.0.1', - 'haproxy_host': '0.0.0.0', - 'stat_port': '8888', - 'stat_password': 'abcdefghijklmnopqrstuvwxyz123456', - 'service_ports': {'admin-port': ['12', '34'], - 'public-port': ['12', '34']}, - 'default_backend': '1.2.3.4', - 'ipv6_enabled': True, - 'frontends': {'1.2.3.4': { - 'network': '1.2.3.4/255.255.255.0', - 'backends': { - 'keystone': '1.2.3.4', - 'unit-0': '10.0.0.0' - } - }} - } - ) + _ctxt = ctxt() + test_ctxt = { + 'listen_ports': { + 'admin_port': '12', + 'public_port': '12' + }, + 'ipv6_enabled': True, + 'local_host': '127.0.0.1', + 'haproxy_host': '0.0.0.0', + 'stat_port': '8888', + 'stat_password': 'abcdefghijklmnopqrstuvwxyz123456', + 'service_ports': { + 'admin-port': ['12', '34'], + 'public-port': ['12', '34'] + }, + 'default_backend': '1.2.3.4', + 'frontends': { + '1.2.3.4': { + 'network': '1.2.3.4/255.255.255.0', + 'backends': collections.OrderedDict([ + ('keystone', '1.2.3.4'), + ('unit-0', '10.0.0.0') + ]), + } + } + } + self.assertEqual(sorted(list(_ctxt.keys())), + sorted(list(test_ctxt.keys()))) + self.assertEqual(_ctxt, test_ctxt) @patch.object(context, 'config') def test_keystone_logger_context(self, mock_config): @@ -285,7 +296,7 @@ class TestKeystoneContexts(CharmTestCase): ctxt = context.KeystoneFIDServiceProviderContext() self.maxDiff = None - self.assertItemsEqual( + self.assertCountEqual( ctxt(), { "fid_sps": [ @@ 
-319,7 +330,7 @@ class TestKeystoneContexts(CharmTestCase): ctxt = context.KeystoneFIDServiceProviderContext() self.maxDiff = None - self.assertItemsEqual(ctxt(), {}) + self.assertCountEqual(ctxt(), {}) @patch.object(context, 'relation_ids') @patch.object(context, 'related_units') @@ -427,4 +438,4 @@ class TestKeystoneContexts(CharmTestCase): ctxt = context.WebSSOTrustedDashboardContext() self.maxDiff = None - self.assertItemsEqual(ctxt(), {}) + self.assertCountEqual(ctxt(), {}) diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index 12a6e31c..c033c9d2 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import importlib import os import sys @@ -29,21 +30,17 @@ with patch('charmhelpers.core.hookenv.config') as config, \ snap_install_requested.return_value = False config.return_value = 'keystone' import keystone_utils as utils + importlib.reload(utils) -_reg = utils.register_configs -_map = utils.restart_map + with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec, \ + patch('keystone_utils.register_configs'), \ + patch('keystone_utils.restart_map'): + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + with patch.object(utils, 'run_in_apache') as mock_run_in_apache: + import keystone_hooks as hooks + importlib.reload(hooks) -utils.register_configs = MagicMock() -utils.restart_map = MagicMock() - -with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: - mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: - lambda *args, **kwargs: f(*args, **kwargs)) - with patch('keystone_utils.run_in_apache') as mock_run_in_apache: - import keystone_hooks as hooks - -utils.register_configs = _reg -utils.restart_map = _map TO_PATCH = [ # charmhelpers.core.hookenv @@ -133,7 +130,7 @@ class KeystoneRelationTests(CharmTestCase): self.apt_install.assert_called_with( ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen', 'python-keystoneclient', 'python-mysqldb', 'python-psycopg2', - 'python-six', 'uuid'], fatal=True) + 'python3-six', 'uuid'], fatal=True) self.disable_unused_apache_sites.assert_not_called() @patch.object(utils, 'os_release') @@ -151,9 +148,8 @@ class KeystoneRelationTests(CharmTestCase): self.apt_install.assert_called_with( ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen', 'python-keystoneclient', 'python-mysqldb', 'python-psycopg2', - 'python-six', 'uuid'], fatal=True) + 'python3-six', 'uuid'], fatal=True) self.disable_unused_apache_sites.assert_called_with() - mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils' @patch.object(utils, 'os_release') diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py index 94a6ed89..b30db3d7 100644 --- a/unit_tests/test_keystone_utils.py +++ b/unit_tests/test_keystone_utils.py @@ -12,26 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import builtins +import collections +from mock import patch, call, MagicMock, mock_open, Mock import json import os import subprocess -import sys import time -from mock import MagicMock, call, mock_open, patch from test_utils import CharmTestCase -if sys.version_info.major == 2: - import __builtin__ as builtins -else: - import builtins - os.environ['JUJU_UNIT_NAME'] = 'keystone' with patch('charmhelpers.core.hookenv.config') as config, \ patch('charmhelpers.contrib.openstack.' - 'utils.snap_install_requested') as snap_install_requested: - snap_install_requested.return_value = False + 'utils.snap_install_requested', + Mock(return_value=False)): + import importlib import keystone_utils as utils + # we have to force utils to reload as another test module may already have + # pulled it in, and thus all this fancy patching will just fail + importlib.reload(utils) TO_PATCH = [ 'api_port', @@ -123,7 +123,7 @@ class TestKeystoneUtils(CharmTestCase): '/etc/apache2/sites-available/openstack_https_frontend.conf', [self.ctxt]), ] - self.assertEqual(fake_renderer.register.call_args_list, ex_reg) + fake_renderer.register.assert_has_calls(ex_reg, any_order=True) @patch.object(utils, 'snap_install_requested') @patch.object(utils, 'os') @@ -337,6 +337,7 @@ class TestKeystoneUtils(CharmTestCase): 'admin_url': '10.0.0.2', 'internal_url': '192.168.1.2'} + mock_keystone.user_exists.return_value = False utils.add_service_to_keystone( relation_id=relation_id, remote_unit=remote_unit) @@ -374,8 +375,8 @@ class TestKeystoneUtils(CharmTestCase): 'service_tenant_id': 'tenant_id', 'api_version': test_api_version} - filtered = {} - for k, v in relation_data.iteritems(): + filtered = collections.OrderedDict() + for k, v in relation_data.items(): if v == '__null__': filtered[k] = None else: @@ -415,6 +416,7 @@ class TestKeystoneUtils(CharmTestCase): 'ec2_internal_url': '192.168.1.2'} self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/' KeystoneManager.resolve_tenant_id.return_value = 'tenant_id' + KeystoneManager.user_exists.return_value = False leader_get.return_value = None utils.add_service_to_keystone( @@ -559,43 +561,6 @@ class TestKeystoneUtils(CharmTestCase): def test_create_user_credentials_user_exists_v3(self): self.test_create_user_credentials_user_exists(test_api_version=3) - @patch.object(utils, 'get_manager') - def test_create_user_case_sensitivity(self, KeystoneManager): - """ Test case sensitivity of check for existence in - the user creation process """ - mock_keystone = MagicMock() - KeystoneManager.return_value = mock_keystone - - mock_user = MagicMock() - mock_keystone.resolve_user_id.return_value = mock_user - mock_keystone.api.users.list.return_value = [mock_user] - - # User found is the same i.e. userA == userA - mock_user.name = 'userA' - utils.create_user('userA', 'passA') - mock_keystone.resolve_user_id.assert_called_with('userA', - user_domain=None) - mock_keystone.create_user.assert_not_called() - - # User found has different case but is the same - # i.e. Usera != userA - mock_user.name = 'Usera' - utils.create_user('userA', 'passA') - mock_keystone.resolve_user_id.assert_called_with('userA', - user_domain=None) - mock_keystone.create_user.assert_not_called() - - # User is different i.e. 
UserB != userA - mock_user.name = 'UserB' - utils.create_user('userA', 'passA') - mock_keystone.resolve_user_id.assert_called_with('userA', - user_domain=None) - mock_keystone.create_user.assert_called_with(name='userA', - password='passA', - tenant_id=None, - domain_id=None, - email='juju@localhost') - @patch.object(utils, 'set_service_password') @patch.object(utils, 'get_service_password') @patch.object(utils, 'create_user_credentials') @@ -705,7 +670,8 @@ class TestKeystoneUtils(CharmTestCase): self.assertFalse(utils.is_db_ready(use_current_context=True)) self.relation_ids.return_value = ['acme:0'] - self.assertRaises(utils.is_db_ready, use_current_context=True) + with self.assertRaises(Exception): + utils.is_db_ready(use_current_context=True) allowed_units = 'unit/0' self.related_units.return_value = ['unit/0'] @@ -772,7 +738,7 @@ class TestKeystoneUtils(CharmTestCase): mock_keystone.resolve_service_id.return_value = 'sid1' KeystoneManager.return_value = mock_keystone utils.delete_service_entry('bob', 'bill') - mock_keystone.api.services.delete.assert_called_with('sid1') + mock_keystone.delete_service_by_id.assert_called_once_with('sid1') @patch('os.path.isfile') def test_get_file_stored_domain_id(self, isfile_mock): diff --git a/unit_tests/test_scripts_fernet_rotate_and_sync.py b/unit_tests/test_scripts_fernet_rotate_and_sync.py index d91c12aa..153daf44 100644 --- a/unit_tests/test_scripts_fernet_rotate_and_sync.py +++ b/unit_tests/test_scripts_fernet_rotate_and_sync.py @@ -29,7 +29,7 @@ class FernetRotateAndSync(CharmTestCase): @patch('charmhelpers.core.hookenv.log') @patch('time.ctime') - @patch('__builtin__.print') + @patch('builtins.print') def test_cli_log(self, mock_print, mock_ctime, mock_ch_log): mock_ctime.return_value = 'FAKE_TIMESTAMP' script.cli_log('message', level='DEBUG') diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py index 81bc3acd..d0d35493 100644 --- a/unit_tests/test_utils.py +++ b/unit_tests/test_utils.py @@ -17,8 +17,7 @@ import os import unittest import yaml -from contextlib import contextmanager -from mock import patch, MagicMock +from mock import patch patch('charmhelpers.contrib.openstack.utils.set_os_workload_status').start() patch('charmhelpers.core.hookenv.status_set').start() @@ -39,7 +38,7 @@ def load_config(): if not config: logging.error('Could not find config.yaml in any parent directory ' - 'of %s. ' % file) + 'of %s. ' % __file__) raise Exception with open(config) as f: @@ -52,7 +51,7 @@ def get_default_config(): ''' default_config = {} config = load_config() - for k, v in config.iteritems(): + for k, v in config.items(): if 'default' in v: default_config[k] = v['default'] else: @@ -117,21 +116,3 @@ class TestRelation(object): elif attr in self.relation_data: return self.relation_data[attr] return None - - -@contextmanager -def patch_open(): - '''Patch open() to allow mocking both open() itself and the file that is - yielded. - Yields the mock for "open" and "file", respectively. - ''' - mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) - - @contextmanager - def stub_open(*args, **kwargs): - mock_open(*args, **kwargs) - yield mock_file - - with patch('__builtin__.open', stub_open): - yield mock_open, mock_file