From 6cc6019aceec9ecb4a2455c071a60d726cd63c85 Mon Sep 17 00:00:00 2001
From: Kiall Mac Innes
Date: Sun, 12 Jan 2014 13:44:49 +0000
Subject: [PATCH] Sync with oslo-incubator

Modules held back:

- openstack.common.policy. Bug #1268315.

Compatibility changes made:

- Additional arguments added to the Context object
- RPC Services no longer need to explicitly wait on the rpc consumer
  thread
- Our openstack-common.conf file explicitly includes transitive
  dependencies. This is no longer necessary. Bug #1268314.

Additional changes still necessary:

- We use some deprecated modules (WSGI); we need to remove our
  dependence on these. Bug #1268313.

Change-Id: Ia52b386a928e1d03709dfdb23f0d504c7d01bc19
Oslo-SHA1: 8710dbacfd7d8dad58fccbdd4ffda7246dcbbb7c
Closes-Bug: 1268314
---
 designate/agent/service.py | 4 -
 designate/api/service.py | 2 +-
 designate/central/service.py | 4 -
 designate/context.py | 16 +-
 designate/openstack/common/context.py | 35 +-
 .../openstack/common/eventlet_backdoor.py | 79 +++-
 designate/openstack/common/excutils.py | 74 +++-
 designate/openstack/common/fileutils.py | 4 +-
 designate/openstack/common/fixture/config.py | 1 -
 .../openstack/common/fixture/lockutils.py | 6 +-
 .../openstack/common/fixture/mockpatch.py | 2 -
 .../openstack/common/fixture/moxstubout.py | 2 -
 designate/openstack/common/gettextutils.py | 386 +++++++++++++++++-
 designate/openstack/common/importutils.py | 9 +-
 designate/openstack/common/jsonutils.py | 81 +++-
 designate/openstack/common/local.py | 15 +-
 designate/openstack/common/lockutils.py | 122 +++---
 designate/openstack/common/log.py | 240 ++++++++---
 designate/openstack/common/loopingcall.py | 68 ++-
 designate/openstack/common/network_utils.py | 25 +-
 .../openstack/common/notifier/__init__.py | 14 -
 designate/openstack/common/notifier/api.py | 40 +-
 .../openstack/common/notifier/log_notifier.py | 4 +-
 .../common/notifier/no_op_notifier.py | 2 +-
 designate/openstack/common/notifier/proxy.py | 79 ++++
 .../openstack/common/notifier/rpc_notifier.py | 7 +-
 .../common/notifier/rpc_notifier2.py | 7 +-
 designate/openstack/common/periodic_task.py | 115 ------
 designate/openstack/common/processutils.py | 184 +++++++--
 .../openstack/common/py3kcompat/__init__.py | 0
 .../openstack/common/py3kcompat/urlutils.py | 65 +++
 .../openstack/common/rootwrap/__init__.py | 16 -
 designate/openstack/common/rootwrap/cmd.py | 136 ++++++
 .../openstack/common/rootwrap/filters.py | 232 ++++++++---
 .../openstack/common/rootwrap/wrapper.py | 48 ++-
 designate/openstack/common/rpc/__init__.py | 11 +-
 designate/openstack/common/rpc/amqp.py | 231 +++++------
 designate/openstack/common/rpc/common.py | 157 +++----
 designate/openstack/common/rpc/dispatcher.py | 54 ++-
 designate/openstack/common/rpc/impl_fake.py | 23 +-
 designate/openstack/common/rpc/impl_kombu.py | 169 ++++----
 designate/openstack/common/rpc/impl_qpid.py | 350 +++++++++++-----
 designate/openstack/common/rpc/impl_zmq.py | 179 ++++----
 designate/openstack/common/rpc/matchmaker.py | 221 +++-------
 .../openstack/common/rpc/matchmaker_redis.py | 16 +-
 .../openstack/common/rpc/matchmaker_ring.py | 106 +++++
 designate/openstack/common/rpc/proxy.py | 86 +++-
 designate/openstack/common/rpc/serializer.py | 54 +++
 designate/openstack/common/rpc/service.py | 11 +-
 .../zmq_receiver.py} | 29 +-
 designate/openstack/common/service.py | 298 +++++++++++---
 designate/openstack/common/sslutils.py | 24 +-
 designate/openstack/common/strutils.py | 163 ++++++--
 designate/openstack/common/test.py | 5 +-
 designate/openstack/common/threadgroup.py | 37 +-
 designate/openstack/common/timeutils.py | 66 ++-
 designate/openstack/common/uuidutils.py | 2 -
 designate/openstack/common/versionutils.py | 148 +++++++
 designate/openstack/common/xmlutils.py | 4 +-
 designate/openstack/deprecated/__init__.py | 0
 .../{common => deprecated}/exception.py | 0
 .../openstack/{common => deprecated}/wsgi.py | 2 +-
 designate/sink/service.py | 4 -
 designate/wsgi.py | 2 +-
 etc/designate/designate.conf.sample | 2 +-
 openstack-common.conf | 16 +-
 tox.ini | 2 +-
 67 files changed, 3208 insertions(+), 1388 deletions(-)
 create mode 100644 designate/openstack/common/notifier/proxy.py
 delete mode 100644 designate/openstack/common/periodic_task.py
 create mode 100644 designate/openstack/common/py3kcompat/__init__.py
 create mode 100644 designate/openstack/common/py3kcompat/urlutils.py
 create mode 100644 designate/openstack/common/rootwrap/cmd.py
 create mode 100644 designate/openstack/common/rpc/matchmaker_ring.py
 create mode 100644 designate/openstack/common/rpc/serializer.py
 rename designate/openstack/common/{notifier/rabbit_notifier.py => rpc/zmq_receiver.py} (54%)
 create mode 100644 designate/openstack/common/versionutils.py
 create mode 100644 designate/openstack/deprecated/__init__.py
 rename designate/openstack/{common => deprecated}/exception.py (100%)
 rename designate/openstack/{common => deprecated}/wsgi.py (99%)

diff --git a/designate/agent/service.py b/designate/agent/service.py
index b9acf2fbb..5eb91fc65 100644
--- a/designate/agent/service.py
+++ b/designate/agent/service.py
@@ -40,10 +40,6 @@ class Service(rpc_service.Service):
         self.manager.start()
         super(Service, self).start()
 
-    def wait(self):
-        super(Service, self).wait()
-        self.conn.consumer_thread.wait()
-
     def stop(self):
         super(Service, self).stop()
         self.manager.stop()
diff --git a/designate/api/service.py b/designate/api/service.py
index c7e9c6b45..ff0b54c44 100644
--- a/designate/api/service.py
+++ b/designate/api/service.py
@@ -15,7 +15,7 @@
 # under the License.
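The agent diff above shows the first compatibility change from the commit
message in action: with this sync, the oslo rpc Service base class waits on
its consumer thread itself, so service subclasses keep only start() and
stop(). A minimal sketch of the resulting pattern (the manager attribute is
designate's own, not part of the oslo API):

    from designate.openstack.common.rpc import service as rpc_service

    class Service(rpc_service.Service):
        def start(self):
            self.manager.start()
            super(Service, self).start()

        # No wait() override needed any more: the oslo base class
        # now joins the rpc consumer thread on its own.

        def stop(self):
            super(Service, self).stop()
            self.manager.stop()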
from paste import deploy from designate.openstack.common import log as logging -from designate.openstack.common import wsgi +from designate.openstack.deprecated import wsgi from oslo.config import cfg from designate import exceptions from designate import utils diff --git a/designate/central/service.py b/designate/central/service.py index b01996a26..2049488ca 100644 --- a/designate/central/service.py +++ b/designate/central/service.py @@ -74,10 +74,6 @@ class Service(rpc_service.Service): super(Service, self).start() - def wait(self): - super(Service, self).wait() - self.conn.consumer_thread.wait() - def stop(self): super(Service, self).stop() diff --git a/designate/context.py b/designate/context.py index f0625c28f..6689dbfa3 100644 --- a/designate/context.py +++ b/designate/context.py @@ -21,17 +21,23 @@ LOG = logging.getLogger(__name__) class DesignateContext(context.RequestContext): - def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, + def __init__(self, auth_token=None, user=None, tenant=None, domain=None, + user_domain=None, project_domain=None, is_admin=False, read_only=False, show_deleted=False, request_id=None, - roles=[], service_catalog=None, all_tenants=False): + instance_uuid=None, roles=[], service_catalog=None, + all_tenants=False): super(DesignateContext, self).__init__( auth_token=auth_token, user=user, tenant=tenant, + domain=domain, + user_domain=user_domain, + project_domain=project_domain, is_admin=is_admin, read_only=read_only, show_deleted=show_deleted, - request_id=request_id) + request_id=request_id, + instance_uuid=instance_uuid) self.roles = roles self.service_catalog = service_catalog @@ -43,6 +49,7 @@ class DesignateContext(context.RequestContext): # Remove the user and tenant id fields, this map to user and tenant d.pop('user_id') d.pop('tenant_id') + d.pop('user_identity') return self.from_dict(d) @@ -61,6 +68,9 @@ class DesignateContext(context.RequestContext): @classmethod def from_dict(cls, values): + if 'user_identity' in values: + values.pop('user_identity') + return cls(**values) def elevated(self, show_deleted=None): diff --git a/designate/openstack/common/context.py b/designate/openstack/common/context.py index e9cfd73cc..182b04436 100644 --- a/designate/openstack/common/context.py +++ b/designate/openstack/common/context.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -27,39 +25,60 @@ import uuid def generate_request_id(): - return 'req-' + str(uuid.uuid4()) + return 'req-%s' % str(uuid.uuid4()) class RequestContext(object): - """ + """Helper class to represent useful information about a request context. + Stores information about the security context under which the user accesses the system, as well as additional request information. 
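The user_identity key that the new to_dict() emits is derived from the other
context fields, which is why the designate diff above pops it again before
rebuilding a context: a to_dict()/from_dict() round-trip must not pass the
computed key back into __init__(). A small illustrative sketch (field values
are made up):

    ctxt = DesignateContext(user='bob', tenant='demo')

    d = ctxt.to_dict()
    # d['user_identity'] == 'bob demo - - -'
    # (user, tenant, domain, user_domain, p_domain; '-' when unset)

    clone = DesignateContext.from_dict(d)  # pops 'user_identity' first
    assert clone.user == 'bob' and clone.tenant == 'demo'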
""" - def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, - read_only=False, show_deleted=False, request_id=None): + user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}' + + def __init__(self, auth_token=None, user=None, tenant=None, domain=None, + user_domain=None, project_domain=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None, + instance_uuid=None): self.auth_token = auth_token self.user = user self.tenant = tenant + self.domain = domain + self.user_domain = user_domain + self.project_domain = project_domain self.is_admin = is_admin self.read_only = read_only self.show_deleted = show_deleted + self.instance_uuid = instance_uuid if not request_id: request_id = generate_request_id() self.request_id = request_id def to_dict(self): + user_idt = ( + self.user_idt_format.format(user=self.user or '-', + tenant=self.tenant or '-', + domain=self.domain or '-', + user_domain=self.user_domain or '-', + p_domain=self.project_domain or '-')) + return {'user': self.user, 'tenant': self.tenant, + 'domain': self.domain, + 'user_domain': self.user_domain, + 'project_domain': self.project_domain, 'is_admin': self.is_admin, 'read_only': self.read_only, 'show_deleted': self.show_deleted, 'auth_token': self.auth_token, - 'request_id': self.request_id} + 'request_id': self.request_id, + 'instance_uuid': self.instance_uuid, + 'user_identity': user_idt} -def get_admin_context(show_deleted="no"): +def get_admin_context(show_deleted=False): context = RequestContext(None, tenant=None, is_admin=True, diff --git a/designate/openstack/common/eventlet_backdoor.py b/designate/openstack/common/eventlet_backdoor.py index c0ad460fe..472a0575d 100644 --- a/designate/openstack/common/eventlet_backdoor.py +++ b/designate/openstack/common/eventlet_backdoor.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2012 OpenStack Foundation. # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -16,8 +14,13 @@ # License for the specific language governing permissions and limitations # under the License. +from __future__ import print_function + +import errno import gc +import os import pprint +import socket import sys import traceback @@ -26,36 +29,83 @@ import eventlet.backdoor import greenlet from oslo.config import cfg +from designate.openstack.common.gettextutils import _ +from designate.openstack.common import log as logging + +help_for_backdoor_port = ( + "Acceptable values are 0, , and :, where 0 results " + "in listening on a random tcp port number; results in listening " + "on the specified port number (and not enabling backdoor if that port " + "is in use); and : results in listening on the smallest " + "unused port number within the specified range of port numbers. The " + "chosen port is displayed in the service's log file.") eventlet_backdoor_opts = [ - cfg.IntOpt('backdoor_port', + cfg.StrOpt('backdoor_port', default=None, - help='port for eventlet backdoor to listen') + help="Enable eventlet backdoor. %s" % help_for_backdoor_port) ] CONF = cfg.CONF CONF.register_opts(eventlet_backdoor_opts) +LOG = logging.getLogger(__name__) + + +class EventletBackdoorConfigValueError(Exception): + def __init__(self, port_range, help_msg, ex): + msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' + '%(help)s' % + {'range': port_range, 'ex': ex, 'help': help_msg}) + super(EventletBackdoorConfigValueError, self).__init__(msg) + self.port_range = port_range def _dont_use_this(): - print "Don't use this, just disconnect instead" + print("Don't use this, just disconnect instead") def _find_objects(t): - return filter(lambda o: isinstance(o, t), gc.get_objects()) + return [o for o in gc.get_objects() if isinstance(o, t)] def _print_greenthreads(): for i, gt in enumerate(_find_objects(greenlet.greenlet)): - print i, gt + print(i, gt) traceback.print_stack(gt.gr_frame) - print + print() def _print_nativethreads(): for threadId, stack in sys._current_frames().items(): - print threadId + print(threadId) traceback.print_stack(stack) - print + print() + + +def _parse_port_range(port_range): + if ':' not in port_range: + start, end = port_range, port_range + else: + start, end = port_range.split(':', 1) + try: + start, end = int(start), int(end) + if end < start: + raise ValueError + return start, end + except ValueError as ex: + raise EventletBackdoorConfigValueError(port_range, ex, + help_for_backdoor_port) + + +def _listen(host, start_port, end_port, listen_func): + try_port = start_port + while True: + try: + return listen_func((host, try_port)) + except socket.error as exc: + if (exc.errno != errno.EADDRINUSE or + try_port >= end_port): + raise + try_port += 1 def initialize_if_enabled(): @@ -70,6 +120,8 @@ def initialize_if_enabled(): if CONF.backdoor_port is None: return None + start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) + # NOTE(johannes): The standard sys.displayhook will print the value of # the last expression and set it to __builtin__._, which overwrites # the __builtin__._ that gettext sets. Let's switch to using pprint @@ -80,8 +132,13 @@ def initialize_if_enabled(): pprint.pprint(val) sys.displayhook = displayhook - sock = eventlet.listen(('localhost', CONF.backdoor_port)) + sock = _listen('localhost', start_port, end_port, eventlet.listen) + + # In the case of backdoor port being zero, a port number is assigned by + # listen(). In any case, pull the port number out here. port = sock.getsockname()[1] + LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()}) eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, locals=backdoor_locals) return port diff --git a/designate/openstack/common/excutils.py b/designate/openstack/common/excutils.py index 329b0c72f..089f92823 100644 --- a/designate/openstack/common/excutils.py +++ b/designate/openstack/common/excutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # Copyright 2012, Red Hat, Inc. # @@ -19,16 +17,17 @@ Exception related utilities. """ -import contextlib import logging import sys +import time import traceback +import six + from designate.openstack.common.gettextutils import _ -@contextlib.contextmanager -def save_and_reraise_exception(): +class save_and_reraise_exception(object): """Save current exception, run some code and then re-raise. In some cases the exception context can be cleared, resulting in None @@ -40,12 +39,61 @@ def save_and_reraise_exception(): To work around this, we save the exception state, run handler code, and then re-raise the original exception. If another exception occurs, the saved exception is logged and the new exception is re-raised. 
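For reference, backdoor_port becomes a StrOpt above precisely so that all
three documented forms flow through _parse_port_range(); roughly:

    _parse_port_range('0')          # (0, 0): bind any free port
    _parse_port_range('8001')       # (8001, 8001): this port or fail
    _parse_port_range('8000:8010')  # (8000, 8010): first free port in range
    _parse_port_range('8010:8000')  # raises EventletBackdoorConfigValueError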
+ + In some cases the caller may not want to re-raise the exception, and + for those circumstances this context provides a reraise flag that + can be used to suppress the exception. For example:: + + except Exception: + with save_and_reraise_exception() as ctxt: + decide_if_need_reraise() + if not should_be_reraised: + ctxt.reraise = False """ - type_, value, tb = sys.exc_info() - try: - yield - except Exception: - logging.error(_('Original exception being dropped: %s'), - traceback.format_exception(type_, value, tb)) - raise - raise type_, value, tb + def __init__(self): + self.reraise = True + + def __enter__(self): + self.type_, self.value, self.tb, = sys.exc_info() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(self.type_, + self.value, + self.tb)) + return False + if self.reraise: + six.reraise(self.type_, self.value, self.tb) + + +def forever_retry_uncaught_exceptions(infunc): + def inner_func(*args, **kwargs): + last_log_time = 0 + last_exc_message = None + exc_count = 0 + while True: + try: + return infunc(*args, **kwargs) + except Exception as exc: + this_exc_message = six.u(str(exc)) + if this_exc_message == last_exc_message: + exc_count += 1 + else: + exc_count = 1 + # Do not log any more frequently than once a minute unless + # the exception message changes + cur_time = int(time.time()) + if (cur_time - last_log_time > 60 or + this_exc_message != last_exc_message): + logging.exception( + _('Unexpected exception occurred %d time(s)... ' + 'retrying.') % exc_count) + last_log_time = cur_time + last_exc_message = this_exc_message + exc_count = 0 + # This should be a very rare event. In case it isn't, do + # a sleep. + time.sleep(1) + return inner_func diff --git a/designate/openstack/common/fileutils.py b/designate/openstack/common/fileutils.py index 8fb7c2859..e6c65e134 100644 --- a/designate/openstack/common/fileutils.py +++ b/designate/openstack/common/fileutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -22,7 +20,7 @@ import os import tempfile from designate.openstack.common import excutils -from designate.openstack.common.gettextutils import _ # noqa +from designate.openstack.common.gettextutils import _ from designate.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/designate/openstack/common/fixture/config.py b/designate/openstack/common/fixture/config.py index 93748c1c9..0bf90ff7a 100644 --- a/designate/openstack/common/fixture/config.py +++ b/designate/openstack/common/fixture/config.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013 Mirantis, Inc. # Copyright 2013 OpenStack Foundation diff --git a/designate/openstack/common/fixture/lockutils.py b/designate/openstack/common/fixture/lockutils.py index 2c0dd1010..3de4bc73b 100644 --- a/designate/openstack/common/fixture/lockutils.py +++ b/designate/openstack/common/fixture/lockutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -17,7 +15,7 @@ import fixtures -from designate.openstack.common.lockutils import lock +from designate.openstack.common import lockutils class LockFixture(fixtures.Fixture): @@ -45,7 +43,7 @@ class LockFixture(fixtures.Fixture): test method exits. 
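Alongside the context-manager rework, excutils above also gains
forever_retry_uncaught_exceptions, a decorator aimed at long-running loops
such as rpc consumers; a sketch of typical use (process_next_message is a
hypothetical work item):

    from designate.openstack.common import excutils

    @excutils.forever_retry_uncaught_exceptions
    def consume_forever():
        # An uncaught exception is logged (repeats rate-limited to once
        # a minute), the decorator sleeps 1s, and the call is retried.
        process_next_message()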
(either by completing or raising an exception) """ def __init__(self, name, lock_file_prefix=None): - self.mgr = lock(name, lock_file_prefix, True) + self.mgr = lockutils.lock(name, lock_file_prefix, True) def setUp(self): super(LockFixture, self).setUp() diff --git a/designate/openstack/common/fixture/mockpatch.py b/designate/openstack/common/fixture/mockpatch.py index cd0d6ca6b..858e77cd0 100644 --- a/designate/openstack/common/fixture/mockpatch.py +++ b/designate/openstack/common/fixture/mockpatch.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2013 Hewlett-Packard Development Company, L.P. diff --git a/designate/openstack/common/fixture/moxstubout.py b/designate/openstack/common/fixture/moxstubout.py index a0e74fd11..e8c031f08 100644 --- a/designate/openstack/common/fixture/moxstubout.py +++ b/designate/openstack/common/fixture/moxstubout.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2013 Hewlett-Packard Development Company, L.P. diff --git a/designate/openstack/common/gettextutils.py b/designate/openstack/common/gettextutils.py index b8c61c507..d1c943ba2 100644 --- a/designate/openstack/common/gettextutils.py +++ b/designate/openstack/common/gettextutils.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -23,11 +22,390 @@ Usual usage in an openstack.common module: from designate.openstack.common.gettextutils import _ """ +import copy import gettext +import locale +from logging import handlers +import os +import re + +from babel import localedata +import six + +_localedir = os.environ.get('designate'.upper() + '_LOCALEDIR') +_t = gettext.translation('designate', localedir=_localedir, fallback=True) + +_AVAILABLE_LANGUAGES = {} +USE_LAZY = False -t = gettext.translation('openstack-common', 'locale', fallback=True) +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + global USE_LAZY + USE_LAZY = True def _(msg): - return t.ugettext(msg) + if USE_LAZY: + return Message(msg, domain='designate') + else: + if six.PY3: + return _t.gettext(msg) + return _t.ugettext(msg) + + +def install(domain, lazy=False): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + :param domain: the translation domain + :param lazy: indicates whether or not to install the lazy _() function. + The lazy _() introduces a way to do deferred translation + of messages by installing a _ that builds Message objects, + instead of strings, which can then be lazily translated into + any available locale. 
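Both entry points in the new gettextutils produce Message objects instead of
plain unicode once lazy mode is enabled; a sketch, assuming designate's
message catalogs are installed:

    from designate.openstack.common import gettextutils

    # Projects that install a builtin _():
    gettextutils.install('designate', lazy=True)

    # Projects that import _ straight from gettextutils:
    gettextutils.enable_lazy()

    msg = _('Domain %s not found')  # now a Message, not unicode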
+ """ + if lazy: + # NOTE(mrodden): Lazy gettext functionality. + # + # The following introduces a deferred way to do translations on + # messages in OpenStack. We override the standard _() function + # and % (format string) operation to build Message objects that can + # later be translated when we have more information. + def _lazy_gettext(msg): + """Create and return a Message object. + + Lazy gettext function for a given domain, it is a factory method + for a project/module to get a lazy gettext function for its own + translation domain (i.e. nova, glance, cinder, etc.) + + Message encapsulates a string so that we can translate + it later when needed. + """ + return Message(msg, domain=domain) + + from six import moves + moves.builtins.__dict__['_'] = _lazy_gettext + else: + localedir = '%s_LOCALEDIR' % domain.upper() + if six.PY3: + gettext.install(domain, + localedir=os.environ.get(localedir)) + else: + gettext.install(domain, + localedir=os.environ.get(localedir), + unicode=True) + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, domain='designate', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. 
+ # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + unicode_mod = super(Message, self).__mod__(other) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=self._sanitize_mod_params(other), + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. + + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + params = self._trim_dictionary_parameters(other) + else: + params = self._copy_param(other) + return params + + def _trim_dictionary_parameters(self, dict_param): + """Return a dict that only has matching entries in the msgid.""" + # NOTE(luisg): Here we trim down the dictionary passed as parameters + # to avoid carrying a lot of unnecessary weight around in the message + # object, for example if someone passes in Message() % locals() but + # only some params are used, and additionally we prevent errors for + # non-deepcopyable objects by unicoding() them. + + # Look for %(param) keys in msgid; + # Skip %% and deal with the case where % is first character on the line + keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) + + # If we don't find any %(param) keys but have a %s + if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): + # Apparently the full dictionary is the parameter + params = self._copy_param(dict_param) + else: + params = {} + for key in keys: + params[key] = self._copy_param(dict_param[key]) + + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except TypeError: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. 
' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. + + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. + + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. 
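Taken together, the pieces above mean a formatted Message carries its msgid
and parameters along, so it can be re-rendered in another locale long after
the % operation; a sketch (the zh_CN catalog is an assumption):

    from designate.openstack.common import gettextutils
    from designate.openstack.common.gettextutils import Message

    msg = Message('An error occurred: %(reason)s') % {'reason': 'timeout'}

    # translate() returns plain unicode; printing the Message itself
    # would raise UnicodeError by design (see __str__ above).
    print(gettextutils.translate(msg))           # system default locale
    print(gettextutils.translate(msg, 'zh_CN'))  # deferred translation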
+ + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. + handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/designate/openstack/common/importutils.py b/designate/openstack/common/importutils.py index 3bd277f47..4fd9ae2bc 100644 --- a/designate/openstack/common/importutils.py +++ b/designate/openstack/common/importutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -24,7 +22,7 @@ import traceback def import_class(import_str): - """Returns a class from a string including module and class""" + """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) @@ -41,8 +39,9 @@ def import_object(import_str, *args, **kwargs): def import_object_ns(name_space, import_str, *args, **kwargs): - """ - Import a class and return an instance of it, first by trying + """Tries to import object from default namespace. + + Imports a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace. """ diff --git a/designate/openstack/common/jsonutils.py b/designate/openstack/common/jsonutils.py index fce4fd30c..891947938 100644 --- a/designate/openstack/common/jsonutils.py +++ b/designate/openstack/common/jsonutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# Copyright 2011 Justin Santa Barbara @@ -38,10 +36,33 @@ import functools import inspect import itertools import json -import xmlrpclib +try: + import xmlrpclib +except ImportError: + # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3 + # however the function and object call signatures + # remained the same. This whole try/except block should + # be removed and replaced with a call to six.moves once + # six 1.4.2 is released. See http://bit.ly/1bqrVzu + import xmlrpc.client as xmlrpclib +import six + +from designate.openstack.common import gettextutils +from designate.openstack.common import importutils from designate.openstack.common import timeutils +netaddr = importutils.try_import("netaddr") + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (six.string_types + six.integer_types + + (type(None), bool, float)) + def to_primitive(value, convert_instances=False, convert_datetime=True, level=0, max_depth=3): @@ -58,19 +79,32 @@ def to_primitive(value, convert_instances=False, convert_datetime=True, Therefore, convert_instances=True is lossy ... be aware. """ - nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod, - inspect.isfunction, inspect.isgeneratorfunction, - inspect.isgenerator, inspect.istraceback, inspect.isframe, - inspect.iscode, inspect.isbuiltin, inspect.isroutine, - inspect.isabstract] - for test in nasty: - if test(value): - return unicode(value) + # handle obvious types first - order of basic types determined by running + # full tests on nova project, resulting in the following counts: + # 572754 + # 460353 + # 379632 + # 274610 + # 199918 + # 114200 + # 51817 + # 26164 + # 6491 + # 283 + # 19 + if isinstance(value, _simple_types): + return value - # value of itertools.count doesn't get caught by inspects - # above and results in infinite loop when list(value) is called. + if isinstance(value, datetime.datetime): + if convert_datetime: + return timeutils.strtime(value) + else: + return value + + # value of itertools.count doesn't get caught by nasty_type_tests + # and results in infinite loop when list(value) is called. if type(value) == itertools.count: - return unicode(value) + return six.text_type(value) # FIXME(vish): Workaround for LP bug 852095. 
Without this workaround, # tests that raise an exception in a mocked method that @@ -91,18 +125,21 @@ def to_primitive(value, convert_instances=False, convert_datetime=True, convert_datetime=convert_datetime, level=level, max_depth=max_depth) + if isinstance(value, dict): + return dict((k, recursive(v)) for k, v in six.iteritems(value)) + elif isinstance(value, (list, tuple)): + return [recursive(lv) for lv in value] + # It's not clear why xmlrpclib created their own DateTime type, but # for our purposes, make it a datetime type which is explicitly # handled if isinstance(value, xmlrpclib.DateTime): value = datetime.datetime(*tuple(value.timetuple())[:6]) - if isinstance(value, (list, tuple)): - return [recursive(v) for v in value] - elif isinstance(value, dict): - return dict((k, recursive(v)) for k, v in value.iteritems()) - elif convert_datetime and isinstance(value, datetime.datetime): + if convert_datetime and isinstance(value, datetime.datetime): return timeutils.strtime(value) + elif isinstance(value, gettextutils.Message): + return value.data elif hasattr(value, 'iteritems'): return recursive(dict(value.iteritems()), level=level + 1) elif hasattr(value, '__iter__'): @@ -111,12 +148,16 @@ def to_primitive(value, convert_instances=False, convert_datetime=True, # Likely an instance of something. Watch for cycles. # Ignore class member vars. return recursive(value.__dict__, level=level + 1) + elif netaddr and isinstance(value, netaddr.IPAddress): + return six.text_type(value) else: + if any(test(value) for test in _nasty_type_tests): + return six.text_type(value) return value except TypeError: # Class objects are tricky since they may define something like # __iter__ defined but it isn't callable as list(). - return unicode(value) + return six.text_type(value) def dumps(value, default=to_primitive, **kwargs): diff --git a/designate/openstack/common/local.py b/designate/openstack/common/local.py index f1bfc824b..0819d5b97 100644 --- a/designate/openstack/common/local.py +++ b/designate/openstack/common/local.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -15,16 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. -"""Greenthread local storage of variables using weak references""" +"""Local storage of variables using weak references""" +import threading import weakref -from eventlet import corolocal - -class WeakLocal(corolocal.local): +class WeakLocal(threading.local): def __getattribute__(self, attr): - rval = corolocal.local.__getattribute__(self, attr) + rval = super(WeakLocal, self).__getattribute__(attr) if rval: # NOTE(mikal): this bit is confusing. What is stored is a weak # reference, not the value itself. We therefore need to lookup @@ -34,7 +31,7 @@ class WeakLocal(corolocal.local): def __setattr__(self, attr, value): value = weakref.ref(value) - return corolocal.local.__setattr__(self, attr, value) + return super(WeakLocal, self).__setattr__(attr, value) # NOTE(mikal): the name "store" should be deprecated in the future @@ -45,4 +42,4 @@ store = WeakLocal() # "strong" store will hold a reference to the object so that it never falls out # of scope. 
weak_store = WeakLocal() -strong_store = corolocal.local +strong_store = threading.local() diff --git a/designate/openstack/common/lockutils.py b/designate/openstack/common/lockutils.py index 413a7f000..4e9df2a65 100644 --- a/designate/openstack/common/lockutils.py +++ b/designate/openstack/common/lockutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -31,7 +29,7 @@ import weakref from oslo.config import cfg from designate.openstack.common import fileutils -from designate.openstack.common.gettextutils import _ # noqa +from designate.openstack.common.gettextutils import _ from designate.openstack.common import local from designate.openstack.common import log as logging @@ -140,25 +138,46 @@ _semaphores_lock = threading.Lock() @contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): - """Context based lock +def external_lock(name, lock_file_prefix=None, lock_path=None): + with internal_lock(name): + LOG.debug(_('Attempting to grab file lock "%(lock)s"'), + {'lock': name}) - This function yields a `threading.Semaphore` instance (if we don't use - eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is - True, in which case, it'll yield an InterProcessLock instance. + # We need a copy of lock_path because it is non-local + local_lock_path = lock_path or CONF.lock_path + if not local_lock_path: + raise cfg.RequiredOptError('lock_path') - :param lock_file_prefix: The lock_file_prefix argument is used to provide - lock files on disk with a meaningful prefix. + if not os.path.exists(local_lock_path): + fileutils.ensure_tree(local_lock_path) + LOG.info(_('Created lock path: %s'), local_lock_path) - :param external: The external keyword argument denotes whether this lock - should work across multiple processes. This means that if two different - workers both run a a method decorated with @synchronized('mylock', - external=True), only one of them will execute at a time. + def add_prefix(name, prefix): + if not prefix: + return name + sep = '' if prefix.endswith('-') else '-' + return '%s%s%s' % (prefix, sep, name) - :param lock_path: The lock_path keyword argument is used to specify a - special location for external lock files to live. If nothing is set, then - CONF.lock_path is used as a default. 
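After the lockutils split above, lock() simply dispatches to internal_lock()
or external_lock(), so both flavours read the same at call sites; a sketch
(lock names and do_work are illustrative):

    from designate.openstack.common import lockutils

    with lockutils.lock('zone-update'):
        do_work()  # serialized within this process only

    # external=True adds a file lock; CONF.lock_path (or the lock_path
    # argument) must be set, otherwise RequiredOptError is raised.
    with lockutils.lock('zone-update', lock_file_prefix='designate',
                        external=True):
        do_work()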
- """ + # NOTE(mikal): the lock name cannot contain directory + # separators + lock_file_name = add_prefix(name.replace(os.sep, '_'), + lock_file_prefix) + + lock_file_path = os.path.join(local_lock_path, lock_file_name) + + try: + lock = InterProcessLock(lock_file_path) + with lock as lock: + LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), + {'lock': name, 'path': lock_file_path}) + yield lock + finally: + LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), + {'lock': name, 'path': lock_file_path}) + + +@contextlib.contextmanager +def internal_lock(name): with _semaphores_lock: try: sem = _semaphores[name] @@ -175,48 +194,39 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None): local.strong_store.locks_held.append(name) try: - if external and not CONF.disable_process_locking: - LOG.debug(_('Attempting to grab file lock "%(lock)s"'), - {'lock': name}) - - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path or CONF.lock_path - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - LOG.info(_('Created lock path: %s'), local_lock_path) - - def add_prefix(name, prefix): - if not prefix: - return name - sep = '' if prefix.endswith('-') else '-' - return '%s%s%s' % (prefix, sep, name) - - # NOTE(mikal): the lock name cannot contain directory - # separators - lock_file_name = add_prefix(name.replace(os.sep, '_'), - lock_file_prefix) - - lock_file_path = os.path.join(local_lock_path, lock_file_name) - - try: - lock = InterProcessLock(lock_file_path) - with lock as lock: - LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - yield lock - finally: - LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - else: - yield sem - + yield sem finally: local.strong_store.locks_held.remove(name) +@contextlib.contextmanager +def lock(name, lock_file_prefix=None, external=False, lock_path=None): + """Context based lock + + This function yields a `threading.Semaphore` instance (if we don't use + eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is + True, in which case, it'll yield an InterProcessLock instance. + + :param lock_file_prefix: The lock_file_prefix argument is used to provide + lock files on disk with a meaningful prefix. + + :param external: The external keyword argument denotes whether this lock + should work across multiple processes. This means that if two different + workers both run a a method decorated with @synchronized('mylock', + external=True), only one of them will execute at a time. + + :param lock_path: The lock_path keyword argument is used to specify a + special location for external lock files to live. If nothing is set, then + CONF.lock_path is used as a default. + """ + if external and not CONF.disable_process_locking: + with external_lock(name, lock_file_prefix, lock_path) as lock: + yield lock + else: + with internal_lock(name) as lock: + yield lock + + def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): """Synchronization decorator. diff --git a/designate/openstack/common/log.py b/designate/openstack/common/log.py index f3df57733..28f46acdb 100644 --- a/designate/openstack/common/log.py +++ b/designate/openstack/common/log.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. 
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -29,28 +27,46 @@ It also allows setting of formatting information through conf. """ -import cStringIO import inspect import itertools import logging import logging.config import logging.handlers import os -import stat +import re import sys import traceback from oslo.config import cfg +import six +from six import moves from designate.openstack.common.gettextutils import _ +from designate.openstack.common import importutils from designate.openstack.common import jsonutils from designate.openstack.common import local -from designate.openstack.common import notifier -_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + common_cli_opts = [ cfg.BoolOpt('debug', short='d', @@ -65,19 +81,24 @@ common_cli_opts = [ ] logging_cli_opts = [ - cfg.StrOpt('log-config', + cfg.StrOpt('log-config-append', metavar='PATH', - help='If this option is specified, the logging configuration ' - 'file specified is used and overrides any other logging ' - 'options specified. Please see the Python logging module ' + deprecated_name='log-config', + help='The name of logging configuration file. It does not ' + 'disable existing loggers, but just appends specified ' + 'logging configuration to any other existing logging ' + 'options. Please see the Python logging module ' 'documentation for details on logging configuration ' 'files.'), cfg.StrOpt('log-format', - default=_DEFAULT_LOG_FORMAT, + default=None, metavar='FORMAT', - help='A logging.Formatter log message format string which may ' + help='DEPRECATED. ' + 'A logging.Formatter log message format string which may ' 'use any of the available logging.LogRecord attributes. ' - 'Default: %(default)s'), + 'This option is deprecated. Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), cfg.StrOpt('log-date-format', default=_DEFAULT_LOG_DATE_FORMAT, metavar='DATE_FORMAT', @@ -87,11 +108,11 @@ logging_cli_opts = [ metavar='PATH', deprecated_name='logfile', help='(Optional) Name of log file to output to. 
' - 'If not set, logging will go to stdout.'), + 'If no default is set, logging will go to stdout.'), cfg.StrOpt('log-dir', deprecated_name='logdir', - help='(Optional) The directory to keep log files in ' - '(will be prepended to --log-file)'), + help='(Optional) The base directory used for relative ' + '--log-file paths'), cfg.BoolOpt('use-syslog', default=False, help='Use syslog for logging.'), @@ -103,17 +124,14 @@ logging_cli_opts = [ generic_log_opts = [ cfg.BoolOpt('use_stderr', default=True, - help='Log output to standard error'), - cfg.StrOpt('logfile_mode', - default='0644', - help='Default file mode used when creating log files'), + help='Log output to standard error') ] log_opts = [ cfg.StrOpt('logging_context_format_string', - default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s ' - '[%(request_id)s %(user)s %(tenant)s] %(instance)s' - '%(message)s', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user_identity)s] ' + '%(instance)s%(message)s', help='format string to use for log messages with context'), cfg.StrOpt('logging_default_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' @@ -128,12 +146,13 @@ log_opts = [ help='prefix each line of exception output with this format'), cfg.ListOpt('default_log_levels', default=[ + 'amqp=WARN', 'amqplib=WARN', - 'sqlalchemy=WARN', 'boto=WARN', + 'qpid=WARN', + 'sqlalchemy=WARN', 'suds=INFO', - 'keystone=INFO', - 'eventlet.wsgi.server=WARN' + 'iso8601=WARN', ], help='list of logger=LEVEL pairs'), cfg.BoolOpt('publish_errors', @@ -209,8 +228,64 @@ def _get_log_file_path(binary=None): binary = binary or _get_binary_name() return '%s.log' % (os.path.join(logdir, binary),) + return None -class ContextAdapter(logging.LoggerAdapter): + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. 
+ if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): warn = logging.LoggerAdapter.warning def __init__(self, logger, project_name, version_string): @@ -218,8 +293,9 @@ class ContextAdapter(logging.LoggerAdapter): self.project = project_name self.version = version_string - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) + @property + def handlers(self): + return self.logger.handlers def deprecated(self, msg, *args, **kwargs): stdmsg = _("Deprecated: %s") % msg @@ -230,6 +306,13 @@ class ContextAdapter(logging.LoggerAdapter): self.warn(stdmsg, *args, **kwargs) def process(self, msg, kwargs): + # NOTE(mrodden): catch any Message/other object and + # coerce to unicode before they can get + # to the python logging and possibly + # cause string encoding trouble + if not isinstance(msg, six.string_types): + msg = six.text_type(msg) + if 'extra' not in kwargs: kwargs['extra'] = {} extra = kwargs['extra'] @@ -241,18 +324,20 @@ class ContextAdapter(logging.LoggerAdapter): extra.update(_dictify_context(context)) instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid', None) or + kwargs.pop('instance_uuid', None)) instance_extra = '' if instance: instance_extra = CONF.instance_format % instance - else: - instance_uuid = kwargs.pop('instance_uuid', None) - if instance_uuid: - instance_extra = (CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra['instance'] = instance_extra - extra.update({"project": self.project}) - extra.update({"version": self.version}) + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version extra['extra'] = extra.copy() return msg, kwargs @@ -266,7 +351,7 @@ class JSONFormatter(logging.Formatter): def formatException(self, ei, strip_newlines=True): lines = traceback.format_exception(*ei) if strip_newlines: - lines = [itertools.ifilter( + lines = [moves.filter( lambda x: x, line.rstrip().splitlines()) for line in lines] lines = list(itertools.chain(*lines)) @@ -303,30 +388,40 @@ class JSONFormatter(logging.Formatter): return jsonutils.dumps(message) -class PublishErrorsHandler(logging.Handler): - def emit(self, record): - if ('designate.openstack.common.notifier.log_notifier' in - CONF.notification_driver): - return - notifier.api.notify(None, 'error.publisher', - 'error_notification', - notifier.api.ERROR, - dict(error=record.msg)) - - def _create_logging_excepthook(product_name): - def logging_excepthook(type, value, tb): + def logging_excepthook(exc_type, value, tb): extra = {} - if CONF.verbose: - extra['exc_info'] = (type, value, tb) + if CONF.verbose or CONF.debug: + extra['exc_info'] = (exc_type, value, tb) 
getLogger(product_name).critical(str(value), **extra) return logging_excepthook +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config_append): + try: + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except moves.configparser.Error as exc: + raise LogConfigError(log_config_append, str(exc)) + + def setup(product_name): """Setup logging.""" - if CONF.log_config: - logging.config.fileConfig(CONF.log_config) + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) else: _setup_logging_from_conf() sys.excepthook = _create_logging_excepthook(product_name) @@ -378,31 +473,33 @@ def _setup_logging_from_conf(): filelog = logging.handlers.WatchedFileHandler(logpath) log_root.addHandler(filelog) - mode = int(CONF.logfile_mode, 8) - st = os.stat(logpath) - if st.st_mode != (stat.S_IFREG | mode): - os.chmod(logpath, mode) - if CONF.use_stderr: streamlog = ColorHandler() log_root.addHandler(streamlog) - elif not CONF.log_file: + elif not logpath: # pass sys.stdout as a positional argument # python2.6 calls the argument strm, in 2.7 it's stream streamlog = logging.StreamHandler(sys.stdout) log_root.addHandler(streamlog) if CONF.publish_errors: - log_root.addHandler(PublishErrorsHandler(logging.ERROR)) + handler = importutils.import_object( + "designate.openstack.common.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + datefmt = CONF.log_date_format for handler in log_root.handlers: - datefmt = CONF.log_date_format + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. if CONF.log_format: handler.setFormatter(logging.Formatter(fmt=CONF.log_format, datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') else: - handler.setFormatter(LegacyFormatter(datefmt=datefmt)) + handler.setFormatter(ContextFormatter(datefmt=datefmt)) if CONF.debug: log_root.setLevel(logging.DEBUG) @@ -411,14 +508,11 @@ def _setup_logging_from_conf(): else: log_root.setLevel(logging.WARNING) - level = logging.NOTSET for pair in CONF.default_log_levels: mod, _sep, level_name = pair.partition('=') level = logging.getLevelName(level_name) logger = logging.getLogger(mod) logger.setLevel(level) - for handler in log_root.handlers: - logger.addHandler(handler) _loggers = {} @@ -431,6 +525,16 @@ def getLogger(name='unknown', version='unknown'): return _loggers[name] +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. + """ + return LazyAdapter(name, version) + + class WritableLogger(object): """A thin wrapper that responds to `write` and logs.""" @@ -442,7 +546,7 @@ class WritableLogger(object): self.logger.log(self.level, msg) -class LegacyFormatter(logging.Formatter): +class ContextFormatter(logging.Formatter): """A context.RequestContext aware formatter configured through flags. 
The flags used to set format strings are: logging_context_format_string @@ -483,7 +587,7 @@ class LegacyFormatter(logging.Formatter): if not record: return logging.Formatter.formatException(self, exc_info) - stringbuffer = cStringIO.StringIO() + stringbuffer = moves.StringIO() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, stringbuffer) lines = stringbuffer.getvalue().split('\n') diff --git a/designate/openstack/common/loopingcall.py b/designate/openstack/common/loopingcall.py index bbac8bb2f..2fecdde4f 100644 --- a/designate/openstack/common/loopingcall.py +++ b/designate/openstack/common/loopingcall.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara @@ -46,12 +44,23 @@ class LoopingCallDone(Exception): self.retvalue = retvalue -class LoopingCall(object): +class LoopingCallBase(object): def __init__(self, f=None, *args, **kw): self.args = args self.kw = kw self.f = f self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" def start(self, interval, initial_delay=None): self._running = True @@ -73,11 +82,11 @@ class LoopingCall(object): LOG.warn(_('task run outlasted interval by %s sec') % -delay) greenthread.sleep(delay if delay > 0 else 0) - except LoopingCallDone, e: + except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: - LOG.exception(_('in looping call')) + LOG.exception(_('in fixed duration looping call')) done.send_exception(*sys.exc_info()) return else: @@ -88,8 +97,49 @@ class LoopingCall(object): greenthread.spawn_n(_inner) return self.done - def stop(self): - self._running = False - def wait(self): - return self.done.wait() +# TODO(mikal): this class name is deprecated in Havana and should be removed +# in the I release +LoopingCall = FixedIntervalLoopingCall + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. + """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug(_('Dynamic looping call sleeping for %.02f ' + 'seconds'), idle) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/designate/openstack/common/network_utils.py b/designate/openstack/common/network_utils.py index 5224e01aa..c2f6b6594 100644 --- a/designate/openstack/common/network_utils.py +++ b/designate/openstack/common/network_utils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 OpenStack Foundation. # All Rights Reserved. # @@ -19,14 +17,12 @@ Network-related utilities and helper functions. 
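
As a usage note for the reworked looping-call API above: a minimal sketch, assuming eventlet is installed and this module is importable; the callbacks and counters here are invented for illustration.

    from designate.openstack.common import loopingcall

    state = {'beats': 0}

    def beat():
        state['beats'] += 1
        if state['beats'] >= 3:
            # Raising LoopingCallDone ends the loop; its retvalue is what
            # wait() returns to the caller.
            raise loopingcall.LoopingCallDone(retvalue=state['beats'])

    timer = loopingcall.FixedIntervalLoopingCall(beat)
    timer.start(interval=0.1)
    print(timer.wait())  # -> 3

    def poll():
        # A DynamicLoopingCall callback returns how many seconds to sleep
        # before it is invoked again.
        return 5.0

    poller = loopingcall.DynamicLoopingCall(poll)
    poller.start(periodic_interval_max=30)  # caps whatever poll() returns
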
""" -import logging - -LOG = logging.getLogger(__name__) +from designate.openstack.common.py3kcompat import urlutils def parse_host_port(address, default_port=None): - """ - Interpret a string as a host:port pair. + """Interpret a string as a host:port pair. + An IPv6 address MUST be escaped if accompanied by a port, because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 means both [2001:db8:85a3::8a2e:370:7334] and @@ -66,3 +62,18 @@ def parse_host_port(address, default_port=None): port = default_port return (host, None if port is None else int(port)) + + +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL using urlparse.urlsplit(), splitting query and fragments. + This function papers over Python issue9374 when needed. + + The parameters are the same as urlparse.urlsplit. + """ + scheme, netloc, path, query, fragment = urlutils.urlsplit( + url, scheme, allow_fragments) + if allow_fragments and '#' in path: + path, fragment = path.split('#', 1) + if '?' in path: + path, query = path.split('?', 1) + return urlutils.SplitResult(scheme, netloc, path, query, fragment) diff --git a/designate/openstack/common/notifier/__init__.py b/designate/openstack/common/notifier/__init__.py index 45c3b46ae..e69de29bb 100644 --- a/designate/openstack/common/notifier/__init__.py +++ b/designate/openstack/common/notifier/__init__.py @@ -1,14 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/designate/openstack/common/notifier/api.py b/designate/openstack/common/notifier/api.py index bd4825dcd..8530aee0f 100644 --- a/designate/openstack/common/notifier/api.py +++ b/designate/openstack/common/notifier/api.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import socket import uuid from oslo.config import cfg @@ -30,13 +31,12 @@ LOG = logging.getLogger(__name__) notifier_opts = [ cfg.MultiStrOpt('notification_driver', default=[], - deprecated_name='list_notifier_drivers', help='Driver or drivers to handle sending notifications'), cfg.StrOpt('default_notification_level', default='INFO', help='Default notification level for outgoing notifications'), cfg.StrOpt('default_publisher_id', - default='$host', + default=None, help='Default publisher_id for outgoing notifications'), ] @@ -57,7 +57,7 @@ class BadPriorityException(Exception): def notify_decorator(name, fn): - """ decorator for notify which is used from utils.monkey_patch() + """Decorator for notify which is used from utils.monkey_patch(). 
:param name: name of the function :param function: - object of the function @@ -75,7 +75,7 @@ def notify_decorator(name, fn): ctxt = context.get_context_from_function_and_args(fn, args, kwarg) notify(ctxt, - CONF.default_publisher_id, + CONF.default_publisher_id or socket.gethostname(), name, CONF.default_notification_level, body) @@ -85,7 +85,10 @@ def notify_decorator(name, fn): def publisher_id(service, host=None): if not host: - host = CONF.host + try: + host = CONF.host + except AttributeError: + host = CONF.default_publisher_id or socket.gethostname() return "%s.%s" % (service, host) @@ -154,29 +157,16 @@ def _get_drivers(): if _drivers is None: _drivers = {} for notification_driver in CONF.notification_driver: - add_driver(notification_driver) - + try: + driver = importutils.import_module(notification_driver) + _drivers[notification_driver] = driver + except ImportError: + LOG.exception(_("Failed to load notifier %s. " + "These notifications will not be sent.") % + notification_driver) return _drivers.values() -def add_driver(notification_driver): - """Add a notification driver at runtime.""" - # Make sure the driver list is initialized. - _get_drivers() - if isinstance(notification_driver, basestring): - # Load and add - try: - driver = importutils.import_module(notification_driver) - _drivers[notification_driver] = driver - except ImportError: - LOG.exception(_("Failed to load notifier %s. " - "These notifications will not be sent.") % - notification_driver) - else: - # Driver is already loaded; just add the object. - _drivers[notification_driver] = notification_driver - - def _reset_drivers(): """Used by unit tests to reset the drivers.""" global _drivers diff --git a/designate/openstack/common/notifier/log_notifier.py b/designate/openstack/common/notifier/log_notifier.py index 975d3fe15..74600e6e6 100644 --- a/designate/openstack/common/notifier/log_notifier.py +++ b/designate/openstack/common/notifier/log_notifier.py @@ -24,7 +24,9 @@ CONF = cfg.CONF def notify(_context, message): """Notifies the recipient of the desired event given the model. - Log notifications using openstack's default logging system""" + + Log notifications using OpenStack's default logging system. + """ priority = message.get('priority', CONF.default_notification_level) diff --git a/designate/openstack/common/notifier/no_op_notifier.py b/designate/openstack/common/notifier/no_op_notifier.py index bc7a56ca7..13d946e36 100644 --- a/designate/openstack/common/notifier/no_op_notifier.py +++ b/designate/openstack/common/notifier/no_op_notifier.py @@ -15,5 +15,5 @@ def notify(_context, message): - """Notifies the recipient of the desired event given the model""" + """Notifies the recipient of the desired event given the model.""" pass diff --git a/designate/openstack/common/notifier/proxy.py b/designate/openstack/common/notifier/proxy.py new file mode 100644 index 000000000..9c66477f7 --- /dev/null +++ b/designate/openstack/common/notifier/proxy.py @@ -0,0 +1,79 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A temporary helper which emulates designate.messaging.Notifier. + +This helper method allows us to do the tedious porting to the new Notifier API +as a standalone commit so that the commit which switches us to designate.messaging +is smaller and easier to review. This file will be removed as part of that +commit. +""" + +from oslo.config import cfg + +from designate.openstack.common.notifier import api as notifier_api + +CONF = cfg.CONF + + +class Notifier(object): + + def __init__(self, publisher_id): + super(Notifier, self).__init__() + self.publisher_id = publisher_id + + _marker = object() + + def prepare(self, publisher_id=_marker): + ret = self.__class__(self.publisher_id) + if publisher_id is not self._marker: + ret.publisher_id = publisher_id + return ret + + def _notify(self, ctxt, event_type, payload, priority): + notifier_api.notify(ctxt, + self.publisher_id, + event_type, + priority, + payload) + + def audit(self, ctxt, event_type, payload): + # No audit in old notifier. + self._notify(ctxt, event_type, payload, 'INFO') + + def debug(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'DEBUG') + + def info(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'INFO') + + def warn(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'WARN') + + warning = warn + + def error(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'ERROR') + + def critical(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'CRITICAL') + + +def get_notifier(service=None, host=None, publisher_id=None): + if not publisher_id: + publisher_id = "%s.%s" % (service, host or CONF.host) + return Notifier(publisher_id) diff --git a/designate/openstack/common/notifier/rpc_notifier.py b/designate/openstack/common/notifier/rpc_notifier.py index 2c85f038c..3e41e443b 100644 --- a/designate/openstack/common/notifier/rpc_notifier.py +++ b/designate/openstack/common/notifier/rpc_notifier.py @@ -24,14 +24,14 @@ LOG = logging.getLogger(__name__) notification_topic_opt = cfg.ListOpt( 'notification_topics', default=['notifications', ], - help='AMQP topic used for openstack notifications') + help='AMQP topic used for OpenStack notifications') CONF = cfg.CONF CONF.register_opt(notification_topic_opt) def notify(context, message): - """Sends a notification via RPC""" + """Sends a notification via RPC.""" if not context: context = req_context.get_admin_context() priority = message.get('priority', @@ -43,4 +43,5 @@ def notify(context, message): rpc.notify(context, topic, message) except Exception: LOG.exception(_("Could not send notification to %(topic)s. 
" - "Payload=%(message)s"), locals()) + "Payload=%(message)s"), + {"topic": topic, "message": message}) diff --git a/designate/openstack/common/notifier/rpc_notifier2.py b/designate/openstack/common/notifier/rpc_notifier2.py index e31c0a6c5..ba9aa0116 100644 --- a/designate/openstack/common/notifier/rpc_notifier2.py +++ b/designate/openstack/common/notifier/rpc_notifier2.py @@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__) notification_topic_opt = cfg.ListOpt( 'topics', default=['notifications', ], - help='AMQP topic(s) used for openstack notifications') + help='AMQP topic(s) used for OpenStack notifications') opt_group = cfg.OptGroup(name='rpc_notifier2', title='Options for rpc_notifier2') @@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group) def notify(context, message): - """Sends a notification via RPC""" + """Sends a notification via RPC.""" if not context: context = req_context.get_admin_context() priority = message.get('priority', @@ -49,4 +49,5 @@ def notify(context, message): rpc.notify(context, topic, message, envelope=True) except Exception: LOG.exception(_("Could not send notification to %(topic)s. " - "Payload=%(message)s"), locals()) + "Payload=%(message)s"), + {"topic": topic, "message": message}) diff --git a/designate/openstack/common/periodic_task.py b/designate/openstack/common/periodic_task.py deleted file mode 100644 index eaa6a4869..000000000 --- a/designate/openstack/common/periodic_task.py +++ /dev/null @@ -1,115 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from designate.openstack.common.gettextutils import _ -from designate.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -def periodic_task(*args, **kwargs): - """Decorator to indicate that a method is a periodic task. - - This decorator can be used in two ways: - - 1. Without arguments '@periodic_task', this will be run on every tick - of the periodic scheduler. - - 2. With arguments, @periodic_task(ticks_between_runs=N), this will be - run on every N ticks of the periodic scheduler. - """ - def decorator(f): - f._periodic_task = True - f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0) - return f - - # NOTE(sirp): The `if` is necessary to allow the decorator to be used with - # and without parens. 
- # - # In the 'with-parens' case (with kwargs present), this function needs to - # return a decorator function since the interpreter will invoke it like: - # - # periodic_task(*args, **kwargs)(f) - # - # In the 'without-parens' case, the original function will be passed - # in as the first argument, like: - # - # periodic_task(f) - if kwargs: - return decorator - else: - return decorator(args[0]) - - -class _PeriodicTasksMeta(type): - def __init__(cls, names, bases, dict_): - """Metaclass that allows us to collect decorated periodic tasks.""" - super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) - - # NOTE(sirp): if the attribute is not present then we must be the base - # class, so, go ahead and initialize it. If the attribute is present, - # then we're a subclass so make a copy of it so we don't step on our - # parent's toes. - try: - cls._periodic_tasks = cls._periodic_tasks[:] - except AttributeError: - cls._periodic_tasks = [] - - try: - cls._ticks_to_skip = cls._ticks_to_skip.copy() - except AttributeError: - cls._ticks_to_skip = {} - - # This uses __dict__ instead of - # inspect.getmembers(cls, inspect.ismethod) so only the methods of the - # current class are added when this class is scanned, and base classes - # are not added redundantly. - for value in cls.__dict__.values(): - if getattr(value, '_periodic_task', False): - task = value - name = task.__name__ - cls._periodic_tasks.append((name, task)) - cls._ticks_to_skip[name] = task._ticks_between_runs - - -class PeriodicTasks(object): - __metaclass__ = _PeriodicTasksMeta - - def run_periodic_tasks(self, context, raise_on_error=False): - """Tasks to be run at a periodic interval.""" - for task_name, task in self._periodic_tasks: - full_task_name = '.'.join([self.__class__.__name__, task_name]) - - ticks_to_skip = self._ticks_to_skip[task_name] - if ticks_to_skip > 0: - LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s" - " ticks left until next run"), - dict(full_task_name=full_task_name, - ticks_to_skip=ticks_to_skip)) - self._ticks_to_skip[task_name] -= 1 - continue - - self._ticks_to_skip[task_name] = task._ticks_between_runs - LOG.debug(_("Running periodic task %(full_task_name)s"), - dict(full_task_name=full_task_name)) - - try: - task(self, context) - except Exception as e: - if raise_on_error: - raise - LOG.exception(_("Error during %(full_task_name)s:" - " %(e)s"), - dict(e=e, full_task_name=full_task_name)) diff --git a/designate/openstack/common/processutils.py b/designate/openstack/common/processutils.py index e1154f79c..df9314140 100644 --- a/designate/openstack/common/processutils.py +++ b/designate/openstack/common/processutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -19,19 +17,27 @@ System-level utilities and helper functions. 
""" -import logging +import logging as stdlib_logging +import os import random import shlex +import signal from eventlet.green import subprocess from eventlet import greenthread from designate.openstack.common.gettextutils import _ +from designate.openstack.common import log as logging LOG = logging.getLogger(__name__) +class InvalidArgumentError(Exception): + def __init__(self, message=None): + super(InvalidArgumentError, self).__init__(message) + + class UnknownArgumentError(Exception): def __init__(self, message=None): super(UnknownArgumentError, self).__init__(message) @@ -40,29 +46,53 @@ class UnknownArgumentError(Exception): class ProcessExecutionError(Exception): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + if description is None: - description = "Unexpected error while running command." + description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' - message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" - % (description, cmd, exit_code, stdout, stderr)) + message = _('%(description)s\n' + 'Command: %(cmd)s\n' + 'Exit code: %(exit_code)s\n' + 'Stdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % {'description': description, + 'cmd': cmd, + 'exit_code': exit_code, + 'stdout': stdout, + 'stderr': stderr} super(ProcessExecutionError, self).__init__(message) +class NoRootWrapSpecified(Exception): + def __init__(self, message=None): + super(NoRootWrapSpecified, self).__init__(message) + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + def execute(*cmd, **kwargs): - """ - Helper method to shell out and execute a command through subprocess with - optional retry. + """Helper method to shell out and execute a command through subprocess. + + Allows optional retry. :param cmd: Passed to subprocess.Popen. :type cmd: string :param process_input: Send to opened process. - :type proces_input: string - :param check_exit_code: Defaults to 0. Will raise - :class:`ProcessExecutionError` - if the command exits without returning this value - as a returncode - :type check_exit_code: int + :type process_input: string + :param check_exit_code: Single bool, int, or list of allowed exit + codes. Defaults to [0]. Raise + :class:`ProcessExecutionError` unless + program exits with one of these code. + :type check_exit_code: boolean, int, or [int] :param delay_on_retry: True | False. Defaults to True. If set to True, wait a short amount of time before retrying. :type delay_on_retry: boolean @@ -72,8 +102,15 @@ def execute(*cmd, **kwargs): the command is prefixed by the command specified in the root_helper kwarg. :type run_as_root: boolean - :param root_helper: command to prefix all cmd's with + :param root_helper: command to prefix to commands called with + run_as_root=True :type root_helper: string + :param shell: whether or not there should be a shell used to + execute this command. Defaults to false. + :type shell: boolean + :param loglevel: log level for execute commands. + :type loglevel: int. 
(Should be stdlib_logging.DEBUG or + stdlib_logging.INFO) :returns: (stdout, stderr) from process execution :raises: :class:`UnknownArgumentError` on receiving unknown arguments @@ -81,28 +118,54 @@ def execute(*cmd, **kwargs): """ process_input = kwargs.pop('process_input', None) - check_exit_code = kwargs.pop('check_exit_code', 0) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) run_as_root = kwargs.pop('run_as_root', False) root_helper = kwargs.pop('root_helper', '') - if len(kwargs): + shell = kwargs.pop('shell', False) + loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG) + + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + + if kwargs: raise UnknownArgumentError(_('Got unknown keyword args ' 'to utils.execute: %r') % kwargs) - if run_as_root: + + if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: + if not root_helper: + raise NoRootWrapSpecified( + message=_('Command requested root, but did not ' + 'specify a root helper.')) cmd = shlex.split(root_helper) + list(cmd) + cmd = map(str, cmd) while attempts > 0: attempts -= 1 try: - LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) + LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd)) _PIPE = subprocess.PIPE # pylint: disable=E1101 + + if os.name == 'nt': + preexec_fn = None + close_fds = False + else: + preexec_fn = _subprocess_setup + close_fds = True + obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, - close_fds=True) + close_fds=close_fds, + preexec_fn=preexec_fn, + shell=shell) result = None if process_input is not None: result = obj.communicate(process_input) @@ -110,22 +173,19 @@ def execute(*cmd, **kwargs): result = obj.communicate() obj.stdin.close() # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101 - if _returncode: - LOG.debug(_('Result was %s') % _returncode) - if (isinstance(check_exit_code, int) and - not isinstance(check_exit_code, bool) and - _returncode != check_exit_code): - (stdout, stderr) = result - raise ProcessExecutionError(exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) + LOG.log(loglevel, _('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) return result except ProcessExecutionError: if not attempts: raise else: - LOG.debug(_('%r failed. Retrying.'), cmd) + LOG.log(loglevel, _('%r failed. Retrying.'), cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: @@ -133,3 +193,63 @@ def execute(*cmd, **kwargs): # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) + + +def trycmd(*args, **kwargs): + """A wrapper around execute() to more easily handle warnings and errors. + + Returns an (out, err) tuple of strings containing the output of + the command's stdout and stderr. If 'err' is not empty then the + command can be considered to have failed. + + :discard_warnings True | False. Defaults to False. 
If set to True, + then for succeeding commands, stderr is cleared + + """ + discard_warnings = kwargs.pop('discard_warnings', False) + + try: + out, err = execute(*args, **kwargs) + failed = False + except ProcessExecutionError as exn: + out, err = '', str(exn) + failed = True + + if not failed and discard_warnings and err: + # Handle commands that output to stderr but otherwise succeed + err = '' + + return out, err + + +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_('Running cmd (SSH): %s'), cmd) + if addl_env: + raise InvalidArgumentError(_('Environment not supported over SSH')) + + if process_input: + # This is (probably) fixable if we need it... + raise InvalidArgumentError(_('process_input not supported over SSH')) + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + raise ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + + return (stdout, stderr) diff --git a/designate/openstack/common/py3kcompat/__init__.py b/designate/openstack/common/py3kcompat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/designate/openstack/common/py3kcompat/urlutils.py b/designate/openstack/common/py3kcompat/urlutils.py new file mode 100644 index 000000000..6200271f3 --- /dev/null +++ b/designate/openstack/common/py3kcompat/urlutils.py @@ -0,0 +1,65 @@ +# +# Copyright 2013 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
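
A quick, hypothetical use of the trycmd() helper added above: it folds ProcessExecutionError into a non-empty err string, so callers can branch on failure without a try/except (the command and path are illustrative).

    from designate.openstack.common import processutils

    out, err = processutils.trycmd('ls', '/no/such/path')
    if err:
        print('command failed: %s' % err)
    else:
        print('command output: %s' % out)
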
+# + +""" +Python2/Python3 compatibility layer for OpenStack +""" + +import six + +if six.PY3: + # python3 + import urllib.error + import urllib.parse + import urllib.request + + urlencode = urllib.parse.urlencode + urljoin = urllib.parse.urljoin + quote = urllib.parse.quote + parse_qsl = urllib.parse.parse_qsl + unquote = urllib.parse.unquote + unquote_plus = urllib.parse.unquote_plus + urlparse = urllib.parse.urlparse + urlsplit = urllib.parse.urlsplit + urlunsplit = urllib.parse.urlunsplit + SplitResult = urllib.parse.SplitResult + + urlopen = urllib.request.urlopen + URLError = urllib.error.URLError + pathname2url = urllib.request.pathname2url +else: + # python2 + import urllib + import urllib2 + import urlparse + + urlencode = urllib.urlencode + quote = urllib.quote + unquote = urllib.unquote + unquote_plus = urllib.unquote_plus + + parse = urlparse + parse_qsl = parse.parse_qsl + urljoin = parse.urljoin + urlparse = parse.urlparse + urlsplit = parse.urlsplit + urlunsplit = parse.urlunsplit + SplitResult = parse.SplitResult + + urlopen = urllib2.urlopen + URLError = urllib2.URLError + pathname2url = urllib.pathname2url diff --git a/designate/openstack/common/rootwrap/__init__.py b/designate/openstack/common/rootwrap/__init__.py index 2d32e4ef3..e69de29bb 100644 --- a/designate/openstack/common/rootwrap/__init__.py +++ b/designate/openstack/common/rootwrap/__init__.py @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/designate/openstack/common/rootwrap/cmd.py b/designate/openstack/common/rootwrap/cmd.py new file mode 100644 index 000000000..96af2ef5c --- /dev/null +++ b/designate/openstack/common/rootwrap/cmd.py @@ -0,0 +1,136 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Root wrapper for OpenStack services + + Filters which commands a service is allowed to run as another user. + + To use this with designate, you should set the following in + designate.conf: + rootwrap_config=/etc/designate/rootwrap.conf + + You also need to let the designate user run designate-rootwrap + as root in sudoers: + designate ALL = (root) NOPASSWD: /usr/bin/designate-rootwrap + /etc/designate/rootwrap.conf * + + Service packaging should deploy .filters files only on nodes where + they are needed, to avoid allowing more than is necessary. 
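
Callers consume the py3kcompat shim above by importing the aliases rather than branching on the interpreter version themselves; for example (the URL and query values are illustrative):

    from designate.openstack.common.py3kcompat import urlutils

    parts = urlutils.urlsplit('http://example.com/zones?limit=10')
    print(parts.netloc)                       # example.com
    print(urlutils.parse_qsl(parts.query))    # [('limit', '10')]
    print(urlutils.urlencode({'marker': 2}))  # marker=2
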
+""" + +from __future__ import print_function + +import ConfigParser +import logging +import os +import pwd +import signal +import subprocess +import sys + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 +RC_BADCONFIG = 97 +RC_NOEXECFOUND = 96 + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def _exit_error(execname, message, errorcode, log=True): + print("%s: %s" % (execname, message), file=sys.stderr) + if log: + logging.error(message) + sys.exit(errorcode) + + +def _getlogin(): + try: + return os.getlogin() + except OSError: + return (os.getenv('USER') or + os.getenv('USERNAME') or + os.getenv('LOGNAME')) + + +def main(): + # Split arguments, require at least a command + execname = sys.argv.pop(0) + if len(sys.argv) < 2: + _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) + + configfile = sys.argv.pop(0) + userargs = sys.argv[:] + + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "designate", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from designate.openstack.common.rootwrap import wrapper + + # Load configuration + try: + rawconfig = ConfigParser.RawConfigParser() + rawconfig.read(configfile) + config = wrapper.RootwrapConfig(rawconfig) + except ValueError as exc: + msg = "Incorrect value in %s: %s" % (configfile, exc.message) + _exit_error(execname, msg, RC_BADCONFIG, log=False) + except ConfigParser.Error: + _exit_error(execname, "Incorrect configuration file: %s" % configfile, + RC_BADCONFIG, log=False) + + if config.use_syslog: + wrapper.setup_syslog(execname, + config.syslog_log_facility, + config.syslog_log_level) + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters(config.filters_path) + try: + filtermatch = wrapper.match_filter(filters, userargs, + exec_dirs=config.exec_dirs) + if filtermatch: + command = filtermatch.get_command(userargs, + exec_dirs=config.exec_dirs) + if config.use_syslog: + logging.info("(%s > %s) Executing %s (filter match = %s)" % ( + _getlogin(), pwd.getpwuid(os.getuid())[0], + command, filtermatch.name)) + + obj = subprocess.Popen(command, + stdin=sys.stdin, + stdout=sys.stdout, + stderr=sys.stderr, + preexec_fn=_subprocess_setup, + env=filtermatch.get_environment(userargs)) + obj.wait() + sys.exit(obj.returncode) + + except wrapper.FilterMatchNotExecutable as exc: + msg = ("Executable not found: %s (filter match = %s)" + % (exc.match.exec_path, exc.match.name)) + _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) + + except wrapper.NoFilterMatched: + msg = ("Unauthorized command: %s (no filter matched)" + % ' '.join(userargs)) + _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) diff --git a/designate/openstack/common/rootwrap/filters.py b/designate/openstack/common/rootwrap/filters.py index eadda256c..cdc32f80d 100644 --- a/designate/openstack/common/rootwrap/filters.py +++ b/designate/openstack/common/rootwrap/filters.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. 
# @@ -20,7 +18,7 @@ import re class CommandFilter(object): - """Command filter only checking that the 1st argument matches exec_path""" + """Command filter only checking that the 1st argument matches exec_path.""" def __init__(self, exec_path, run_as, *args): self.name = '' @@ -30,11 +28,11 @@ class CommandFilter(object): self.real_exec = None def get_exec(self, exec_dirs=[]): - """Returns existing executable, or empty string if none found""" + """Returns existing executable, or empty string if none found.""" if self.real_exec is not None: return self.real_exec self.real_exec = "" - if self.exec_path.startswith('/'): + if os.path.isabs(self.exec_path): if os.access(self.exec_path, os.X_OK): self.real_exec = self.exec_path else: @@ -46,10 +44,8 @@ class CommandFilter(object): return self.real_exec def match(self, userargs): - """Only check that the first argument (command) matches exec_path""" - if (os.path.basename(self.exec_path) == userargs[0]): - return True - return False + """Only check that the first argument (command) matches exec_path.""" + return userargs and os.path.basename(self.exec_path) == userargs[0] def get_command(self, userargs, exec_dirs=[]): """Returns command to execute (with sudo -u if run_as != root).""" @@ -60,16 +56,16 @@ class CommandFilter(object): return [to_exec] + userargs[1:] def get_environment(self, userargs): - """Returns specific environment to set, None if none""" + """Returns specific environment to set, None if none.""" return None class RegExpFilter(CommandFilter): - """Command filter doing regexp matching for every argument""" + """Command filter doing regexp matching for every argument.""" def match(self, userargs): # Early skip if command or number of args don't match - if (len(self.args) != len(userargs)): + if (not userargs or len(self.args) != len(userargs)): # DENY: argument numbers don't match return False # Compare each arg (anchoring pattern explicitly at end of string) @@ -88,40 +84,61 @@ class RegExpFilter(CommandFilter): return False -class DnsmasqFilter(CommandFilter): - """Specific filter for the dnsmasq call (which includes env)""" +class PathFilter(CommandFilter): + """Command filter checking that path arguments are within given dirs - CONFIG_FILE_ARG = 'CONFIG_FILE' + One can specify the following constraints for command arguments: + 1) pass - pass an argument as is to the resulting command + 2) some_str - check if an argument is equal to the given string + 3) abs path - check if a path argument is within the given base dir + + A typical rootwrapper filter entry looks like this: + # cmdname: filter name, raw command, user, arg_i_constraint [, ...] 
+        chown: PathFilter, /bin/chown, root, nova, /var/lib/images
+
+    """
 
     def match(self, userargs):
-        if (userargs[0] == 'env' and
-                userargs[1].startswith(self.CONFIG_FILE_ARG) and
-                userargs[2].startswith('NETWORK_ID=') and
-                userargs[3] == 'dnsmasq'):
-            return True
-        return False
+        if not userargs or len(userargs) < 2:
+            return False
+
+        command, arguments = userargs[0], userargs[1:]
+
+        equal_args_num = len(self.args) == len(arguments)
+        exec_is_valid = super(PathFilter, self).match(userargs)
+        args_equal_or_pass = all(
+            arg == 'pass' or arg == value
+            for arg, value in zip(self.args, arguments)
+            if not os.path.isabs(arg)  # arguments not specifying abs paths
+        )
+        paths_are_within_base_dirs = all(
+            os.path.commonprefix([arg, os.path.realpath(value)]) == arg
+            for arg, value in zip(self.args, arguments)
+            if os.path.isabs(arg)  # arguments specifying abs paths
+        )
+
+        return (equal_args_num and
+                exec_is_valid and
+                args_equal_or_pass and
+                paths_are_within_base_dirs)
 
     def get_command(self, userargs, exec_dirs=[]):
-        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
-        dnsmasq_pos = userargs.index('dnsmasq')
-        return [to_exec] + userargs[dnsmasq_pos + 1:]
+        command, arguments = userargs[0], userargs[1:]
 
-    def get_environment(self, userargs):
-        env = os.environ.copy()
-        env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
-        env['NETWORK_ID'] = userargs[2].split('=')[-1]
-        return env
+        # convert path values to canonical ones; copy other args as is
+        args = [os.path.realpath(value) if os.path.isabs(arg) else value
+                for arg, value in zip(self.args, arguments)]
 
-
-class DeprecatedDnsmasqFilter(DnsmasqFilter):
-    """Variant of dnsmasq filter to support old-style FLAGFILE"""
-    CONFIG_FILE_ARG = 'FLAGFILE'
+        return super(PathFilter, self).get_command([command] + args,
+                                                   exec_dirs)
 
 
 class KillFilter(CommandFilter):
     """Specific filter for the kill calls.
+
     1st argument is the user to run /bin/kill under
     2nd argument is the location of the affected executable
+        if the argument is not absolute, it is checked against $PATH
     Subsequent arguments list the accepted signals (if any)
 
     This filter relies on /proc to accurately determine affected
@@ -132,7 +149,7 @@ class KillFilter(CommandFilter):
         super(KillFilter, self).__init__("/bin/kill", *args)
 
     def match(self, userargs):
-        if userargs[0] != "kill":
+        if not userargs or userargs[0] != "kill":
            return False
         args = list(userargs)
         if len(args) == 3:
@@ -150,31 +167,150 @@ class KillFilter(CommandFilter):
             return False
         try:
             command = os.readlink("/proc/%d/exe" % int(args[1]))
-            # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
-            # the end if an executable is updated or deleted
-            if command.endswith(" (deleted)"):
-                command = command[:command.rindex(" ")]
-            if command != self.args[0]:
-                # Affected executable does not match
-                return False
         except (ValueError, OSError):
             # Incorrect PID
             return False
-        return True
+
+        # NOTE(yufang521247): /proc/PID/exe may have '\0' on the
+        # end, because python doesn't stop at '\0' when reading the
+        # target path.
+        command = command.partition('\0')[0]
+
+        # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
+        # the end if an executable is updated or deleted
+        if command.endswith(" (deleted)"):
+            command = command[:-len(" (deleted)")]
+
+        kill_command = self.args[0]
+
+        if os.path.isabs(kill_command):
+            return kill_command == command
+
+        return (os.path.isabs(command) and
+                kill_command == os.path.basename(command) and
+                os.path.dirname(command) in os.environ.get('PATH', ''
+                                                           ).split(':'))
 
 
 class ReadFileFilter(CommandFilter):
-    """Specific filter for the utils.read_file_as_root call"""
+    """Specific filter for the utils.read_file_as_root call."""
 
     def __init__(self, file_path, *args):
         self.file_path = file_path
         super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)
 
     def match(self, userargs):
-        if userargs[0] != 'cat':
+        return (userargs == ['cat', self.file_path])
+
+
+class IpFilter(CommandFilter):
+    """Specific filter for the ip utility that does not match exec."""
+
+    def match(self, userargs):
+        if userargs[0] == 'ip':
+            if userargs[1] == 'netns':
+                return (userargs[2] in ('list', 'add', 'delete'))
+            else:
+                return True
+
+
+class EnvFilter(CommandFilter):
+    """Specific filter for the env utility.
+
+    Behaves like CommandFilter, except that it handles
+    leading env A=B.. strings appropriately.
+    """
+
+    def _extract_env(self, arglist):
+        """Extract all leading NAME=VALUE arguments from arglist."""
+
+        envs = set()
+        for arg in arglist:
+            if '=' not in arg:
+                break
+            envs.add(arg.partition('=')[0])
+        return envs
+
+    def __init__(self, exec_path, run_as, *args):
+        super(EnvFilter, self).__init__(exec_path, run_as, *args)
+
+        env_list = self._extract_env(self.args)
+        # Set exec_path to X when args are in the form of
+        # env A=a B=b C=c X Y Z
+        if "env" in exec_path and len(env_list) < len(self.args):
+            self.exec_path = self.args[len(env_list)]
+
+    def match(self, userargs):
+        # ignore leading 'env'
+        if userargs[0] == 'env':
+            userargs.pop(0)
+
+        # require one additional argument after configured ones
+        if len(userargs) < len(self.args):
             return False
-        if userargs[1] != self.file_path:
+
+        # extract all env args
+        user_envs = self._extract_env(userargs)
+        filter_envs = self._extract_env(self.args)
+        user_command = userargs[len(user_envs):len(user_envs) + 1]
+
+        # match first non-env argument with CommandFilter
+        return (super(EnvFilter, self).match(user_command)
+                and len(filter_envs) and user_envs == filter_envs)
+
+    def exec_args(self, userargs):
+        args = userargs[:]
+
+        # ignore leading 'env'
+        if args[0] == 'env':
+            args.pop(0)
+
+        # Throw away leading NAME=VALUE arguments
+        while args and '=' in args[0]:
+            args.pop(0)
+
+        return args
+
+    def get_command(self, userargs, exec_dirs=[]):
+        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
+        return [to_exec] + self.exec_args(userargs)[1:]
+
+    def get_environment(self, userargs):
+        env = os.environ.copy()
+
+        # ignore leading 'env'
+        if userargs[0] == 'env':
+            userargs.pop(0)
+
+        # Handle leading NAME=VALUE pairs
+        for a in userargs:
+            env_name, equals, env_value = a.partition('=')
+            if not equals:
+                break
+            if env_name and env_value:
+                env[env_name] = env_value
+
+        return env
+
+
+class ChainingFilter(CommandFilter):
+    def exec_args(self, userargs):
+        return []
+
+
+class IpNetnsExecFilter(ChainingFilter):
+    """Specific filter for the ip utility that does match exec."""
+
+    def match(self, userargs):
+        # Network namespaces currently require root
+        # require argument
+        if self.run_as != "root" or len(userargs) < 4:
return False - if len(userargs) != 2: - return False - return True + + return (userargs[:3] == ['ip', 'netns', 'exec']) + + def exec_args(self, userargs): + args = userargs[4:] + if args: + args[0] = os.path.basename(args[0]) + return args diff --git a/designate/openstack/common/rootwrap/wrapper.py b/designate/openstack/common/rootwrap/wrapper.py index 2c95fc1fc..257363263 100644 --- a/designate/openstack/common/rootwrap/wrapper.py +++ b/designate/openstack/common/rootwrap/wrapper.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # @@ -16,12 +14,13 @@ # under the License. -import ConfigParser import logging import logging.handlers import os import string +from six import moves + from designate.openstack.common.rootwrap import filters @@ -31,10 +30,7 @@ class NoFilterMatched(Exception): class FilterMatchNotExecutable(Exception): - """ - This exception is raised when a filter matched but no executable was - found. - """ + """Raised when a filter matched but no executable was found.""" def __init__(self, match=None, **kwargs): self.match = match @@ -49,8 +45,10 @@ class RootwrapConfig(object): if config.has_option("DEFAULT", "exec_dirs"): self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",") else: + self.exec_dirs = [] # Use system PATH if exec_dirs is not specified - self.exec_dirs = os.environ["PATH"].split(':') + if "PATH" in os.environ: + self.exec_dirs = os.environ['PATH'].split(':') # syslog_log_facility if config.has_option("DEFAULT", "syslog_log_facility"): @@ -93,7 +91,7 @@ def setup_syslog(execname, facility, level): def build_filter(class_name, *args): - """Returns a filter object of class class_name""" + """Returns a filter object of class class_name.""" if not hasattr(filters, class_name): logging.warning("Skipping unknown filter class (%s) specified " "in filter definitions" % class_name) @@ -103,13 +101,14 @@ def build_filter(class_name, *args): def load_filters(filters_path): - """Load filters from a list of directories""" + """Load filters from a list of directories.""" filterlist = [] for filterdir in filters_path: if not os.path.isdir(filterdir): continue - for filterfile in os.listdir(filterdir): - filterconfig = ConfigParser.RawConfigParser() + for filterfile in filter(lambda f: not f.startswith('.'), + os.listdir(filterdir)): + filterconfig = moves.configparser.RawConfigParser() filterconfig.read(os.path.join(filterdir, filterfile)) for (name, value) in filterconfig.items("Filters"): filterdefinition = [string.strip(s) for s in value.split(',')] @@ -121,18 +120,33 @@ def load_filters(filters_path): return filterlist -def match_filter(filters, userargs, exec_dirs=[]): - """ - Checks user command and arguments through command filters and - returns the first matching filter. +def match_filter(filter_list, userargs, exec_dirs=[]): + """Checks user command and arguments through command filters. + + Returns the first matching filter. + Raises NoFilterMatched if no filter matched. Raises FilterMatchNotExecutable if no executable was found for the best filter match. """ first_not_executable_filter = None - for f in filters: + for f in filter_list: if f.match(userargs): + if isinstance(f, filters.ChainingFilter): + # This command calls exec verify that remaining args + # matches another filter. 
+ def non_chain_filter(fltr): + return (fltr.run_as == f.run_as + and not isinstance(fltr, filters.ChainingFilter)) + + leaf_filters = [fltr for fltr in filter_list + if non_chain_filter(fltr)] + args = f.exec_args(userargs) + if (not args or not match_filter(leaf_filters, + args, exec_dirs=exec_dirs)): + continue + # Try other filters if executable is absent if not f.get_exec(exec_dirs=exec_dirs): if not first_not_executable_filter: diff --git a/designate/openstack/common/rpc/__init__.py b/designate/openstack/common/rpc/__init__.py index d7275e331..5d6f8c93f 100644 --- a/designate/openstack/common/rpc/__init__.py +++ b/designate/openstack/common/rpc/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -26,13 +24,13 @@ For some wrappers that add message versioning to rpc, see: """ import inspect -import logging from oslo.config import cfg from designate.openstack.common.gettextutils import _ from designate.openstack.common import importutils from designate.openstack.common import local +from designate.openstack.common import log as logging LOG = logging.getLogger(__name__) @@ -56,13 +54,12 @@ rpc_opts = [ help='Seconds to wait before a cast expires (TTL). ' 'Only supported by impl_zmq.'), cfg.ListOpt('allowed_rpc_exception_modules', - default=['designate.openstack.common.exception', - 'nova.exception', + default=['nova.exception', 'cinder.exception', 'exceptions', ], help='Modules of exceptions that are permitted to be recreated' - 'upon receiving exception data from an rpc call.'), + ' upon receiving exception data from an rpc call.'), cfg.BoolOpt('fake_rabbit', default=False, help='If passed, use a fake RabbitMQ provider'), @@ -228,7 +225,7 @@ def notify(context, topic, msg, envelope=False): def cleanup(): - """Clean up resoruces in use by implementation. + """Clean up resources in use by implementation. Clean up any resources that have been allocated by the RPC implementation. This is typically open connections to a messaging service. This function diff --git a/designate/openstack/common/rpc/amqp.py b/designate/openstack/common/rpc/amqp.py index 071442f52..a3d3703f8 100644 --- a/designate/openstack/common/rpc/amqp.py +++ b/designate/openstack/common/rpc/amqp.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -20,9 +18,9 @@ """ Shared code between AMQP based openstack.common.rpc implementations. -The code in this module is shared between the rpc implemenations based on AMQP. -Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses -AMQP, but is deprecated and predates this code. +The code in this module is shared between the rpc implementations based on +AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also +uses AMQP, but is deprecated and predates this code. """ import collections @@ -34,10 +32,9 @@ from eventlet import greenpool from eventlet import pools from eventlet import queue from eventlet import semaphore -# TODO(pekowsk): Remove import cfg and below comment in Havana. -# This import should no longer be needed when the amqp_rpc_single_reply_queue -# option is removed. 
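
Looking back at the rootwrap chaining logic just above: an 'ip netns exec' command is authorized only if the inner command also matches a non-chaining filter. A sketch, assuming /sbin/ip exists on the host:

    from designate.openstack.common.rootwrap import filters, wrapper

    filter_list = [
        filters.IpNetnsExecFilter('/sbin/ip', 'root'),
        filters.IpFilter('/sbin/ip', 'root'),
    ]
    cmd = ['ip', 'netns', 'exec', 'myns', 'ip', 'link', 'show']

    # IpNetnsExecFilter matches first; match_filter() then re-checks the
    # inner ['ip', 'link', 'show'] against the non-chaining filters, and
    # raises NoFilterMatched if nothing authorizes it.
    match = wrapper.match_filter(filter_list, cmd, exec_dirs=['/sbin'])
    print(match.get_command(cmd, exec_dirs=['/sbin']))
    # -> ['/sbin/ip', 'netns', 'exec', 'myns', 'ip', 'link', 'show']
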
from oslo.config import cfg +import six + from designate.openstack.common import excutils from designate.openstack.common.gettextutils import _ @@ -46,12 +43,15 @@ from designate.openstack.common import log as logging from designate.openstack.common.rpc import common as rpc_common -# TODO(pekowski): Remove this option in Havana. amqp_opts = [ - cfg.BoolOpt('amqp_rpc_single_reply_queue', + cfg.BoolOpt('amqp_durable_queues', default=False, - help='Enable a fast single reply queue if using AMQP based ' - 'RPC like RabbitMQ or Qpid.'), + deprecated_name='rabbit_durable_queues', + deprecated_group='DEFAULT', + help='Use durable queues in amqp.'), + cfg.BoolOpt('amqp_auto_delete', + default=False, + help='Auto-delete queues in amqp.'), ] cfg.CONF.register_opts(amqp_opts) @@ -83,7 +83,7 @@ class Pool(pools.Pool): # is the above "while loop" gets all the cached connections from the # pool and closes them, but never returns them to the pool, a pool # leak. The unit tests hang waiting for an item to be returned to the - # pool. The unit tests get here via the teatDown() method. In the run + # pool. The unit tests get here via the tearDown() method. In the run # time code, it gets here via cleanup() and only appears in service.py # just before doing a sys.exit(), so cleanup() only happens once and # the leakage is not a problem. @@ -102,19 +102,19 @@ def get_connection_pool(conf, connection_cls): class ConnectionContext(rpc_common.Connection): - """The class that is actually returned to the caller of - create_connection(). This is essentially a wrapper around - Connection that supports 'with'. It can also return a new - Connection, or one from a pool. The function will also catch - when an instance of this class is to be deleted. With that - we can return Connections to the pool on exceptions and so - forth without making the caller be responsible for catching - them. If possible the function makes sure to return a - connection to the pool. + """The class that is actually returned to the create_connection() caller. + + This is essentially a wrapper around Connection that supports 'with'. + It can also return a new Connection, or one from a pool. + + The function will also catch when an instance of this class is to be + deleted. With that we can return Connections to the pool on exceptions + and so forth without making the caller be responsible for catching them. + If possible the function makes sure to return a connection to the pool. 
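
The 'with' support described above is the intended calling pattern. A minimal consumer-side sketch, assuming a reachable AMQP broker; the topic and API class are invented:

    from designate.openstack.common import rpc
    from designate.openstack.common.rpc import dispatcher as rpc_dispatcher

    class PingAPI(object):
        RPC_API_VERSION = '1.0'

        def ping(self, context):
            return 'pong'

    callback = rpc_dispatcher.RpcDispatcher([PingAPI()])
    with rpc.create_connection(new=False) as conn:  # pooled connection
        conn.create_consumer('designate.test', callback, fanout=False)
        conn.consume_in_thread()
    # Leaving the block runs _done(): a pooled connection is reset and
    # returned to the pool rather than closed.
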
""" def __init__(self, conf, connection_pool, pooled=True, server_params=None): - """Create a new connection, or get one from the pool""" + """Create a new connection, or get one from the pool.""" self.connection = None self.conf = conf self.connection_pool = connection_pool @@ -127,7 +127,7 @@ class ConnectionContext(rpc_common.Connection): self.pooled = pooled def __enter__(self): - """When with ConnectionContext() is used, return self""" + """When with ConnectionContext() is used, return self.""" return self def _done(self): @@ -165,17 +165,19 @@ class ConnectionContext(rpc_common.Connection): def create_worker(self, topic, proxy, pool_name): self.connection.create_worker(topic, proxy, pool_name) - def join_consumer_pool(self, callback, pool_name, topic, exchange_name): + def join_consumer_pool(self, callback, pool_name, topic, exchange_name, + ack_on_error=True): self.connection.join_consumer_pool(callback, pool_name, topic, - exchange_name) + exchange_name, + ack_on_error) def consume_in_thread(self): self.connection.consume_in_thread() def __getattr__(self, key): - """Proxy all other calls to the Connection instance""" + """Proxy all other calls to the Connection instance.""" if self.connection: return getattr(self.connection, key) else: @@ -183,11 +185,11 @@ class ConnectionContext(rpc_common.Connection): class ReplyProxy(ConnectionContext): - """ Connection class for RPC replies / callbacks """ + """Connection class for RPC replies / callbacks.""" def __init__(self, conf, connection_pool): self._call_waiters = {} self._num_call_waiters = 0 - self._num_call_waiters_wrn_threshhold = 10 + self._num_call_waiters_wrn_threshold = 10 self._reply_q = 'reply_' + uuid.uuid4().hex super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False) self.declare_direct_consumer(self._reply_q, self._process_data) @@ -197,18 +199,20 @@ class ReplyProxy(ConnectionContext): msg_id = message_data.pop('_msg_id', None) waiter = self._call_waiters.get(msg_id) if not waiter: - LOG.warn(_('no calling threads waiting for msg_id : %s' - ', message : %s') % (msg_id, message_data)) + LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s' + ', message : %(data)s'), {'msg_id': msg_id, + 'data': message_data}) + LOG.warn(_('_call_waiters: %s') % str(self._call_waiters)) else: waiter.put(message_data) def add_call_waiter(self, waiter, msg_id): self._num_call_waiters += 1 - if self._num_call_waiters > self._num_call_waiters_wrn_threshhold: + if self._num_call_waiters > self._num_call_waiters_wrn_threshold: LOG.warn(_('Number of call waiters is greater than warning ' - 'threshhold: %d. There could be a MulticallProxyWaiter ' - 'leak.') % self._num_call_waiters_wrn_threshhold) - self._num_call_waiters_wrn_threshhold *= 2 + 'threshold: %d. There could be a MulticallProxyWaiter ' + 'leak.') % self._num_call_waiters_wrn_threshold) + self._num_call_waiters_wrn_threshold *= 2 self._call_waiters[msg_id] = waiter def del_call_waiter(self, msg_id): @@ -231,18 +235,13 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, failure = rpc_common.serialize_remote_exception(failure, log_failure) - try: - msg = {'result': reply, 'failure': failure} - except TypeError: - msg = {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure} + msg = {'result': reply, 'failure': failure} if ending: msg['ending'] = True _add_unique_id(msg) # If a reply_q exists, add the msg_id to the reply and pass the # reply_q to direct_send() to use it as the response queue. 
- # Otherwise use the msg_id for backward compatibilty. + # Otherwise use the msg_id for backward compatibility. if reply_q: msg['_msg_id'] = msg_id conn.direct_send(reply_q, rpc_common.serialize_msg(msg)) @@ -251,7 +250,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, class RpcContext(rpc_common.CommonRpcContext): - """Context that supports replying to a rpc.call""" + """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.reply_q = kwargs.pop('reply_q', None) @@ -301,8 +300,14 @@ def pack_context(msg, context): for args at some point. """ - context_d = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) + if isinstance(context, dict): + context_d = dict([('_context_%s' % key, value) + for (key, value) in six.iteritems(context)]) + else: + context_d = dict([('_context_%s' % key, value) + for (key, value) in + six.iteritems(context.to_dict())]) + msg.update(context_d) @@ -338,8 +343,9 @@ def _add_unique_id(msg): class _ThreadPoolWithWait(object): - """Base class for a delayed invocation manager used by - the Connection class to start up green threads + """Base class for a delayed invocation manager. + + Used by the Connection class to start up green threads to handle incoming messages. """ @@ -354,25 +360,48 @@ class _ThreadPoolWithWait(object): class CallbackWrapper(_ThreadPoolWithWait): - """Wraps a straight callback to allow it to be invoked in a green - thread. + """Wraps a straight callback. + + Allows it to be invoked in a green thread. """ - def __init__(self, conf, callback, connection_pool): - """ + def __init__(self, conf, callback, connection_pool, + wait_for_consumers=False): + """Initiates CallbackWrapper object. + :param conf: cfg.CONF instance :param callback: a callable (probably a function) :param connection_pool: connection pool as returned by get_connection_pool() + :param wait_for_consumers: wait for all green threads to + complete and raise the last + caught exception, if any. + """ super(CallbackWrapper, self).__init__( conf=conf, connection_pool=connection_pool, ) self.callback = callback + self.wait_for_consumers = wait_for_consumers + self.exc_info = None + + def _wrap(self, message_data, **kwargs): + """Wrap the callback invocation to catch exceptions. 
+ """ + try: + self.callback(message_data, **kwargs) + except Exception: + self.exc_info = sys.exc_info() def __call__(self, message_data): - self.pool.spawn_n(self.callback, message_data) + self.exc_info = None + self.pool.spawn_n(self._wrap, message_data) + + if self.wait_for_consumers: + self.pool.waitall() + if self.exc_info: + six.reraise(self.exc_info[1], None, self.exc_info[2]) class ProxyCallback(_ThreadPoolWithWait): @@ -408,15 +437,17 @@ class ProxyCallback(_ThreadPoolWithWait): ctxt = unpack_context(self.conf, message_data) method = message_data.get('method') args = message_data.get('args', {}) - version = message_data.get('version', None) + version = message_data.get('version') + namespace = message_data.get('namespace') if not method: LOG.warn(_('no method for message: %s') % message_data) ctxt.reply(_('No method for message: %s') % message_data, connection_pool=self.connection_pool) return - self.pool.spawn_n(self._process_data, ctxt, version, method, args) + self.pool.spawn_n(self._process_data, ctxt, version, method, + namespace, args) - def _process_data(self, ctxt, version, method, args): + def _process_data(self, ctxt, version, method, namespace, args): """Process a message in a new thread. If the proxy object we have has a dispatch method @@ -427,7 +458,8 @@ class ProxyCallback(_ThreadPoolWithWait): """ ctxt.update_store() try: - rval = self.proxy.dispatch(ctxt, version, method, **args) + rval = self.proxy.dispatch(ctxt, version, method, namespace, + **args) # Check if the result was a generator if inspect.isgenerator(rval): for x in rval: @@ -487,7 +519,7 @@ class MulticallProxyWaiter(object): return result def __iter__(self): - """Return a result until we get a reply with an 'ending" flag""" + """Return a result until we get a reply with an 'ending' flag.""" if self._done: raise StopIteration while True: @@ -509,61 +541,8 @@ class MulticallProxyWaiter(object): yield result -#TODO(pekowski): Remove MulticallWaiter() in Havana. -class MulticallWaiter(object): - def __init__(self, conf, connection, timeout): - self._connection = connection - self._iterator = connection.iterconsume(timeout=timeout or - conf.rpc_response_timeout) - self._result = None - self._done = False - self._got_ending = False - self._conf = conf - self.msg_id_cache = _MsgIdCache() - - def done(self): - if self._done: - return - self._done = True - self._iterator.close() - self._iterator = None - self._connection.close() - - def __call__(self, data): - """The consume() callback will call this. 
Store the result.""" - self.msg_id_cache.check_duplicate_message(data) - if data['failure']: - failure = data['failure'] - self._result = rpc_common.deserialize_remote_exception(self._conf, - failure) - - elif data.get('ending', False): - self._got_ending = True - else: - self._result = data['result'] - - def __iter__(self): - """Return a result until we get a 'None' response from consumer""" - if self._done: - raise StopIteration - while True: - try: - self._iterator.next() - except Exception: - with excutils.save_and_reraise_exception(): - self.done() - if self._got_ending: - self.done() - raise StopIteration - result = self._result - if isinstance(result, Exception): - self.done() - raise result - yield result - - def create_connection(conf, new, connection_pool): - """Create a connection""" + """Create a connection.""" return ConnectionContext(conf, connection_pool, pooled=not new) @@ -572,14 +551,6 @@ _reply_proxy_create_sem = semaphore.Semaphore() def multicall(conf, context, topic, msg, timeout, connection_pool): """Make a call that returns multiple times.""" - # TODO(pekowski): Remove all these comments in Havana. - # For amqp_rpc_single_reply_queue = False, - # Can't use 'with' for multicall, as it returns an iterator - # that will continue to use the connection. When it's done, - # connection.close() will get called which will put it back into - # the pool - # For amqp_rpc_single_reply_queue = True, - # The 'with' statement is mandatory for closing the connection LOG.debug(_('Making synchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) @@ -587,21 +558,13 @@ def multicall(conf, context, topic, msg, timeout, connection_pool): _add_unique_id(msg) pack_context(msg, context) - # TODO(pekowski): Remove this flag and the code under the if clause - # in Havana. 
-    if not conf.amqp_rpc_single_reply_queue:
-        conn = ConnectionContext(conf, connection_pool)
-        wait_msg = MulticallWaiter(conf, conn, timeout)
-        conn.declare_direct_consumer(msg_id, wait_msg)
+    with _reply_proxy_create_sem:
+        if not connection_pool.reply_proxy:
+            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
+    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
+    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
+    with ConnectionContext(conf, connection_pool) as conn:
         conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
-    else:
-        with _reply_proxy_create_sem:
-            if not connection_pool.reply_proxy:
-                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
-        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
-        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
-        with ConnectionContext(conf, connection_pool) as conn:
-            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
     return wait_msg
 
@@ -661,7 +624,7 @@ def notify(conf, context, topic, msg, connection_pool, envelope):
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
         if envelope:
-            msg = rpc_common.serialize_msg(msg, force_envelope=True)
+            msg = rpc_common.serialize_msg(msg)
         conn.notify_send(topic, msg)
 
diff --git a/designate/openstack/common/rpc/common.py b/designate/openstack/common/rpc/common.py
index 1d4194eac..0321858e9 100644
--- a/designate/openstack/common/rpc/common.py
+++ b/designate/openstack/common/rpc/common.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -22,18 +20,21 @@
 import sys
 import traceback
 
 from oslo.config import cfg
+import six
 
 from designate.openstack.common.gettextutils import _
 from designate.openstack.common import importutils
 from designate.openstack.common import jsonutils
 from designate.openstack.common import local
 from designate.openstack.common import log as logging
+from designate.openstack.common import versionutils
 
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
+_RPC_ENVELOPE_VERSION = '2.0'
 '''RPC Envelope Version.
 
 This version number applies to the top level structure of messages sent out.
@@ -46,7 +47,7 @@ This version number applies to the message envelope that is used in
 the serialization done inside the rpc layer.  See serialize_msg() and
 deserialize_msg().
 
-The current message format (version 2.0) is very simple.  It is:
+The current message format (version 2.0) is very simple.  It is::
 
     {
         'oslo.version': <RPC Envelope Version as a String>,
         'oslo.message': <Application Message Payload, JSON encoded>
     }
 
@@ -64,34 +65,31 @@ We will JSON encode the application message payload.  The message envelope,
 which includes the JSON encoded application message body, will be passed down
 to the messaging libraries as a dict.
 '''
-_RPC_ENVELOPE_VERSION = '2.0'
 
 _VERSION_KEY = 'oslo.version'
 _MESSAGE_KEY = 'oslo.message'
 
-
-# TODO(russellb) Turn this on after Grizzly.
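Reviewer's note: the envelope described in the docstring above round-trips in a few lines. A minimal, self-contained sketch, using plain json where the module uses jsonutils, and omitting the version check that the real deserialize_msg() performs; the sketch_* names are illustrative and not part of this patch:

    import json

    _RPC_ENVELOPE_VERSION = '2.0'
    _VERSION_KEY = 'oslo.version'
    _MESSAGE_KEY = 'oslo.message'


    def sketch_serialize_msg(raw_msg):
        # JSON encode only the application payload; the envelope stays a
        # plain dict so the driver can still attach transport-level fields.
        return {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
                _MESSAGE_KEY: json.dumps(raw_msg)}


    def sketch_deserialize_msg(msg):
        # Anything that does not look like an envelope passes through
        # untouched, which is what keeps pre-envelope peers interoperable.
        if not isinstance(msg, dict) or _MESSAGE_KEY not in msg:
            return msg
        return json.loads(msg[_MESSAGE_KEY])


    original = {'method': 'ping', 'args': {}}
    assert sketch_deserialize_msg(sketch_serialize_msg(original)) == original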
-_SEND_RPC_ENVELOPE = False
+_REMOTE_POSTFIX = '_Remote'
 
 
 class RPCException(Exception):
-    message = _("An unknown RPC related exception occurred.")
+    msg_fmt = _("An unknown RPC related exception occurred.")
 
     def __init__(self, message=None, **kwargs):
         self.kwargs = kwargs
 
         if not message:
             try:
-                message = self.message % kwargs
+                message = self.msg_fmt % kwargs
             except Exception:
                 # kwargs doesn't match a variable in the message
                 # log the issue and the kwargs
                 LOG.exception(_('Exception in string format operation'))
-                for name, value in kwargs.iteritems():
+                for name, value in six.iteritems(kwargs):
                     LOG.error("%s: %s" % (name, value))
                 # at least get the core message out if something happened
-                message = self.message
+                message = self.msg_fmt
 
         super(RPCException, self).__init__(message)
 
@@ -105,7 +103,7 @@ class RemoteError(RPCException):
     contains all of the relevant info.
 
     """
-    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
+    msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
 
     def __init__(self, exc_type=None, value=None, traceback=None):
         self.exc_type = exc_type
@@ -122,27 +120,50 @@ class Timeout(RPCException):
     This exception is raised if the rpc_response_timeout is reached while
     waiting for a response from the remote side.
     """
-    message = _("Timeout while waiting on RPC response.")
+    msg_fmt = _('Timeout while waiting on RPC response - '
+                'topic: "%(topic)s", RPC method: "%(method)s" '
+                'info: "%(info)s"')
+
+    def __init__(self, info=None, topic=None, method=None):
+        """Initiates Timeout object.
+
+        :param info: Extra info to convey to the user
+        :param topic: The topic that the rpc call was sent to
+        :param rpc_method_name: The name of the rpc method being
+                                called
+        """
+        self.info = info
+        self.topic = topic
+        self.method = method
+        super(Timeout, self).__init__(
+            None,
+            info=info or _('<unknown>'),
+            topic=topic or _('<unknown>'),
+            method=method or _('<unknown>'))
 
 
 class DuplicateMessageError(RPCException):
-    message = _("Found duplicate message(%(msg_id)s). Skipping it.")
+    msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
 
 
 class InvalidRPCConnectionReuse(RPCException):
-    message = _("Invalid reuse of an RPC connection.")
+    msg_fmt = _("Invalid reuse of an RPC connection.")
 
 
 class UnsupportedRpcVersion(RPCException):
-    message = _("Specified RPC version, %(version)s, not supported by "
+    msg_fmt = _("Specified RPC version, %(version)s, not supported by "
                 "this endpoint.")
 
 
 class UnsupportedRpcEnvelopeVersion(RPCException):
-    message = _("Specified RPC envelope version, %(version)s, "
+    msg_fmt = _("Specified RPC envelope version, %(version)s, "
                 "not supported by this endpoint.")
 
 
+class RpcVersionCapError(RPCException):
+    msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
+
+
 class Connection(object):
     """A connection, returned by rpc.create_connection().
 
@@ -202,9 +223,9 @@ class Connection(object):
         raise NotImplementedError()
 
     def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
-        """Register as a member of a group of consumers for a given topic from
-        the specified exchange.
+        """Register as a member of a group of consumers.
 
+        Uses given topic from the specified exchange.
         Exactly one member of a given pool will receive each message.
        A message will be delivered to multiple pools, if more than
@@ -239,41 +260,20 @@
 def _safe_log(log_func, msg, msg_data):
     """Sanitizes the msg_data field before logging."""
-    SANITIZE = {'set_admin_password': [('args', 'new_pass')],
-                'run_instance': [('args', 'admin_password')],
-                'route_message': [('args', 'message', 'args', 'method_info',
-                                   'method_kwargs', 'password'),
-                                  ('args', 'message', 'args', 'method_info',
-                                   'method_kwargs', 'admin_password')]}
+    SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
 
-    has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
-    has_context_token = '_context_auth_token' in msg_data
-    has_token = 'auth_token' in msg_data
+    def _fix_passwords(d):
+        """Sanitizes the password fields in the dictionary."""
+        for k in six.iterkeys(d):
+            if k.lower().find('password') != -1:
+                d[k] = '<SANITIZED>'
+            elif k.lower() in SANITIZE:
+                d[k] = '<SANITIZED>'
+            elif isinstance(d[k], dict):
+                _fix_passwords(d[k])
+        return d
 
-    if not any([has_method, has_context_token, has_token]):
-        return log_func(msg, msg_data)
-
-    msg_data = copy.deepcopy(msg_data)
-
-    if has_method:
-        for arg in SANITIZE.get(msg_data['method'], []):
-            try:
-                d = msg_data
-                for elem in arg[:-1]:
-                    d = d[elem]
-                d[arg[-1]] = '<SANITIZED>'
-            except KeyError, e:
-                LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
-                         {'item': arg,
-                          'err': e})
-
-    if has_context_token:
-        msg_data['_context_auth_token'] = '<SANITIZED>'
-
-    if has_token:
-        msg_data['auth_token'] = '<SANITIZED>'
-
-    return log_func(msg, msg_data)
+    return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
 
 
 def serialize_remote_exception(failure_info, log_failure=True):
@@ -285,17 +285,27 @@
     tb = traceback.format_exception(*failure_info)
     failure = failure_info[1]
     if log_failure:
-        LOG.error(_("Returning exception %s to caller"), unicode(failure))
+        LOG.error(_("Returning exception %s to caller"),
+                  six.text_type(failure))
         LOG.error(tb)
 
     kwargs = {}
     if hasattr(failure, 'kwargs'):
         kwargs = failure.kwargs
 
+    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
+    # exceptions. Lets turn it back into the original exception type.
+    cls_name = str(failure.__class__.__name__)
+    mod_name = str(failure.__class__.__module__)
+    if (cls_name.endswith(_REMOTE_POSTFIX) and
+            mod_name.endswith(_REMOTE_POSTFIX)):
+        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
+        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
+
     data = {
-        'class': str(failure.__class__.__name__),
-        'module': str(failure.__class__.__module__),
-        'message': unicode(failure),
+        'class': cls_name,
+        'module': mod_name,
+        'message': six.text_type(failure),
         'tb': tb,
         'args': failure.args,
         'kwargs': kwargs
@@ -325,14 +335,15 @@ def deserialize_remote_exception(conf, data):
         if not issubclass(klass, Exception):
             raise TypeError("Can only deserialize Exceptions")
 
-        failure = klass(**failure.get('kwargs', {}))
+        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
     except (AttributeError, TypeError, ImportError):
         return RemoteError(name, failure.get('message'), trace)
 
     ex_type = type(failure)
     str_override = lambda self: message
-    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
+    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                        {'__str__': str_override, '__unicode__': str_override})
+    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
     try:
         # NOTE(ameade): Dynamically create a new exception type and swap it in
         # as the new type for the exception.
This only works on user defined @@ -394,10 +405,11 @@ class CommonRpcContext(object): class ClientException(Exception): - """This encapsulates some actual exception that is expected to be - hit by an RPC proxy object. Merely instantiating it records the - current exception information, which will be passed back to the - RPC client without exceptional logging.""" + """Encapsulates actual exception expected to be hit by a RPC proxy object. + + Merely instantiating it records the current exception information, which + will be passed back to the RPC client without exceptional logging. + """ def __init__(self): self._exc_info = sys.exc_info() @@ -405,7 +417,7 @@ class ClientException(Exception): def catch_client_exception(exceptions, func, *args, **kwargs): try: return func(*args, **kwargs) - except Exception, e: + except Exception as e: if type(e) in exceptions: raise ClientException() else: @@ -414,11 +426,13 @@ def catch_client_exception(exceptions, func, *args, **kwargs): def client_exceptions(*exceptions): """Decorator for manager methods that raise expected exceptions. + Marking a Manager method with this decorator allows the declaration of expected exceptions that the RPC layer should not consider fatal, and not log as if they were generated in a real error scenario. Note that this will cause listed exceptions to be wrapped in a - ClientException, which is used internally by the RPC layer.""" + ClientException, which is used internally by the RPC layer. + """ def outer(func): def inner(*args, **kwargs): return catch_client_exception(exceptions, func, *args, **kwargs) @@ -426,25 +440,18 @@ def client_exceptions(*exceptions): return outer +# TODO(sirp): we should deprecate this in favor of +# using `versionutils.is_compatible` directly def version_is_compatible(imp_version, version): """Determine whether versions are compatible. :param imp_version: The version implemented :param version: The version requested by an incoming message. """ - version_parts = version.split('.') - imp_version_parts = imp_version.split('.') - if int(version_parts[0]) != int(imp_version_parts[0]): # Major - return False - if int(version_parts[1]) > int(imp_version_parts[1]): # Minor - return False - return True + return versionutils.is_compatible(version, imp_version) -def serialize_msg(raw_msg, force_envelope=False): - if not _SEND_RPC_ENVELOPE and not force_envelope: - return raw_msg - +def serialize_msg(raw_msg): # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more # information about this format. msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, diff --git a/designate/openstack/common/rpc/dispatcher.py b/designate/openstack/common/rpc/dispatcher.py index 29135f945..424f72ab8 100644 --- a/designate/openstack/common/rpc/dispatcher.py +++ b/designate/openstack/common/rpc/dispatcher.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -83,7 +81,10 @@ On the client side, the same changes should be made as in example 1. The minimum version that supports the new parameter should be specified. """ +import six + from designate.openstack.common.rpc import common as rpc_common +from designate.openstack.common.rpc import serializer as rpc_serializer class RpcDispatcher(object): @@ -93,23 +94,48 @@ class RpcDispatcher(object): contains a list of underlying managers that have an API_VERSION attribute. 
""" - def __init__(self, callbacks): + def __init__(self, callbacks, serializer=None): """Initialize the rpc dispatcher. :param callbacks: List of proxy objects that are an instance of a class with rpc methods exposed. Each proxy object should have an RPC_API_VERSION attribute. + :param serializer: The Serializer object that will be used to + deserialize arguments before the method call and + to serialize the result after it returns. """ self.callbacks = callbacks + if serializer is None: + serializer = rpc_serializer.NoOpSerializer() + self.serializer = serializer super(RpcDispatcher, self).__init__() - def dispatch(self, ctxt, version, method, **kwargs): + def _deserialize_args(self, context, kwargs): + """Helper method called to deserialize args before dispatch. + + This calls our serializer on each argument, returning a new set of + args that have been deserialized. + + :param context: The request context + :param kwargs: The arguments to be deserialized + :returns: A new set of deserialized args + """ + new_kwargs = dict() + for argname, arg in six.iteritems(kwargs): + new_kwargs[argname] = self.serializer.deserialize_entity(context, + arg) + return new_kwargs + + def dispatch(self, ctxt, version, method, namespace, **kwargs): """Dispatch a message based on a requested version. :param ctxt: The request context :param version: The requested API version from the incoming message :param method: The method requested to be called by the incoming message. + :param namespace: The namespace for the requested method. If None, + the dispatcher will look for a method on a callback + object with no namespace set. :param kwargs: A dict of keyword arguments to be passed to the method. :returns: Whatever is returned by the underlying method that gets @@ -120,17 +146,31 @@ class RpcDispatcher(object): had_compatible = False for proxyobj in self.callbacks: - if hasattr(proxyobj, 'RPC_API_VERSION'): + # Check for namespace compatibility + try: + cb_namespace = proxyobj.RPC_API_NAMESPACE + except AttributeError: + cb_namespace = None + + if namespace != cb_namespace: + continue + + # Check for version compatibility + try: rpc_api_version = proxyobj.RPC_API_VERSION - else: + except AttributeError: rpc_api_version = '1.0' + is_compatible = rpc_common.version_is_compatible(rpc_api_version, version) had_compatible = had_compatible or is_compatible + if not hasattr(proxyobj, method): continue if is_compatible: - return getattr(proxyobj, method)(ctxt, **kwargs) + kwargs = self._deserialize_args(ctxt, kwargs) + result = getattr(proxyobj, method)(ctxt, **kwargs) + return self.serializer.serialize_entity(ctxt, result) if had_compatible: raise AttributeError("No such RPC function '%s'" % method) diff --git a/designate/openstack/common/rpc/impl_fake.py b/designate/openstack/common/rpc/impl_fake.py index 2f4ce858e..2a7b5e926 100644 --- a/designate/openstack/common/rpc/impl_fake.py +++ b/designate/openstack/common/rpc/impl_fake.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -26,6 +24,7 @@ import json import time import eventlet +import six from designate.openstack.common.rpc import common as rpc_common @@ -57,18 +56,19 @@ class Consumer(object): self.topic = topic self.proxy = proxy - def call(self, context, version, method, args, timeout): + def call(self, context, version, method, namespace, args, timeout): done = eventlet.event.Event() def _inner(): ctxt = 
RpcContext.from_dict(context.to_dict()) try: - rval = self.proxy.dispatch(context, version, method, **args) + rval = self.proxy.dispatch(context, version, method, + namespace, **args) res = [] # Caller might have called ctxt.reply() manually for (reply, failure) in ctxt._response: if failure: - raise failure[0], failure[1], failure[2] + six.reraise(failure[0], failure[1], failure[2]) res.append(reply) # if ending not 'sent'...we might have more data to # return from the function itself @@ -121,7 +121,7 @@ class Connection(object): def create_connection(conf, new=True): - """Create a connection""" + """Create a connection.""" return Connection() @@ -140,13 +140,15 @@ def multicall(conf, context, topic, msg, timeout=None): return args = msg.get('args', {}) version = msg.get('version', None) + namespace = msg.get('namespace', None) try: consumer = CONSUMERS[topic][0] except (KeyError, IndexError): - return iter([None]) + raise rpc_common.Timeout("No consumers available") else: - return consumer.call(context, version, method, args, timeout) + return consumer.call(context, version, method, namespace, args, + timeout) def call(conf, context, topic, msg, timeout=None): @@ -176,16 +178,17 @@ def cleanup(): def fanout_cast(conf, context, topic, msg): - """Cast to all consumers of a topic""" + """Cast to all consumers of a topic.""" check_serialize(msg) method = msg.get('method') if not method: return args = msg.get('args', {}) version = msg.get('version', None) + namespace = msg.get('namespace', None) for consumer in CONSUMERS.get(topic, []): try: - consumer.call(context, version, method, args, None) + consumer.call(context, version, method, namespace, args, None) except Exception: pass diff --git a/designate/openstack/common/rpc/impl_kombu.py b/designate/openstack/common/rpc/impl_kombu.py index 97e3698b9..119204551 100644 --- a/designate/openstack/common/rpc/impl_kombu.py +++ b/designate/openstack/common/rpc/impl_kombu.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -18,7 +16,6 @@ import functools import itertools import socket import ssl -import sys import time import uuid @@ -29,16 +26,22 @@ import kombu.connection import kombu.entity import kombu.messaging from oslo.config import cfg +import six +from designate.openstack.common import excutils from designate.openstack.common.gettextutils import _ from designate.openstack.common import network_utils from designate.openstack.common.rpc import amqp as rpc_amqp from designate.openstack.common.rpc import common as rpc_common +from designate.openstack.common import sslutils kombu_opts = [ cfg.StrOpt('kombu_ssl_version', default='', - help='SSL version to use (valid only if SSL enabled)'), + help='SSL version to use (valid only if SSL enabled). ' + 'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may ' + 'be available on some distributions' + ), cfg.StrOpt('kombu_ssl_keyfile', default='', help='SSL key file (valid only if SSL enabled)'), @@ -82,9 +85,6 @@ kombu_opts = [ default=0, help='maximum retries with trying to connect to RabbitMQ ' '(the default of 0 implies an infinite retry count)'), - cfg.BoolOpt('rabbit_durable_queues', - default=False, - help='use durable queues in RabbitMQ'), cfg.BoolOpt('rabbit_ha_queues', default=False, help='use H/A queues in RabbitMQ (x-ha-policy: all).' 
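Reviewer's note: the reworded kombu_ssl_version help above pairs with the sslutils.validate_ssl_version() call added to _fetch_ssl_params() later in this file. The mapping that help text implies looks roughly like the sketch below; _SSL_PROTOCOLS and validate_ssl_version_sketch are hypothetical names, and the real helper lives in designate/openstack/common/sslutils.py, which is outside the hunks shown here:

    import ssl

    # Map the version strings named in the help text ('TLSv1', 'SSLv23',
    # 'SSLv3') to ssl module constants, registering only the protocols
    # the local Python build actually exposes.
    _SSL_PROTOCOLS = {
        'tlsv1': ssl.PROTOCOL_TLSv1,
        'sslv23': ssl.PROTOCOL_SSLv23,
    }
    if hasattr(ssl, 'PROTOCOL_SSLv3'):
        # SSLv2/SSLv3 availability is distribution dependent.
        _SSL_PROTOCOLS['sslv3'] = ssl.PROTOCOL_SSLv3


    def validate_ssl_version_sketch(version):
        # Reject anything we cannot translate rather than silently
        # falling back to a default protocol.
        try:
            return _SSL_PROTOCOLS[version.lower()]
        except KeyError:
            raise RuntimeError("Invalid SSL version: %s" % version)


    print(validate_ssl_version_sketch('TLSv1') == ssl.PROTOCOL_TLSv1)  # True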
@@ -129,15 +129,40 @@ class ConsumerBase(object): self.tag = str(tag) self.kwargs = kwargs self.queue = None + self.ack_on_error = kwargs.get('ack_on_error', True) self.reconnect(channel) def reconnect(self, channel): - """Re-declare the queue after a rabbit reconnect""" + """Re-declare the queue after a rabbit reconnect.""" self.channel = channel self.kwargs['channel'] = channel self.queue = kombu.entity.Queue(**self.kwargs) self.queue.declare() + def _callback_handler(self, message, callback): + """Call callback with deserialized message. + + Messages that are processed without exception are ack'ed. + + If the message processing generates an exception, it will be + ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed. + """ + + try: + msg = rpc_common.deserialize_msg(message.payload) + callback(msg) + except Exception: + if self.ack_on_error: + LOG.exception(_("Failed to process message" + " ... skipping it.")) + message.ack() + else: + LOG.exception(_("Failed to process message" + " ... will requeue.")) + message.requeue() + else: + message.ack() + def consume(self, *args, **kwargs): """Actually declare the consumer on the amqp channel. This will start the flow of messages from the queue. Using the @@ -150,8 +175,6 @@ class ConsumerBase(object): If kwargs['nowait'] is True, then this call will block until a message is read. - Messages will automatically be acked if the callback doesn't - raise an exception """ options = {'consumer_tag': self.tag} @@ -162,21 +185,15 @@ class ConsumerBase(object): def _callback(raw_message): message = self.channel.message_to_python(raw_message) - try: - msg = rpc_common.deserialize_msg(message.payload) - callback(msg) - except Exception: - LOG.exception(_("Failed to process message... skipping it.")) - finally: - message.ack() + self._callback_handler(message, callback) self.queue.consume(*args, callback=_callback, **options) def cancel(self): - """Cancel the consuming from the queue, if it has started""" + """Cancel the consuming from the queue, if it has started.""" try: self.queue.cancel(self.tag) - except KeyError, e: + except KeyError as e: # NOTE(comstud): Kludge to get around a amqplib bug if str(e) != "u'%s'" % self.tag: raise @@ -184,7 +201,7 @@ class ConsumerBase(object): class DirectConsumer(ConsumerBase): - """Queue/consumer class for 'direct'""" + """Queue/consumer class for 'direct'.""" def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): """Init a 'direct' queue. @@ -216,7 +233,7 @@ class DirectConsumer(ConsumerBase): class TopicConsumer(ConsumerBase): - """Consumer class for 'topic'""" + """Consumer class for 'topic'.""" def __init__(self, conf, channel, topic, callback, tag, name=None, exchange_name=None, **kwargs): @@ -233,9 +250,9 @@ class TopicConsumer(ConsumerBase): Other kombu options may be passed as keyword arguments """ # Default options - options = {'durable': conf.rabbit_durable_queues, + options = {'durable': conf.amqp_durable_queues, 'queue_arguments': _get_queue_arguments(conf), - 'auto_delete': False, + 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) @@ -253,7 +270,7 @@ class TopicConsumer(ConsumerBase): class FanoutConsumer(ConsumerBase): - """Consumer class for 'fanout'""" + """Consumer class for 'fanout'.""" def __init__(self, conf, channel, topic, callback, tag, **kwargs): """Init a 'fanout' queue. 
@@ -286,7 +303,7 @@ class FanoutConsumer(ConsumerBase): class Publisher(object): - """Base Publisher class""" + """Base Publisher class.""" def __init__(self, channel, exchange_name, routing_key, **kwargs): """Init the Publisher class with the exchange_name, routing_key, @@ -298,7 +315,7 @@ class Publisher(object): self.reconnect(channel) def reconnect(self, channel): - """Re-establish the Producer after a rabbit reconnection""" + """Re-establish the Producer after a rabbit reconnection.""" self.exchange = kombu.entity.Exchange(name=self.exchange_name, **self.kwargs) self.producer = kombu.messaging.Producer(exchange=self.exchange, @@ -306,7 +323,7 @@ class Publisher(object): routing_key=self.routing_key) def send(self, msg, timeout=None): - """Send a message""" + """Send a message.""" if timeout: # # AMQP TTL is in milliseconds when set in the header. @@ -317,7 +334,7 @@ class Publisher(object): class DirectPublisher(Publisher): - """Publisher class for 'direct'""" + """Publisher class for 'direct'.""" def __init__(self, conf, channel, msg_id, **kwargs): """init a 'direct' publisher. @@ -333,14 +350,14 @@ class DirectPublisher(Publisher): class TopicPublisher(Publisher): - """Publisher class for 'topic'""" + """Publisher class for 'topic'.""" def __init__(self, conf, channel, topic, **kwargs): """init a 'topic' publisher. Kombu options may be passed as keyword args to override defaults """ - options = {'durable': conf.rabbit_durable_queues, - 'auto_delete': False, + options = {'durable': conf.amqp_durable_queues, + 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = rpc_amqp.get_control_exchange(conf) @@ -352,7 +369,7 @@ class TopicPublisher(Publisher): class FanoutPublisher(Publisher): - """Publisher class for 'fanout'""" + """Publisher class for 'fanout'.""" def __init__(self, conf, channel, topic, **kwargs): """init a 'fanout' publisher. @@ -367,10 +384,10 @@ class FanoutPublisher(Publisher): class NotifyPublisher(TopicPublisher): - """Publisher class for 'notify'""" + """Publisher class for 'notify'.""" def __init__(self, conf, channel, topic, **kwargs): - self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + self.durable = kwargs.pop('durable', conf.amqp_durable_queues) self.queue_arguments = _get_queue_arguments(conf) super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) @@ -428,7 +445,7 @@ class Connection(object): 'virtual_host': self.conf.rabbit_virtual_host, } - for sp_key, value in server_params.iteritems(): + for sp_key, value in six.iteritems(server_params): p_key = server_params_to_kombu_params.get(sp_key, sp_key) params[p_key] = value @@ -447,13 +464,15 @@ class Connection(object): self.reconnect() def _fetch_ssl_params(self): - """Handles fetching what ssl params - should be used for the connection (if any)""" + """Handles fetching what ssl params should be used for the connection + (if any). + """ ssl_params = dict() # http://docs.python.org/library/ssl.html - ssl.wrap_socket if self.conf.kombu_ssl_version: - ssl_params['ssl_version'] = self.conf.kombu_ssl_version + ssl_params['ssl_version'] = sslutils.validate_ssl_version( + self.conf.kombu_ssl_version) if self.conf.kombu_ssl_keyfile: ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile if self.conf.kombu_ssl_certfile: @@ -464,12 +483,8 @@ class Connection(object): # future with this? 
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED - if not ssl_params: - # Just have the default behavior - return True - else: - # Return the extended behavior - return ssl_params + # Return the extended behavior or just have the default behavior + return ssl_params or True def _connect(self, params): """Connect to rabbit. Re-establish any queues that may have @@ -520,7 +535,7 @@ class Connection(object): return except (IOError, self.connection_errors) as e: pass - except Exception, e: + except Exception as e: # NOTE(comstud): Unfortunately it's possible for amqplib # to return an error not covered by its transport # connection_errors in the case of a timeout waiting for @@ -536,13 +551,11 @@ class Connection(object): log_info.update(params) if self.max_retries and attempt == self.max_retries: - LOG.error(_('Unable to connect to AMQP server on ' - '%(hostname)s:%(port)d after %(max_retries)d ' - 'tries: %(err_str)s') % log_info) - # NOTE(comstud): Copied from original code. There's - # really no better recourse because if this was a queue we - # need to consume on, we have no way to consume anymore. - sys.exit(1) + msg = _('Unable to connect to AMQP server on ' + '%(hostname)s:%(port)d after %(max_retries)d ' + 'tries: %(err_str)s') % log_info + LOG.error(msg) + raise rpc_common.RPCException(msg) if attempt == 1: sleep_time = self.interval_start or 1 @@ -561,10 +574,10 @@ class Connection(object): while True: try: return method(*args, **kwargs) - except (self.connection_errors, socket.timeout, IOError), e: + except (self.connection_errors, socket.timeout, IOError) as e: if error_callback: error_callback(e) - except Exception, e: + except Exception as e: # NOTE(comstud): Unfortunately it's possible for amqplib # to return an error not covered by its transport # connection_errors in the case of a timeout waiting for @@ -578,18 +591,18 @@ class Connection(object): self.reconnect() def get_channel(self): - """Convenience call for bin/clear_rabbit_queues""" + """Convenience call for bin/clear_rabbit_queues.""" return self.channel def close(self): - """Close/release this connection""" + """Close/release this connection.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.connection.release() self.connection = None def reset(self): - """Reset a connection so it can be used again""" + """Reset a connection so it can be used again.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.channel.close() @@ -611,14 +624,14 @@ class Connection(object): def _declare_consumer(): consumer = consumer_cls(self.conf, self.channel, topic, callback, - self.consumer_num.next()) + six.next(self.consumer_num)) self.consumers.append(consumer) return consumer return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): - """Return an iterator that will consume from all queues/consumers""" + """Return an iterator that will consume from all queues/consumers.""" info = {'do_consume': True} @@ -634,8 +647,8 @@ class Connection(object): def _consume(): if info['do_consume']: - queues_head = self.consumers[:-1] - queues_tail = self.consumers[-1] + queues_head = self.consumers[:-1] # not fanout. 
+ queues_tail = self.consumers[-1] # fanout for queue in queues_head: queue.consume(nowait=True) queues_tail.consume(nowait=False) @@ -648,7 +661,7 @@ class Connection(object): yield self.ensure(_error_callback, _consume) def cancel_consumer_thread(self): - """Cancel a consumer thread""" + """Cancel a consumer thread.""" if self.consumer_thread is not None: self.consumer_thread.kill() try: @@ -663,7 +676,7 @@ class Connection(object): proxy_cb.wait() def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): - """Send to a publisher based on the publisher class""" + """Send to a publisher based on the publisher class.""" def _error_callback(exc): log_info = {'topic': topic, 'err_str': str(exc)} @@ -684,45 +697,47 @@ class Connection(object): self.declare_consumer(DirectConsumer, topic, callback) def declare_topic_consumer(self, topic, callback=None, queue_name=None, - exchange_name=None): + exchange_name=None, ack_on_error=True): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, exchange_name=exchange_name, + ack_on_error=ack_on_error, ), topic, callback) def declare_fanout_consumer(self, topic, callback): - """Create a 'fanout' consumer""" + """Create a 'fanout' consumer.""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): - """Send a 'direct' message""" + """Send a 'direct' message.""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): - """Send a 'topic' message""" + """Send a 'topic' message.""" self.publisher_send(TopicPublisher, topic, msg, timeout) def fanout_send(self, topic, msg): - """Send a 'fanout' message""" + """Send a 'fanout' message.""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): - """Send a notify message on a topic""" + """Send a notify message on a topic.""" self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) def consume(self, limit=None): - """Consume from all queues/consumers""" + """Consume from all queues/consumers.""" it = self.iterconsume(limit=limit) while True: try: - it.next() + six.next(it) except StopIteration: return def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread""" + """Consumer from all queues/consumers in a greenthread.""" + @excutils.forever_retry_uncaught_exceptions def _consumer_thread(): try: self.consume() @@ -733,7 +748,7 @@ class Connection(object): return self.consumer_thread def create_consumer(self, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object""" + """Create a consumer that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -745,7 +760,7 @@ class Connection(object): self.declare_topic_consumer(topic, proxy_cb) def create_worker(self, topic, proxy, pool_name): - """Create a worker that calls a method in a proxy object""" + """Create a worker that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -753,7 +768,7 @@ class Connection(object): self.declare_topic_consumer(topic, proxy_cb, pool_name) def join_consumer_pool(self, callback, pool_name, topic, - exchange_name=None): + exchange_name=None, ack_on_error=True): """Register as a member of a group of consumers for a given topic from the specified exchange. 
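Reviewer's note: the ack_on_error plumbing below feeds the CallbackWrapper change in the amqp.py hunks earlier in this patch, where wait_for_consumers=not ack_on_error makes the wrapper block on its green thread and surface the callback's failure. A reduced, runnable sketch of that control flow; CallbackWrapperSketch is illustrative only, and six.reraise(*exc_info) stands in for the patch's Python 2 style re-raise:

    import sys

    import eventlet
    import six


    class CallbackWrapperSketch(object):
        """Run a callback in a green thread; optionally wait and re-raise."""

        def __init__(self, callback, wait_for_consumers=False):
            self.callback = callback
            self.wait_for_consumers = wait_for_consumers
            self.pool = eventlet.GreenPool(2)
            self.exc_info = None

        def _wrap(self, message):
            # Catch everything so the failure can cross the green thread
            # boundary back to the caller.
            try:
                self.callback(message)
            except Exception:
                self.exc_info = sys.exc_info()

        def __call__(self, message):
            self.exc_info = None
            self.pool.spawn_n(self._wrap, message)
            if self.wait_for_consumers:
                # Block until the callback finishes, then re-raise its
                # exception so the consumer can requeue instead of ack.
                self.pool.waitall()
                if self.exc_info:
                    six.reraise(*self.exc_info)


    def handler(message):
        if message.get('boom'):
            raise ValueError(message['boom'])

    wrapper = CallbackWrapperSketch(handler, wait_for_consumers=True)
    wrapper({'boom': None})        # callback completes, nothing raised
    try:
        wrapper({'boom': 'requeue me'})
    except ValueError as exc:
        print('caught: %s' % exc)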
@@ -767,6 +782,7 @@ class Connection(object): callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), + wait_for_consumers=not ack_on_error ) self.proxy_callbacks.append(callback_wrapper) self.declare_topic_consumer( @@ -774,11 +790,12 @@ class Connection(object): topic=topic, exchange_name=exchange_name, callback=callback_wrapper, + ack_on_error=ack_on_error, ) def create_connection(conf, new=True): - """Create a connection""" + """Create a connection.""" return rpc_amqp.create_connection( conf, new, rpc_amqp.get_connection_pool(conf, Connection)) diff --git a/designate/openstack/common/rpc/impl_qpid.py b/designate/openstack/common/rpc/impl_qpid.py index f3f445133..c1cafe9bc 100644 --- a/designate/openstack/common/rpc/impl_qpid.py +++ b/designate/openstack/common/rpc/impl_qpid.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation # Copyright 2011 - 2012, Red Hat, Inc. # @@ -18,12 +16,13 @@ import functools import itertools import time -import uuid import eventlet import greenlet from oslo.config import cfg +import six +from designate.openstack.common import excutils from designate.openstack.common.gettextutils import _ from designate.openstack.common import importutils from designate.openstack.common import jsonutils @@ -31,6 +30,7 @@ from designate.openstack.common import log as logging from designate.openstack.common.rpc import amqp as rpc_amqp from designate.openstack.common.rpc import common as rpc_common +qpid_codec = importutils.try_import("qpid.codec010") qpid_messaging = importutils.try_import("qpid.messaging") qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") @@ -65,6 +65,17 @@ qpid_opts = [ cfg.BoolOpt('qpid_tcp_nodelay', default=True, help='Disable Nagle algorithm'), + # NOTE(russellb) If any additional versions are added (beyond 1 and 2), + # this file could probably use some additional refactoring so that the + # differences between each version are split into different classes. + cfg.IntOpt('qpid_topology_version', + default=1, + help="The qpid topology version to use. Version 1 is what " + "was originally used by impl_qpid. Version 2 includes " + "some backwards-incompatible changes that allow broker " + "federation to work. Users should update to version 2 " + "when they are able to take everything down, as it " + "requires a clean break."), ] cfg.CONF.register_opts(qpid_opts) @@ -72,10 +83,17 @@ cfg.CONF.register_opts(qpid_opts) JSON_CONTENT_TYPE = 'application/json; charset=utf8' +def raise_invalid_topology_version(conf): + msg = (_("Invalid value for qpid_topology_version: %d") % + conf.qpid_topology_version) + LOG.error(msg) + raise Exception(msg) + + class ConsumerBase(object): """Consumer base class.""" - def __init__(self, session, callback, node_name, node_opts, + def __init__(self, conf, session, callback, node_name, node_opts, link_name, link_opts): """Declare a queue on an amqp session. 
@@ -93,34 +111,55 @@ class ConsumerBase(object): self.receiver = None self.session = None - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { + if conf.qpid_topology_version == 1: + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True, + }, + }, + "link": { "durable": True, - "auto-delete": True, + "x-declare": { + "durable": False, + "auto-delete": True, + "exclusive": False, + }, }, - }, - "link": { - "name": link_name, - "durable": True, - "x-declare": { - "durable": False, - "auto-delete": True, - "exclusive": False, + } + addr_opts["node"]["x-declare"].update(node_opts) + elif conf.qpid_topology_version == 2: + addr_opts = { + "link": { + "x-declare": { + "auto-delete": True, + "exclusive": False, + }, }, - }, - } - addr_opts["node"]["x-declare"].update(node_opts) + } + else: + raise_invalid_topology_version() + addr_opts["link"]["x-declare"].update(link_opts) + if link_name: + addr_opts["link"]["name"] = link_name self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) - self.reconnect(session) + self.connect(session) + + def connect(self, session): + """Declare the receiver on connect.""" + self._declare_receiver(session) def reconnect(self, session): - """Re-declare the receiver after a qpid reconnect""" + """Re-declare the receiver after a qpid reconnect.""" + self._declare_receiver(session) + + def _declare_receiver(self, session): self.session = session self.receiver = session.receiver(self.address) self.receiver.capacity = 1 @@ -142,7 +181,7 @@ class ConsumerBase(object): msg.content_type = 'amqp/map' def consume(self): - """Fetch the message and pass it to the callback object""" + """Fetch the message and pass it to the callback object.""" message = self.receiver.fetch() try: self._unpack_json_msg(message) @@ -151,14 +190,18 @@ class ConsumerBase(object): except Exception: LOG.exception(_("Failed to process message... skipping it.")) finally: + # TODO(sandy): Need support for optional ack_on_error. self.session.acknowledge(message) def get_receiver(self): return self.receiver + def get_node_name(self): + return self.address.split(';')[0] + class DirectConsumer(ConsumerBase): - """Queue/consumer class for 'direct'""" + """Queue/consumer class for 'direct'.""" def __init__(self, conf, session, msg_id, callback): """Init a 'direct' queue. 
@@ -168,15 +211,30 @@ class DirectConsumer(ConsumerBase): 'callback' is the callback to call when messages are received """ - super(DirectConsumer, self).__init__(session, callback, - "%s/%s" % (msg_id, msg_id), - {"type": "direct"}, - msg_id, - {"exclusive": True}) + link_opts = { + "auto-delete": conf.amqp_auto_delete, + "exclusive": True, + "durable": conf.amqp_durable_queues, + } + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (msg_id, msg_id) + node_opts = {"type": "direct"} + link_name = msg_id + elif conf.qpid_topology_version == 2: + node_name = "amq.direct/%s" % msg_id + node_opts = {} + link_name = None + else: + raise_invalid_topology_version() + + super(DirectConsumer, self).__init__(conf, session, callback, + node_name, node_opts, link_name, + link_opts) class TopicConsumer(ConsumerBase): - """Consumer class for 'topic'""" + """Consumer class for 'topic'.""" def __init__(self, conf, session, topic, callback, name=None, exchange_name=None): @@ -190,13 +248,24 @@ class TopicConsumer(ConsumerBase): """ exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) - super(TopicConsumer, self).__init__(session, callback, - "%s/%s" % (exchange_name, topic), - {}, name or topic, {}) + link_opts = { + "auto-delete": conf.amqp_auto_delete, + "durable": conf.amqp_durable_queues, + } + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(TopicConsumer, self).__init__(conf, session, callback, node_name, + {}, name or topic, link_opts) class FanoutConsumer(ConsumerBase): - """Consumer class for 'fanout'""" + """Consumer class for 'fanout'.""" def __init__(self, conf, session, topic, callback): """Init a 'fanout' queue. 
@@ -205,90 +274,165 @@ class FanoutConsumer(ConsumerBase): 'topic' is the topic to listen on 'callback' is the callback to call when messages are received """ + self.conf = conf - super(FanoutConsumer, self).__init__( - session, callback, - "%s_fanout" % topic, - {"durable": False, "type": "fanout"}, - "%s_fanout_%s" % (topic, uuid.uuid4().hex), - {"exclusive": True}) + link_opts = {"exclusive": True} + + if conf.qpid_topology_version == 1: + node_name = "%s_fanout" % topic + node_opts = {"durable": False, "type": "fanout"} + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/fanout/%s" % topic + node_opts = {} + else: + raise_invalid_topology_version() + + super(FanoutConsumer, self).__init__(conf, session, callback, + node_name, node_opts, None, + link_opts) class Publisher(object): - """Base Publisher class""" + """Base Publisher class.""" - def __init__(self, session, node_name, node_opts=None): + def __init__(self, conf, session, node_name, node_opts=None): """Init the Publisher class with the exchange_name, routing_key, and other options """ self.sender = None self.session = session - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { - "durable": False, - # auto-delete isn't implemented for exchanges in qpid, - # but put in here anyway - "auto-delete": True, + if conf.qpid_topology_version == 1: + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": False, + # auto-delete isn't implemented for exchanges in qpid, + # but put in here anyway + "auto-delete": True, + }, }, - }, - } - if node_opts: - addr_opts["node"]["x-declare"].update(node_opts) + } + if node_opts: + addr_opts["node"]["x-declare"].update(node_opts) - self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + elif conf.qpid_topology_version == 2: + self.address = node_name + else: + raise_invalid_topology_version() self.reconnect(session) def reconnect(self, session): - """Re-establish the Sender after a reconnection""" + """Re-establish the Sender after a reconnection.""" self.sender = session.sender(self.address) + def _pack_json_msg(self, msg): + """Qpid cannot serialize dicts containing strings longer than 65535 + characters. This function dumps the message content to a JSON + string, which Qpid is able to handle. + + :param msg: May be either a Qpid Message object or a bare dict. + :returns: A Qpid Message with its content field JSON encoded. + """ + try: + msg.content = jsonutils.dumps(msg.content) + except AttributeError: + # Need to have a Qpid message so we can set the content_type. + msg = qpid_messaging.Message(jsonutils.dumps(msg)) + msg.content_type = JSON_CONTENT_TYPE + return msg + def send(self, msg): - """Send a message""" + """Send a message.""" + try: + # Check if Qpid can encode the message + check_msg = msg + if not hasattr(check_msg, 'content_type'): + check_msg = qpid_messaging.Message(msg) + content_type = check_msg.content_type + enc, dec = qpid_messaging.message.get_codec(content_type) + enc(check_msg.content) + except qpid_codec.CodecException: + # This means the message couldn't be serialized as a dict. 
+ msg = self._pack_json_msg(msg) self.sender.send(msg) class DirectPublisher(Publisher): - """Publisher class for 'direct'""" + """Publisher class for 'direct'.""" def __init__(self, conf, session, msg_id): """Init a 'direct' publisher.""" - super(DirectPublisher, self).__init__(session, msg_id, - {"type": "Direct"}) + + if conf.qpid_topology_version == 1: + node_name = msg_id + node_opts = {"type": "direct"} + elif conf.qpid_topology_version == 2: + node_name = "amq.direct/%s" % msg_id + node_opts = {} + else: + raise_invalid_topology_version() + + super(DirectPublisher, self).__init__(conf, session, node_name, + node_opts) class TopicPublisher(Publisher): - """Publisher class for 'topic'""" + """Publisher class for 'topic'.""" def __init__(self, conf, session, topic): - """init a 'topic' publisher. + """Init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) - super(TopicPublisher, self).__init__(session, - "%s/%s" % (exchange_name, topic)) + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(TopicPublisher, self).__init__(conf, session, node_name) class FanoutPublisher(Publisher): - """Publisher class for 'fanout'""" + """Publisher class for 'fanout'.""" def __init__(self, conf, session, topic): - """init a 'fanout' publisher. + """Init a 'fanout' publisher. """ - super(FanoutPublisher, self).__init__( - session, - "%s_fanout" % topic, {"type": "fanout"}) + + if conf.qpid_topology_version == 1: + node_name = "%s_fanout" % topic + node_opts = {"type": "fanout"} + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/fanout/%s" % topic + node_opts = {} + else: + raise_invalid_topology_version() + + super(FanoutPublisher, self).__init__(conf, session, node_name, + node_opts) class NotifyPublisher(Publisher): - """Publisher class for notifications""" + """Publisher class for notifications.""" def __init__(self, conf, session, topic): - """init a 'topic' publisher. + """Init a 'topic' publisher. 
""" exchange_name = rpc_amqp.get_control_exchange(conf) - super(NotifyPublisher, self).__init__(session, - "%s/%s" % (exchange_name, topic), - {"durable": True}) + node_opts = {"durable": True} + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(NotifyPublisher, self).__init__(conf, session, node_name, + node_opts) class Connection(object): @@ -339,7 +483,7 @@ class Connection(object): # Reconnection is done by self.reconnect() self.connection.reconnect = False self.connection.heartbeat = self.conf.qpid_heartbeat - self.connection.protocol = self.conf.qpid_protocol + self.connection.transport = self.conf.qpid_protocol self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay def _register_consumer(self, consumer): @@ -349,7 +493,7 @@ class Connection(object): return self.consumers[str(receiver)] def reconnect(self): - """Handles reconnecting and re-establishing sessions and queues""" + """Handles reconnecting and re-establishing sessions and queues.""" attempt = 0 delay = 1 while True: @@ -366,7 +510,7 @@ class Connection(object): try: self.connection_create(broker) self.connection.open() - except qpid_exceptions.ConnectionError, e: + except qpid_exceptions.ConnectionError as e: msg_dict = dict(e=e, delay=delay) msg = _("Unable to connect to AMQP server: %(e)s. " "Sleeping %(delay)s seconds") % msg_dict @@ -383,7 +527,7 @@ class Connection(object): consumers = self.consumers self.consumers = {} - for consumer in consumers.itervalues(): + for consumer in six.itervalues(consumers): consumer.reconnect(self.session) self._register_consumer(consumer) @@ -394,20 +538,26 @@ class Connection(object): try: return method(*args, **kwargs) except (qpid_exceptions.Empty, - qpid_exceptions.ConnectionError), e: + qpid_exceptions.ConnectionError) as e: if error_callback: error_callback(e) self.reconnect() def close(self): - """Close/release this connection""" + """Close/release this connection.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() - self.connection.close() + try: + self.connection.close() + except Exception: + # NOTE(dripton) Logging exceptions that happen during cleanup just + # causes confusion; there's really nothing useful we can do with + # them. 
+ pass self.connection = None def reset(self): - """Reset a connection so it can be used again""" + """Reset a connection so it can be used again.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.session.close() @@ -431,7 +581,7 @@ class Connection(object): return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): - """Return an iterator that will consume from all queues/consumers""" + """Return an iterator that will consume from all queues/consumers.""" def _error_callback(exc): if isinstance(exc, qpid_exceptions.Empty): @@ -455,7 +605,7 @@ class Connection(object): yield self.ensure(_error_callback, _consume) def cancel_consumer_thread(self): - """Cancel a consumer thread""" + """Cancel a consumer thread.""" if self.consumer_thread is not None: self.consumer_thread.kill() try: @@ -470,7 +620,7 @@ class Connection(object): proxy_cb.wait() def publisher_send(self, cls, topic, msg): - """Send to a publisher based on the publisher class""" + """Send to a publisher based on the publisher class.""" def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} @@ -500,15 +650,15 @@ class Connection(object): topic, callback) def declare_fanout_consumer(self, topic, callback): - """Create a 'fanout' consumer""" + """Create a 'fanout' consumer.""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): - """Send a 'direct' message""" + """Send a 'direct' message.""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): - """Send a 'topic' message""" + """Send a 'topic' message.""" # # We want to create a message with attributes, e.g. a TTL. We # don't really need to keep 'msg' in its JSON format any longer @@ -523,24 +673,25 @@ class Connection(object): self.publisher_send(TopicPublisher, topic, qpid_message) def fanout_send(self, topic, msg): - """Send a 'fanout' message""" + """Send a 'fanout' message.""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): - """Send a notify message on a topic""" + """Send a notify message on a topic.""" self.publisher_send(NotifyPublisher, topic, msg) def consume(self, limit=None): - """Consume from all queues/consumers""" + """Consume from all queues/consumers.""" it = self.iterconsume(limit=limit) while True: try: - it.next() + six.next(it) except StopIteration: return def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread""" + """Consumer from all queues/consumers in a greenthread.""" + @excutils.forever_retry_uncaught_exceptions def _consumer_thread(): try: self.consume() @@ -551,7 +702,7 @@ class Connection(object): return self.consumer_thread def create_consumer(self, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object""" + """Create a consumer that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -567,7 +718,7 @@ class Connection(object): return consumer def create_worker(self, topic, proxy, pool_name): - """Create a worker that calls a method in a proxy object""" + """Create a worker that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -581,7 +732,7 @@ class Connection(object): return consumer def join_consumer_pool(self, callback, pool_name, topic, - exchange_name=None): + exchange_name=None, 
ack_on_error=True): """Register as a member of a group of consumers for a given topic from the specified exchange. @@ -595,6 +746,7 @@ class Connection(object): callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), + wait_for_consumers=not ack_on_error ) self.proxy_callbacks.append(callback_wrapper) @@ -610,7 +762,7 @@ class Connection(object): def create_connection(conf, new=True): - """Create a connection""" + """Create a connection.""" return rpc_amqp.create_connection( conf, new, rpc_amqp.get_connection_pool(conf, Connection)) diff --git a/designate/openstack/common/rpc/impl_zmq.py b/designate/openstack/common/rpc/impl_zmq.py index 81196b05c..43bc07f77 100644 --- a/designate/openstack/common/rpc/impl_zmq.py +++ b/designate/openstack/common/rpc/impl_zmq.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -25,12 +23,13 @@ import uuid import eventlet import greenlet from oslo.config import cfg +import six +from six import moves from designate.openstack.common import excutils from designate.openstack.common.gettextutils import _ from designate.openstack.common import importutils from designate.openstack.common import jsonutils -from designate.openstack.common import processutils as utils from designate.openstack.common.rpc import common as rpc_common zmq = importutils.try_import('eventlet.green.zmq') @@ -85,8 +84,8 @@ matchmaker = None # memoized matchmaker object def _serialize(data): - """ - Serialization wrapper + """Serialization wrapper. + We prefer using JSON, but it cannot encode all types. Error if a developer passes us bad data. """ @@ -98,18 +97,15 @@ def _serialize(data): def _deserialize(data): - """ - Deserialization wrapper - """ + """Deserialization wrapper.""" LOG.debug(_("Deserializing: %s"), data) return jsonutils.loads(data) class ZmqSocket(object): - """ - A tiny wrapper around ZeroMQ to simplify the send/recv protocol - and connection management. + """A tiny wrapper around ZeroMQ. + Simplifies the send/recv protocol and connection management. Can be used as a Context (supports the 'with' statement). """ @@ -180,7 +176,7 @@ class ZmqSocket(object): return # We must unsubscribe, or we'll leak descriptors. - if len(self.subscriptions) > 0: + if self.subscriptions: for f in self.subscriptions: try: self.sock.setsockopt(zmq.UNSUBSCRIBE, f) @@ -196,38 +192,36 @@ class ZmqSocket(object): # it would be much worse if some of the code calling this # were to fail. For now, lets log, and later evaluate # if we can safely raise here. 
- LOG.error("ZeroMQ socket could not be closed.") + LOG.error(_("ZeroMQ socket could not be closed.")) self.sock = None - def recv(self): + def recv(self, **kwargs): if not self.can_recv: raise RPCException(_("You cannot recv on this socket.")) - return self.sock.recv_multipart() + return self.sock.recv_multipart(**kwargs) - def send(self, data): + def send(self, data, **kwargs): if not self.can_send: raise RPCException(_("You cannot send on this socket.")) - self.sock.send_multipart(data) + self.sock.send_multipart(data, **kwargs) class ZmqClient(object): """Client for ZMQ sockets.""" - def __init__(self, addr, socket_type=None, bind=False): - if socket_type is None: - socket_type = zmq.PUSH - self.outq = ZmqSocket(addr, socket_type, bind=bind) + def __init__(self, addr): + self.outq = ZmqSocket(addr, zmq.PUSH, bind=False) - def cast(self, msg_id, topic, data, envelope=False): + def cast(self, msg_id, topic, data, envelope): msg_id = msg_id or 0 - if not (envelope or rpc_common._SEND_RPC_ENVELOPE): + if not envelope: self.outq.send(map(bytes, (msg_id, topic, 'cast', _serialize(data)))) return rpc_envelope = rpc_common.serialize_msg(data[1], envelope) - zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) + zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items()) self.outq.send(map(bytes, (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) @@ -276,12 +270,13 @@ class InternalContext(object): try: result = proxy.dispatch( - ctx, data['version'], data['method'], **data['args']) + ctx, data['version'], data['method'], + data.get('namespace'), **data['args']) return ConsumerBase.normalize_reply(result, ctx.replies) except greenlet.GreenletExit: # ignore these since they are just from shutdowns pass - except rpc_common.ClientException, e: + except rpc_common.ClientException as e: LOG.debug(_("Expected exception during message handling (%s)") % e._exc_info[1]) return {'exc': @@ -295,11 +290,16 @@ class InternalContext(object): def reply(self, ctx, proxy, msg_id=None, context=None, topic=None, msg=None): """Reply to a casted call.""" - # Our real method is curried into msg['args'] + # NOTE(ewindisch): context kwarg exists for Grizzly compat. + # this may be able to be removed earlier than + # 'I' if ConsumerBase.process were refactored. + if type(msg) is list: + payload = msg[-1] + else: + payload = msg - child_ctx = RpcContext.unmarshal(msg[0]) response = ConsumerBase.normalize_reply( - self._get_response(child_ctx, proxy, topic, msg[1]), + self._get_response(ctx, proxy, topic, payload), ctx.replies) LOG.debug(_("Sending reply")) @@ -346,20 +346,18 @@ class ConsumerBase(object): return proxy.dispatch(ctx, data['version'], - data['method'], **data['args']) + data['method'], data.get('namespace'), **data['args']) class ZmqBaseReactor(ConsumerBase): - """ - A consumer class implementing a - centralized casting broker (PULL-PUSH) - for RoundRobin requests. + """A consumer class implementing a centralized casting broker (PULL-PUSH). + + Used for RoundRobin requests. 
""" def __init__(self, conf): super(ZmqBaseReactor, self).__init__() - self.mapping = {} self.proxies = {} self.threads = [] self.sockets = [] @@ -367,9 +365,8 @@ class ZmqBaseReactor(ConsumerBase): self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) - def register(self, proxy, in_addr, zmq_type_in, out_addr=None, - zmq_type_out=None, in_bind=True, out_bind=True, - subscribe=None): + def register(self, proxy, in_addr, zmq_type_in, + in_bind=True, subscribe=None): LOG.info(_("Registering reactor")) @@ -385,22 +382,8 @@ class ZmqBaseReactor(ConsumerBase): LOG.info(_("In reactor registered")) - if not out_addr: - return - - if zmq_type_out not in (zmq.PUSH, zmq.PUB): - raise RPCException("Bad output socktype") - - # Items push out. - outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) - - self.mapping[inq] = outq - self.mapping[outq] = inq - self.sockets.append(outq) - - LOG.info(_("Out reactor registered")) - def consume_in_thread(self): + @excutils.forever_retry_uncaught_exceptions def _consume(sock): LOG.info(_("Consuming socket")) while True: @@ -424,10 +407,9 @@ class ZmqBaseReactor(ConsumerBase): class ZmqProxy(ZmqBaseReactor): - """ - A consumer class implementing a - topic-based proxy, forwarding to - IPC sockets. + """A consumer class implementing a topic-based proxy. + + Forwards to IPC sockets. """ def __init__(self, conf): @@ -440,11 +422,8 @@ class ZmqProxy(ZmqBaseReactor): def consume(self, sock): ipc_dir = CONF.rpc_zmq_ipc_dir - #TODO(ewindisch): use zero-copy (i.e. references, not copying) - data = sock.recv() - topic = data[1] - - LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) + data = sock.recv(copy=False) + topic = data[1].bytes if topic.startswith('fanout~'): sock_type = zmq.PUB @@ -486,9 +465,7 @@ class ZmqProxy(ZmqBaseReactor): while(True): data = self.topic_proxy[topic].get() - out_sock.send(data) - LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % - {'data': data}) + out_sock.send(data, copy=False) wait_sock_creation = eventlet.event.Event() eventlet.spawn(publisher, wait_sock_creation) @@ -501,37 +478,34 @@ class ZmqProxy(ZmqBaseReactor): try: self.topic_proxy[topic].put_nowait(data) - LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") % - {'data': data}) except eventlet.queue.Full: LOG.error(_("Local per-topic backlog buffer full for topic " "%(topic)s. 
Dropping message.") % {'topic': topic}) def consume_in_thread(self): - """Runs the ZmqProxy service""" + """Runs the ZmqProxy service.""" ipc_dir = CONF.rpc_zmq_ipc_dir consume_in = "tcp://%s:%s" % \ (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port) consumption_proxy = InternalContext(None) - if not os.path.isdir(ipc_dir): - try: - utils.execute('mkdir', '-p', ipc_dir, run_as_root=True) - utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()), - ipc_dir, run_as_root=True) - utils.execute('chmod', '750', ipc_dir, run_as_root=True) - except utils.ProcessExecutionError: + try: + os.makedirs(ipc_dir) + except os.error: + if not os.path.isdir(ipc_dir): with excutils.save_and_reraise_exception(): - LOG.error(_("Could not create IPC directory %s") % - (ipc_dir, )) - + LOG.error(_("Required IPC directory does not exist at" + " %s") % (ipc_dir, )) try: self.register(consumption_proxy, consume_in, - zmq.PULL, - out_bind=True) + zmq.PULL) except zmq.ZMQError: + if os.access(ipc_dir, os.X_OK): + with excutils.save_and_reraise_exception(): + LOG.error(_("Permission denied to IPC directory at" + " %s") % (ipc_dir, )) with excutils.save_and_reraise_exception(): LOG.error(_("Could not create ZeroMQ receiver daemon. " "Socket may already be in use.")) @@ -541,24 +515,24 @@ class ZmqProxy(ZmqBaseReactor): def unflatten_envelope(packenv): """Unflattens the RPC envelope. - Takes a list and returns a dictionary. - i.e. [1,2,3,4] => {1: 2, 3: 4} + + Takes a list and returns a dictionary. + i.e. [1,2,3,4] => {1: 2, 3: 4} """ i = iter(packenv) h = {} try: while True: - k = i.next() - h[k] = i.next() + k = six.next(i) + h[k] = six.next(i) except StopIteration: return h class ZmqReactor(ZmqBaseReactor): - """ - A consumer class implementing a - consumer for messages. Can also be - used as a 1:1 proxy + """A consumer class implementing a consumer for messages. + + Can also be used as a 1:1 proxy """ def __init__(self, conf): @@ -568,11 +542,6 @@ class ZmqReactor(ZmqBaseReactor): #TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv() LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) - if sock in self.mapping: - LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { - 'data': data}) - self.mapping[sock].send(data) - return proxy = self.proxies[sock] @@ -685,8 +654,8 @@ def _call(addr, context, topic, msg, timeout=None, 'method': '-reply', 'args': { 'msg_id': msg_id, - 'context': mcontext, 'topic': reply_topic, + # TODO(ewindisch): safe to remove mcontext in I. 'msg': [mcontext, msg] } } @@ -745,10 +714,9 @@ def _call(addr, context, topic, msg, timeout=None, def _multi_send(method, context, topic, msg, timeout=None, envelope=False, _msg_id=None): - """ - Wraps the sending of messages, - dispatches to the matchmaker and sends - message to all relevant hosts. + """Wraps the sending of messages. + + Dispatches to the matchmaker and sends message to all relevant hosts. """ conf = CONF LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) @@ -757,7 +725,7 @@ def _multi_send(method, context, topic, msg, timeout=None, LOG.debug(_("Sending message(s) to: %s"), queues) # Don't stack if we have no matchmaker results - if len(queues) == 0: + if not queues: LOG.warn(_("No matchmaker results. Not casting.")) # While not strictly a timeout, callers know how to handle # this exception and a timeout isn't too big a lie. @@ -805,8 +773,8 @@ def fanout_cast(conf, context, topic, msg, **kwargs): def notify(conf, context, topic, msg, envelope): - """ - Send notification event. 
+ """Send notification event. + Notifications are sent to topic-priority. This differs from the AMQP drivers which send to topic.priority. """ @@ -840,6 +808,11 @@ def _get_ctxt(): def _get_matchmaker(*args, **kwargs): global matchmaker if not matchmaker: - matchmaker = importutils.import_object( - CONF.rpc_zmq_matchmaker, *args, **kwargs) + mm = CONF.rpc_zmq_matchmaker + if mm.endswith('matchmaker.MatchMakerRing'): + mm.replace('matchmaker', 'matchmaker_ring') + LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use' + ' %(new)s instead') % dict( + orig=CONF.rpc_zmq_matchmaker, new=mm)) + matchmaker = importutils.import_object(mm, *args, **kwargs) return matchmaker diff --git a/designate/openstack/common/rpc/matchmaker.py b/designate/openstack/common/rpc/matchmaker.py index de532c8bf..fbae0a735 100644 --- a/designate/openstack/common/rpc/matchmaker.py +++ b/designate/openstack/common/rpc/matchmaker.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -19,8 +17,6 @@ return keys for direct exchanges, per (approximate) AMQP parlance. """ import contextlib -import itertools -import json import eventlet from oslo.config import cfg @@ -30,10 +26,6 @@ from designate.openstack.common import log as logging matchmaker_opts = [ - # Matchmaker ring file - cfg.StrOpt('matchmaker_ringfile', - default='/etc/nova/matchmaker_ring.json', - help='Matchmaker ring file (JSON)'), cfg.IntOpt('matchmaker_heartbeat_freq', default=300, help='Heartbeat frequency'), @@ -54,8 +46,8 @@ class MatchMakerException(Exception): class Exchange(object): - """ - Implements lookups. + """Implements lookups. + Subclass this to support hashtables, dns, etc. """ def __init__(self): @@ -66,9 +58,7 @@ class Exchange(object): class Binding(object): - """ - A binding on which to perform a lookup. - """ + """A binding on which to perform a lookup.""" def __init__(self): pass @@ -77,10 +67,10 @@ class Binding(object): class MatchMakerBase(object): - """ - Match Maker Base Class. - Build off HeartbeatMatchMakerBase if building a - heartbeat-capable MatchMaker. + """Match Maker Base Class. + + Build off HeartbeatMatchMakerBase if building a heartbeat-capable + MatchMaker. """ def __init__(self): # Array of tuples. Index [2] toggles negation, [3] is last-if-true @@ -90,58 +80,47 @@ class MatchMakerBase(object): 'registration or heartbeat.') def register(self, key, host): - """ - Register a host on a backend. + """Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. """ pass def ack_alive(self, key, host): - """ - Acknowledge that a key.host is alive. - Used internally for updating heartbeats, - but may also be used publically to acknowledge - a system is alive (i.e. rpc message successfully - sent to host) + """Acknowledge that a key.host is alive. + + Used internally for updating heartbeats, but may also be used + publicly to acknowledge a system is alive (i.e. rpc message + successfully sent to host) """ pass def is_alive(self, topic, host): - """ - Checks if a host is alive. - """ + """Checks if a host is alive.""" pass def expire(self, topic, host): - """ - Explicitly expire a host's registration. - """ + """Explicitly expire a host's registration.""" pass def send_heartbeats(self): - """ - Send all heartbeats. + """Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. 
""" pass def unregister(self, key, host): - """ - Unregister a topic. - """ + """Unregister a topic.""" pass def start_heartbeat(self): - """ - Spawn heartbeat greenthread. - """ + """Spawn heartbeat greenthread.""" pass def stop_heartbeat(self): - """ - Destroys the heartbeat greenthread. - """ + """Destroys the heartbeat greenthread.""" pass def add_binding(self, binding, rule, last=True): @@ -168,10 +147,10 @@ class MatchMakerBase(object): class HeartbeatMatchMakerBase(MatchMakerBase): - """ - Base for a heart-beat capable MatchMaker. - Provides common methods for registering, - unregistering, and maintaining heartbeats. + """Base for a heart-beat capable MatchMaker. + + Provides common methods for registering, unregistering, and maintaining + heartbeats. """ def __init__(self): self.hosts = set() @@ -181,8 +160,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase): super(HeartbeatMatchMakerBase, self).__init__() def send_heartbeats(self): - """ - Send all heartbeats. + """Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ @@ -190,32 +169,31 @@ class HeartbeatMatchMakerBase(MatchMakerBase): self.ack_alive(key, host) def ack_alive(self, key, host): - """ - Acknowledge that a host.topic is alive. - Used internally for updating heartbeats, - but may also be used publically to acknowledge - a system is alive (i.e. rpc message successfully - sent to host) + """Acknowledge that a host.topic is alive. + + Used internally for updating heartbeats, but may also be used + publicly to acknowledge a system is alive (i.e. rpc message + successfully sent to host) """ raise NotImplementedError("Must implement ack_alive") def backend_register(self, key, host): - """ - Implements registration logic. + """Implements registration logic. + Called by register(self,key,host) """ raise NotImplementedError("Must implement backend_register") def backend_unregister(self, key, key_host): - """ - Implements de-registration logic. + """Implements de-registration logic. + Called by unregister(self,key,host) """ raise NotImplementedError("Must implement backend_unregister") def register(self, key, host): - """ - Register a host on a backend. + """Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. """ self.hosts.add(host) @@ -227,25 +205,24 @@ class HeartbeatMatchMakerBase(MatchMakerBase): self.ack_alive(key, host) def unregister(self, key, host): - """ - Unregister a topic. - """ + """Unregister a topic.""" if (key, host) in self.host_topic: del self.host_topic[(key, host)] self.hosts.discard(host) self.backend_unregister(key, '.'.join((key, host))) - LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) + LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"), + {'key': key, 'host': host}) def start_heartbeat(self): - """ - Implementation of MatchMakerBase.start_heartbeat + """Implementation of MatchMakerBase.start_heartbeat. + Launches greenthread looping send_heartbeats(), yielding for CONF.matchmaker_heartbeat_freq seconds between iterations. """ - if len(self.hosts) == 0: + if not self.hosts: raise MatchMakerException( _("Register before starting heartbeat.")) @@ -257,45 +234,37 @@ class HeartbeatMatchMakerBase(MatchMakerBase): self._heart = eventlet.spawn(do_heartbeat) def stop_heartbeat(self): - """ - Destroys the heartbeat greenthread. - """ + """Destroys the heartbeat greenthread.""" if self._heart: self._heart.kill() class DirectBinding(Binding): - """ - Specifies a host in the key via a '.' 
character + """Specifies a host in the key via a '.' character. + Although dots are used in the key, the behavior here is that it maps directly to a host, thus direct. """ def test(self, key): - if '.' in key: - return True - return False + return '.' in key class TopicBinding(Binding): - """ - Where a 'bare' key without dots. + """Where a 'bare' key without dots. + AMQP generally considers topic exchanges to be those *with* dots, but we deviate here in terminology as the behavior here matches that of a topic exchange (whereas where there are dots, behavior matches that of a direct exchange. """ def test(self, key): - if '.' not in key: - return True - return False + return '.' not in key class FanoutBinding(Binding): """Match on fanout keys, where key starts with 'fanout.' string.""" def test(self, key): - if key.startswith('fanout~'): - return True - return False + return key.startswith('fanout~') class StubExchange(Exchange): @@ -304,67 +273,6 @@ class StubExchange(Exchange): return [(key, None)] -class RingExchange(Exchange): - """ - Match Maker where hosts are loaded from a static file containing - a hashmap (JSON formatted). - - __init__ takes optional ring dictionary argument, otherwise - loads the ringfile from CONF.mathcmaker_ringfile. - """ - def __init__(self, ring=None): - super(RingExchange, self).__init__() - - if ring: - self.ring = ring - else: - fh = open(CONF.matchmaker_ringfile, 'r') - self.ring = json.load(fh) - fh.close() - - self.ring0 = {} - for k in self.ring.keys(): - self.ring0[k] = itertools.cycle(self.ring[k]) - - def _ring_has(self, key): - if key in self.ring0: - return True - return False - - -class RoundRobinRingExchange(RingExchange): - """A Topic Exchange based on a hashmap.""" - def __init__(self, ring=None): - super(RoundRobinRingExchange, self).__init__(ring) - - def run(self, key): - if not self._ring_has(key): - LOG.warn( - _("No key defining hosts for topic '%s', " - "see ringfile") % (key, ) - ) - return [] - host = next(self.ring0[key]) - return [(key + '.' + host, host)] - - -class FanoutRingExchange(RingExchange): - """Fanout Exchange based on a hashmap.""" - def __init__(self, ring=None): - super(FanoutRingExchange, self).__init__(ring) - - def run(self, key): - # Assume starts with "fanout~", strip it for lookup. - nkey = key.split('fanout~')[1:][0] - if not self._ring_has(nkey): - LOG.warn( - _("No key defining hosts for topic '%s', " - "see ringfile") % (nkey, ) - ) - return [] - return map(lambda x: (key + '.' + x, x), self.ring[nkey]) - - class LocalhostExchange(Exchange): """Exchange where all direct topics are local.""" def __init__(self, host='localhost'): @@ -376,8 +284,8 @@ class LocalhostExchange(Exchange): class DirectExchange(Exchange): - """ - Exchange where all topic keys are split, sending to second half. + """Exchange where all topic keys are split, sending to second half. + i.e. "compute.host" sends a message to "compute.host" running on "host" """ def __init__(self): @@ -388,20 +296,9 @@ class DirectExchange(Exchange): return [(key, e)] -class MatchMakerRing(MatchMakerBase): - """ - Match Maker where hosts are loaded from a static hashmap. - """ - def __init__(self, ring=None): - super(MatchMakerRing, self).__init__() - self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) - self.add_binding(DirectBinding(), DirectExchange()) - self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) - - class MatchMakerLocalhost(MatchMakerBase): - """ - Match Maker where all bare topics resolve to localhost. 
+ """Match Maker where all bare topics resolve to localhost. + Useful for testing. """ def __init__(self, host='localhost'): @@ -412,13 +309,13 @@ class MatchMakerLocalhost(MatchMakerBase): class MatchMakerStub(MatchMakerBase): - """ - Match Maker where topics are untouched. + """Match Maker where topics are untouched. + Useful for testing, or for AMQP/brokered queues. Will not work where knowledge of hosts is known (i.e. zeromq) """ def __init__(self): - super(MatchMakerLocalhost, self).__init__() + super(MatchMakerStub, self).__init__() self.add_binding(FanoutBinding(), StubExchange()) self.add_binding(DirectBinding(), StubExchange()) diff --git a/designate/openstack/common/rpc/matchmaker_redis.py b/designate/openstack/common/rpc/matchmaker_redis.py index f2142161a..66a2ef49c 100644 --- a/designate/openstack/common/rpc/matchmaker_redis.py +++ b/designate/openstack/common/rpc/matchmaker_redis.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -55,8 +53,8 @@ class RedisExchange(mm_common.Exchange): class RedisTopicExchange(RedisExchange): - """ - Exchange where all topic keys are split, sending to second half. + """Exchange where all topic keys are split, sending to second half. + i.e. "compute.host" sends a message to "compute" running on "host" """ def run(self, topic): @@ -77,9 +75,7 @@ class RedisTopicExchange(RedisExchange): class RedisFanoutExchange(RedisExchange): - """ - Return a list of all hosts. - """ + """Return a list of all hosts.""" def run(self, topic): topic = topic.split('~', 1)[1] hosts = self.redis.smembers(topic) @@ -90,16 +86,14 @@ class RedisFanoutExchange(RedisExchange): class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): - """ - MatchMaker registering and looking-up hosts with a Redis server. - """ + """MatchMaker registering and looking-up hosts with a Redis server.""" def __init__(self): super(MatchMakerRedis, self).__init__() if not redis: raise ImportError("Failed to import module redis.") - self.redis = redis.StrictRedis( + self.redis = redis.Redis( host=CONF.matchmaker_redis.host, port=CONF.matchmaker_redis.port, password=CONF.matchmaker_redis.password) diff --git a/designate/openstack/common/rpc/matchmaker_ring.py b/designate/openstack/common/rpc/matchmaker_ring.py new file mode 100644 index 000000000..19dc61779 --- /dev/null +++ b/designate/openstack/common/rpc/matchmaker_ring.py @@ -0,0 +1,106 @@ +# Copyright 2011-2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +The MatchMaker classes should except a Topic or Fanout exchange key and +return keys for direct exchanges, per (approximate) AMQP parlance. 
+""" + +import itertools +import json + +from oslo.config import cfg + +from designate.openstack.common.gettextutils import _ +from designate.openstack.common import log as logging +from designate.openstack.common.rpc import matchmaker as mm + + +matchmaker_opts = [ + # Matchmaker ring file + cfg.StrOpt('ringfile', + deprecated_name='matchmaker_ringfile', + deprecated_group='DEFAULT', + default='/etc/oslo/matchmaker_ring.json', + help='Matchmaker ring file (JSON)'), +] + +CONF = cfg.CONF +CONF.register_opts(matchmaker_opts, 'matchmaker_ring') +LOG = logging.getLogger(__name__) + + +class RingExchange(mm.Exchange): + """Match Maker where hosts are loaded from a static JSON formatted file. + + __init__ takes optional ring dictionary argument, otherwise + loads the ringfile from CONF.mathcmaker_ringfile. + """ + def __init__(self, ring=None): + super(RingExchange, self).__init__() + + if ring: + self.ring = ring + else: + fh = open(CONF.matchmaker_ring.ringfile, 'r') + self.ring = json.load(fh) + fh.close() + + self.ring0 = {} + for k in self.ring.keys(): + self.ring0[k] = itertools.cycle(self.ring[k]) + + def _ring_has(self, key): + return key in self.ring0 + + +class RoundRobinRingExchange(RingExchange): + """A Topic Exchange based on a hashmap.""" + def __init__(self, ring=None): + super(RoundRobinRingExchange, self).__init__(ring) + + def run(self, key): + if not self._ring_has(key): + LOG.warn( + _("No key defining hosts for topic '%s', " + "see ringfile") % (key, ) + ) + return [] + host = next(self.ring0[key]) + return [(key + '.' + host, host)] + + +class FanoutRingExchange(RingExchange): + """Fanout Exchange based on a hashmap.""" + def __init__(self, ring=None): + super(FanoutRingExchange, self).__init__(ring) + + def run(self, key): + # Assume starts with "fanout~", strip it for lookup. + nkey = key.split('fanout~')[1:][0] + if not self._ring_has(nkey): + LOG.warn( + _("No key defining hosts for topic '%s', " + "see ringfile") % (nkey, ) + ) + return [] + return map(lambda x: (key + '.' + x, x), self.ring[nkey]) + + +class MatchMakerRing(mm.MatchMakerBase): + """Match Maker where hosts are loaded from a static hashmap.""" + def __init__(self, ring=None): + super(MatchMakerRing, self).__init__() + self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring)) + self.add_binding(mm.DirectBinding(), mm.DirectExchange()) + self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring)) diff --git a/designate/openstack/common/rpc/proxy.py b/designate/openstack/common/rpc/proxy.py index 6d187dd32..06c4826fb 100644 --- a/designate/openstack/common/rpc/proxy.py +++ b/designate/openstack/common/rpc/proxy.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. +# Copyright 2012-2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -21,8 +19,11 @@ For more information about rpc API version numbers, see: rpc/dispatcher.py """ +import six from designate.openstack.common import rpc +from designate.openstack.common.rpc import common as rpc_common +from designate.openstack.common.rpc import serializer as rpc_serializer class RpcProxy(object): @@ -34,16 +35,28 @@ class RpcProxy(object): rpc API. """ - def __init__(self, topic, default_version): + # The default namespace, which can be overridden in a subclass. 
+ RPC_API_NAMESPACE = None + + def __init__(self, topic, default_version, version_cap=None, + serializer=None): """Initialize an RpcProxy. :param topic: The topic to use for all messages. :param default_version: The default API version to request in all outgoing messages. This can be overridden on a per-message basis. + :param version_cap: Optionally cap the maximum version used for sent + messages. + :param serializer: Optionaly (de-)serialize entities with a + provided helper. """ self.topic = topic self.default_version = default_version + self.version_cap = version_cap + if serializer is None: + serializer = rpc_serializer.NoOpSerializer() + self.serializer = serializer super(RpcProxy, self).__init__() def _set_version(self, msg, vers): @@ -52,15 +65,44 @@ class RpcProxy(object): :param msg: The message having a version added to it. :param vers: The version number to add to the message. """ - msg['version'] = vers if vers else self.default_version + v = vers if vers else self.default_version + if (self.version_cap and not + rpc_common.version_is_compatible(self.version_cap, v)): + raise rpc_common.RpcVersionCapError(version_cap=self.version_cap) + msg['version'] = v def _get_topic(self, topic): """Return the topic to use for a message.""" return topic if topic else self.topic + def can_send_version(self, version): + """Check to see if a version is compatible with the version cap.""" + return (not self.version_cap or + rpc_common.version_is_compatible(self.version_cap, version)) + @staticmethod - def make_msg(method, **kwargs): - return {'method': method, 'args': kwargs} + def make_namespaced_msg(method, namespace, **kwargs): + return {'method': method, 'namespace': namespace, 'args': kwargs} + + def make_msg(self, method, **kwargs): + return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE, + **kwargs) + + def _serialize_msg_args(self, context, kwargs): + """Helper method called to serialize message arguments. + + This calls our serializer on each argument, returning a new + set of args that have been serialized. + + :param context: The request context + :param kwargs: The arguments to serialize + :returns: A new set of serialized arguments + """ + new_kwargs = dict() + for argname, arg in six.iteritems(kwargs): + new_kwargs[argname] = self.serializer.serialize_entity(context, + arg) + return new_kwargs def call(self, context, msg, topic=None, version=None, timeout=None): """rpc.call() a remote method. @@ -68,16 +110,23 @@ class RpcProxy(object): :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. :param timeout: (Optional) A timeout to use when waiting for the response. If no timeout is specified, a default timeout will be used that is usually sufficient. - :param version: (Optional) Override the requested API version in this - message. :returns: The return value from the remote method. 
""" self._set_version(msg, version) - return rpc.call(context, self._get_topic(topic), msg, timeout) + msg['args'] = self._serialize_msg_args(context, msg['args']) + real_topic = self._get_topic(topic) + try: + result = rpc.call(context, real_topic, msg, timeout) + return self.serializer.deserialize_entity(context, result) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) def multicall(self, context, msg, topic=None, version=None, timeout=None): """rpc.multicall() a remote method. @@ -85,17 +134,24 @@ class RpcProxy(object): :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. :param timeout: (Optional) A timeout to use when waiting for the response. If no timeout is specified, a default timeout will be used that is usually sufficient. - :param version: (Optional) Override the requested API version in this - message. :returns: An iterator that lets you process each of the returned values from the remote method as they arrive. """ self._set_version(msg, version) - return rpc.multicall(context, self._get_topic(topic), msg, timeout) + msg['args'] = self._serialize_msg_args(context, msg['args']) + real_topic = self._get_topic(topic) + try: + result = rpc.multicall(context, real_topic, msg, timeout) + return self.serializer.deserialize_entity(context, result) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) def cast(self, context, msg, topic=None, version=None): """rpc.cast() a remote method. @@ -110,6 +166,7 @@ class RpcProxy(object): remote method. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.cast(context, self._get_topic(topic), msg) def fanout_cast(self, context, msg, topic=None, version=None): @@ -125,6 +182,7 @@ class RpcProxy(object): from the remote method. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.fanout_cast(context, self._get_topic(topic), msg) def cast_to_server(self, context, server_params, msg, topic=None, @@ -143,6 +201,7 @@ class RpcProxy(object): return values. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) def fanout_cast_to_server(self, context, server_params, msg, topic=None, @@ -161,5 +220,6 @@ class RpcProxy(object): return values. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.fanout_cast_to_server(context, server_params, self._get_topic(topic), msg) diff --git a/designate/openstack/common/rpc/serializer.py b/designate/openstack/common/rpc/serializer.py new file mode 100644 index 000000000..9bc6e2a3a --- /dev/null +++ b/designate/openstack/common/rpc/serializer.py @@ -0,0 +1,54 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides the definition of an RPC serialization handler""" + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class Serializer(object): + """Generic (de-)serialization definition base class.""" + + @abc.abstractmethod + def serialize_entity(self, context, entity): + """Serialize something to primitive form. + + :param context: Security context + :param entity: Entity to be serialized + :returns: Serialized form of entity + """ + pass + + @abc.abstractmethod + def deserialize_entity(self, context, entity): + """Deserialize something from primitive form. + + :param context: Security context + :param entity: Primitive to be deserialized + :returns: Deserialized form of entity + """ + pass + + +class NoOpSerializer(Serializer): + """A serializer that does nothing.""" + + def serialize_entity(self, context, entity): + return entity + + def deserialize_entity(self, context, entity): + return entity diff --git a/designate/openstack/common/rpc/service.py b/designate/openstack/common/rpc/service.py index a0d072190..5a9b53c8a 100644 --- a/designate/openstack/common/rpc/service.py +++ b/designate/openstack/common/rpc/service.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -30,11 +28,13 @@ LOG = logging.getLogger(__name__) class Service(service.Service): """Service object for binaries running on hosts. - A service enables rpc by listening to queues based on topic and host.""" - def __init__(self, host, topic, manager=None): + A service enables rpc by listening to queues based on topic and host. + """ + def __init__(self, host, topic, manager=None, serializer=None): super(Service, self).__init__() self.host = host self.topic = topic + self.serializer = serializer if manager is None: self.manager = self else: @@ -47,7 +47,8 @@ class Service(service.Service): LOG.debug(_("Creating Consumer connection for Service %s") % self.topic) - dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) + dispatcher = rpc_dispatcher.RpcDispatcher([self.manager], + self.serializer) # Share this same connection for these Consumers self.conn.create_consumer(self.topic, dispatcher, fanout=False) diff --git a/designate/openstack/common/notifier/rabbit_notifier.py b/designate/openstack/common/rpc/zmq_receiver.py similarity index 54% rename from designate/openstack/common/notifier/rabbit_notifier.py rename to designate/openstack/common/rpc/zmq_receiver.py index 761a317df..fc9d6e40e 100644 --- a/designate/openstack/common/notifier/rabbit_notifier.py +++ b/designate/openstack/common/rpc/zmq_receiver.py @@ -1,5 +1,4 @@ -# Copyright 2012 Red Hat, Inc. -# All Rights Reserved. +# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,17 +12,27 @@ # License for the specific language governing permissions and limitations # under the License. 
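# A minimal sketch of plugging a custom serializer into the hooks
# introduced above. DateTimeSerializer is hypothetical -- any
# Serializer subclass that round-trips entities to JSON-safe
# primitives can be handed to RpcProxy(serializer=...) or to
# rpc.service.Service(serializer=...).
import datetime

from designate.openstack.common.rpc import serializer


class DateTimeSerializer(serializer.Serializer):
    """Round-trips datetimes through timestamp strings."""

    FORMAT = '%Y-%m-%dT%H:%M:%S.%f'

    def serialize_entity(self, context, entity):
        if isinstance(entity, datetime.datetime):
            return entity.strftime(self.FORMAT)
        return entity

    def deserialize_entity(self, context, entity):
        try:
            return datetime.datetime.strptime(entity, self.FORMAT)
        except (TypeError, ValueError):
            # Not a serialized datetime; pass it through untouched.
            return entity

# e.g. proxy = RpcProxy('topic', '1.0', serializer=DateTimeSerializer())
# _serialize_msg_args() then applies it to every outgoing argument, and
# call()/multicall() deserialize results on the way back.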
+import eventlet +eventlet.monkey_patch() + +import contextlib +import sys + +from oslo.config import cfg -from designate.openstack.common.gettextutils import _ from designate.openstack.common import log as logging -from designate.openstack.common.notifier import rpc_notifier +from designate.openstack.common import rpc +from designate.openstack.common.rpc import impl_zmq -LOG = logging.getLogger(__name__) +CONF = cfg.CONF +CONF.register_opts(rpc.rpc_opts) +CONF.register_opts(impl_zmq.zmq_opts) -def notify(context, message): - """Deprecated in Grizzly. Please use rpc_notifier instead.""" +def main(): + CONF(sys.argv[1:], project='oslo') + logging.setup("oslo") - LOG.deprecated(_("The rabbit_notifier is now deprecated." - " Please use rpc_notifier instead.")) - rpc_notifier.notify(context, message) + with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: + reactor.consume_in_thread() + reactor.wait() diff --git a/designate/openstack/common/service.py b/designate/openstack/common/service.py index 75394bc7f..2b830a0e5 100644 --- a/designate/openstack/common/service.py +++ b/designate/openstack/common/service.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara @@ -20,14 +18,23 @@ """Generic Node base class for all workers that run on hosts.""" import errno +import logging as std_logging import os import random import signal import sys +import threading import time +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + import eventlet -import logging as std_logging from oslo.config import cfg from designate.openstack.common import eventlet_backdoor @@ -42,6 +49,53 @@ CONF = cfg.CONF LOG = logging.getLogger(__name__) +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. 
+ return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + class Launcher(object): """Launch one or more services and wait for them to complete.""" @@ -51,19 +105,8 @@ class Launcher(object): :returns: None """ - self._services = threadgroup.ThreadGroup() - eventlet_backdoor.initialize_if_enabled() - - @staticmethod - def run_service(service): - """Start and wait for a service to finish. - - :param service: service to run and wait for. - :returns: None - - """ - service.start() - service.wait() + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() def launch_service(self, service): """Load and start the given service. @@ -72,7 +115,8 @@ class Launcher(object): :returns: None """ - self._services.add_thread(self.run_service, service) + service.backdoor_port = self.backdoor_port + self.services.add(service) def stop(self): """Stop all services which are currently running. @@ -80,7 +124,7 @@ class Launcher(object): :returns: None """ - self._services.stop() + self.services.stop() def wait(self): """Waits until all services have been stopped, and then returns. @@ -88,7 +132,16 @@ class Launcher(object): :returns: None """ - self._services.wait() + self.services.wait() + + def restart(self): + """Reload config files and restart service. + + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() class SignalExit(SystemExit): @@ -100,33 +153,48 @@ class SignalExit(SystemExit): class ServiceLauncher(Launcher): def _handle_signal(self, signo, frame): # Allow the process to be killed again and die from natural causes - signal.signal(signal.SIGTERM, signal.SIG_DFL) - signal.signal(signal.SIGINT, signal.SIG_DFL) - + _set_signals_handler(signal.SIG_DFL) raise SignalExit(signo) - def wait(self): - signal.signal(signal.SIGTERM, self._handle_signal) - signal.signal(signal.SIGINT, self._handle_signal) + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 LOG.debug(_('Full set of CONF:')) CONF.log_opt_values(LOG, std_logging.DEBUG) - status = None try: + if ready_callback: + ready_callback() super(ServiceLauncher, self).wait() except SignalExit as exc: - signame = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'}[exc.signo] + signame = _signo_to_signame(exc.signo) LOG.info(_('Caught %s, exiting'), signame) status = exc.code + signo = exc.signo except SystemExit as exc: status = exc.code finally: - if rpc: - rpc.cleanup() self.stop() - return status + if rpc: + try: + rpc.cleanup() + except Exception: + # We're shutting down, so it doesn't matter at this point. + LOG.exception(_('Exception during rpc cleanup.')) + + return status, signo + + def wait(self, ready_callback=None): + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() class ServiceWrapper(object): @@ -138,23 +206,29 @@ class ServiceWrapper(object): class ProcessLauncher(object): - def __init__(self): + def __init__(self, wait_interval=0.01): + """Constructor. 
+ + :param wait_interval: The interval to sleep for between checks + of child process exit. + """ self.children = {} self.sigcaught = None self.running = True + self.wait_interval = wait_interval rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() - signal.signal(signal.SIGTERM, self._handle_signal) - signal.signal(signal.SIGINT, self._handle_signal) + def handle_signal(self): + _set_signals_handler(self._handle_signal) def _handle_signal(self, signo, frame): self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes - signal.signal(signal.SIGTERM, signal.SIG_DFL) - signal.signal(signal.SIGINT, signal.SIG_DFL) + _set_signals_handler(signal.SIG_DFL) def _pipe_watcher(self): # This will block until the write end is closed when the parent @@ -165,16 +239,49 @@ class ProcessLauncher(object): sys.exit(1) - def _child_process(self, service): + def _child_process_handle_signal(self): # Setup child signal handlers differently def _sigterm(*args): signal.signal(signal.SIGTERM, signal.SIG_DFL) raise SignalExit(signal.SIGTERM) + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) # Block SIGINT and let the parent send us a SIGTERM signal.signal(signal.SIGINT, signal.SIG_IGN) + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. + try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + # Reopen the eventlet hub to make sure we don't share an epoll # fd with parent and/or siblings, which would be bad eventlet.hubs.use_hub() @@ -188,7 +295,8 @@ class ProcessLauncher(object): random.seed() launcher = Launcher() - launcher.run_service(service) + launcher.launch_service(service) + return launcher def _start_child(self, wrap): if len(wrap.forktimes) > wrap.workers: @@ -206,24 +314,13 @@ class ProcessLauncher(object): pid = os.fork() if pid == 0: - # NOTE(johannes): All exceptions are caught to ensure this - # doesn't fallback into the loop spawning children. It would - # be bad for a child to spawn more children. 
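# Standalone sketch of the daemon-detection heuristic that
# _is_daemon() above relies on: a foreground process shares its
# process group with the controlling terminal, while a daemon either
# differs or has no terminal at all (ENOTTY). The io.UnsupportedOperation
# case for a missing stdout fileno is omitted here for brevity.
import errno
import os
import sys


def demo_is_daemon():
    try:
        return os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except OSError as err:
        if err.errno == errno.ENOTTY:
            # No controlling terminal: assume we are a daemon.
            return True
        raise


if __name__ == '__main__':
    print('running as a daemon: %s' % demo_is_daemon())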
- status = 0 - try: - self._child_process(wrap.service) - except SignalExit as exc: - signame = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'}[exc.signo] - LOG.info(_('Caught %s, exiting'), signame) - status = exc.code - except SystemExit as exc: - status = exc.code - except BaseException: - LOG.exception(_('Unhandled exception')) - status = 2 - finally: - wrap.service.stop() + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() os._exit(status) @@ -269,28 +366,37 @@ class ProcessLauncher(object): wrap.children.remove(pid) return wrap - def wait(self): - """Loop waiting on children to die and respawning as necessary""" - - LOG.debug(_('Full set of CONF:')) - CONF.log_opt_values(LOG, std_logging.DEBUG) - + def _respawn_children(self): while self.running: wrap = self._wait_child() if not wrap: # Yield to other threads if no children have exited # Sleep for a short time to avoid excessive CPU usage # (see bug #1095346) - eventlet.greenthread.sleep(.01) + eventlet.greenthread.sleep(self.wait_interval) continue - while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) - if self.sigcaught: - signame = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'}[self.sigcaught] - LOG.info(_('Caught %s, stopping children'), signame) + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + while True: + self.handle_signal() + self._respawn_children() + if self.sigcaught: + signame = _signo_to_signame(self.sigcaught) + LOG.info(_('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None for pid in self.children: try: @@ -312,15 +418,71 @@ class Service(object): def __init__(self, threads=1000): self.tg = threadgroup.ThreadGroup(threads) + # signal that the service is done shutting itself down: + self._done = threading.Event() + + def reset(self): + self._done = threading.Event() + def start(self): pass def stop(self): self.tg.stop() + self.tg.wait() + # Signal that service cleanup is done: + self._done.set() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = threading.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in self.services: + service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + self.done.set() + + # reap threads: + self.tg.stop() def wait(self): self.tg.wait() + def restart(self): + self.stop() + self.done = threading.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. 
+ + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + def launch(service, workers=None): if workers: diff --git a/designate/openstack/common/sslutils.py b/designate/openstack/common/sslutils.py index e264ef62f..915a98007 100644 --- a/designate/openstack/common/sslutils.py +++ b/designate/openstack/common/sslutils.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 IBM +# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -78,3 +76,23 @@ def wrap(sock): ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED return ssl.wrap_socket(sock, **ssl_kwargs) + + +_SSL_PROTOCOLS = { + "tlsv1": ssl.PROTOCOL_TLSv1, + "sslv23": ssl.PROTOCOL_SSLv23, + "sslv3": ssl.PROTOCOL_SSLv3 +} + +try: + _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2 +except AttributeError: + pass + + +def validate_ssl_version(version): + key = version.lower() + try: + return _SSL_PROTOCOLS[key] + except KeyError: + raise RuntimeError(_("Invalid SSL version : %s") % version) diff --git a/designate/openstack/common/strutils.py b/designate/openstack/common/strutils.py index ecf3cfdc4..570f1d3bd 100644 --- a/designate/openstack/common/strutils.py +++ b/designate/openstack/common/strutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -19,15 +17,35 @@ System-level utilities and helper functions. """ -import logging +import re import sys +import unicodedata -LOG = logging.getLogger(__name__) +import six + +from designate.openstack.common.gettextutils import _ + + +# Used for looking up extensions of text +# to their 'multiplied' byte amount +BYTE_MULTIPLIERS = { + '': 1, + 't': 1024 ** 4, + 'g': 1024 ** 3, + 'm': 1024 ** 2, + 'k': 1024, +} +BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)') + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") def int_from_bool_as_string(subject): - """ - Interpret a string as a boolean and return either 1 or 0. + """Interpret a string as a boolean and return either 1 or 0. Any string value in: @@ -40,42 +58,53 @@ def int_from_bool_as_string(subject): return bool_from_string(subject) and 1 or 0 -def bool_from_string(subject): +def bool_from_string(subject, strict=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else is considered False. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. """ - Interpret a string as a boolean. + if not isinstance(subject, six.string_types): + subject = str(subject) - Any string value in: + lowered = subject.strip().lower() - ('True', 'true', 'On', 'on', 'Yes', 'yes', '1') - - is interpreted as a boolean True. 
- - Useful for JSON-decoded stuff and config file parsing - """ - if isinstance(subject, bool): - return subject - if isinstance(subject, basestring): - if subject.strip().lower() in ('true', 'on', 'yes', '1'): - return True - return False + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return False def safe_decode(text, incoming=None, errors='strict'): - """ - Decodes incoming str using `incoming` if they're - not already unicode. + """Decodes incoming str using `incoming` if they're not already unicode. :param incoming: Text's current encoding :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: text or a unicode `incoming` encoded representation of it. - :raises TypeError: If text is not an isntance of basestring + :raises TypeError: If text is not an instance of str """ - if not isinstance(text, basestring): + if not isinstance(text, six.string_types): raise TypeError("%s can't be decoded" % type(text)) - if isinstance(text, unicode): + if isinstance(text, six.text_type): return text if not incoming: @@ -102,11 +131,10 @@ def safe_decode(text, incoming=None, errors='strict'): def safe_encode(text, incoming=None, encoding='utf-8', errors='strict'): - """ - Encodes incoming str/unicode using `encoding`. If - incoming is not specified, text is expected to - be encoded with current python's default encoding. - (`sys.getdefaultencoding`) + """Encodes incoming str/unicode using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) :param incoming: Text's current encoding :param encoding: Expected encoding for text (Default UTF-8) @@ -114,20 +142,81 @@ def safe_encode(text, incoming=None, values http://docs.python.org/2/library/codecs.html :returns: text or a bytestring `encoding` encoded representation of it. - :raises TypeError: If text is not an isntance of basestring + :raises TypeError: If text is not an instance of str """ - if not isinstance(text, basestring): + if not isinstance(text, six.string_types): raise TypeError("%s can't be encoded" % type(text)) if not incoming: incoming = (sys.stdin.encoding or sys.getdefaultencoding()) - if isinstance(text, unicode): - return text.encode(encoding, errors) + if isinstance(text, six.text_type): + if six.PY3: + return text.encode(encoding, errors).decode(incoming) + else: + return text.encode(encoding, errors) elif text and encoding != incoming: # Decode text before encoding it with `encoding` text = safe_decode(text, incoming, errors) - return text.encode(encoding, errors) + if six.PY3: + return text.encode(encoding, errors).decode(incoming) + else: + return text.encode(encoding, errors) return text + + +def to_bytes(text, default=0): + """Converts a string into an integer of bytes. + + Looks at the last characters of the text to determine + what conversion is needed to turn the input text into a byte number. + Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive) + + :param text: String input for bytes size conversion. + :param default: Default return value when text is blank. 
+ + """ + match = BYTE_REGEX.search(text) + if match: + magnitude = int(match.group(1)) + mult_key_org = match.group(2) + if not mult_key_org: + return magnitude + elif text: + msg = _('Invalid string format: %s') % text + raise TypeError(msg) + else: + return default + mult_key = mult_key_org.lower().replace('b', '', 1) + multiplier = BYTE_MULTIPLIERS.get(mult_key) + if multiplier is None: + msg = _('Unknown byte multiplier: %s') % mult_key_org + raise TypeError(msg) + return magnitude * multiplier + + +def to_slug(value, incoming=None, errors="strict"): + """Normalize string. + + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. + + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of str + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) diff --git a/designate/openstack/common/test.py b/designate/openstack/common/test.py index 09d9210a8..43a656e4a 100644 --- a/designate/openstack/common/test.py +++ b/designate/openstack/common/test.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # @@ -26,11 +24,12 @@ _TRUE_VALUES = ('True', 'true', '1', 'yes') class BaseTestCase(testtools.TestCase): + def setUp(self): super(BaseTestCase, self).setUp() self._set_timeout() self._fake_output() - self.useFixture(fixtures.FakeLogger('designate.openstack.common')) + self.useFixture(fixtures.FakeLogger()) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) diff --git a/designate/openstack/common/threadgroup.py b/designate/openstack/common/threadgroup.py index fbf6916bd..00cc54112 100644 --- a/designate/openstack/common/threadgroup.py +++ b/designate/openstack/common/threadgroup.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. -from eventlet import greenlet +import eventlet from eventlet import greenpool from eventlet import greenthread @@ -26,7 +24,7 @@ LOG = logging.getLogger(__name__) def _thread_done(gt, *args, **kwargs): - """ Callback function to be passed to GreenThread.link() when we spawn() + """Callback function to be passed to GreenThread.link() when we spawn() Calls the :class:`ThreadGroup` to notify if. """ @@ -34,7 +32,7 @@ def _thread_done(gt, *args, **kwargs): class Thread(object): - """ Wrapper around a greenthread, that holds a reference to the + """Wrapper around a greenthread, that holds a reference to the :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when it has done so it can be removed from the threads list. 
""" @@ -48,9 +46,12 @@ class Thread(object): def wait(self): return self.thread.wait() + def link(self, func, *args, **kwargs): + self.thread.link(func, *args, **kwargs) + class ThreadGroup(object): - """ The point of the ThreadGroup classis to: + """The point of the ThreadGroup classis to: * keep track of timers and greenthreads (making it easier to stop them when need be). @@ -61,9 +62,16 @@ class ThreadGroup(object): self.threads = [] self.timers = [] + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + def add_timer(self, interval, callback, initial_delay=None, *args, **kwargs): - pulse = loopingcall.LoopingCall(callback, *args, **kwargs) + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) pulse.start(interval=interval, initial_delay=initial_delay) self.timers.append(pulse) @@ -72,13 +80,17 @@ class ThreadGroup(object): gt = self.pool.spawn(callback, *args, **kwargs) th = Thread(gt, self) self.threads.append(th) + return th def thread_done(self, thread): self.threads.remove(thread) def stop(self): current = greenthread.getcurrent() - for x in self.threads: + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: if x is current: # don't kill the current thread. continue @@ -98,17 +110,20 @@ class ThreadGroup(object): for x in self.timers: try: x.wait() - except greenlet.GreenletExit: + except eventlet.greenlet.GreenletExit: pass except Exception as ex: LOG.exception(ex) current = greenthread.getcurrent() - for x in self.threads: + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: if x is current: continue try: x.wait() - except greenlet.GreenletExit: + except eventlet.greenlet.GreenletExit: pass except Exception as ex: LOG.exception(ex) diff --git a/designate/openstack/common/timeutils.py b/designate/openstack/common/timeutils.py index 609436590..d5ed81d3e 100644 --- a/designate/openstack/common/timeutils.py +++ b/designate/openstack/common/timeutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -21,8 +19,10 @@ Time related utilities and helper functions. 
 import calendar
 import datetime
+import time
 
 import iso8601
+import six
 
 
 # ISO 8601 extended time format with microseconds
@@ -32,7 +32,7 @@ PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
 
 
 def isotime(at=None, subsecond=False):
-    """Stringify time in ISO 8601 format"""
+    """Stringify time in ISO 8601 format."""
     if not at:
         at = utcnow()
     st = at.strftime(_ISO8601_TIME_FORMAT
@@ -44,13 +44,13 @@ def isotime(at=None, subsecond=False):
 
 
 def parse_isotime(timestr):
-    """Parse time from ISO 8601 format"""
+    """Parse time from ISO 8601 format."""
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))
     except TypeError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))
 
 
 def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -66,7 +66,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
 
 
 def normalize_time(timestamp):
-    """Normalize time in arbitrary timezone to UTC naive object"""
+    """Normalize time in arbitrary timezone to UTC naive object."""
     offset = timestamp.utcoffset()
     if offset is None:
         return timestamp
@@ -75,20 +75,31 @@
 
 def is_older_than(before, seconds):
     """Return True if before is older than seconds."""
-    if isinstance(before, basestring):
+    if isinstance(before, six.string_types):
         before = parse_strtime(before).replace(tzinfo=None)
+    else:
+        before = before.replace(tzinfo=None)
+
     return utcnow() - before > datetime.timedelta(seconds=seconds)
 
 
 def is_newer_than(after, seconds):
     """Return True if after is newer than seconds."""
-    if isinstance(after, basestring):
+    if isinstance(after, six.string_types):
         after = parse_strtime(after).replace(tzinfo=None)
+    else:
+        after = after.replace(tzinfo=None)
+
     return after - utcnow() > datetime.timedelta(seconds=seconds)
 
 
 def utcnow_ts():
     """Timestamp version of our utcnow function."""
+    if utcnow.override_time is None:
+        # NOTE(kgriffs): This is several times faster
+        # than going through calendar.timegm(...)
+        return int(time.time())
+
     return calendar.timegm(utcnow().timetuple())
 
 
@@ -103,19 +114,22 @@
 
 
 def iso8601_from_timestamp(timestamp):
-    """Returns a iso8601 formated date from timestamp"""
+    """Returns an iso8601 formatted date from timestamp."""
     return isotime(datetime.datetime.utcfromtimestamp(timestamp))
 
 
 utcnow.override_time = None
 
 
-def set_time_override(override_time=datetime.datetime.utcnow()):
+def set_time_override(override_time=None):
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
+
+    :param override_time: datetime instance or list thereof. If not
+                          given, defaults to the current UTC time.
     """
-    Override utils.utcnow to return a constant time or a list thereof,
-    one at a time.
-    """
-    utcnow.override_time = override_time
+    utcnow.override_time = override_time or datetime.datetime.utcnow()
 
 
 def advance_time_delta(timedelta):
@@ -141,7 +155,8 @@ def clear_time_override():
 
 def marshall_now(now=None):
     """Make an rpc-safe datetime with microseconds.
 
-    Note: tzinfo is stripped, but not required for relative times."""
+    Note: tzinfo is stripped, but not required for relative times.
+    """
     if not now:
         now = utcnow()
     return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@@ -161,11 +176,21 @@
 
 
 def delta_seconds(before, after):
-    """
+    """Return the difference between two timing objects.
+
     Compute the difference in seconds between two date, time, or
     datetime objects (as a float, to microsecond resolution).
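+
+    Example (illustrative):
+
+    >>> delta_seconds(datetime.datetime(2014, 1, 1, 0, 0, 0),
+    ...               datetime.datetime(2014, 1, 1, 0, 0, 30))
+    30.0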
""" delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ try: return delta.total_seconds() except AttributeError: @@ -174,11 +199,10 @@ def delta_seconds(before, after): def is_soon(dt, window): - """ - Determines if time is going to happen in the next window seconds. + """Determines if time is going to happen in the next window seconds. - :params dt: the time - :params window: minimum seconds to remain to consider the time not soon + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon :return: True if expiration is within the given duration """ diff --git a/designate/openstack/common/uuidutils.py b/designate/openstack/common/uuidutils.py index 7608acb94..234b880c9 100644 --- a/designate/openstack/common/uuidutils.py +++ b/designate/openstack/common/uuidutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2012 Intel Corporation. # All Rights Reserved. # diff --git a/designate/openstack/common/versionutils.py b/designate/openstack/common/versionutils.py new file mode 100644 index 000000000..20b61db63 --- /dev/null +++ b/designate/openstack/common/versionutils.py @@ -0,0 +1,148 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helpers for comparing version strings. +""" + +import functools +import pkg_resources + +from designate.openstack.common.gettextutils import _ +from designate.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class deprecated(object): + """A decorator to mark callables as deprecated. + + This decorator logs a deprecation message when the callable it decorates is + used. The message will include the release where the callable was + deprecated, the release where it may be removed and possibly an optional + replacement. + + Examples: + + 1. Specifying the required deprecated release + + >>> @deprecated(as_of=deprecated.ICEHOUSE) + ... def a(): pass + + 2. Specifying a replacement: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()') + ... def b(): pass + + 3. Specifying the release where the functionality may be removed: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) + ... def c(): pass + + """ + + FOLSOM = 'F' + GRIZZLY = 'G' + HAVANA = 'H' + ICEHOUSE = 'I' + + _RELEASES = { + 'F': 'Folsom', + 'G': 'Grizzly', + 'H': 'Havana', + 'I': 'Icehouse', + } + + _deprecated_msg_with_alternative = _( + '%(what)s is deprecated as of %(as_of)s in favor of ' + '%(in_favor_of)s and may be removed in %(remove_in)s.') + + _deprecated_msg_no_alternative = _( + '%(what)s is deprecated as of %(as_of)s and may be ' + 'removed in %(remove_in)s. 
It will not be superseded.')
+
+    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+        """Initialize decorator
+
+        :param as_of: the release deprecating the callable. Constants
+            are defined in this class for convenience.
+        :param in_favor_of: the replacement for the callable (optional)
+        :param remove_in: an integer specifying how many releases to wait
+            before removing (default: 2)
+        :param what: name of the thing being deprecated (default: the
+            callable's name)
+
+        """
+        self.as_of = as_of
+        self.in_favor_of = in_favor_of
+        self.remove_in = remove_in
+        self.what = what
+
+    def __call__(self, func):
+        if not self.what:
+            self.what = func.__name__ + '()'
+
+        @functools.wraps(func)
+        def wrapped(*args, **kwargs):
+            msg, details = self._build_message()
+            LOG.deprecated(msg, details)
+            return func(*args, **kwargs)
+        return wrapped
+
+    def _get_safe_to_remove_release(self, release):
+        # TODO(dstanek): this method will have to be reimplemented once
+        # we get to the X release because once we get to the Y release,
+        # what is Y+2?
+        new_release = chr(ord(release) + self.remove_in)
+        if new_release in self._RELEASES:
+            return self._RELEASES[new_release]
+        else:
+            return new_release
+
+    def _build_message(self):
+        details = dict(what=self.what,
+                       as_of=self._RELEASES[self.as_of],
+                       remove_in=self._get_safe_to_remove_release(self.as_of))
+
+        if self.in_favor_of:
+            details['in_favor_of'] = self.in_favor_of
+            msg = self._deprecated_msg_with_alternative
+        else:
+            msg = self._deprecated_msg_no_alternative
+        return msg, details
+
+
+def is_compatible(requested_version, current_version, same_major=True):
+    """Determine whether `requested_version` is satisfied by
+    `current_version`; in other words, `current_version` is >=
+    `requested_version`.
+
+    :param requested_version: version to check for compatibility
+    :param current_version: version to check against
+    :param same_major: if True, the major version must be identical between
+        `requested_version` and `current_version`. This is used when a
+        major-version difference indicates incompatibility between the two
+        versions. Since this is the common case in practice, the default is
+        True.
+    :returns: True if compatible, False if not
+    """
+    requested_parts = pkg_resources.parse_version(requested_version)
+    current_parts = pkg_resources.parse_version(current_version)
+
+    if same_major and (requested_parts[0] != current_parts[0]):
+        return False
+
+    return current_parts >= requested_parts
diff --git a/designate/openstack/common/xmlutils.py b/designate/openstack/common/xmlutils.py
index 33700485b..1231a5902 100644
--- a/designate/openstack/common/xmlutils.py
+++ b/designate/openstack/common/xmlutils.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 IBM
+# Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License.
You may obtain diff --git a/designate/openstack/deprecated/__init__.py b/designate/openstack/deprecated/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/designate/openstack/common/exception.py b/designate/openstack/deprecated/exception.py similarity index 100% rename from designate/openstack/common/exception.py rename to designate/openstack/deprecated/exception.py diff --git a/designate/openstack/common/wsgi.py b/designate/openstack/deprecated/wsgi.py similarity index 99% rename from designate/openstack/common/wsgi.py rename to designate/openstack/deprecated/wsgi.py index 38a7c6e9d..c53424bd0 100644 --- a/designate/openstack/common/wsgi.py +++ b/designate/openstack/deprecated/wsgi.py @@ -35,13 +35,13 @@ import webob.exc from xml.dom import minidom from xml.parsers import expat -from designate.openstack.common import exception from designate.openstack.common.gettextutils import _ from designate.openstack.common import jsonutils from designate.openstack.common import log as logging from designate.openstack.common import service from designate.openstack.common import sslutils from designate.openstack.common import xmlutils +from designate.openstack.deprecated import exception socket_opts = [ cfg.IntOpt('backlog', diff --git a/designate/sink/service.py b/designate/sink/service.py index 6b566ec50..9bd9e26ae 100644 --- a/designate/sink/service.py +++ b/designate/sink/service.py @@ -61,10 +61,6 @@ class Service(service.Service): self._setup_subscriptions() self.rpc_conn.consume_in_thread() - def wait(self): - super(Service, self).wait() - self.rpc_conn.consumer_thread.wait() - def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway diff --git a/designate/wsgi.py b/designate/wsgi.py index 803f20895..3fdc41e96 100644 --- a/designate/wsgi.py +++ b/designate/wsgi.py @@ -13,7 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from designate.openstack.common import wsgi +from designate.openstack.deprecated import wsgi class Middleware(wsgi.Middleware): diff --git a/etc/designate/designate.conf.sample b/etc/designate/designate.conf.sample index 1c71a3f2f..df818bbf7 100644 --- a/etc/designate/designate.conf.sample +++ b/etc/designate/designate.conf.sample @@ -15,7 +15,7 @@ debug = False #logdir = /var/log/designate # Driver used for issuing notifications -#notification_driver = designate.openstack.common.notifier.rabbit_notifier +#notification_driver = designate.openstack.common.notifier.rpc_notifier # Use "sudo designate-rootwrap /etc/designate/rootwrap.conf" to use the real # root filter facility. 
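Reviewer note: the sample-config hunk above points notification_driver at the
renamed rpc_notifier module; configs still naming the old rabbit_notifier
driver should be updated to match. A minimal sketch of a caller under the new
name follows. The event type, payload, and context values are made up, and
the notify()/publisher_id() signatures shown are my understanding of the
classic oslo-incubator notifier API as synced into this tree, not anything
introduced by this patch:

    # Illustrative only -- values below are hypothetical.
    from oslo.config import cfg

    from designate.openstack.common import context
    from designate.openstack.common.notifier import api as notifier_api

    # Mirrors the new designate.conf.sample default shown above.
    cfg.CONF.set_override(
        'notification_driver',
        ['designate.openstack.common.notifier.rpc_notifier'])

    ctxt = context.get_admin_context()
    notifier_api.notify(ctxt,
                        notifier_api.publisher_id('central'),
                        'dns.domain.create',          # hypothetical event type
                        notifier_api.INFO,            # priority constant
                        {'domain': 'example.org.'})   # hypothetical payload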
diff --git a/openstack-common.conf b/openstack-common.conf index f62ce3e41..818fa05f5 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -2,7 +2,10 @@ # The list of modules to copy from oslo-incubator.git module=context +module=excutils +module=fixture module=gettextutils +module=importutils module=jsonutils module=local module=log @@ -12,21 +15,12 @@ module=processutils module=rootwrap module=rpc module=service +module=strutils module=test module=timeutils module=uuidutils -module=wsgi -# transitive dependencies -module=eventlet_backdoor -module=exception -module=excutils -module=importutils -module=loopingcall -module=network_utils -module=periodic_task -module=sslutils -module=threadgroup +# Modules needed for the deprecated oslo.wsgi we're still using module=xmlutils # The base module to hold the copy of openstack.common diff --git a/tox.ini b/tox.ini index 004e0feee..dbb3717ca 100644 --- a/tox.ini +++ b/tox.ini @@ -31,4 +31,4 @@ commands = {posargs} [flake8] ignore = H302,H306,H401,H402,H404 builtins = _ -exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools +exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*openstack/deprecated*,*lib/python*,*egg,build,tools
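For reviewers, a short usage sketch of the helpers this sync adds or tightens.
It assumes a tree with this patch applied and importable; the concrete values
and function names below are illustrative, not part of the patch:

    # Illustrative only -- exercises strutils, timeutils, and versionutils
    # as synced above.
    import datetime

    from designate.openstack.common import strutils
    from designate.openstack.common import timeutils
    from designate.openstack.common import versionutils

    # bool_from_string: strict=True now raises on unrecognized values
    # instead of silently returning False.
    assert strutils.bool_from_string('yes') is True
    try:
        strutils.bool_from_string('maybe', strict=True)
    except ValueError:
        pass  # "Unrecognized value 'maybe', acceptable values are: ..."

    # to_bytes: suffix-aware size parsing (case-insensitive B/K(B)/M(B)/...).
    assert strutils.to_bytes('10KB') == 10 * 1024

    # timeutils: set_time_override() now defaults to the *current* UTC time
    # rather than a constant captured at import time.
    timeutils.set_time_override()
    t0 = timeutils.utcnow()
    timeutils.advance_time_delta(datetime.timedelta(seconds=30))
    assert timeutils.delta_seconds(t0, timeutils.utcnow()) == 30.0
    timeutils.clear_time_override()

    # versionutils.deprecated: calling the wrapped function logs a
    # deprecation message built from the release constants.
    @versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE,
                             in_favor_of='new_helper()')
    def old_helper():
        pass

    old_helper()
    assert versionutils.is_compatible('2.0', '2.1')  # same major, newer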