diff --git a/manila/api/extensions.py b/manila/api/extensions.py
index 87af0cbf1f..2348f5aff1 100644
--- a/manila/api/extensions.py
+++ b/manila/api/extensions.py
@@ -26,7 +26,6 @@
 from manila.api.openstack import wsgi
 from manila.api import xmlutil
 from manila import exception
-from manila.openstack.common import exception as common_exception
 from manila.openstack.common import importutils
 from manila.openstack.common import log as logging
 import manila.policy
@@ -373,7 +372,7 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
                             (package, relpkg, dname))
                 try:
                     ext = importutils.import_class(ext_name)
-                except common_exception.NotFound:
+                except ImportError:
                     # extension() doesn't exist on it, so we'll explore
                     # the directory for ourselves
                     subdirs.append(dname)
diff --git a/manila/openstack/common/README b/manila/openstack/common/README
index def4a172aa..0700c72bb9 100644
--- a/manila/openstack/common/README
+++ b/manila/openstack/common/README
@@ -6,7 +6,7 @@ A number of modules from openstack-common are imported into this project.
 These modules are "incubating" in openstack-common and are kept in sync
 with the help of openstack-common's update.py script. See:
 
-  http://wiki.openstack.org/CommonLibrary#Incubation
+  https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator
 
 The copy of the code should never be directly modified here. Please
 always update openstack-common first and then run the script to copy
diff --git a/manila/openstack/common/__init__.py b/manila/openstack/common/__init__.py
index 0a3b98867a..d1223eaf76 100644
--- a/manila/openstack/common/__init__.py
+++ b/manila/openstack/common/__init__.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -13,3 +10,8 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
+import six
+
+
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
diff --git a/manila/openstack/common/context.py b/manila/openstack/common/context.py
index e9cfd73cc1..3eeb445e48 100644
--- a/manila/openstack/common/context.py
+++ b/manila/openstack/common/context.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -27,39 +25,60 @@ import uuid
 
 
 def generate_request_id():
-    return 'req-' + str(uuid.uuid4())
+    return b'req-' + str(uuid.uuid4()).encode('ascii')
 
 
 class RequestContext(object):
 
-    """
+    """Helper class to represent useful information about a request context.
+
     Stores information about the security context under which the user
     accesses the system, as well as additional request information.
     """
 
-    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
-                 read_only=False, show_deleted=False, request_id=None):
+    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
+
+    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
+                 user_domain=None, project_domain=None, is_admin=False,
+                 read_only=False, show_deleted=False, request_id=None,
+                 instance_uuid=None):
         self.auth_token = auth_token
         self.user = user
         self.tenant = tenant
+        self.domain = domain
+        self.user_domain = user_domain
+        self.project_domain = project_domain
         self.is_admin = is_admin
         self.read_only = read_only
         self.show_deleted = show_deleted
+        self.instance_uuid = instance_uuid
         if not request_id:
             request_id = generate_request_id()
         self.request_id = request_id
 
     def to_dict(self):
+        user_idt = (
+            self.user_idt_format.format(user=self.user or '-',
+                                        tenant=self.tenant or '-',
+                                        domain=self.domain or '-',
+                                        user_domain=self.user_domain or '-',
+                                        p_domain=self.project_domain or '-'))
+
         return {'user': self.user,
                 'tenant': self.tenant,
+                'domain': self.domain,
+                'user_domain': self.user_domain,
+                'project_domain': self.project_domain,
                 'is_admin': self.is_admin,
                 'read_only': self.read_only,
                 'show_deleted': self.show_deleted,
                 'auth_token': self.auth_token,
-                'request_id': self.request_id}
+                'request_id': self.request_id,
+                'instance_uuid': self.instance_uuid,
+                'user_identity': user_idt}
 
 
-def get_admin_context(show_deleted="no"):
+def get_admin_context(show_deleted=False):
     context = RequestContext(None,
                              tenant=None,
                              is_admin=True,
@@ -79,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs):
             return arg
 
     return None
+
+
+def is_user_context(context):
+    """Indicates if the request context is a normal user."""
+    if not context:
+        return False
+    if context.is_admin:
+        return False
+    if not context.user_id or not context.project_id:
+        return False
+    return True
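
[Editor's note: the following usage sketch is an editorial addition, not part of the commit. It exercises the new identity fields on RequestContext above; all values are made-up placeholders.]

    from manila.openstack.common import context

    ctxt = context.RequestContext(user='u1', tenant='t1', domain='d1',
                                  user_domain='ud1', project_domain='pd1')
    info = ctxt.to_dict()
    # The new 'user_identity' key renders the five identity fields through
    # the '{user} {tenant} {domain} {user_domain} {p_domain}' format string.
    print(info['user_identity'])   # "u1 t1 d1 ud1 pd1"
    print(info['request_id'])      # now bytes, per generate_request_id()
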
""" - def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, - read_only=False, show_deleted=False, request_id=None): + user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}' + + def __init__(self, auth_token=None, user=None, tenant=None, domain=None, + user_domain=None, project_domain=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None, + instance_uuid=None): self.auth_token = auth_token self.user = user self.tenant = tenant + self.domain = domain + self.user_domain = user_domain + self.project_domain = project_domain self.is_admin = is_admin self.read_only = read_only self.show_deleted = show_deleted + self.instance_uuid = instance_uuid if not request_id: request_id = generate_request_id() self.request_id = request_id def to_dict(self): + user_idt = ( + self.user_idt_format.format(user=self.user or '-', + tenant=self.tenant or '-', + domain=self.domain or '-', + user_domain=self.user_domain or '-', + p_domain=self.project_domain or '-')) + return {'user': self.user, 'tenant': self.tenant, + 'domain': self.domain, + 'user_domain': self.user_domain, + 'project_domain': self.project_domain, 'is_admin': self.is_admin, 'read_only': self.read_only, 'show_deleted': self.show_deleted, 'auth_token': self.auth_token, - 'request_id': self.request_id} + 'request_id': self.request_id, + 'instance_uuid': self.instance_uuid, + 'user_identity': user_idt} -def get_admin_context(show_deleted="no"): +def get_admin_context(show_deleted=False): context = RequestContext(None, tenant=None, is_admin=True, @@ -79,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs): return arg return None + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True diff --git a/manila/openstack/common/eventlet_backdoor.py b/manila/openstack/common/eventlet_backdoor.py index c0ad460fe6..3a648a81de 100644 --- a/manila/openstack/common/eventlet_backdoor.py +++ b/manila/openstack/common/eventlet_backdoor.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2012 OpenStack Foundation. # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -16,8 +14,13 @@ # License for the specific language governing permissions and limitations # under the License. +from __future__ import print_function + +import errno import gc +import os import pprint +import socket import sys import traceback @@ -26,36 +29,82 @@ import eventlet.backdoor import greenlet from oslo.config import cfg +from manila.openstack.common.gettextutils import _LI +from manila.openstack.common import log as logging + +help_for_backdoor_port = ( + "Acceptable values are 0, , and :, where 0 results " + "in listening on a random tcp port number; results in listening " + "on the specified port number (and not enabling backdoor if that port " + "is in use); and : results in listening on the smallest " + "unused port number within the specified range of port numbers. The " + "chosen port is displayed in the service's log file.") eventlet_backdoor_opts = [ - cfg.IntOpt('backdoor_port', - default=None, - help='port for eventlet backdoor to listen') + cfg.StrOpt('backdoor_port', + help="Enable eventlet backdoor. 
%s" % help_for_backdoor_port) ] CONF = cfg.CONF CONF.register_opts(eventlet_backdoor_opts) +LOG = logging.getLogger(__name__) + + +class EventletBackdoorConfigValueError(Exception): + def __init__(self, port_range, help_msg, ex): + msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. ' + '%(help)s' % + {'range': port_range, 'ex': ex, 'help': help_msg}) + super(EventletBackdoorConfigValueError, self).__init__(msg) + self.port_range = port_range def _dont_use_this(): - print "Don't use this, just disconnect instead" + print("Don't use this, just disconnect instead") def _find_objects(t): - return filter(lambda o: isinstance(o, t), gc.get_objects()) + return [o for o in gc.get_objects() if isinstance(o, t)] def _print_greenthreads(): for i, gt in enumerate(_find_objects(greenlet.greenlet)): - print i, gt + print(i, gt) traceback.print_stack(gt.gr_frame) - print + print() def _print_nativethreads(): for threadId, stack in sys._current_frames().items(): - print threadId + print(threadId) traceback.print_stack(stack) - print + print() + + +def _parse_port_range(port_range): + if ':' not in port_range: + start, end = port_range, port_range + else: + start, end = port_range.split(':', 1) + try: + start, end = int(start), int(end) + if end < start: + raise ValueError + return start, end + except ValueError as ex: + raise EventletBackdoorConfigValueError(port_range, ex, + help_for_backdoor_port) + + +def _listen(host, start_port, end_port, listen_func): + try_port = start_port + while True: + try: + return listen_func((host, try_port)) + except socket.error as exc: + if (exc.errno != errno.EADDRINUSE or + try_port >= end_port): + raise + try_port += 1 def initialize_if_enabled(): @@ -70,6 +119,8 @@ def initialize_if_enabled(): if CONF.backdoor_port is None: return None + start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) + # NOTE(johannes): The standard sys.displayhook will print the value of # the last expression and set it to __builtin__._, which overwrites # the __builtin__._ that gettext sets. Let's switch to using pprint @@ -80,8 +131,15 @@ def initialize_if_enabled(): pprint.pprint(val) sys.displayhook = displayhook - sock = eventlet.listen(('localhost', CONF.backdoor_port)) + sock = _listen('localhost', start_port, end_port, eventlet.listen) + + # In the case of backdoor port being zero, a port number is assigned by + # listen(). In any case, pull the port number out here. port = sock.getsockname()[1] + LOG.info( + _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()} + ) eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, locals=backdoor_locals) return port diff --git a/manila/openstack/common/exception.py b/manila/openstack/common/exception.py deleted file mode 100644 index 8ea271096a..0000000000 --- a/manila/openstack/common/exception.py +++ /dev/null @@ -1,142 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/manila/openstack/common/excutils.py b/manila/openstack/common/excutils.py
index 9c4e8b9060..ce035ae1b7 100644
--- a/manila/openstack/common/excutils.py
+++ b/manila/openstack/common/excutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2012, Red Hat, Inc.
 #
@@ -19,16 +17,17 @@ Exception related utilities.
 """
 
-import contextlib
 import logging
 import sys
+import time
 import traceback
 
-from manila.openstack.common.gettextutils import _
+import six
+
+from manila.openstack.common.gettextutils import _LE
 
 
-@contextlib.contextmanager
-def save_and_reraise_exception():
+class save_and_reraise_exception(object):
     """Save current exception, run some code and then re-raise.
 
     In some cases the exception context can be cleared, resulting in None
@@ -40,12 +39,75 @@ def save_and_reraise_exception():
     To work around this, we save the exception state, run handler code, and
     then re-raise the original exception. If another exception occurs, the
     saved exception is logged and the new exception is re-raised.
+
+    In some cases the caller may not want to re-raise the exception, and
+    for those circumstances this context provides a reraise flag that
+    can be used to suppress the exception. For example::
+
+      except Exception:
+          with save_and_reraise_exception() as ctxt:
+              decide_if_need_reraise()
+              if not should_be_reraised:
+                  ctxt.reraise = False
+
+    If another exception occurs and reraise flag is False,
+    the saved exception will not be logged.
+
+    If the caller wants to raise new exception during exception handling
+    he/she sets reraise to False initially with an ability to set it back to
+    True if needed::
+
+      except Exception:
+          with save_and_reraise_exception(reraise=False) as ctxt:
+              [if statements to determine whether to raise a new exception]
+              # Not raising a new exception, so reraise
+              ctxt.reraise = True
     """
-    type_, value, tb = sys.exc_info()
-    try:
-        yield
-    except Exception:
-        logging.error(_('Original exception being dropped: %s'),
-                      traceback.format_exception(type_, value, tb))
-        raise
-    raise type_, value, tb
+    def __init__(self, reraise=True):
+        self.reraise = reraise
+
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            if self.reraise:
+                logging.error(_LE('Original exception being dropped: %s'),
+                              traceback.format_exception(self.type_,
+                                                         self.value,
+                                                         self.tb))
+            return False
+        if self.reraise:
+            six.reraise(self.type_, self.value, self.tb)
+
+
+def forever_retry_uncaught_exceptions(infunc):
+    def inner_func(*args, **kwargs):
+        last_log_time = 0
+        last_exc_message = None
+        exc_count = 0
+        while True:
+            try:
+                return infunc(*args, **kwargs)
+            except Exception as exc:
+                this_exc_message = six.u(str(exc))
+                if this_exc_message == last_exc_message:
+                    exc_count += 1
+                else:
+                    exc_count = 1
+                # Do not log any more frequently than once a minute unless
+                # the exception message changes
+                cur_time = int(time.time())
+                if (cur_time - last_log_time > 60 or
+                        this_exc_message != last_exc_message):
+                    logging.exception(
+                        _LE('Unexpected exception occurred %d time(s)... '
+                            'retrying.') % exc_count)
+                    last_log_time = cur_time
+                    last_exc_message = this_exc_message
+                    exc_count = 0
+                # This should be a very rare event. In case it isn't, do
+                # a sleep.
+                time.sleep(1)
+    return inner_func
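
[Editor's note: editorial sketch, not part of the commit, showing the new class-based save_and_reraise_exception from a caller's side. The write_record function and its arguments are hypothetical.]

    import logging

    from manila.openstack.common import excutils

    def write_record(path, data, swallow_errors=False):
        try:
            with open(path, 'w') as f:
                f.write(data)
        except Exception:
            # Cleanup runs while the original traceback is preserved; if
            # the cleanup itself raises, the saved exception is logged and
            # the new one propagates.
            with excutils.save_and_reraise_exception() as ctxt:
                logging.warning('write failed, cleaning up %s', path)
                if swallow_errors:
                    ctxt.reraise = False  # suppress the saved exception
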
diff --git a/manila/openstack/common/fileutils.py b/manila/openstack/common/fileutils.py
index b988ad03d5..34d7efbee3 100644
--- a/manila/openstack/common/fileutils.py
+++ b/manila/openstack/common/fileutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -15,9 +13,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-
+import contextlib
 import errno
 import os
+import tempfile
+
+from manila.openstack.common import excutils
+from manila.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
 
 
 def ensure_tree(path):
@@ -33,3 +39,97 @@ def ensure_tree(path):
                 raise
         else:
             raise
+
+
+def read_cached_file(filename, force_reload=False):
+    """Read from a file if it has been modified.
+
+    :param force_reload: Whether to reload the file.
+    :returns: A tuple with a boolean specifying if the data is fresh
+              or not.
+    """
+    global _FILE_CACHE
+
+    if force_reload and filename in _FILE_CACHE:
+        del _FILE_CACHE[filename]
+
+    reloaded = False
+    mtime = os.path.getmtime(filename)
+    cache_info = _FILE_CACHE.setdefault(filename, {})
+
+    if not cache_info or mtime > cache_info.get('mtime', 0):
+        LOG.debug("Reloading cached file %s" % filename)
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        reloaded = True
+    return (reloaded, cache_info['data'])
+
+
+def delete_if_exists(path, remove=os.unlink):
+    """Delete a file, but ignore file not found error.
+
+    :param path: File to delete
+    :param remove: Optional function to remove passed path
+    """
+
+    try:
+        remove(path)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path, remove=delete_if_exists):
+    """Protect code that wants to operate on PATH atomically.
+    Any exception will cause PATH to be removed.
+
+    :param path: File to work with
+    :param remove: Optional function to remove passed path
+    """
+
+    try:
+        yield
+    except Exception:
+        with excutils.save_and_reraise_exception():
+            remove(path)
+
+
+def file_open(*args, **kwargs):
+    """Open file
+
+    see built-in file() documentation for more details
+
+    Note: The reason this is kept in a separate module is to easily
+    be able to provide a stub module that doesn't alter system
+    state at all (for unit tests)
+    """
+    return open(*args, **kwargs)
+
+
+def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
+    """Create temporary file or use existing file.
+
+    This util is needed for creating temporary file with
+    specified content, suffix and prefix. If path is not None,
+    it will be used for writing content. If the path doesn't
+    exist it'll be created.
+
+    :param content: content for temporary file.
+    :param path: same as parameter 'dir' for mkstemp
+    :param suffix: same as parameter 'suffix' for mkstemp
+    :param prefix: same as parameter 'prefix' for mkstemp
+
+    For example: it can be used in database tests for creating
+    configuration files.
+    """
+    if path:
+        ensure_tree(path)
+
+    (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
+    try:
+        os.write(fd, content)
+    finally:
+        os.close(fd)
+    return path
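
[Editor's note: editorial sketch, not part of the commit, combining the new fileutils helpers. The file content is an arbitrary example.]

    from manila.openstack.common import fileutils

    path = fileutils.write_to_tempfile(b'[DEFAULT]\nverbose = True\n',
                                       suffix='.conf')
    with fileutils.remove_path_on_error(path):
        # First read populates the module-level cache; the boolean in the
        # returned tuple flags whether the data was (re)loaded from disk.
        fresh, data = fileutils.read_cached_file(path)
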
diff --git a/manila/openstack/common/gettextutils.py b/manila/openstack/common/gettextutils.py
index eddf7b8902..9a6be15574 100644
--- a/manila/openstack/common/gettextutils.py
+++ b/manila/openstack/common/gettextutils.py
@@ -1,6 +1,5 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
+# Copyright 2013 IBM Corp.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,18 +22,144 @@ Usual usage in an openstack.common module:
 
     from manila.openstack.common.gettextutils import _
 """
 
+import copy
+import functools
 import gettext
+import locale
+from logging import handlers
 import os
 
-_localedir = os.environ.get('manila'.upper() + '_LOCALEDIR')
-_t = gettext.translation('manila', localedir=_localedir, fallback=True)
+from babel import localedata
+import six
 
+_AVAILABLE_LANGUAGES = {}
+
+# FIXME(dhellmann): Remove this when moving to oslo.i18n.
+USE_LAZY = False
 
 
-def _(msg):
-    return _t.ugettext(msg)
+class TranslatorFactory(object):
+    """Create translator functions
+    """
+
+    def __init__(self, domain, lazy=False, localedir=None):
+        """Establish a set of translation functions for the domain.
+
+        :param domain: Name of translation domain,
+                       specifying a message catalog.
+        :type domain: str
+        :param lazy: Delays translation until a message is emitted.
+                     Defaults to False.
+        :type lazy: Boolean
+        :param localedir: Directory with translation catalogs.
+        :type localedir: str
+        """
+        self.domain = domain
+        self.lazy = lazy
+        if localedir is None:
+            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
+        self.localedir = localedir
+
+    def _make_translation_func(self, domain=None):
+        """Return a new translation function ready for use.
+
+        Takes into account whether or not lazy translation is being
+        done.
+
+        The domain can be specified to override the default from the
+        factory, but the localedir from the factory is always used
+        because we assume the log-level translation catalogs are
+        installed in the same directory as the main application
+        catalog.
+
+        """
+        if domain is None:
+            domain = self.domain
+        if self.lazy:
+            return functools.partial(Message, domain=domain)
+        t = gettext.translation(
+            domain,
+            localedir=self.localedir,
+            fallback=True,
+        )
+        if six.PY3:
+            return t.gettext
+        return t.ugettext
+
+    @property
+    def primary(self):
+        "The default translation function."
+        return self._make_translation_func()
+
+    def _make_log_translation_func(self, level):
+        return self._make_translation_func(self.domain + '-log-' + level)
+
+    @property
+    def log_info(self):
+        "Translate info-level log messages."
+        return self._make_log_translation_func('info')
+
+    @property
+    def log_warning(self):
+        "Translate warning-level log messages."
+        return self._make_log_translation_func('warning')
+
+    @property
+    def log_error(self):
+        "Translate error-level log messages."
+        return self._make_log_translation_func('error')
+
+    @property
+    def log_critical(self):
+        "Translate critical-level log messages."
+        return self._make_log_translation_func('critical')
+
+
+# NOTE(dhellmann): When this module moves out of the incubator into
+# oslo.i18n, these global variables can be moved to an integration
+# module within each application.
+
+# Create the global translation functions.
+_translators = TranslatorFactory('manila')
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
+
+# NOTE(dhellmann): End of globals that will move to the application's
+# integration module.
+
+
+def enable_lazy():
+    """Convenience function for configuring _() to use lazy gettext
+
+    Call this at the start of execution to enable the gettextutils._
+    function to use lazy gettext functionality. This is useful if
+    your project is importing _ directly instead of using the
+    gettextutils.install() way of importing the _ function.
+    """
+    # FIXME(dhellmann): This function will be removed in oslo.i18n,
+    # because the TranslatorFactory makes it superfluous.
+    global _, _LI, _LW, _LE, _LC, USE_LAZY
+    tf = TranslatorFactory('manila', lazy=True)
+    _ = tf.primary
+    _LI = tf.log_info
+    _LW = tf.log_warning
+    _LE = tf.log_error
+    _LC = tf.log_critical
+    USE_LAZY = True
+
+
-def install(domain):
+def install(domain, lazy=False):
     """Install a _() function using the given translation domain.
 
     Given a translation domain, install a _() function using gettext's
@@ -44,7 +169,330 @@ def install(domain):
     overriding the default localedir (e.g. /usr/share/locale) using
     a translation-domain-specific environment variable (e.g.
     NOVA_LOCALEDIR).
+
+    :param domain: the translation domain
+    :param lazy: indicates whether or not to install the lazy _() function.
+                 The lazy _() introduces a way to do deferred translation
+                 of messages by installing a _ that builds Message objects,
+                 instead of strings, which can then be lazily translated into
+                 any available locale.
     """
-    gettext.install(domain,
-                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
-                    unicode=True)
+    if lazy:
+        from six import moves
+        tf = TranslatorFactory(domain, lazy=True)
+        moves.builtins.__dict__['_'] = tf.primary
+    else:
+        localedir = '%s_LOCALEDIR' % domain.upper()
+        if six.PY3:
+            gettext.install(domain,
+                            localedir=os.environ.get(localedir))
+        else:
+            gettext.install(domain,
+                            localedir=os.environ.get(localedir),
+                            unicode=True)
+
+
+class Message(six.text_type):
+    """A Message object is a unicode object that can be translated.
+
+    Translation of Message is done explicitly using the translate() method.
+    For all non-translation intents and purposes, a Message is simply unicode,
+    and can be treated as such.
+    """
+
+    def __new__(cls, msgid, msgtext=None, params=None,
+                domain='manila', *args):
+        """Create a new Message object.
+
+        In order for translation to work gettext requires a message ID, this
+        msgid will be used as the base unicode text. It is also possible
+        for the msgid and the base unicode text to be different by passing
+        the msgtext parameter.
+        """
+        # If the base msgtext is not given, we use the default translation
+        # of the msgid (which is in English) just in case the system locale is
+        # not English, so that the base text will be in that locale by default.
+        if not msgtext:
+            msgtext = Message._translate_msgid(msgid, domain)
+        # We want to initialize the parent unicode with the actual object that
+        # would have been plain unicode if 'Message' was not enabled.
+        msg = super(Message, cls).__new__(cls, msgtext)
+        msg.msgid = msgid
+        msg.domain = domain
+        msg.params = params
+        return msg
+
+    def translate(self, desired_locale=None):
+        """Translate this message to the desired locale.
+
+        :param desired_locale: The desired locale to translate the message to,
+                               if no locale is provided the message will be
+                               translated to the system's default locale.
+
+        :returns: the translated message in unicode
+        """
+
+        translated_message = Message._translate_msgid(self.msgid,
+                                                      self.domain,
+                                                      desired_locale)
+        if self.params is None:
+            # No need for more translation
+            return translated_message
+
+        # This Message object may have been formatted with one or more
+        # Message objects as substitution arguments, given either as a single
+        # argument, part of a tuple, or as one or more values in a dictionary.
+        # When translating this Message we need to translate those Messages too
+        translated_params = _translate_args(self.params, desired_locale)
+
+        translated_message = translated_message % translated_params
+
+        return translated_message
+
+    @staticmethod
+    def _translate_msgid(msgid, domain, desired_locale=None):
+        if not desired_locale:
+            system_locale = locale.getdefaultlocale()
+            # If the system locale is not available to the runtime use English
+            if not system_locale[0]:
+                desired_locale = 'en_US'
+            else:
+                desired_locale = system_locale[0]
+
+        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
+        lang = gettext.translation(domain,
+                                   localedir=locale_dir,
+                                   languages=[desired_locale],
+                                   fallback=True)
+        if six.PY3:
+            translator = lang.gettext
+        else:
+            translator = lang.ugettext
+
+        translated_message = translator(msgid)
+        return translated_message
+
+    def __mod__(self, other):
+        # When we mod a Message we want the actual operation to be performed
+        # by the parent class (i.e. unicode()), the only thing we do here is
+        # save the original msgid and the parameters in case of a translation
+        params = self._sanitize_mod_params(other)
+        unicode_mod = super(Message, self).__mod__(params)
+        modded = Message(self.msgid,
+                         msgtext=unicode_mod,
+                         params=params,
+                         domain=self.domain)
+        return modded
+
+    def _sanitize_mod_params(self, other):
+        """Sanitize the object being modded with this Message.
+
+        - Add support for modding 'None' so translation supports it
+        - Trim the modded object, which can be a large dictionary, to only
+          those keys that would actually be used in a translation
+        - Snapshot the object being modded, in case the message is
+          translated, it will be used as it was when the Message was created
+        """
+        if other is None:
+            params = (other,)
+        elif isinstance(other, dict):
+            # Merge the dictionaries
+            # Copy each item in case one does not support deep copy.
+            params = {}
+            if isinstance(self.params, dict):
+                for key, val in self.params.items():
+                    params[key] = self._copy_param(val)
+            for key, val in other.items():
+                params[key] = self._copy_param(val)
+        else:
+            params = self._copy_param(other)
+        return params
+
+    def _copy_param(self, param):
+        try:
+            return copy.deepcopy(param)
+        except Exception:
+            # Fallback to casting to unicode this will handle the
+            # python code-like objects that can't be deep-copied
+            return six.text_type(param)
+
+    def __add__(self, other):
+        msg = _('Message objects do not support addition.')
+        raise TypeError(msg)
+
+    def __radd__(self, other):
+        return self.__add__(other)
+
+    if six.PY2:
+        def __str__(self):
+            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+            # and it expects specifically a UnicodeError in order to proceed.
+            msg = _('Message objects do not support str() because they may '
+                    'contain non-ascii characters. '
+                    'Please use unicode() or translate() instead.')
+            raise UnicodeError(msg)
+
+
+def get_available_languages(domain):
+    """Lists the available languages for the given translation domain.
+
+    :param domain: the domain to get languages for
+    """
+    if domain in _AVAILABLE_LANGUAGES:
+        return copy.copy(_AVAILABLE_LANGUAGES[domain])
+
+    localedir = '%s_LOCALEDIR' % domain.upper()
+    find = lambda x: gettext.find(domain,
+                                  localedir=os.environ.get(localedir),
+                                  languages=[x])
+
+    # NOTE(mrodden): en_US should always be available (and first in case
+    # order matters) since our in-line message strings are en_US
+    language_list = ['en_US']
+    # NOTE(luisg): Babel <1.0 used a function called list(), which was
+    # renamed to locale_identifiers() in >=1.0, the requirements master list
+    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
+    # this check when the master list updates to >=1.0, and update all projects
+    list_identifiers = (getattr(localedata, 'list', None) or
+                        getattr(localedata, 'locale_identifiers'))
+    locale_identifiers = list_identifiers()
+
+    for i in locale_identifiers:
+        if find(i) is not None:
+            language_list.append(i)
+
+    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
+    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
+    # are perfectly legitimate locales:
+    #     https://github.com/mitsuhiko/babel/issues/37
+    # In Babel 1.3 they fixed the bug and they support these locales, but
+    # they are still not explicitly "listed" by locale_identifiers().
+    # That is why we add the locales here explicitly if necessary so that
+    # they are listed as supported.
+    aliases = {'zh': 'zh_CN',
+               'zh_Hant_HK': 'zh_HK',
+               'zh_Hant': 'zh_TW',
+               'fil': 'tl_PH'}
+    for (locale, alias) in six.iteritems(aliases):
+        if locale in language_list and alias not in language_list:
+            language_list.append(alias)
+
+    _AVAILABLE_LANGUAGES[domain] = language_list
+    return copy.copy(language_list)
+
+
+def translate(obj, desired_locale=None):
+    """Gets the translated unicode representation of the given object.
+
+    If the object is not translatable it is returned as-is.
+    If the locale is None the object is translated to the system locale.
+
+    :param obj: the object to translate
+    :param desired_locale: the locale to translate the message to, if None the
+                           default system locale will be used
+    :returns: the translated object in unicode, or the original object if
+              it could not be translated
+    """
+    message = obj
+    if not isinstance(message, Message):
+        # If the object to translate is not already translatable,
+        # let's first get its unicode representation
+        message = six.text_type(obj)
+    if isinstance(message, Message):
+        # Even after unicoding() we still need to check if we are
+        # running with translatable unicode before translating
+        return message.translate(desired_locale)
+    return obj
+
+
+def _translate_args(args, desired_locale=None):
+    """Translates all the translatable elements of the given arguments object.
+
+    This method is used for translating the translatable values in method
+    arguments which include values of tuples or dictionaries.
+    If the object is not a tuple or a dictionary the object itself is
+    translated if it is translatable.
+
+    If the locale is None the object is translated to the system locale.
+
+    :param args: the args to translate
+    :param desired_locale: the locale to translate the args to, if None the
+                           default system locale will be used
+    :returns: a new args object with the translated contents of the original
+    """
+    if isinstance(args, tuple):
+        return tuple(translate(v, desired_locale) for v in args)
+    if isinstance(args, dict):
+        translated_dict = {}
+        for (k, v) in six.iteritems(args):
+            translated_v = translate(v, desired_locale)
+            translated_dict[k] = translated_v
+        return translated_dict
+    return translate(args, desired_locale)
+
+
+class TranslationHandler(handlers.MemoryHandler):
+    """Handler that translates records before logging them.
+
+    The TranslationHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating them. This handler
+    depends on Message objects being logged, instead of regular strings.
+
+    The handler can be configured declaratively in the logging.conf as follows:
+
+        [handlers]
+        keys = translatedlog, translator
+
+        [handler_translatedlog]
+        class = handlers.WatchedFileHandler
+        args = ('/var/log/api-localized.log',)
+        formatter = context
+
+        [handler_translator]
+        class = openstack.common.log.TranslationHandler
+        target = translatedlog
+        args = ('zh_CN',)
+
+    If the specified locale is not available in the system, the handler will
+    log in the default locale.
+    """
+
+    def __init__(self, locale=None, target=None):
+        """Initialize a TranslationHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        # NOTE(luisg): In order to allow this handler to be a wrapper for
+        # other handlers, such as a FileHandler, and still be able to
+        # configure it using logging.conf, this handler has to extend
+        # MemoryHandler because only the MemoryHandlers' logging.conf
+        # parsing is implemented such that it accepts a target handler.
+        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
+        self.locale = locale
+
+    def setFormatter(self, fmt):
+        self.target.setFormatter(fmt)
+
+    def emit(self, record):
+        # We save the message from the original record to restore it
+        # after translation, so other handlers are not affected by this
+        original_msg = record.msg
+        original_args = record.args
+
+        try:
+            self._translate_and_log_record(record)
+        finally:
+            record.msg = original_msg
+            record.args = original_args
+
+    def _translate_and_log_record(self, record):
+        record.msg = translate(record.msg, self.locale)
+
+        # In addition to translating the message, we also need to translate
+        # arguments that were passed to the log method that were not part
+        # of the main message e.g., log.info(_('Some message %s'), this_one))
+        record.args = _translate_args(record.args, self.locale)
+
+        self.target.emit(record)
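
[Editor's note: editorial sketch, not part of the commit. The new TranslatorFactory wires '_' to the 'manila' catalog and the _LI/_LW/_LE/_LC helpers to per-level catalogs such as 'manila-log-error'.]

    from manila.openstack.common.gettextutils import _, _LE, enable_lazy

    msg = _('share is busy')              # main 'manila' catalog
    log_msg = _LE('failed to attach %s')  # 'manila-log-error' catalog
    # After enable_lazy(), both helpers return Message objects whose
    # translation is deferred until translate() runs, for example inside
    # the TranslationHandler defined above.
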
diff --git a/manila/openstack/common/importutils.py b/manila/openstack/common/importutils.py
index 3bd277f47e..1cc1dd541a 100644
--- a/manila/openstack/common/importutils.py
+++ b/manila/openstack/common/importutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -24,12 +22,12 @@ import traceback
 
 
 def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
     mod_str, _sep, class_str = import_str.rpartition('.')
+    __import__(mod_str)
     try:
-        __import__(mod_str)
         return getattr(sys.modules[mod_str], class_str)
-    except (ValueError, AttributeError):
+    except AttributeError:
         raise ImportError('Class %s cannot be found (%s)' %
                           (class_str,
                            traceback.format_exception(*sys.exc_info())))
@@ -41,8 +39,9 @@ def import_object(import_str, *args, **kwargs):
 
 
 def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
     to find the class in a default namespace, then failing back to
     a full path if not found in the default namespace.
     """
@@ -59,6 +58,13 @@ def import_module(import_str):
     return sys.modules[import_str]
 
 
+def import_versioned_module(version, submodule=None):
+    module = 'manila.v%s' % version
+    if submodule:
+        module = '.'.join((module, submodule))
+    return import_module(module)
+
+
 def try_import(import_str, default=None):
     """Try to import a module and if it fails return default."""
     try:
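
[Editor's note: editorial sketch, not part of the commit. try_import keeps optional dependencies soft; jsonutils below uses exactly this pattern for netaddr.]

    from manila.openstack.common import importutils

    netaddr = importutils.try_import('netaddr')
    if netaddr:
        ip = netaddr.IPAddress('192.0.2.1')
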
diff --git a/manila/openstack/common/jsonutils.py b/manila/openstack/common/jsonutils.py
index ad45f715c9..1c7ea15ae0 100644
--- a/manila/openstack/common/jsonutils.py
+++ b/manila/openstack/common/jsonutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -33,16 +31,32 @@ This module provides a few things:
 
 '''
 
+import codecs
 import datetime
 import functools
 import inspect
 import itertools
-import json
-import types
-import xmlrpclib
+import sys
+
+if sys.version_info < (2, 7):
+    # On Python <= 2.6, json module is not C boosted, so try to use
+    # simplejson module if available
+    try:
+        import simplejson as json
+    except ImportError:
+        import json
+else:
+    import json
+
+import six
+import six.moves.xmlrpc_client as xmlrpclib
+
+from manila.openstack.common import gettextutils
+from manila.openstack.common import importutils
+from manila.openstack.common import strutils
 from manila.openstack.common import timeutils
 
+netaddr = importutils.try_import("netaddr")
 
 _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.isfunction, inspect.isgeneratorfunction,
@@ -50,7 +64,8 @@ _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                      inspect.isabstract]
 
-_simple_types = (types.NoneType, int, basestring, bool, float, long)
+_simple_types = (six.string_types + six.integer_types
+                 + (type(None), bool, float))
 
 
 def to_primitive(value, convert_instances=False, convert_datetime=True,
@@ -93,7 +108,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
     # value of itertools.count doesn't get caught by nasty_type_tests
     # and results in infinite loop when list(value) is called.
     if type(value) == itertools.count:
-        return unicode(value)
+        return six.text_type(value)
 
     # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
     #              tests that raise an exception in a mocked method that
@@ -115,7 +130,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                              level=level,
                              max_depth=max_depth)
         if isinstance(value, dict):
-            return dict((k, recursive(v)) for k, v in value.iteritems())
+            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]
 
@@ -127,6 +142,8 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
 
         if convert_datetime and isinstance(value, datetime.datetime):
             return timeutils.strtime(value)
+        elif isinstance(value, gettextutils.Message):
+            return value.data
         elif hasattr(value, 'iteritems'):
             return recursive(dict(value.iteritems()), level=level + 1)
         elif hasattr(value, '__iter__'):
@@ -135,26 +152,28 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
             # Likely an instance of something. Watch for cycles.
             # Ignore class member vars.
             return recursive(value.__dict__, level=level + 1)
+        elif netaddr and isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
         else:
             if any(test(value) for test in _nasty_type_tests):
-                return unicode(value)
+                return six.text_type(value)
             return value
     except TypeError:
         # Class objects are tricky since they may define something like
         # __iter__ defined but it isn't callable as list().
-        return unicode(value)
+        return six.text_type(value)
 
 
 def dumps(value, default=to_primitive, **kwargs):
     return json.dumps(value, default=default, **kwargs)
 
 
-def loads(s):
-    return json.loads(s)
+def loads(s, encoding='utf-8'):
+    return json.loads(strutils.safe_decode(s, encoding))
 
 
-def load(s):
-    return json.load(s)
+def load(fp, encoding='utf-8'):
+    return json.load(codecs.getreader(encoding)(fp))
 
 
 try:
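
[Editor's note: editorial sketch, not part of the commit. With the six/netaddr changes above, to_primitive() flattens datetimes, and IP addresses when netaddr is present, before json.dumps sees them.]

    import datetime

    from manila.openstack.common import jsonutils

    doc = {'at': datetime.datetime(2014, 1, 1), 'count': 1}
    print(jsonutils.dumps(doc))  # datetime rendered via timeutils.strtime()
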
diff --git a/manila/openstack/common/local.py b/manila/openstack/common/local.py
index f1bfc824bf..0819d5b97c 100644
--- a/manila/openstack/common/local.py
+++ b/manila/openstack/common/local.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -15,16 +13,15 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""Greenthread local storage of variables using weak references"""
+"""Local storage of variables using weak references"""
 
+import threading
 import weakref
 
-from eventlet import corolocal
-
 
-class WeakLocal(corolocal.local):
+class WeakLocal(threading.local):
     def __getattribute__(self, attr):
-        rval = corolocal.local.__getattribute__(self, attr)
+        rval = super(WeakLocal, self).__getattribute__(attr)
         if rval:
             # NOTE(mikal): this bit is confusing. What is stored is a weak
             # reference, not the value itself. We therefore need to lookup
@@ -34,7 +31,7 @@ class WeakLocal(corolocal.local):
 
     def __setattr__(self, attr, value):
         value = weakref.ref(value)
-        return corolocal.local.__setattr__(self, attr, value)
+        return super(WeakLocal, self).__setattr__(attr, value)
 
 
 # NOTE(mikal): the name "store" should be deprecated in the future
@@ -45,4 +42,4 @@ store = WeakLocal()
 # "strong" store will hold a reference to the object so that it never falls out
 # of scope.
 weak_store = WeakLocal()
-strong_store = corolocal.local
+strong_store = threading.local()
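
[Editor's note: editorial sketch, not part of the commit. WeakLocal stores weak references, so a value disappears once nothing else holds it; exact timing depends on garbage collection.]

    from manila.openstack.common import local

    class Ctx(object):
        pass

    ctx = local.store.context = Ctx()   # stored as weakref.ref(ctx)
    assert local.store.context is ctx   # dereferenced on access
    del ctx                             # after collection, reads yield None
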
diff --git a/manila/openstack/common/lockutils.py b/manila/openstack/common/lockutils.py
index 55b509c1e2..162930d4a5 100644
--- a/manila/openstack/common/lockutils.py
+++ b/manila/openstack/common/lockutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -15,9 +13,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-
 import contextlib
 import errno
+import fcntl
 import functools
 import os
 import shutil
@@ -31,8 +29,7 @@
 from oslo.config import cfg
 
 from manila.openstack.common import fileutils
-from manila.openstack.common.gettextutils import _  # noqa
-from manila.openstack.common import local
+from manila.openstack.common.gettextutils import _, _LE, _LI
 from manila.openstack.common import log as logging
 
 
@@ -41,10 +38,10 @@ LOG = logging.getLogger(__name__)
 util_opts = [
     cfg.BoolOpt('disable_process_locking',
                 default=False,
-                help='Whether to disable inter-process locks'),
+                help='Enables or disables inter-process locks.'),
     cfg.StrOpt('lock_path',
                default=os.environ.get("MANILA_LOCK_PATH"),
-               help=('Directory to use for lock files.'))
+               help='Directory to use for lock files.')
 ]
 
 
@@ -56,7 +53,7 @@ def set_defaults(lock_path):
     cfg.set_defaults(util_opts, lock_path=lock_path)
 
 
-class _InterProcessLock(object):
+class _FileLock(object):
     """Lock implementation which allows multiple locks, working around
     issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
     not require any cleanup. Since the lock is always held on a file
@@ -78,7 +75,13 @@ class _InterProcessLock(object):
         self.lockfile = None
         self.fname = name
 
-    def __enter__(self):
+    def acquire(self):
+        basedir = os.path.dirname(self.fname)
+
+        if not os.path.exists(basedir):
+            fileutils.ensure_tree(basedir)
+            LOG.info(_LI('Created lock path: %s'), basedir)
+
         self.lockfile = open(self.fname, 'w')
 
         while True:
@@ -88,23 +91,41 @@
                 # Also upon reading the MSDN docs for locking(), it seems
                 # to have a laughable 10 attempts "blocking" mechanism.
                 self.trylock()
-                return self
+                LOG.debug('Got file lock "%s"', self.fname)
+                return True
             except IOError as e:
                 if e.errno in (errno.EACCES, errno.EAGAIN):
                     # external locks synchronise things like iptables
                     # updates - give it some time to prevent busy spinning
                     time.sleep(0.01)
                 else:
-                    raise
+                    raise threading.ThreadError(_("Unable to acquire lock on"
+                                                  " `%(filename)s` due to"
+                                                  " %(exception)s") %
+                                                {
+                                                    'filename': self.fname,
+                                                    'exception': e,
+                                                })
 
-    def __exit__(self, exc_type, exc_val, exc_tb):
+    def __enter__(self):
+        self.acquire()
+        return self
+
+    def release(self):
         try:
             self.unlock()
             self.lockfile.close()
+            LOG.debug('Released file lock "%s"', self.fname)
         except IOError:
-            LOG.exception(_("Could not release the acquired lock `%s`"),
+            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                           self.fname)
 
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+    def exists(self):
+        return os.path.exists(self.fname)
+
     def trylock(self):
         raise NotImplementedError()
@@ -112,7 +133,7 @@
         raise NotImplementedError()
 
 
-class _WindowsLock(_InterProcessLock):
+class _WindowsLock(_FileLock):
     def trylock(self):
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
 
@@ -120,7 +141,7 @@ class _WindowsLock(_InterProcessLock):
     def unlock(self):
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
 
 
-class _PosixLock(_InterProcessLock):
+class _FcntlLock(_FileLock):
     def trylock(self):
         fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
 
@@ -128,17 +149,120 @@ class _PosixLock(_InterProcessLock):
     def unlock(self):
         fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
 
 
+class _PosixLock(object):
+    def __init__(self, name):
+        # Hash the name because it's not valid to have POSIX semaphore
+        # names with things like / in them. Then use base64 to encode
+        # the digest() instead taking the hexdigest() because the
+        # result is shorter and most systems can't have shm sempahore
+        # names longer than 31 characters.
+        h = hashlib.sha1()
+        h.update(name.encode('ascii'))
+        self.name = str((b'/' + base64.urlsafe_b64encode(
+            h.digest())).decode('ascii'))
+
+    def acquire(self, timeout=None):
+        self.semaphore = posix_ipc.Semaphore(self.name,
+                                             flags=posix_ipc.O_CREAT,
+                                             initial_value=1)
+        self.semaphore.acquire(timeout)
+        return self
+
+    def __enter__(self):
+        self.acquire()
+        return self
+
+    def release(self):
+        self.semaphore.release()
+        self.semaphore.close()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+    def exists(self):
+        try:
+            semaphore = posix_ipc.Semaphore(self.name)
+        except posix_ipc.ExistentialError:
+            return False
+        else:
+            semaphore.close()
+        return True
+
+
 if os.name == 'nt':
     import msvcrt
     InterProcessLock = _WindowsLock
+    FileLock = _WindowsLock
 else:
-    import fcntl
+    import base64
+    import hashlib
+    import posix_ipc
     InterProcessLock = _PosixLock
+    FileLock = _FcntlLock
 
 _semaphores = weakref.WeakValueDictionary()
 _semaphores_lock = threading.Lock()
 
 
+def _get_lock_path(name, lock_file_prefix, lock_path=None):
+    # NOTE(mikal): the lock name cannot contain directory
+    # separators
+    name = name.replace(os.sep, '_')
+    if lock_file_prefix:
+        sep = '' if lock_file_prefix.endswith('-') else '-'
+        name = '%s%s%s' % (lock_file_prefix, sep, name)
+
+    local_lock_path = lock_path or CONF.lock_path
+
+    if not local_lock_path:
+        # NOTE(bnemec): Create a fake lock path for posix locks so we don't
+        # unnecessarily raise the RequiredOptError below.
+        if InterProcessLock is not _PosixLock:
+            raise cfg.RequiredOptError('lock_path')
+        local_lock_path = 'posixlock:/'
+
+    return os.path.join(local_lock_path, name)
+
+
+def external_lock(name, lock_file_prefix=None, lock_path=None):
+    LOG.debug('Attempting to grab external lock "%(lock)s"',
+              {'lock': name})
+
+    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
+
+    # NOTE(bnemec): If an explicit lock_path was passed to us then it
+    # means the caller is relying on file-based locking behavior, so
+    # we can't use posix locks for those calls.
+    if lock_path:
+        return FileLock(lock_file_path)
+    return InterProcessLock(lock_file_path)
+
+
+def remove_external_lock_file(name, lock_file_prefix=None):
+    """Remove a external lock file when it's not used anymore
+    This will be helpful when we have a lot of lock files
+    """
+    with internal_lock(name):
+        lock_file_path = _get_lock_path(name, lock_file_prefix)
+        try:
+            os.remove(lock_file_path)
+        except OSError:
+            LOG.info(_LI('Failed to remove file %(file)s'),
+                     {'file': lock_file_path})
+
+
+def internal_lock(name):
+    with _semaphores_lock:
+        try:
+            sem = _semaphores[name]
+        except KeyError:
+            sem = threading.Semaphore()
+            _semaphores[name] = sem
+
+    LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
+    return sem
+
+
 @contextlib.contextmanager
 def lock(name, lock_file_prefix=None, external=False, lock_path=None):
     """Context based lock
@@ -148,73 +272,22 @@
       True, in which case, it'll yield an InterProcessLock instance.
 
     :param lock_file_prefix: The lock_file_prefix argument is used to provide
-    lock files on disk with a meaningful prefix.
+      lock files on disk with a meaningful prefix.
 
     :param external: The external keyword argument denotes whether this lock
-    should work across multiple processes. This means that if two different
-    workers both run a a method decorated with @synchronized('mylock',
-    external=True), only one of them will execute at a time.
-
-    :param lock_path: The lock_path keyword argument is used to specify a
-    special location for external lock files to live. If nothing is set, then
-    CONF.lock_path is used as a default.
+      should work across multiple processes. This means that if two different
+      workers both run a method decorated with @synchronized('mylock',
+      external=True), only one of them will execute at a time.
     """
-    with _semaphores_lock:
-        try:
-            sem = _semaphores[name]
-        except KeyError:
-            sem = threading.Semaphore()
-            _semaphores[name] = sem
-
-    with sem:
-        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
-
-        # NOTE(mikal): I know this looks odd
-        if not hasattr(local.strong_store, 'locks_held'):
-            local.strong_store.locks_held = []
-        local.strong_store.locks_held.append(name)
-
-        try:
-            if external and not CONF.disable_process_locking:
-                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
-                          {'lock': name})
-
-                # We need a copy of lock_path because it is non-local
-                local_lock_path = lock_path or CONF.lock_path
-                if not local_lock_path:
-                    raise cfg.RequiredOptError('lock_path')
-
-                if not os.path.exists(local_lock_path):
-                    fileutils.ensure_tree(local_lock_path)
-                    LOG.info(_('Created lock path: %s'), local_lock_path)
-
-                def add_prefix(name, prefix):
-                    if not prefix:
-                        return name
-                    sep = '' if prefix.endswith('-') else '-'
-                    return '%s%s%s' % (prefix, sep, name)
-
-                # NOTE(mikal): the lock name cannot contain directory
-                # separators
-                lock_file_name = add_prefix(name.replace(os.sep, '_'),
-                                            lock_file_prefix)
-
-                lock_file_path = os.path.join(local_lock_path, lock_file_name)
-
-                try:
-                    lock = InterProcessLock(lock_file_path)
-                    with lock as lock:
-                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
-                                  {'lock': name, 'path': lock_file_path})
-                        yield lock
-                finally:
-                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
-                              {'lock': name, 'path': lock_file_path})
-            else:
-                yield sem
-
-        finally:
-            local.strong_store.locks_held.remove(name)
+    int_lock = internal_lock(name)
+    with int_lock:
+        if external and not CONF.disable_process_locking:
+            ext_lock = external_lock(name, lock_file_prefix, lock_path)
+            with ext_lock:
+                yield ext_lock
+        else:
+            yield int_lock
+    LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
 
 
 def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
@@ -246,11 +319,11 @@
         def inner(*args, **kwargs):
             try:
                 with lock(name, lock_file_prefix, external, lock_path):
-                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
+                    LOG.debug('Got semaphore / lock "%(function)s"',
                               {'function': f.__name__})
                     return f(*args, **kwargs)
             finally:
-                LOG.debug(_('Semaphore / lock released "%(function)s"'),
+                LOG.debug('Semaphore / lock released "%(function)s"',
                           {'function': f.__name__})
         return inner
     return wrap
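
[Editor's note: editorial sketch, not part of the commit, of the rewritten lock machinery from a caller's side. The function name and lock name are hypothetical.]

    from manila.openstack.common import lockutils

    @lockutils.synchronized('share-ops', lock_file_prefix='manila-',
                            external=True)
    def resize_share():
        # external=True now takes the internal semaphore first, then the
        # posix/file lock, so only one worker process runs this at a time.
        pass
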
""" - with _semaphores_lock: - try: - sem = _semaphores[name] - except KeyError: - sem = threading.Semaphore() - _semaphores[name] = sem - - with sem: - LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) - - # NOTE(mikal): I know this looks odd - if not hasattr(local.strong_store, 'locks_held'): - local.strong_store.locks_held = [] - local.strong_store.locks_held.append(name) - - try: - if external and not CONF.disable_process_locking: - LOG.debug(_('Attempting to grab file lock "%(lock)s"'), - {'lock': name}) - - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path or CONF.lock_path - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - LOG.info(_('Created lock path: %s'), local_lock_path) - - def add_prefix(name, prefix): - if not prefix: - return name - sep = '' if prefix.endswith('-') else '-' - return '%s%s%s' % (prefix, sep, name) - - # NOTE(mikal): the lock name cannot contain directory - # separators - lock_file_name = add_prefix(name.replace(os.sep, '_'), - lock_file_prefix) - - lock_file_path = os.path.join(local_lock_path, lock_file_name) - - try: - lock = InterProcessLock(lock_file_path) - with lock as lock: - LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - yield lock - finally: - LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - else: - yield sem - - finally: - local.strong_store.locks_held.remove(name) + int_lock = internal_lock(name) + with int_lock: + if external and not CONF.disable_process_locking: + ext_lock = external_lock(name, lock_file_prefix, lock_path) + with ext_lock: + yield ext_lock + else: + yield int_lock + LOG.debug('Released semaphore "%(lock)s"', {'lock': name}) def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): @@ -246,11 +319,11 @@ def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): def inner(*args, **kwargs): try: with lock(name, lock_file_prefix, external, lock_path): - LOG.debug(_('Got semaphore / lock "%(function)s"'), + LOG.debug('Got semaphore / lock "%(function)s"', {'function': f.__name__}) return f(*args, **kwargs) finally: - LOG.debug(_('Semaphore / lock released "%(function)s"'), + LOG.debug('Semaphore / lock released "%(function)s"', {'function': f.__name__}) return inner return wrap diff --git a/manila/openstack/common/log.py b/manila/openstack/common/log.py index 41fb9bb17a..23038bae67 100644 --- a/manila/openstack/common/log.py +++ b/manila/openstack/common/log.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -17,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -"""Openstack logging handler. +"""OpenStack logging handler. This module adds to logging functionality by adding the option to specify a context object when calling the various log methods. If the context object @@ -29,29 +27,49 @@ It also allows setting of formatting information through conf. 
""" -import ConfigParser -import cStringIO import inspect import itertools import logging import logging.config import logging.handlers import os -import stat +import re import sys import traceback from oslo.config import cfg +import six +from six import moves from manila.openstack.common.gettextutils import _ +from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila.openstack.common import local -from manila.openstack.common import notifier -_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' + '.*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*).*?([\s])'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + common_cli_opts = [ cfg.BoolOpt('debug', short='d', @@ -66,24 +84,26 @@ common_cli_opts = [ ] logging_cli_opts = [ - cfg.StrOpt('log-config', + cfg.StrOpt('log-config-append', metavar='PATH', - help='If this option is specified, the logging configuration ' - 'file specified is used and overrides any other logging ' - 'options specified. Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), + deprecated_name='log-config', + help='The name of a logging configuration file. This file ' + 'is appended to any existing logging configuration ' + 'files. For details about logging configuration files, ' + 'see the Python logging module documentation.'), cfg.StrOpt('log-format', - default=_DEFAULT_LOG_FORMAT, metavar='FORMAT', - help='A logging.Formatter log message format string which may ' + help='DEPRECATED. ' + 'A logging.Formatter log message format string which may ' 'use any of the available logging.LogRecord attributes. ' - 'Default: %(default)s'), + 'This option is deprecated. Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), cfg.StrOpt('log-date-format', default=_DEFAULT_LOG_DATE_FORMAT, metavar='DATE_FORMAT', help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s'), + 'Default: %(default)s .'), cfg.StrOpt('log-file', metavar='PATH', deprecated_name='logfile', @@ -92,69 +112,80 @@ logging_cli_opts = [ cfg.StrOpt('log-dir', deprecated_name='logdir', help='(Optional) The base directory used for relative ' - '--log-file paths'), + '--log-file paths.'), cfg.BoolOpt('use-syslog', default=False, - help='Use syslog for logging.'), + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and will change in J to honor RFC5424.'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Enables or disables syslog rfc5424 format ' + 'for logging. 
If enabled, prefixes the MSG part of the ' + 'syslog message with APP-NAME (RFC5424). The ' + 'format without the APP-NAME is deprecated in I, ' + 'and will be removed in J.'), cfg.StrOpt('syslog-log-facility', default='LOG_USER', - help='syslog facility to receive log lines') + help='Syslog facility to receive log lines.') ] generic_log_opts = [ cfg.BoolOpt('use_stderr', default=True, - help='Log output to standard error'), - cfg.StrOpt('logfile_mode', - default='0644', - help='Default file mode used when creating log files'), + help='Log output to standard error.') ] log_opts = [ cfg.StrOpt('logging_context_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user)s %(tenant)s] ' + '%(name)s [%(request_id)s %(user_identity)s] ' '%(instance)s%(message)s', - help='format string to use for log messages with context'), + help='Format string to use for log messages with context.'), cfg.StrOpt('logging_default_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [-] %(instance)s%(message)s', - help='format string to use for log messages without context'), + help='Format string to use for log messages without context.'), cfg.StrOpt('logging_debug_format_suffix', default='%(funcName)s %(pathname)s:%(lineno)d', - help='data to append to log format when level is DEBUG'), + help='Data to append to log format when level is DEBUG.'), cfg.StrOpt('logging_exception_prefix', default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' '%(instance)s', - help='prefix each line of exception output with this format'), + help='Prefix each line of exception output with this format.'), cfg.ListOpt('default_log_levels', default=[ + 'amqp=WARN', 'amqplib=WARN', - 'sqlalchemy=WARN', 'boto=WARN', + 'qpid=WARN', + 'sqlalchemy=WARN', 'suds=INFO', - 'keystone=INFO', - 'eventlet.wsgi.server=WARN' + 'oslo.messaging=INFO', + 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN' ], - help='list of logger=LEVEL pairs'), + help='List of logger=LEVEL pairs.'), cfg.BoolOpt('publish_errors', default=False, - help='publish error events'), + help='Enables or disables publication of error events.'), cfg.BoolOpt('fatal_deprecations', default=False, - help='make deprecations fatal'), + help='Enables or disables fatal status of deprecations.'), # NOTE(mikal): there are two options here because sometimes we are handed # a full instance (and could include more information), and other times we # are just handed a UUID for the instance. cfg.StrOpt('instance_format', default='[instance: %(uuid)s] ', - help='If an instance is passed with the log message, format ' - 'it like this'), + help='The format for an instance that is passed with the log ' + 'message. '), cfg.StrOpt('instance_uuid_format', default='[instance: %(uuid)s] ', - help='If an instance UUID is passed with the log message, ' - 'format it like this'), + help='The format for an instance UUID that is passed with the ' + 'log message. '), ] CONF = cfg.CONF @@ -210,27 +241,112 @@ def _get_log_file_path(binary=None): binary = binary or _get_binary_name() return '%s.log' % (os.path.join(logdir, binary),) + return None -class ContextAdapter(logging.LoggerAdapter): + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. 
+ + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. + if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): warn = logging.LoggerAdapter.warning def __init__(self, logger, project_name, version_string): self.logger = logger self.project = project_name self.version = version_string + self._deprecated_messages_sent = dict() - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) + @property + def handlers(self): + return self.logger.handlers def deprecated(self, msg, *args, **kwargs): + """Call this method when a deprecated feature is used. + + If the system is configured for fatal deprecations then the message + is logged at the 'critical' level and :class:`DeprecatedConfig` will + be raised. + + Otherwise, the message will be logged (once) at the 'warn' level. + + :raises: :class:`DeprecatedConfig` if the system is configured for + fatal deprecations. + + """ stdmsg = _("Deprecated: %s") % msg if CONF.fatal_deprecations: self.critical(stdmsg, *args, **kwargs) raise DeprecatedConfig(msg=stdmsg) - else: - self.warn(stdmsg, *args, **kwargs) + + # Using a list because a tuple with dict can't be stored in a set. + sent_args = self._deprecated_messages_sent.setdefault(msg, list()) + + if args in sent_args: + # Already logged this message, so don't log it again. 
+ return + + sent_args.append(args) + self.warn(stdmsg, *args, **kwargs) def process(self, msg, kwargs): + # NOTE(mrodden): catch any Message/other object and + # coerce to unicode before they can get + # to the python logging and possibly + # cause string encoding trouble + if not isinstance(msg, six.string_types): + msg = six.text_type(msg) + if 'extra' not in kwargs: kwargs['extra'] = {} extra = kwargs['extra'] @@ -242,18 +358,20 @@ class ContextAdapter(logging.LoggerAdapter): extra.update(_dictify_context(context)) instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid') or + kwargs.pop('instance_uuid', None)) instance_extra = '' if instance: instance_extra = CONF.instance_format % instance - else: - instance_uuid = kwargs.pop('instance_uuid', None) - if instance_uuid: - instance_extra = (CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra['instance'] = instance_extra - extra.update({"project": self.project}) - extra.update({"version": self.version}) + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version extra['extra'] = extra.copy() return msg, kwargs @@ -267,7 +385,7 @@ class JSONFormatter(logging.Formatter): def formatException(self, ei, strip_newlines=True): lines = traceback.format_exception(*ei) if strip_newlines: - lines = [itertools.ifilter( + lines = [moves.filter( lambda x: x, line.rstrip().splitlines()) for line in lines] lines = list(itertools.chain(*lines)) @@ -304,23 +422,12 @@ class JSONFormatter(logging.Formatter): return jsonutils.dumps(message) -class PublishErrorsHandler(logging.Handler): - def emit(self, record): - if ('manila.openstack.common.notifier.log_notifier' in - CONF.notification_driver): - return - notifier.api.notify(None, 'error.publisher', - 'error_notification', - notifier.api.ERROR, - dict(error=record.msg)) - - def _create_logging_excepthook(product_name): - def logging_excepthook(type, value, tb): - extra = {} - if CONF.verbose: - extra['exc_info'] = (type, value, tb) - getLogger(product_name).critical(str(value), **extra) + def logging_excepthook(exc_type, value, tb): + extra = {'exc_info': (exc_type, value, tb)} + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) return logging_excepthook @@ -337,19 +444,20 @@ class LogConfigError(Exception): err_msg=self.err_msg) -def _load_log_config(log_config): +def _load_log_config(log_config_append): try: - logging.config.fileConfig(log_config) - except ConfigParser.Error, exc: - raise LogConfigError(log_config, str(exc)) + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except moves.configparser.Error as exc: + raise LogConfigError(log_config_append, six.text_type(exc)) -def setup(product_name): +def setup(product_name, version='unknown'): """Setup logging.""" - if CONF.log_config: - _load_log_config(CONF.log_config) + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) else: - _setup_logging_from_conf() + _setup_logging_from_conf(product_name, version) sys.excepthook = _create_logging_excepthook(product_name) @@ -383,15 +491,38 @@ def _find_facility_from_conf(): return facility -def _setup_logging_from_conf(): +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = 
_get_binary_name() + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + logging.handlers.SysLogHandler.__init__(self, *args, **kwargs) + + def format(self, record): + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + msg = logging.handlers.SysLogHandler.format(self, record) + msg = self.binary_name + ' ' + msg + return msg + + +def _setup_logging_from_conf(project, version): log_root = getLogger(None).logger for handler in log_root.handlers: log_root.removeHandler(handler) if CONF.use_syslog: facility = _find_facility_from_conf() - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) log_root.addHandler(syslog) logpath = _get_log_file_path() @@ -399,31 +530,35 @@ def _setup_logging_from_conf(): filelog = logging.handlers.WatchedFileHandler(logpath) log_root.addHandler(filelog) - mode = int(CONF.logfile_mode, 8) - st = os.stat(logpath) - if st.st_mode != (stat.S_IFREG | mode): - os.chmod(logpath, mode) - if CONF.use_stderr: streamlog = ColorHandler() log_root.addHandler(streamlog) - elif not CONF.log_file: + elif not logpath: # pass sys.stdout as a positional argument # python2.6 calls the argument strm, in 2.7 it's stream streamlog = logging.StreamHandler(sys.stdout) log_root.addHandler(streamlog) if CONF.publish_errors: - log_root.addHandler(PublishErrorsHandler(logging.ERROR)) + handler = importutils.import_object( + "manila.openstack.common.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + datefmt = CONF.log_date_format for handler in log_root.handlers: - datefmt = CONF.log_date_format + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. if CONF.log_format: handler.setFormatter(logging.Formatter(fmt=CONF.log_format, datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') else: - handler.setFormatter(LegacyFormatter(datefmt=datefmt)) + handler.setFormatter(ContextFormatter(project=project, + version=version, + datefmt=datefmt)) if CONF.debug: log_root.setLevel(logging.DEBUG) @@ -434,9 +569,15 @@ def _setup_logging_from_conf(): for pair in CONF.default_log_levels: mod, _sep, level_name = pair.partition('=') - level = logging.getLevelName(level_name) logger = logging.getLogger(mod) - logger.setLevel(level) + # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name + # to integer code. + if sys.version_info < (2, 7): + level = logging.getLevelName(level_name) + logger.setLevel(level) + else: + logger.setLevel(level_name) + _loggers = {} @@ -449,6 +590,16 @@ def getLogger(name='unknown', version='unknown'): return _loggers[name] +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. 
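For example (editor's sketch)::

    # Safe to create at import time, before setup() has run; the
    # real logger is only built on first use and then cached.
    LOG = getLazyLogger(__name__)
    LOG.info('this call creates and then reuses the real logger')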
+ """ + return LazyAdapter(name, version) + + class WritableLogger(object): """A thin wrapper that responds to `write` and logs.""" @@ -457,10 +608,10 @@ class WritableLogger(object): self.level = level def write(self, msg): - self.logger.log(self.level, msg) + self.logger.log(self.level, msg.rstrip()) -class LegacyFormatter(logging.Formatter): +class ContextFormatter(logging.Formatter): """A context.RequestContext aware formatter configured through flags. The flags used to set format strings are: logging_context_format_string @@ -471,18 +622,50 @@ class LegacyFormatter(logging.Formatter): For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter + If available, uses the context value stored in TLS - local.store.context + """ + def __init__(self, *args, **kwargs): + """Initialize ContextFormatter instance + + Takes additional keyword arguments which can be used in the message + format string. + + :keyword project: project name + :type project: string + :keyword version: project version + :type version: string + + """ + + self.project = kwargs.pop('project', 'unknown') + self.version = kwargs.pop('version', 'unknown') + + logging.Formatter.__init__(self, *args, **kwargs) + def format(self, record): """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formating params + + # store project info + record.project = self.project + record.version = self.version + + # store request info + context = getattr(local.store, 'context', None) + if context: + d = _dictify_context(context) + for k, v in d.items(): + setattr(record, k, v) + + # NOTE(sdague): default the fancier formatting params # to an empty string so we don't throw an exception if # they get used - for key in ('instance', 'color'): + for key in ('instance', 'color', 'user_identity'): if key not in record.__dict__: record.__dict__[key] = '' - if record.__dict__.get('request_id', None): + if record.__dict__.get('request_id'): self._fmt = CONF.logging_context_format_string else: self._fmt = CONF.logging_default_format_string @@ -491,7 +674,7 @@ class LegacyFormatter(logging.Formatter): CONF.logging_debug_format_suffix): self._fmt += " " + CONF.logging_debug_format_suffix - # Cache this on the record, Logger will respect our formated copy + # Cache this on the record, Logger will respect our formatted copy if record.exc_info: record.exc_text = self.formatException(record.exc_info, record) return logging.Formatter.format(self, record) @@ -501,7 +684,7 @@ class LegacyFormatter(logging.Formatter): if not record: return logging.Formatter.formatException(self, exc_info) - stringbuffer = cStringIO.StringIO() + stringbuffer = moves.StringIO() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, stringbuffer) lines = stringbuffer.getvalue().split('\n') diff --git a/manila/openstack/common/loopingcall.py b/manila/openstack/common/loopingcall.py index 459ab7c1b3..e577c54143 100644 --- a/manila/openstack/common/loopingcall.py +++ b/manila/openstack/common/loopingcall.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# Copyright 2011 Justin Santa Barbara @@ -22,7 +20,7 @@ import sys from eventlet import event from eventlet import greenthread -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _LE, _LW from manila.openstack.common import log as logging from manila.openstack.common import timeutils @@ -30,19 +28,19 @@ LOG = logging.getLogger(__name__) class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCall. + """Exception to break out and stop a LoopingCallBase. - The poll-function passed to LoopingCall can raise this exception to + The poll-function passed to LoopingCallBase can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCall.wait() + this return-value will be returned by LoopingCallBase.wait() """ def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCall.wait() should return.""" + """:param retvalue: Value that LoopingCallBase.wait() should return.""" self.retvalue = retvalue @@ -81,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase): break delay = interval - timeutils.delta_seconds(start, end) if delay <= 0: - LOG.warn(_('task run outlasted interval by %s sec') % + LOG.warn(_LW('task run outlasted interval by %s sec') % -delay) greenthread.sleep(delay if delay > 0 else 0) - except LoopingCallDone, e: + except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: - LOG.exception(_('in fixed duration looping call')) + LOG.exception(_LE('in fixed duration looping call')) done.send_exception(*sys.exc_info()) return else: @@ -100,11 +98,6 @@ class FixedIntervalLoopingCall(LoopingCallBase): return self.done -# TODO(mikal): this class name is deprecated in Havana and should be removed -# in the I release -LoopingCall = FixedIntervalLoopingCall - - class DynamicLoopingCall(LoopingCallBase): """A looping call which sleeps until the next known event. @@ -128,14 +121,14 @@ class DynamicLoopingCall(LoopingCallBase): if periodic_interval_max is not None: idle = min(idle, periodic_interval_max) - LOG.debug(_('Dynamic looping call sleeping for %.02f ' - 'seconds'), idle) + LOG.debug('Dynamic looping call sleeping for %.02f ' + 'seconds', idle) greenthread.sleep(idle) - except LoopingCallDone, e: + except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: - LOG.exception(_('in dynamic looping call')) + LOG.exception(_LE('in dynamic looping call')) done.send_exception(*sys.exc_info()) return else: diff --git a/manila/openstack/common/network_utils.py b/manila/openstack/common/network_utils.py index 5224e01aa9..92992e9cc0 100644 --- a/manila/openstack/common/network_utils.py +++ b/manila/openstack/common/network_utils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 OpenStack Foundation. # All Rights Reserved. # @@ -19,14 +17,19 @@ Network-related utilities and helper functions. """ -import logging +import socket + +from six.moves.urllib import parse + +from manila.openstack.common.gettextutils import _LW +from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) def parse_host_port(address, default_port=None): - """ - Interpret a string as a host:port pair. + """Interpret a string as a host:port pair. 
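For illustration, an editor's sketch of the documented behavior::

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[2001:db8::1]:80')
    ('2001:db8::1', 80)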
+ An IPv6 address MUST be escaped if accompanied by a port, because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 means both [2001:db8:85a3::8a2e:370:7334] and [2001:db8:85a3::8a2e:370]:7334. @@ -66,3 +69,92 @@ def parse_host_port(address, default_port=None): port = default_port return (host, None if port is None else int(port)) + + +class ModifiedSplitResult(parse.SplitResult): + """Split results class for urlsplit.""" + + # NOTE(dims): The functions below are needed for Python 2.6.x. + # We can remove these when we drop support for 2.6.x. + @property + def hostname(self): + netloc = self.netloc.split('@', 1)[-1] + host, port = parse_host_port(netloc) + return host + + @property + def port(self): + netloc = self.netloc.split('@', 1)[-1] + host, port = parse_host_port(netloc) + return port + + +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL using urlparse.urlsplit(), splitting query and fragments. + This function papers over Python issue9374 when needed. + + The parameters are the same as urlparse.urlsplit. + """ + scheme, netloc, path, query, fragment = parse.urlsplit( + url, scheme, allow_fragments) + if allow_fragments and '#' in path: + path, fragment = path.split('#', 1) + if '?' in path: + path, query = path.split('?', 1) + return ModifiedSplitResult(scheme, netloc, + path, query, fragment) + + +def set_tcp_keepalive(sock, tcp_keepalive=True, + tcp_keepidle=None, + tcp_keepalive_interval=None, + tcp_keepalive_count=None): + """Set values for tcp keepalive parameters + + This function configures tcp keepalive parameters if users wish to do + so. + :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are + not sure, this should be True, and default values will be used. + + :param tcp_keepidle: time to wait before starting to send keepalive probes + + :param tcp_keepalive_interval: time between successive probes, once the + initial wait time is over + + :param tcp_keepalive_count: number of probes to send before the connection + is killed + """ + + # NOTE(praneshp): Despite keepalive being a tcp concept, the level is + # still SOL_SOCKET. This is a quirk. + if isinstance(tcp_keepalive, bool): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive) + else: + raise TypeError("tcp_keepalive must be a boolean") + + if not tcp_keepalive: + return + + # These options aren't available in the OS X version of eventlet, + # Idle + Count * Interval effectively gives you the total timeout. + if tcp_keepidle is not None: + if hasattr(socket, 'TCP_KEEPIDLE'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + tcp_keepidle) + else: + LOG.warning(_LW('tcp_keepidle not available on your system')) + if tcp_keepalive_interval is not None: + if hasattr(socket, 'TCP_KEEPINTVL'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPINTVL, + tcp_keepalive_interval) + else: + LOG.warning(_LW('tcp_keepintvl not available on your system')) + if tcp_keepalive_count is not None: + if hasattr(socket, 'TCP_KEEPCNT'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPCNT, + tcp_keepalive_count) + else: + LOG.warning(_LW('tcp_keepcnt not available on your system')) diff --git a/manila/openstack/common/notifier/__init__.py b/manila/openstack/common/notifier/__init__.py index 45c3b46ae9..e69de29bb2 100644 --- a/manila/openstack/common/notifier/__init__.py +++ b/manila/openstack/common/notifier/__init__.py @@ -1,14 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved.
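# Editor's usage sketch for set_tcp_keepalive() above; the timeout
# values below are illustrative assumptions, not defaults of this
# module.
import socket

from manila.openstack.common import network_utils

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Start probing after 60s of idle, probe every 10s, and give up after
# 5 failed probes; Idle + Count * Interval gives roughly a 110s total
# timeout on platforms that expose all three TCP_KEEP* constants.
network_utils.set_tcp_keepalive(sock,
                                tcp_keepalive=True,
                                tcp_keepidle=60,
                                tcp_keepalive_interval=10,
                                tcp_keepalive_count=5)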
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/manila/openstack/common/notifier/api.py b/manila/openstack/common/notifier/api.py index 043a2477ad..0680c65284 100644 --- a/manila/openstack/common/notifier/api.py +++ b/manila/openstack/common/notifier/api.py @@ -13,12 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. +import socket import uuid from oslo.config import cfg from manila.openstack.common import context -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _, _LE from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila.openstack.common import log as logging @@ -35,7 +36,6 @@ notifier_opts = [ default='INFO', help='Default notification level for outgoing notifications'), cfg.StrOpt('default_publisher_id', - default='$host', help='Default publisher_id for outgoing notifications'), ] @@ -56,7 +56,7 @@ class BadPriorityException(Exception): def notify_decorator(name, fn): - """ decorator for notify which is used from utils.monkey_patch() + """Decorator for notify which is used from utils.monkey_patch(). :param name: name of the function :param function: - object of the function @@ -74,7 +74,7 @@ def notify_decorator(name, fn): ctxt = context.get_context_from_function_and_args(fn, args, kwarg) notify(ctxt, - CONF.default_publisher_id, + CONF.default_publisher_id or socket.gethostname(), name, CONF.default_notification_level, body) @@ -84,7 +84,10 @@ def notify_decorator(name, fn): def publisher_id(service, host=None): if not host: - host = CONF.host + try: + host = CONF.host + except AttributeError: + host = CONF.default_publisher_id or socket.gethostname() return "%s.%s" % (service, host) @@ -138,9 +141,9 @@ def notify(context, publisher_id, event_type, priority, payload): try: driver.notify(context, msg) except Exception as e: - LOG.exception(_("Problem '%(e)s' attempting to " - "send to notification system. " - "Payload=%(payload)s") + LOG.exception(_LE("Problem '%(e)s' attempting to " + "send to notification system. " + "Payload=%(payload)s") % dict(e=e, payload=payload)) @@ -153,29 +156,16 @@ def _get_drivers(): if _drivers is None: _drivers = {} for notification_driver in CONF.notification_driver: - add_driver(notification_driver) - + try: + driver = importutils.import_module(notification_driver) + _drivers[notification_driver] = driver + except ImportError: + LOG.exception(_LE("Failed to load notifier %s. " + "These notifications will not be sent.") % + notification_driver) return _drivers.values() -def add_driver(notification_driver): - """Add a notification driver at runtime.""" - # Make sure the driver list is initialized. - _get_drivers() - if isinstance(notification_driver, basestring): - # Load and add - try: - driver = importutils.import_module(notification_driver) - _drivers[notification_driver] = driver - except ImportError: - LOG.exception(_("Failed to load notifier %s. 
" - "These notifications will not be sent.") % - notification_driver) - else: - # Driver is already loaded; just add the object. - _drivers[notification_driver] = notification_driver - - def _reset_drivers(): """Used by unit tests to reset the drivers.""" global _drivers diff --git a/manila/openstack/common/notifier/log_notifier.py b/manila/openstack/common/notifier/log_notifier.py index 9e8b52f951..9dd1f714b3 100644 --- a/manila/openstack/common/notifier/log_notifier.py +++ b/manila/openstack/common/notifier/log_notifier.py @@ -24,7 +24,9 @@ CONF = cfg.CONF def notify(_context, message): """Notifies the recipient of the desired event given the model. - Log notifications using openstack's default logging system""" + + Log notifications using OpenStack's default logging system. + """ priority = message.get('priority', CONF.default_notification_level) diff --git a/manila/openstack/common/notifier/no_op_notifier.py b/manila/openstack/common/notifier/no_op_notifier.py index bc7a56ca7a..13d946e362 100644 --- a/manila/openstack/common/notifier/no_op_notifier.py +++ b/manila/openstack/common/notifier/no_op_notifier.py @@ -15,5 +15,5 @@ def notify(_context, message): - """Notifies the recipient of the desired event given the model""" + """Notifies the recipient of the desired event given the model.""" pass diff --git a/manila/openstack/common/notifier/proxy.py b/manila/openstack/common/notifier/proxy.py new file mode 100644 index 0000000000..0c8a32f14c --- /dev/null +++ b/manila/openstack/common/notifier/proxy.py @@ -0,0 +1,77 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A temporary helper which emulates oslo.messaging.Notifier. + +This helper method allows us to do the tedious porting to the new Notifier API +as a standalone commit so that the commit which switches us to oslo.messaging +is smaller and easier to review. This file will be removed as part of that +commit. +""" + +from oslo.config import cfg + +from manila.openstack.common.notifier import api as notifier_api + +CONF = cfg.CONF + + +class Notifier(object): + + def __init__(self, publisher_id): + super(Notifier, self).__init__() + self.publisher_id = publisher_id + + _marker = object() + + def prepare(self, publisher_id=_marker): + ret = self.__class__(self.publisher_id) + if publisher_id is not self._marker: + ret.publisher_id = publisher_id + return ret + + def _notify(self, ctxt, event_type, payload, priority): + notifier_api.notify(ctxt, + self.publisher_id, + event_type, + priority, + payload) + + def audit(self, ctxt, event_type, payload): + # No audit in old notifier. 
+ self._notify(ctxt, event_type, payload, 'INFO') + + def debug(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'DEBUG') + + def info(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'INFO') + + def warn(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'WARN') + + warning = warn + + def error(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'ERROR') + + def critical(self, ctxt, event_type, payload): + self._notify(ctxt, event_type, payload, 'CRITICAL') + + +def get_notifier(service=None, host=None, publisher_id=None): + if not publisher_id: + publisher_id = "%s.%s" % (service, host or CONF.host) + return Notifier(publisher_id) diff --git a/manila/openstack/common/notifier/rpc_notifier.py b/manila/openstack/common/notifier/rpc_notifier.py index 8b9da4fbe7..8443b31767 100644 --- a/manila/openstack/common/notifier/rpc_notifier.py +++ b/manila/openstack/common/notifier/rpc_notifier.py @@ -16,7 +16,7 @@ from oslo.config import cfg from manila.openstack.common import context as req_context -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _LE from manila.openstack.common import log as logging from manila.openstack.common import rpc @@ -24,14 +24,14 @@ LOG = logging.getLogger(__name__) notification_topic_opt = cfg.ListOpt( 'notification_topics', default=['notifications', ], - help='AMQP topic used for openstack notifications') + help='AMQP topic used for OpenStack notifications') CONF = cfg.CONF CONF.register_opt(notification_topic_opt) def notify(context, message): - """Sends a notification via RPC""" + """Sends a notification via RPC.""" if not context: context = req_context.get_admin_context() priority = message.get('priority', @@ -42,5 +42,6 @@ def notify(context, message): try: rpc.notify(context, topic, message) except Exception: - LOG.exception(_("Could not send notification to %(topic)s. " - "Payload=%(message)s"), locals()) + LOG.exception(_LE("Could not send notification to %(topic)s. " + "Payload=%(message)s"), + {"topic": topic, "message": message}) diff --git a/manila/openstack/common/notifier/rpc_notifier2.py b/manila/openstack/common/notifier/rpc_notifier2.py index 36b3266aab..9dbf7ff4c7 100644 --- a/manila/openstack/common/notifier/rpc_notifier2.py +++ b/manila/openstack/common/notifier/rpc_notifier2.py @@ -18,7 +18,7 @@ from oslo.config import cfg from manila.openstack.common import context as req_context -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _LE from manila.openstack.common import log as logging from manila.openstack.common import rpc @@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__) notification_topic_opt = cfg.ListOpt( 'topics', default=['notifications', ], - help='AMQP topic(s) used for openstack notifications') + help='AMQP topic(s) used for OpenStack notifications') opt_group = cfg.OptGroup(name='rpc_notifier2', title='Options for rpc_notifier2') @@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group) def notify(context, message): - """Sends a notification via RPC""" + """Sends a notification via RPC.""" if not context: context = req_context.get_admin_context() priority = message.get('priority', @@ -48,5 +48,6 @@ def notify(context, message): try: rpc.notify(context, topic, message, envelope=True) except Exception: - LOG.exception(_("Could not send notification to %(topic)s. 
" - "Payload=%(message)s"), locals()) + LOG.exception(_LE("Could not send notification to %(topic)s. " + "Payload=%(message)s"), + {"topic": topic, "message": message}) diff --git a/manila/openstack/common/notifier/test_notifier.py b/manila/openstack/common/notifier/test_notifier.py index 96c1746bf4..11fc21fc31 100644 --- a/manila/openstack/common/notifier/test_notifier.py +++ b/manila/openstack/common/notifier/test_notifier.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. - NOTIFICATIONS = [] diff --git a/manila/openstack/common/policy.py b/manila/openstack/common/policy.py index ed0571b87b..73797cdc30 100644 --- a/manila/openstack/common/policy.py +++ b/manila/openstack/common/policy.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack, LLC. +# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,231 +13,816 @@ # License for the specific language governing permissions and limitations # under the License. -"""Common Policy Engine Implementation""" +""" +Common Policy Engine Implementation -import logging -import urllib -import urllib2 +Policies can be expressed in one of two forms: A list of lists, or a +string written in the new policy language. -from manila.openstack.common.gettextutils import _ +In the list-of-lists representation, each check inside the innermost +list is combined as with an "and" conjunction--for that check to pass, +all the specified checks must pass. These innermost lists are then +combined as with an "or" conjunction. This is the original way of +expressing policies, but there now exists a new way: the policy +language. + +In the policy language, each check is specified the same way as in the +list-of-lists representation: a simple "a:b" pair that is matched to +the correct code to perform that check. However, conjunction +operators are available, allowing for more expressiveness in crafting +policies. + +As an example, take the following rule, expressed in the list-of-lists +representation:: + + [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] + +In the policy language, this becomes:: + + role:admin or (project_id:%(project_id)s and role:projectadmin) + +The policy language also has the "not" operator, allowing a richer +policy rule:: + + project_id:%(project_id)s and not role:dunce + +It is possible to perform policy checks on the following user +attributes (obtained through the token): user_id, domain_id or +project_id:: + + domain_id: + +Attributes sent along with API calls can be used by the policy engine +(on the right side of the expression), by using the following syntax:: + + :user.id + +Contextual attributes of objects identified by their IDs are loaded +from the database. They are also available to the policy engine and +can be checked through the `target` keyword:: + + :target.role.name + +All these attributes (related to users, API calls, and context) can be +checked against each other or against constants, be it literals (True, +) or strings. + +Finally, two special policy checks should be mentioned; the policy +check "@" will always accept an access, and the policy check "!" will +always reject an access. (Note that if a rule is either the empty +list ("[]") or the empty string, this is equivalent to the "@" policy +check.) Of these, the "!" 
policy check is probably the most useful, +as it allows particular rules to be explicitly disabled. +""" + +import abc +import ast +import re + +from oslo.config import cfg +import six +import six.moves.urllib.parse as urlparse +import six.moves.urllib.request as urlrequest + +from manila.openstack.common import fileutils +from manila.openstack.common.gettextutils import _, _LE from manila.openstack.common import jsonutils +from manila.openstack.common import log as logging +policy_opts = [ + cfg.StrOpt('policy_file', + default='policy.json', + help=_('The JSON file that defines policies.')), + cfg.StrOpt('policy_default_rule', + default='default', + help=_('Default rule. Enforced when a requested rule is not ' + 'found.')), +] + +CONF = cfg.CONF +CONF.register_opts(policy_opts) + LOG = logging.getLogger(__name__) - -_BRAIN = None +_checks = {} -def set_brain(brain): - """Set the brain used by enforce(). +class PolicyNotAuthorized(Exception): - Defaults use Brain() if not set. - - """ - global _BRAIN - _BRAIN = brain + def __init__(self, rule): + msg = _("Policy doesn't allow %s to be performed.") % rule + super(PolicyNotAuthorized, self).__init__(msg) -def reset(): - """Clear the brain used by enforce().""" - global _BRAIN - _BRAIN = None - - -def enforce(match_list, target_dict, credentials_dict, exc=None, - *args, **kwargs): - """Enforces authorization of some rules against credentials. - - :param match_list: nested tuples of data to match against - - The basic brain supports three types of match lists: - - 1) rules - - looks like: ``('rule:compute:get_instance',)`` - - Retrieves the named rule from the rules dict and recursively - checks against the contents of the rule. - - 2) roles - - looks like: ``('role:compute:admin',)`` - - Matches if the specified role is in credentials_dict['roles']. - - 3) generic - - looks like: ``('tenant_id:%(tenant_id)s',)`` - - Substitutes values from the target dict into the match using - the % operator and matches them against the creds dict. - - Combining rules: - - The brain returns True if any of the outer tuple of rules - match and also True if all of the inner tuples match. You - can use this to perform simple boolean logic. For - example, the following rule would return True if the creds - contain the role 'admin' OR the if the tenant_id matches - the target dict AND the the creds contains the role - 'compute_sysadmin': - - :: - - { - "rule:combined": ( - 'role:admin', - ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin') - ) - } - - Note that rule and role are reserved words in the credentials match, so - you can't match against properties with those names. Custom brains may - also add new reserved words. For example, the HttpBrain adds http as a - reserved word. - - :param target_dict: dict of object properties - - Target dicts contain as much information as we can about the object being - operated on. - - :param credentials_dict: dict of actor properties - - Credentials dicts contain as much information as we can about the user - performing the action. - - :param exc: exception to raise - - Class of the exception to raise if the check fails. Any remaining - arguments passed to enforce() (both positional and keyword arguments) - will be passed to the exception class. If exc is not provided, returns - False. 
- - :return: True if the policy allows the action - :return: False if the policy does not allow the action and exc is not set - """ - global _BRAIN - if not _BRAIN: - _BRAIN = Brain() - if not _BRAIN.check(match_list, target_dict, credentials_dict): - if exc: - raise exc(*args, **kwargs) - return False - return True - - -class Brain(object): - """Implements policy checking.""" - - _checks = {} - - @classmethod - def _register(cls, name, func): - cls._checks[name] = func +class Rules(dict): + """A store for rules. Handles the default_rule setting directly.""" @classmethod def load_json(cls, data, default_rule=None): - """Init a brain using json instead of a rules dictionary.""" - rules_dict = jsonutils.loads(data) - return cls(rules=rules_dict, default_rule=default_rule) + """Allow loading of JSON rule data.""" + + # Suck in the JSON data and parse the rules + rules = dict((k, parse_rule(v)) for k, v in + jsonutils.loads(data).items()) + + return cls(rules, default_rule) def __init__(self, rules=None, default_rule=None): - if self.__class__ != Brain: - LOG.warning(_("Inheritance-based rules are deprecated; use " - "the default brain instead of %s.") % - self.__class__.__name__) + """Initialize the Rules store.""" - self.rules = rules or {} + super(Rules, self).__init__(rules or {}) self.default_rule = default_rule - def add_rule(self, key, match): - self.rules[key] = match + def __missing__(self, key): + """Implements the default rule handling.""" - def _check(self, match, target_dict, cred_dict): - try: - match_kind, match_value = match.split(':', 1) - except Exception: - LOG.exception(_("Failed to understand rule %(match)r") % locals()) - # If the rule is invalid, fail closed - return False + if isinstance(self.default_rule, dict): + raise KeyError(key) - func = None - try: - old_func = getattr(self, '_check_%s' % match_kind) - except AttributeError: - func = self._checks.get(match_kind, self._checks.get(None, None)) - else: - LOG.warning(_("Inheritance-based rules are deprecated; update " - "_check_%s") % match_kind) - func = lambda brain, kind, value, target, cred: old_func(value, - target, - cred) + # If the default rule isn't actually defined, do something + # reasonably intelligent + if not self.default_rule: + raise KeyError(key) - if not func: - LOG.error(_("No handler for matches of kind %s") % match_kind) - # Fail closed - return False + if isinstance(self.default_rule, BaseCheck): + return self.default_rule - return func(self, match_kind, match_value, target_dict, cred_dict) + # We need to check this or we can get infinite recursion + if self.default_rule not in self: + raise KeyError(key) - def check(self, match_list, target_dict, cred_dict): - """Checks authorization of some rules against credentials. + elif isinstance(self.default_rule, six.string_types): + return self[self.default_rule] - Detailed description of the check with examples in policy.enforce(). 
+ def __str__(self): + """Dumps a string representation of the rules.""" - :param match_list: nested tuples of data to match against - :param target_dict: dict of object properties - :param credentials_dict: dict of actor properties + # Start by building the canonical strings for the rules + out_rules = {} + for key, value in self.items(): + # Use empty string for singleton TrueCheck instances + if isinstance(value, TrueCheck): + out_rules[key] = '' + else: + out_rules[key] = str(value) - :returns: True if the check passes + # Dump a pretty-printed JSON representation + return jsonutils.dumps(out_rules, indent=4) + +class Enforcer(object): + """Responsible for loading and enforcing rules. + + :param policy_file: Custom policy file to use, if none is + specified, `CONF.policy_file` will be + used. + :param rules: Default dictionary / Rules to use. It will be + considered just in the first instantiation. If + `load_rules(True)`, `clear()` or `set_rules(True)` + is called this will be overwritten. + :param default_rule: Default rule to use, CONF.default_rule will + be used if none is specified. + :param use_conf: Whether to load rules from cache or config file. + """ + + def __init__(self, policy_file=None, rules=None, + default_rule=None, use_conf=True): + self.rules = Rules(rules, default_rule) + self.default_rule = default_rule or CONF.policy_default_rule + + self.policy_path = None + self.policy_file = policy_file or CONF.policy_file + self.use_conf = use_conf + + def set_rules(self, rules, overwrite=True, use_conf=False): + """Create a new Rules object based on the provided dict of rules. + + :param rules: New rules to use. It should be an instance of dict. + :param overwrite: Whether to overwrite current rules or update them + with the new rules. + :param use_conf: Whether to reload rules from cache or config file. """ - if not match_list: - return True - for and_list in match_list: - if isinstance(and_list, basestring): - and_list = (and_list,) - if all([self._check(item, target_dict, cred_dict) - for item in and_list]): - return True + + if not isinstance(rules, dict): + raise TypeError(_("Rules must be an instance of dict or Rules, " + "got %s instead") % type(rules)) + self.use_conf = use_conf + if overwrite: + self.rules = Rules(rules, self.default_rule) + else: + self.rules.update(rules) + + def clear(self): + """Clears Enforcer rules, policy's cache and policy's path.""" + self.set_rules({}) + self.default_rule = None + self.policy_path = None + + def load_rules(self, force_reload=False): + """Loads policy_path's rules. + + Policy file is cached and will be reloaded if modified. + + :param force_reload: Whether to overwrite current rules. + """ + + if force_reload: + self.use_conf = force_reload + + if self.use_conf: + if not self.policy_path: + self.policy_path = self._get_policy_path() + + reloaded, data = fileutils.read_cached_file( + self.policy_path, force_reload=force_reload) + if reloaded or not self.rules: + rules = Rules.load_json(data, self.default_rule) + self.set_rules(rules) + LOG.debug("Rules successfully reloaded") + + def _get_policy_path(self): + """Locate the policy json data file. + + :param policy_file: Custom policy file to locate. + + :returns: The policy path + + :raises: ConfigFilesNotFoundError if the file couldn't + be located. 
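# Editor's sketch of driving the Enforcer defined above; the rule
# names and credential keys are illustrative assumptions.
from manila.openstack.common import policy

enforcer = policy.Enforcer()
enforcer.set_rules(policy.Rules.load_json(
    '{"admin_or_owner": "is_admin:True or project_id:%(project_id)s",'
    ' "share:delete": "rule:admin_or_owner"}'))

target = {'project_id': 'p-123'}
creds = {'project_id': 'p-123', 'is_admin': False}
# enforce() returns a truthy or falsy result; with do_raise=True it
# raises PolicyNotAuthorized instead of returning False.
assert enforcer.enforce('share:delete', target, creds)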
+ """ + policy_file = CONF.find_file(self.policy_file) + + if policy_file: + return policy_file + + raise cfg.ConfigFilesNotFoundError((self.policy_file,)) + + def enforce(self, rule, target, creds, do_raise=False, + exc=None, *args, **kwargs): + """Checks authorization of a rule against the target and credentials. + + :param rule: A string or BaseCheck instance specifying the rule + to evaluate. + :param target: As much information about the object being operated + on as possible, as a dictionary. + :param creds: As much information about the user performing the + action as possible, as a dictionary. + :param do_raise: Whether to raise an exception or not if check + fails. + :param exc: Class of the exception to raise if the check fails. + Any remaining arguments passed to check() (both + positional and keyword arguments) will be passed to + the exception class. If not specified, PolicyNotAuthorized + will be used. + + :return: Returns False if the policy does not allow the action and + exc is not provided; otherwise, returns a value that + evaluates to True. Note: for rules using the "case" + expression, this True value will be the specified string + from the expression. + """ + + # NOTE(flaper87): Not logging target or creds to avoid + # potential security issues. + LOG.debug("Rule %s will be now enforced" % rule) + + self.load_rules() + + # Allow the rule to be a Check tree + if isinstance(rule, BaseCheck): + result = rule(target, creds, self) + elif not self.rules: + # No rules to reference means we're going to fail closed + result = False + else: + try: + # Evaluate the rule + result = self.rules[rule](target, creds, self) + except KeyError: + LOG.debug("Rule [%s] doesn't exist" % rule) + # If the rule doesn't exist, fail closed + result = False + + # If it is False, raise the exception if requested + if do_raise and not result: + if exc: + raise exc(*args, **kwargs) + + raise PolicyNotAuthorized(rule) + + return result + + +@six.add_metaclass(abc.ABCMeta) +class BaseCheck(object): + """Abstract base class for Check classes.""" + + @abc.abstractmethod + def __str__(self): + """String representation of the Check tree rooted at this node.""" + + pass + + @abc.abstractmethod + def __call__(self, target, cred, enforcer): + """Triggers if instance of the class is called. + + Performs the check. Returns False to reject the access or a + true value (not necessary True) to accept the access. + """ + + pass + + +class FalseCheck(BaseCheck): + """A policy check that always returns False (disallow).""" + + def __str__(self): + """Return a string representation of this check.""" + + return "!" + + def __call__(self, target, cred, enforcer): + """Check the policy.""" + return False -class HttpBrain(Brain): - """A brain that can check external urls for policy. +class TrueCheck(BaseCheck): + """A policy check that always returns True (allow).""" - Posts json blobs for target and credentials. + def __str__(self): + """Return a string representation of this check.""" - Note that this brain is deprecated; the http check is registered - by default. + return "@" + + def __call__(self, target, cred, enforcer): + """Check the policy.""" + + return True + + +class Check(BaseCheck): + """A base class to allow for user-defined policy checks.""" + + def __init__(self, kind, match): + """Initiates Check instance. + + :param kind: The kind of the check, i.e., the field before the + ':'. + :param match: The match of the check, i.e., the field after + the ':'. 
+ """ + + self.kind = kind + self.match = match + + def __str__(self): + """Return a string representation of this check.""" + + return "%s:%s" % (self.kind, self.match) + + +class NotCheck(BaseCheck): + """Implements the "not" logical operator. + + A policy check that inverts the result of another policy check. """ - pass + def __init__(self, rule): + """Initialize the 'not' check. + + :param rule: The rule to negate. Must be a Check. + """ + + self.rule = rule + + def __str__(self): + """Return a string representation of this check.""" + + return "not %s" % self.rule + + def __call__(self, target, cred, enforcer): + """Check the policy. + + Returns the logical inverse of the wrapped check. + """ + + return not self.rule(target, cred, enforcer) + + +class AndCheck(BaseCheck): + """Implements the "and" logical operator. + + A policy check that requires that a list of other checks all return True. + """ + + def __init__(self, rules): + """Initialize the 'and' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' and '.join(str(r) for r in self.rules) + + def __call__(self, target, cred, enforcer): + """Check the policy. + + Requires that all rules accept in order to return True. + """ + + for rule in self.rules: + if not rule(target, cred, enforcer): + return False + + return True + + def add_check(self, rule): + """Adds rule to be tested. + + Allows addition of another rule to the list of rules that will + be tested. Returns the AndCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +class OrCheck(BaseCheck): + """Implements the "or" operator. + + A policy check that requires that at least one of a list of other + checks returns True. + """ + + def __init__(self, rules): + """Initialize the 'or' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' or '.join(str(r) for r in self.rules) + + def __call__(self, target, cred, enforcer): + """Check the policy. + + Requires that at least one rule accept in order to return True. + """ + + for rule in self.rules: + if rule(target, cred, enforcer): + return True + return False + + def add_check(self, rule): + """Adds rule to be tested. + + Allows addition of another rule to the list of rules that will + be tested. Returns the OrCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +def _parse_check(rule): + """Parse a single base check rule into an appropriate Check object.""" + + # Handle the special checks + if rule == '!': + return FalseCheck() + elif rule == '@': + return TrueCheck() + + try: + kind, match = rule.split(':', 1) + except Exception: + LOG.exception(_LE("Failed to understand rule %s") % rule) + # If the rule is invalid, we'll fail closed + return FalseCheck() + + # Find what implements the check + if kind in _checks: + return _checks[kind](kind, match) + elif None in _checks: + return _checks[None](kind, match) + else: + LOG.error(_LE("No handler for matches of kind %s") % kind) + return FalseCheck() + + +def _parse_list_rule(rule): + """Translates the old list-of-lists syntax into a tree of Check objects. + + Provided for backwards compatibility. 
+ """ + + # Empty rule defaults to True + if not rule: + return TrueCheck() + + # Outer list is joined by "or"; inner list by "and" + or_list = [] + for inner_rule in rule: + # Elide empty inner lists + if not inner_rule: + continue + + # Handle bare strings + if isinstance(inner_rule, six.string_types): + inner_rule = [inner_rule] + + # Parse the inner rules into Check objects + and_list = [_parse_check(r) for r in inner_rule] + + # Append the appropriate check to the or_list + if len(and_list) == 1: + or_list.append(and_list[0]) + else: + or_list.append(AndCheck(and_list)) + + # If we have only one check, omit the "or" + if not or_list: + return FalseCheck() + elif len(or_list) == 1: + return or_list[0] + + return OrCheck(or_list) + + +# Used for tokenizing the policy language +_tokenize_re = re.compile(r'\s+') + + +def _parse_tokenize(rule): + """Tokenizer for the policy language. + + Most of the single-character tokens are specified in the + _tokenize_re; however, parentheses need to be handled specially, + because they can appear inside a check string. Thankfully, those + parentheses that appear inside a check string can never occur at + the very beginning or end ("%(variable)s" is the correct syntax). + """ + + for tok in _tokenize_re.split(rule): + # Skip empty tokens + if not tok or tok.isspace(): + continue + + # Handle leading parens on the token + clean = tok.lstrip('(') + for i in range(len(tok) - len(clean)): + yield '(', '(' + + # If it was only parentheses, continue + if not clean: + continue + else: + tok = clean + + # Handle trailing parens on the token + clean = tok.rstrip(')') + trail = len(tok) - len(clean) + + # Yield the cleaned token + lowered = clean.lower() + if lowered in ('and', 'or', 'not'): + # Special tokens + yield lowered, clean + elif clean: + # Not a special token, but not composed solely of ')' + if len(tok) >= 2 and ((tok[0], tok[-1]) in + [('"', '"'), ("'", "'")]): + # It's a quoted string + yield 'string', tok[1:-1] + else: + yield 'check', _parse_check(clean) + + # Yield the trailing parens + for i in range(trail): + yield ')', ')' + + +class ParseStateMeta(type): + """Metaclass for the ParseState class. + + Facilitates identifying reduction methods. + """ + + def __new__(mcs, name, bases, cls_dict): + """Create the class. + + Injects the 'reducers' list, a list of tuples matching token sequences + to the names of the corresponding reduction methods. + """ + + reducers = [] + + for key, value in cls_dict.items(): + if not hasattr(value, 'reducers'): + continue + for reduction in value.reducers: + reducers.append((reduction, key)) + + cls_dict['reducers'] = reducers + + return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) + + +def reducer(*tokens): + """Decorator for reduction methods. + + Arguments are a sequence of tokens, in order, which should trigger running + this reduction method. + """ + + def decorator(func): + # Make sure we have a list of reducer sequences + if not hasattr(func, 'reducers'): + func.reducers = [] + + # Add the tokens to the list of reducer sequences + func.reducers.append(list(tokens)) + + return func + + return decorator + + +@six.add_metaclass(ParseStateMeta) +class ParseState(object): + """Implement the core of parsing the policy language. + + Uses a greedy reduction algorithm to reduce a sequence of tokens into + a single terminal, the value of which will be the root of the Check tree. + + Note: error reporting is rather lacking. 
The best we can get with + this parser formulation is an overall "parse failed" error. + Fortunately, the policy language is simple enough that this + shouldn't be that big a problem. + """ + + def __init__(self): + """Initialize the ParseState.""" + + self.tokens = [] + self.values = [] + + def reduce(self): + """Perform a greedy reduction of the token stream. + + If a reducer method matches, it will be executed, then the + reduce() method will be called recursively to search for any more + possible reductions. + """ + + for reduction, methname in self.reducers: + if (len(self.tokens) >= len(reduction) and + self.tokens[-len(reduction):] == reduction): + # Get the reduction method + meth = getattr(self, methname) + + # Reduce the token stream + results = meth(*self.values[-len(reduction):]) + + # Update the tokens and values + self.tokens[-len(reduction):] = [r[0] for r in results] + self.values[-len(reduction):] = [r[1] for r in results] + + # Check for any more reductions + return self.reduce() + + def shift(self, tok, value): + """Adds one more token to the state. Calls reduce().""" + + self.tokens.append(tok) + self.values.append(value) + + # Do a greedy reduce... + self.reduce() + + @property + def result(self): + """Obtain the final result of the parse. + + Raises ValueError if the parse failed to reduce to a single result. + """ + + if len(self.values) != 1: + raise ValueError("Could not parse rule") + return self.values[0] + + @reducer('(', 'check', ')') + @reducer('(', 'and_expr', ')') + @reducer('(', 'or_expr', ')') + def _wrap_check(self, _p1, check, _p2): + """Turn parenthesized expressions into a 'check' token.""" + + return [('check', check)] + + @reducer('check', 'and', 'check') + def _make_and_expr(self, check1, _and, check2): + """Create an 'and_expr'. + + Join two checks by the 'and' operator. + """ + + return [('and_expr', AndCheck([check1, check2]))] + + @reducer('and_expr', 'and', 'check') + def _extend_and_expr(self, and_expr, _and, check): + """Extend an 'and_expr' by adding one more check.""" + + return [('and_expr', and_expr.add_check(check))] + + @reducer('check', 'or', 'check') + def _make_or_expr(self, check1, _or, check2): + """Create an 'or_expr'. + + Join two checks by the 'or' operator. + """ + + return [('or_expr', OrCheck([check1, check2]))] + + @reducer('or_expr', 'or', 'check') + def _extend_or_expr(self, or_expr, _or, check): + """Extend an 'or_expr' by adding one more check.""" + + return [('or_expr', or_expr.add_check(check))] + + @reducer('not', 'check') + def _make_not_expr(self, _not, check): + """Invert the result of another check.""" + + return [('check', NotCheck(check))] + + +def _parse_text_rule(rule): + """Parses policy to the tree. + + Translates a policy written in the policy language into a tree of + Check objects. + """ + + # Empty rule means always accept + if not rule: + return TrueCheck() + + # Parse the token stream + state = ParseState() + for tok, value in _parse_tokenize(rule): + state.shift(tok, value) + + try: + return state.result + except ValueError: + # Couldn't parse the rule + LOG.exception(_LE("Failed to understand rule %r") % rule) + + # Fail closed + return FalseCheck() + + +def parse_rule(rule): + """Parses a policy rule into a tree of Check objects.""" + + # If the rule is a string, it's in the policy language + if isinstance(rule, six.string_types): + return _parse_text_rule(rule) + return _parse_list_rule(rule) def register(name, func=None): - """ - Register a function as a policy check. 
+ """Register a function or Check class as a policy check. :param name: Gives the name of the check type, e.g., 'rule', - 'role', etc. If name is None, a default function + 'role', etc. If name is None, a default check type will be registered. - :param func: If given, provides the function to register. If not - given, returns a function taking one argument to - specify the function to register, allowing use as a - decorator. + :param func: If given, provides the function or class to register. + If not given, returns a function taking one argument + to specify the function or class to register, + allowing use as a decorator. """ - # Perform the actual decoration by registering the function. - # Returns the function for compliance with the decorator - # interface. + # Perform the actual decoration by registering the function or + # class. Returns the function or class for compliance with the + # decorator interface. def decorator(func): - # Register the function - Brain._register(name, func) + _checks[name] = func return func - # If the function is given, do the registration + # If the function or class is given, do the registration if func: return decorator(func) @@ -247,55 +830,69 @@ def register(name, func=None): @register("rule") -def _check_rule(brain, match_kind, match, target_dict, cred_dict): - """Recursively checks credentials based on the brains rules.""" - try: - new_match_list = brain.rules[match] - except KeyError: - if brain.default_rule and match != brain.default_rule: - new_match_list = ('rule:%s' % brain.default_rule,) - else: - return False +class RuleCheck(Check): + def __call__(self, target, creds, enforcer): + """Recursively checks credentials based on the defined rules.""" - return brain.check(new_match_list, target_dict, cred_dict) + try: + return enforcer.rules[self.match](target, creds, enforcer) + except KeyError: + # We don't have any matching rule; fail closed + return False @register("role") -def _check_role(brain, match_kind, match, target_dict, cred_dict): - """Check that there is a matching role in the cred dict.""" - return match.lower() in [x.lower() for x in cred_dict['roles']] +class RoleCheck(Check): + def __call__(self, target, creds, enforcer): + """Check that there is a matching role in the cred dict.""" + + return self.match.lower() in [x.lower() for x in creds['roles']] @register('http') -def _check_http(brain, match_kind, match, target_dict, cred_dict): - """Check http: rules by calling to a remote server. +class HttpCheck(Check): + def __call__(self, target, creds, enforcer): + """Check http: rules by calling to a remote server. - This example implementation simply verifies that the response is - exactly 'True'. A custom brain using response codes could easily - be implemented. + This example implementation simply verifies that the response + is exactly 'True'. + """ - """ - url = 'http:' + (match % target_dict) - data = {'target': jsonutils.dumps(target_dict), - 'credentials': jsonutils.dumps(cred_dict)} - post_data = urllib.urlencode(data) - f = urllib2.urlopen(url, post_data) - return f.read() == "True" + url = ('http:' + self.match) % target + data = {'target': jsonutils.dumps(target), + 'credentials': jsonutils.dumps(creds)} + post_data = urlparse.urlencode(data) + f = urlrequest.urlopen(url, post_data) + return f.read() == "True" @register(None) -def _check_generic(brain, match_kind, match, target_dict, cred_dict): - """Check an individual match. 
+class GenericCheck(Check): + def __call__(self, target, creds, enforcer): + """Check an individual match. - Matches look like: + Matches look like: - tenant:%(tenant_id)s - role:compute:admin + tenant:%(tenant_id)s + role:compute:admin + True:%(user.enabled)s + 'Member':%(role.name)s + """ - """ + # TODO(termie): do dict inspection via dot syntax + try: + match = self.match % target + except KeyError: + # While doing GenericCheck if key not + # present in Target return false + return False - # TODO(termie): do dict inspection via dot syntax - match = match % target_dict - if match_kind in cred_dict: - return match == unicode(cred_dict[match_kind]) - return False + try: + # Try to interpret self.kind as a literal + leftval = ast.literal_eval(self.kind) + except ValueError: + try: + leftval = creds[self.kind] + except KeyError: + return False + return match == six.text_type(leftval) diff --git a/manila/openstack/common/processutils.py b/manila/openstack/common/processutils.py index ec7a0c2f53..e7df54676e 100644 --- a/manila/openstack/common/processutils.py +++ b/manila/openstack/common/processutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -19,6 +17,8 @@ System-level utilities and helper functions. """ +import errno +import logging as stdlib_logging import os import random import shlex @@ -26,6 +26,7 @@ import signal from eventlet.green import subprocess from eventlet import greenthread +import six from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging @@ -34,6 +35,11 @@ from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) +class InvalidArgumentError(Exception): + def __init__(self, message=None): + super(InvalidArgumentError, self).__init__(message) + + class UnknownArgumentError(Exception): def __init__(self, message=None): super(UnknownArgumentError, self).__init__(message) @@ -49,11 +55,18 @@ class ProcessExecutionError(Exception): self.description = description if description is None: - description = "Unexpected error while running command." + description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' - message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" - % (description, cmd, exit_code, stdout, stderr)) + message = _('%(description)s\n' + 'Command: %(cmd)s\n' + 'Exit code: %(exit_code)s\n' + 'Stdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % {'description': description, + 'cmd': cmd, + 'exit_code': exit_code, + 'stdout': stdout, + 'stderr': stderr} super(ProcessExecutionError, self).__init__(message) @@ -69,14 +82,17 @@ def _subprocess_setup(): def execute(*cmd, **kwargs): - """ - Helper method to shell out and execute a command through subprocess with - optional retry. + """Helper method to shell out and execute a command through subprocess. + + Allows optional retry. :param cmd: Passed to subprocess.Popen. :type cmd: string :param process_input: Send to opened process. - :type proces_input: string + :type process_input: string + :param env_variables: Environment variables and their values that + will be set for the process. + :type env_variables: dict :param check_exit_code: Single bool, int, or list of allowed exit codes. Defaults to [0]. Raise :class:`ProcessExecutionError` unless @@ -97,6 +113,9 @@ def execute(*cmd, **kwargs): :param shell: whether or not there should be a shell used to execute this command. Defaults to false. 
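
To illustrate the two-step comparison GenericCheck performs above (a standalone, runnable sketch; the data values are made up):

    # Sketch only -- not part of this patch.
    import ast

    kind, match = 'True', '%(user.enabled)s'   # from 'True:%(user.enabled)s'
    target = {'user.enabled': True}
    creds = {}

    right = match % target              # -> 'True'
    try:
        left = ast.literal_eval(kind)   # 'True' is a literal -> True
    except ValueError:
        left = creds.get(kind)          # otherwise fall back to creds
    assert right == str(left)           # the check passes
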
:type shell: boolean + :param loglevel: log level for execute commands. + :type loglevel: int. (Should be stdlib_logging.DEBUG or + stdlib_logging.INFO) :returns: (stdout, stderr) from process execution :raises: :class:`UnknownArgumentError` on receiving unknown arguments @@ -104,6 +123,7 @@ def execute(*cmd, **kwargs): """ process_input = kwargs.pop('process_input', None) + env_variables = kwargs.pop('env_variables', None) check_exit_code = kwargs.pop('check_exit_code', [0]) ignore_exit_code = False delay_on_retry = kwargs.pop('delay_on_retry', True) @@ -111,6 +131,7 @@ def execute(*cmd, **kwargs): run_as_root = kwargs.pop('run_as_root', False) root_helper = kwargs.pop('root_helper', '') shell = kwargs.pop('shell', False) + loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG) if isinstance(check_exit_code, bool): ignore_exit_code = not check_exit_code @@ -118,15 +139,15 @@ def execute(*cmd, **kwargs): elif isinstance(check_exit_code, int): check_exit_code = [check_exit_code] - if len(kwargs): + if kwargs: raise UnknownArgumentError(_('Got unknown keyword args ' 'to utils.execute: %r') % kwargs) - if run_as_root and os.geteuid() != 0: + if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: if not root_helper: raise NoRootWrapSpecified( - message=('Command requested root, but did not specify a root ' - 'helper.')) + message=_('Command requested root, but did not ' + 'specify a root helper.')) cmd = shlex.split(root_helper) + list(cmd) cmd = map(str, cmd) @@ -134,7 +155,8 @@ def execute(*cmd, **kwargs): while attempts > 0: attempts -= 1 try: - LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) + LOG.log(loglevel, 'Running cmd (subprocess): %s', + ' '.join(logging.mask_password(cmd))) _PIPE = subprocess.PIPE # pylint: disable=E1101 if os.name == 'nt': @@ -150,28 +172,37 @@ def execute(*cmd, **kwargs): stderr=_PIPE, close_fds=close_fds, preexec_fn=preexec_fn, - shell=shell) + shell=shell, + env=env_variables) result = None - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() + for _i in six.moves.range(20): + # NOTE(russellb) 20 is an arbitrary number of retries to + # prevent any chance of looping forever here. + try: + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + except OSError as e: + if e.errno in (errno.EAGAIN, errno.EINTR): + continue + raise + break obj.stdin.close() # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101 - if _returncode: - LOG.debug(_('Result was %s') % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - raise ProcessExecutionError(exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) + LOG.log(loglevel, 'Result was %s' % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) return result except ProcessExecutionError: if not attempts: raise else: - LOG.debug(_('%r failed. Retrying.'), cmd) + LOG.log(loglevel, '%r failed. 
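
A hedged usage sketch of the extended execute() (the rootwrap command line and mount arguments are illustrative, not prescribed by this patch):

    # Sketch only -- not part of this patch.
    from manila.openstack.common import processutils

    stdout, stderr = processutils.execute(
        'mount', '-t', 'nfs', 'host:/export', '/mnt/share',
        run_as_root=True,
        root_helper='sudo manila-rootwrap /etc/manila/rootwrap.conf',
        check_exit_code=[0],
        attempts=3,                     # retried with a short random delay
        env_variables={'LC_ALL': 'C'})
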
Retrying.', cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: @@ -179,3 +210,63 @@ def execute(*cmd, **kwargs): # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) + + +def trycmd(*args, **kwargs): + """A wrapper around execute() to more easily handle warnings and errors. + + Returns an (out, err) tuple of strings containing the output of + the command's stdout and stderr. If 'err' is not empty then the + command can be considered to have failed. + + :discard_warnings True | False. Defaults to False. If set to True, + then for succeeding commands, stderr is cleared + + """ + discard_warnings = kwargs.pop('discard_warnings', False) + + try: + out, err = execute(*args, **kwargs) + failed = False + except ProcessExecutionError as exn: + out, err = '', six.text_type(exn) + failed = True + + if not failed and discard_warnings and err: + # Handle commands that output to stderr but otherwise succeed + err = '' + + return out, err + + +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug('Running cmd (SSH): %s', cmd) + if addl_env: + raise InvalidArgumentError(_('Environment not supported over SSH')) + + if process_input: + # This is (probably) fixable if we need it... + raise InvalidArgumentError(_('process_input not supported over SSH')) + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug('Result was %s' % exit_status) + if check_exit_code and exit_status != 0: + raise ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + + return (stdout, stderr) diff --git a/manila/openstack/common/rpc/__init__.py b/manila/openstack/common/rpc/__init__.py index 606b257e9d..e72c211ff5 100644 --- a/manila/openstack/common/rpc/__init__.py +++ b/manila/openstack/common/rpc/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -25,14 +23,10 @@ For some wrappers that add message versioning to rpc, see: rpc.proxy """ -import inspect -import logging - from oslo.config import cfg -from manila.openstack.common.gettextutils import _ from manila.openstack.common import importutils -from manila.openstack.common import local +from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) @@ -56,13 +50,12 @@ rpc_opts = [ help='Seconds to wait before a cast expires (TTL). 
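
trycmd() below folds failures into the returned (out, err) pair instead of raising; for example (sketch; the command and handler are illustrative):

    # Sketch only -- not part of this patch.
    out, err = processutils.trycmd('cat', '/nonexistent/file',
                                   discard_warnings=True)
    if err:
        # err holds the stringified ProcessExecutionError; out is ''.
        handle_failure(err)             # hypothetical error handler
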
' 'Only supported by impl_zmq.'), cfg.ListOpt('allowed_rpc_exception_modules', - default=['manila.openstack.common.exception', - 'nova.exception', - 'manila.exception', + default=['nova.exception', + 'cinder.exception', 'exceptions', ], help='Modules of exceptions that are permitted to be recreated' - 'upon receiving exception data from an rpc call.'), + ' upon receiving exception data from an rpc call.'), cfg.BoolOpt('fake_rabbit', default=False, help='If passed, use a fake RabbitMQ provider'), @@ -96,24 +89,7 @@ def create_connection(new=True): return _get_impl().create_connection(CONF, new=new) -def _check_for_lock(): - if not CONF.debug: - return None - - if ((hasattr(local.strong_store, 'locks_held') - and local.strong_store.locks_held)): - stack = ' :: '.join([frame[3] for frame in inspect.stack()]) - LOG.warn(_('A RPC is being made while holding a lock. The locks ' - 'currently held are %(locks)s. This is probably a bug. ' - 'Please report it. Include the following: [%(stack)s].'), - {'locks': local.strong_store.locks_held, - 'stack': stack}) - return True - - return False - - -def call(context, topic, msg, timeout=None, check_for_lock=False): +def call(context, topic, msg, timeout=None): """Invoke a remote method that returns something. :param context: Information that identifies the user that has made this @@ -127,16 +103,12 @@ def call(context, topic, msg, timeout=None, check_for_lock=False): "args" : dict_of_kwargs } :param timeout: int, number of seconds to use for a response timeout. If set, this overrides the rpc_response_timeout option. - :param check_for_lock: if True, a warning is emitted if a RPC call is made - with a lock held. :returns: A dict from the remote method. :raises: openstack.common.rpc.common.Timeout if a complete response is not received before the timeout is reached. """ - if check_for_lock: - _check_for_lock() return _get_impl().call(CONF, context, topic, msg, timeout) @@ -179,7 +151,7 @@ def fanout_cast(context, topic, msg): return _get_impl().fanout_cast(CONF, context, topic, msg) -def multicall(context, topic, msg, timeout=None, check_for_lock=False): +def multicall(context, topic, msg, timeout=None): """Invoke a remote method and get back an iterator. In this case, the remote method will be returning multiple values in @@ -197,8 +169,6 @@ def multicall(context, topic, msg, timeout=None, check_for_lock=False): "args" : dict_of_kwargs } :param timeout: int, number of seconds to use for a response timeout. If set, this overrides the rpc_response_timeout option. - :param check_for_lock: if True, a warning is emitted if a RPC call is made - with a lock held. :returns: An iterator. The iterator will yield a tuple (N, X) where N is an index that starts at 0 and increases by one for each value @@ -208,8 +178,6 @@ def multicall(context, topic, msg, timeout=None, check_for_lock=False): :raises: openstack.common.rpc.common.Timeout if a complete response is not received before the timeout is reached. """ - if check_for_lock: - _check_for_lock() return _get_impl().multicall(CONF, context, topic, msg, timeout) @@ -228,7 +196,7 @@ def notify(context, topic, msg, envelope=False): def cleanup(): - """Clean up resoruces in use by implementation. + """Clean up resources in use by implementation. Clean up any resources that have been allocated by the RPC implementation. This is typically open connections to a messaging service. 
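
With check_for_lock gone, callers of call() and multicall() pass only context, topic, message, and an optional timeout (sketch; topic, method, and version values are illustrative):

    # Sketch only -- not part of this patch.
    from manila.openstack.common import rpc

    result = rpc.call(context, 'manila-share',
                      {'method': 'create_share',
                       'args': {'share_id': 'some-id'},
                       'version': '1.0'},
                      timeout=60)
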
This function diff --git a/manila/openstack/common/rpc/amqp.py b/manila/openstack/common/rpc/amqp.py index f6b208bc15..2dd3f1f8b3 100644 --- a/manila/openstack/common/rpc/amqp.py +++ b/manila/openstack/common/rpc/amqp.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -20,9 +18,9 @@ """ Shared code between AMQP based openstack.common.rpc implementations. -The code in this module is shared between the rpc implemenations based on AMQP. -Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses -AMQP, but is deprecated and predates this code. +The code in this module is shared between the rpc implementations based on +AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also +uses AMQP, but is deprecated and predates this code. """ import collections @@ -34,24 +32,26 @@ from eventlet import greenpool from eventlet import pools from eventlet import queue from eventlet import semaphore -# TODO(pekowsk): Remove import cfg and below comment in Havana. -# This import should no longer be needed when the amqp_rpc_single_reply_queue -# option is removed. from oslo.config import cfg +import six + from manila.openstack.common import excutils -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _, _LE from manila.openstack.common import local from manila.openstack.common import log as logging from manila.openstack.common.rpc import common as rpc_common -# TODO(pekowski): Remove this option in Havana. amqp_opts = [ - cfg.BoolOpt('amqp_rpc_single_reply_queue', + cfg.BoolOpt('amqp_durable_queues', default=False, - help='Enable a fast single reply queue if using AMQP based ' - 'RPC like RabbitMQ or Qpid.'), + deprecated_name='rabbit_durable_queues', + deprecated_group='DEFAULT', + help='Use durable queues in amqp.'), + cfg.BoolOpt('amqp_auto_delete', + default=False, + help='Auto-delete queues in amqp.'), ] cfg.CONF.register_opts(amqp_opts) @@ -72,7 +72,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug(_('Pool creating new connection')) + LOG.debug('Pool creating new connection') return self.connection_cls(self.conf) def empty(self): @@ -83,7 +83,7 @@ class Pool(pools.Pool): # is the above "while loop" gets all the cached connections from the # pool and closes them, but never returns them to the pool, a pool # leak. The unit tests hang waiting for an item to be returned to the - # pool. The unit tests get here via the teatDown() method. In the run + # pool. The unit tests get here via the tearDown() method. In the run # time code, it gets here via cleanup() and only appears in service.py # just before doing a sys.exit(), so cleanup() only happens once and # the leakage is not a problem. @@ -102,19 +102,19 @@ def get_connection_pool(conf, connection_cls): class ConnectionContext(rpc_common.Connection): - """The class that is actually returned to the caller of - create_connection(). This is essentially a wrapper around - Connection that supports 'with'. It can also return a new - Connection, or one from a pool. The function will also catch - when an instance of this class is to be deleted. With that - we can return Connections to the pool on exceptions and so - forth without making the caller be responsible for catching - them. 
If possible the function makes sure to return a - connection to the pool. + """The class that is actually returned to the create_connection() caller. + + This is essentially a wrapper around Connection that supports 'with'. + It can also return a new Connection, or one from a pool. + + The function will also catch when an instance of this class is to be + deleted. With that we can return Connections to the pool on exceptions + and so forth without making the caller be responsible for catching them. + If possible the function makes sure to return a connection to the pool. """ def __init__(self, conf, connection_pool, pooled=True, server_params=None): - """Create a new connection, or get one from the pool""" + """Create a new connection, or get one from the pool.""" self.connection = None self.conf = conf self.connection_pool = connection_pool @@ -127,7 +127,7 @@ class ConnectionContext(rpc_common.Connection): self.pooled = pooled def __enter__(self): - """When with ConnectionContext() is used, return self""" + """When with ConnectionContext() is used, return self.""" return self def _done(self): @@ -165,17 +165,19 @@ class ConnectionContext(rpc_common.Connection): def create_worker(self, topic, proxy, pool_name): self.connection.create_worker(topic, proxy, pool_name) - def join_consumer_pool(self, callback, pool_name, topic, exchange_name): + def join_consumer_pool(self, callback, pool_name, topic, exchange_name, + ack_on_error=True): self.connection.join_consumer_pool(callback, pool_name, topic, - exchange_name) + exchange_name, + ack_on_error) def consume_in_thread(self): - self.connection.consume_in_thread() + return self.connection.consume_in_thread() def __getattr__(self, key): - """Proxy all other calls to the Connection instance""" + """Proxy all other calls to the Connection instance.""" if self.connection: return getattr(self.connection, key) else: @@ -183,11 +185,11 @@ class ConnectionContext(rpc_common.Connection): class ReplyProxy(ConnectionContext): - """ Connection class for RPC replies / callbacks """ + """Connection class for RPC replies / callbacks.""" def __init__(self, conf, connection_pool): self._call_waiters = {} self._num_call_waiters = 0 - self._num_call_waiters_wrn_threshhold = 10 + self._num_call_waiters_wrn_threshold = 10 self._reply_q = 'reply_' + uuid.uuid4().hex super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False) self.declare_direct_consumer(self._reply_q, self._process_data) @@ -197,18 +199,20 @@ class ReplyProxy(ConnectionContext): msg_id = message_data.pop('_msg_id', None) waiter = self._call_waiters.get(msg_id) if not waiter: - LOG.warn(_('no calling threads waiting for msg_id : %s' - ', message : %s') % (msg_id, message_data)) + LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s' + ', message : %(data)s'), {'msg_id': msg_id, + 'data': message_data}) + LOG.warn(_('_call_waiters: %s') % self._call_waiters) else: waiter.put(message_data) def add_call_waiter(self, waiter, msg_id): self._num_call_waiters += 1 - if self._num_call_waiters > self._num_call_waiters_wrn_threshhold: + if self._num_call_waiters > self._num_call_waiters_wrn_threshold: LOG.warn(_('Number of call waiters is greater than warning ' - 'threshhold: %d. There could be a MulticallProxyWaiter ' - 'leak.') % self._num_call_waiters_wrn_threshhold) - self._num_call_waiters_wrn_threshhold *= 2 + 'threshold: %d. 
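
ConnectionContext is what makes the 'with' pattern used throughout this module safe; a sketch (conf, connection_pool, and msg are assumed to exist, and the put-back behavior follows the class docstring above):

    # Sketch only -- not part of this patch.
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send('manila-share', rpc_common.serialize_msg(msg))
    # On exit, a pooled connection goes back to the pool even if the
    # body raised; a non-pooled one (pooled=False) is closed instead.
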
There could be a MulticallProxyWaiter ' + 'leak.') % self._num_call_waiters_wrn_threshold) + self._num_call_waiters_wrn_threshold *= 2 self._call_waiters[msg_id] = waiter def del_call_waiter(self, msg_id): @@ -231,18 +235,13 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, failure = rpc_common.serialize_remote_exception(failure, log_failure) - try: - msg = {'result': reply, 'failure': failure} - except TypeError: - msg = {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure} + msg = {'result': reply, 'failure': failure} if ending: msg['ending'] = True _add_unique_id(msg) # If a reply_q exists, add the msg_id to the reply and pass the # reply_q to direct_send() to use it as the response queue. - # Otherwise use the msg_id for backward compatibilty. + # Otherwise use the msg_id for backward compatibility. if reply_q: msg['_msg_id'] = msg_id conn.direct_send(reply_q, rpc_common.serialize_msg(msg)) @@ -251,7 +250,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, class RpcContext(rpc_common.CommonRpcContext): - """Context that supports replying to a rpc.call""" + """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.reply_q = kwargs.pop('reply_q', None) @@ -288,7 +287,7 @@ def unpack_context(conf, msg): context_dict['reply_q'] = msg.pop('_reply_q', None) context_dict['conf'] = conf ctx = RpcContext.from_dict(context_dict) - rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) + rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict()) return ctx @@ -301,8 +300,14 @@ def pack_context(msg, context): for args at some point. """ - context_d = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) + if isinstance(context, dict): + context_d = dict([('_context_%s' % key, value) + for (key, value) in six.iteritems(context)]) + else: + context_d = dict([('_context_%s' % key, value) + for (key, value) in + six.iteritems(context.to_dict())]) + msg.update(context_d) @@ -334,12 +339,13 @@ def _add_unique_id(msg): """Add unique_id for checking duplicate messages.""" unique_id = uuid.uuid4().hex msg.update({UNIQUE_ID: unique_id}) - LOG.debug(_('UNIQUE_ID is %s.') % (unique_id)) + LOG.debug('UNIQUE_ID is %s.' % (unique_id)) class _ThreadPoolWithWait(object): - """Base class for a delayed invocation manager used by - the Connection class to start up green threads + """Base class for a delayed invocation manager. + + Used by the Connection class to start up green threads to handle incoming messages. """ @@ -354,25 +360,48 @@ class _ThreadPoolWithWait(object): class CallbackWrapper(_ThreadPoolWithWait): - """Wraps a straight callback to allow it to be invoked in a green - thread. + """Wraps a straight callback. + + Allows it to be invoked in a green thread. """ - def __init__(self, conf, callback, connection_pool): - """ + def __init__(self, conf, callback, connection_pool, + wait_for_consumers=False): + """Initiates CallbackWrapper object. + :param conf: cfg.CONF instance :param callback: a callable (probably a function) :param connection_pool: connection pool as returned by get_connection_pool() + :param wait_for_consumers: wait for all green threads to + complete and raise the last + caught exception, if any. 
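
pack_context() now accepts either a plain dict or a context object; in both cases the keys are prefixed so unpack_context() can strip them back out on the receiving side (sketch):

    # Sketch only -- not part of this patch.
    msg = {'method': 'ping', 'args': {}}
    pack_context(msg, {'user': 'u1', 'tenant': 't1'})
    # msg -> {'method': 'ping', 'args': {},
    #         '_context_user': 'u1', '_context_tenant': 't1'}
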
+ """ super(CallbackWrapper, self).__init__( conf=conf, connection_pool=connection_pool, ) self.callback = callback + self.wait_for_consumers = wait_for_consumers + self.exc_info = None + + def _wrap(self, message_data, **kwargs): + """Wrap the callback invocation to catch exceptions. + """ + try: + self.callback(message_data, **kwargs) + except Exception: + self.exc_info = sys.exc_info() def __call__(self, message_data): - self.pool.spawn_n(self.callback, message_data) + self.exc_info = None + self.pool.spawn_n(self._wrap, message_data) + + if self.wait_for_consumers: + self.pool.waitall() + if self.exc_info: + six.reraise(self.exc_info[1], None, self.exc_info[2]) class ProxyCallback(_ThreadPoolWithWait): @@ -403,7 +432,7 @@ class ProxyCallback(_ThreadPoolWithWait): # the previous context is stored in local.store.context if hasattr(local.store, 'context'): del local.store.context - rpc_common._safe_log(LOG.debug, _('received %s'), message_data) + rpc_common._safe_log(LOG.debug, 'received %s', message_data) self.msg_id_cache.check_duplicate_message(message_data) ctxt = unpack_context(self.conf, message_data) method = message_data.get('method') @@ -440,7 +469,7 @@ class ProxyCallback(_ThreadPoolWithWait): # This final None tells multicall that it is done. ctxt.reply(ending=True, connection_pool=self.connection_pool) except rpc_common.ClientException as e: - LOG.debug(_('Expected exception during message handling (%s)') % + LOG.debug('Expected exception during message handling (%s)' % e._exc_info[1]) ctxt.reply(None, e._exc_info, connection_pool=self.connection_pool, @@ -448,7 +477,7 @@ class ProxyCallback(_ThreadPoolWithWait): except Exception: # sys.exc_info() is deleted by LOG.exception(). exc_info = sys.exc_info() - LOG.error(_('Exception during message handling'), + LOG.error(_LE('Exception during message handling'), exc_info=exc_info) ctxt.reply(None, exc_info, connection_pool=self.connection_pool) @@ -490,7 +519,7 @@ class MulticallProxyWaiter(object): return result def __iter__(self): - """Return a result until we get a reply with an 'ending" flag""" + """Return a result until we get a reply with an 'ending' flag.""" if self._done: raise StopIteration while True: @@ -512,61 +541,8 @@ class MulticallProxyWaiter(object): yield result -#TODO(pekowski): Remove MulticallWaiter() in Havana. -class MulticallWaiter(object): - def __init__(self, conf, connection, timeout): - self._connection = connection - self._iterator = connection.iterconsume(timeout=timeout or - conf.rpc_response_timeout) - self._result = None - self._done = False - self._got_ending = False - self._conf = conf - self.msg_id_cache = _MsgIdCache() - - def done(self): - if self._done: - return - self._done = True - self._iterator.close() - self._iterator = None - self._connection.close() - - def __call__(self, data): - """The consume() callback will call this. 
Store the result.""" - self.msg_id_cache.check_duplicate_message(data) - if data['failure']: - failure = data['failure'] - self._result = rpc_common.deserialize_remote_exception(self._conf, - failure) - - elif data.get('ending', False): - self._got_ending = True - else: - self._result = data['result'] - - def __iter__(self): - """Return a result until we get a 'None' response from consumer""" - if self._done: - raise StopIteration - while True: - try: - self._iterator.next() - except Exception: - with excutils.save_and_reraise_exception(): - self.done() - if self._got_ending: - self.done() - raise StopIteration - result = self._result - if isinstance(result, Exception): - self.done() - raise result - yield result - - def create_connection(conf, new, connection_pool): - """Create a connection""" + """Create a connection.""" return ConnectionContext(conf, connection_pool, pooled=not new) @@ -575,36 +551,20 @@ _reply_proxy_create_sem = semaphore.Semaphore() def multicall(conf, context, topic, msg, timeout, connection_pool): """Make a call that returns multiple times.""" - # TODO(pekowski): Remove all these comments in Havana. - # For amqp_rpc_single_reply_queue = False, - # Can't use 'with' for multicall, as it returns an iterator - # that will continue to use the connection. When it's done, - # connection.close() will get called which will put it back into - # the pool - # For amqp_rpc_single_reply_queue = True, - # The 'with' statement is mandatory for closing the connection - LOG.debug(_('Making synchronous call on %s ...'), topic) + LOG.debug('Making synchronous call on %s ...', topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug(_('MSG_ID is %s') % (msg_id)) + LOG.debug('MSG_ID is %s' % (msg_id)) _add_unique_id(msg) pack_context(msg, context) - # TODO(pekowski): Remove this flag and the code under the if clause - # in Havana. 
- if not conf.amqp_rpc_single_reply_queue: - conn = ConnectionContext(conf, connection_pool) - wait_msg = MulticallWaiter(conf, conn, timeout) - conn.declare_direct_consumer(msg_id, wait_msg) + with _reply_proxy_create_sem: + if not connection_pool.reply_proxy: + connection_pool.reply_proxy = ReplyProxy(conf, connection_pool) + msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()}) + wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool) + with ConnectionContext(conf, connection_pool) as conn: conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) - else: - with _reply_proxy_create_sem: - if not connection_pool.reply_proxy: - connection_pool.reply_proxy = ReplyProxy(conf, connection_pool) - msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()}) - wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool) - with ConnectionContext(conf, connection_pool) as conn: - conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) return wait_msg @@ -620,7 +580,7 @@ def call(conf, context, topic, msg, timeout, connection_pool): def cast(conf, context, topic, msg, connection_pool): """Sends a message on a topic without waiting for a response.""" - LOG.debug(_('Making asynchronous cast on %s...'), topic) + LOG.debug('Making asynchronous cast on %s...', topic) _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: @@ -629,7 +589,7 @@ def cast(conf, context, topic, msg, connection_pool): def fanout_cast(conf, context, topic, msg, connection_pool): """Sends a message on a fanout exchange without waiting for a response.""" - LOG.debug(_('Making asynchronous fanout cast...')) + LOG.debug('Making asynchronous fanout cast...') _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: @@ -657,7 +617,7 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg, def notify(conf, context, topic, msg, connection_pool, envelope): """Sends a notification event on a topic.""" - LOG.debug(_('Sending %(event_type)s on %(topic)s'), + LOG.debug('Sending %(event_type)s on %(topic)s', dict(event_type=msg.get('event_type'), topic=topic)) _add_unique_id(msg) diff --git a/manila/openstack/common/rpc/common.py b/manila/openstack/common/rpc/common.py index 980e602907..466569b16e 100644 --- a/manila/openstack/common/rpc/common.py +++ b/manila/openstack/common/rpc/common.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -22,18 +20,21 @@ import sys import traceback from oslo.config import cfg +import six -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _, _LE from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila.openstack.common import local from manila.openstack.common import log as logging +from manila.openstack.common import versionutils CONF = cfg.CONF LOG = logging.getLogger(__name__) +_RPC_ENVELOPE_VERSION = '2.0' '''RPC Envelope Version. This version number applies to the top level structure of messages sent out. @@ -46,7 +47,7 @@ This version number applies to the message envelope that is used in the serialization done inside the rpc layer. See serialize_msg() and deserialize_msg(). -The current message format (version 2.0) is very simple. 
It is: +The current message format (version 2.0) is very simple. It is:: { 'oslo.version': , @@ -64,30 +65,31 @@ We will JSON encode the application message payload. The message envelope, which includes the JSON encoded application message body, will be passed down to the messaging libraries as a dict. ''' -_RPC_ENVELOPE_VERSION = '2.0' _VERSION_KEY = 'oslo.version' _MESSAGE_KEY = 'oslo.message' +_REMOTE_POSTFIX = '_Remote' + class RPCException(Exception): - message = _("An unknown RPC related exception occurred.") + msg_fmt = _("An unknown RPC related exception occurred.") def __init__(self, message=None, **kwargs): self.kwargs = kwargs if not message: try: - message = self.message % kwargs + message = self.msg_fmt % kwargs except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs - LOG.exception(_('Exception in string format operation')) - for name, value in kwargs.iteritems(): + LOG.exception(_LE('Exception in string format operation')) + for name, value in six.iteritems(kwargs): LOG.error("%s: %s" % (name, value)) # at least get the core message out if something happened - message = self.message + message = self.msg_fmt super(RPCException, self).__init__(message) @@ -101,7 +103,7 @@ class RemoteError(RPCException): contains all of the relevant info. """ - message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.") + msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.") def __init__(self, exc_type=None, value=None, traceback=None): self.exc_type = exc_type @@ -118,12 +120,13 @@ class Timeout(RPCException): This exception is raised if the rpc_response_timeout is reached while waiting for a response from the remote side. """ - message = _('Timeout while waiting on RPC response - ' + msg_fmt = _('Timeout while waiting on RPC response - ' 'topic: "%(topic)s", RPC method: "%(method)s" ' 'info: "%(info)s"') def __init__(self, info=None, topic=None, method=None): - """ + """Initiates Timeout object. + :param info: Extra info to convey to the user :param topic: The topic that the rpc call was sent to :param rpc_method_name: The name of the rpc method being @@ -140,23 +143,27 @@ class Timeout(RPCException): class DuplicateMessageError(RPCException): - message = _("Found duplicate message(%(msg_id)s). Skipping it.") + msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.") class InvalidRPCConnectionReuse(RPCException): - message = _("Invalid reuse of an RPC connection.") + msg_fmt = _("Invalid reuse of an RPC connection.") class UnsupportedRpcVersion(RPCException): - message = _("Specified RPC version, %(version)s, not supported by " + msg_fmt = _("Specified RPC version, %(version)s, not supported by " "this endpoint.") class UnsupportedRpcEnvelopeVersion(RPCException): - message = _("Specified RPC envelope version, %(version)s, " + msg_fmt = _("Specified RPC envelope version, %(version)s, " "not supported by this endpoint.") +class RpcVersionCapError(RPCException): + msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low") + + class Connection(object): """A connection, returned by rpc.create_connection(). @@ -216,9 +223,9 @@ class Connection(object): raise NotImplementedError() def join_consumer_pool(self, callback, pool_name, topic, exchange_name): - """Register as a member of a group of consumers for a given topic from - the specified exchange. + """Register as a member of a group of consumers. + Uses given topic from the specified exchange. Exactly one member of a given pool will receive each message. 
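
Before a payload reaches the wire, multicall() has stamped it with '_msg_id', '_reply_q', and a unique id, and serialize_msg() (defined later in this file) wraps the result in the envelope described above; roughly (sketch, with made-up id values):

    # Sketch only -- not part of this patch.
    serialize_msg({'method': 'ping', 'args': {},
                   '_msg_id': '6f8c...', '_reply_q': 'reply_ab12...'})
    # -> {'oslo.version': '2.0',
    #     'oslo.message': '{"method": "ping", ...}'}
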
A message will be delivered to multiple pools, if more than @@ -253,41 +260,24 @@ class Connection(object): def _safe_log(log_func, msg, msg_data): """Sanitizes the msg_data field before logging.""" - SANITIZE = {'set_admin_password': [('args', 'new_pass')], - 'run_instance': [('args', 'admin_password')], - 'route_message': [('args', 'message', 'args', 'method_info', - 'method_kwargs', 'password'), - ('args', 'message', 'args', 'method_info', - 'method_kwargs', 'admin_password')]} + SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass'] - has_method = 'method' in msg_data and msg_data['method'] in SANITIZE - has_context_token = '_context_auth_token' in msg_data - has_token = 'auth_token' in msg_data + def _fix_passwords(d): + """Sanitizes the password fields in the dictionary.""" + for k in six.iterkeys(d): + if k.lower().find('password') != -1: + d[k] = '' + elif k.lower() in SANITIZE: + d[k] = '' + elif isinstance(d[k], list): + for e in d[k]: + if isinstance(e, dict): + _fix_passwords(e) + elif isinstance(d[k], dict): + _fix_passwords(d[k]) + return d - if not any([has_method, has_context_token, has_token]): - return log_func(msg, msg_data) - - msg_data = copy.deepcopy(msg_data) - - if has_method: - for arg in SANITIZE.get(msg_data['method'], []): - try: - d = msg_data - for elem in arg[:-1]: - d = d[elem] - d[arg[-1]] = '' - except KeyError, e: - LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'), - {'item': arg, - 'err': e}) - - if has_context_token: - msg_data['_context_auth_token'] = '' - - if has_token: - msg_data['auth_token'] = '' - - return log_func(msg, msg_data) + return log_func(msg, _fix_passwords(copy.deepcopy(msg_data))) def serialize_remote_exception(failure_info, log_failure=True): @@ -299,17 +289,27 @@ def serialize_remote_exception(failure_info, log_failure=True): tb = traceback.format_exception(*failure_info) failure = failure_info[1] if log_failure: - LOG.error(_("Returning exception %s to caller"), unicode(failure)) + LOG.error(_LE("Returning exception %s to caller"), + six.text_type(failure)) LOG.error(tb) kwargs = {} if hasattr(failure, 'kwargs'): kwargs = failure.kwargs + # NOTE(matiu): With cells, it's possible to re-raise remote, remote + # exceptions. Lets turn it back into the original exception type. + cls_name = str(failure.__class__.__name__) + mod_name = str(failure.__class__.__module__) + if (cls_name.endswith(_REMOTE_POSTFIX) and + mod_name.endswith(_REMOTE_POSTFIX)): + cls_name = cls_name[:-len(_REMOTE_POSTFIX)] + mod_name = mod_name[:-len(_REMOTE_POSTFIX)] + data = { - 'class': str(failure.__class__.__name__), - 'module': str(failure.__class__.__module__), - 'message': unicode(failure), + 'class': cls_name, + 'module': mod_name, + 'message': six.text_type(failure), 'tb': tb, 'args': failure.args, 'kwargs': kwargs @@ -345,8 +345,9 @@ def deserialize_remote_exception(conf, data): ex_type = type(failure) str_override = lambda self: message - new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), + new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), {'__str__': str_override, '__unicode__': str_override}) + new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) try: # NOTE(ameade): Dynamically create a new exception type and swap it in # as the new type for the exception. This only works on user defined @@ -408,10 +409,11 @@ class CommonRpcContext(object): class ClientException(Exception): - """This encapsulates some actual exception that is expected to be - hit by an RPC proxy object. 
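
The rewritten _safe_log() below replaces the old per-method table with a recursive scrub: any key containing 'password', plus the fixed token keys, is masked at any nesting depth (sketch):

    # Sketch only -- not part of this patch.
    msg_data = {'method': 'run_instance',
                'args': {'admin_password': 'hunter2',
                         'nodes': [{'ipmi_password': 'x'}]},
                '_context_auth_token': 'tok'}
    _safe_log(LOG.debug, 'received %s', msg_data)
    # In the logged copy, admin_password, ipmi_password, and
    # _context_auth_token are all replaced by the sanitizer's
    # placeholder; the caller's dict is untouched (deepcopy).
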
Merely instantiating it records the - current exception information, which will be passed back to the - RPC client without exceptional logging.""" + """Encapsulates actual exception expected to be hit by a RPC proxy object. + + Merely instantiating it records the current exception information, which + will be passed back to the RPC client without exceptional logging. + """ def __init__(self): self._exc_info = sys.exc_info() @@ -419,7 +421,7 @@ class ClientException(Exception): def catch_client_exception(exceptions, func, *args, **kwargs): try: return func(*args, **kwargs) - except Exception, e: + except Exception as e: if type(e) in exceptions: raise ClientException() else: @@ -428,11 +430,13 @@ def catch_client_exception(exceptions, func, *args, **kwargs): def client_exceptions(*exceptions): """Decorator for manager methods that raise expected exceptions. + Marking a Manager method with this decorator allows the declaration of expected exceptions that the RPC layer should not consider fatal, and not log as if they were generated in a real error scenario. Note that this will cause listed exceptions to be wrapped in a - ClientException, which is used internally by the RPC layer.""" + ClientException, which is used internally by the RPC layer. + """ def outer(func): def inner(*args, **kwargs): return catch_client_exception(exceptions, func, *args, **kwargs) @@ -440,19 +444,15 @@ def client_exceptions(*exceptions): return outer +# TODO(sirp): we should deprecate this in favor of +# using `versionutils.is_compatible` directly def version_is_compatible(imp_version, version): """Determine whether versions are compatible. :param imp_version: The version implemented :param version: The version requested by an incoming message. """ - version_parts = version.split('.') - imp_version_parts = imp_version.split('.') - if int(version_parts[0]) != int(imp_version_parts[0]): # Major - return False - if int(version_parts[1]) > int(imp_version_parts[1]): # Minor - return False - return True + return versionutils.is_compatible(version, imp_version) def serialize_msg(raw_msg): diff --git a/manila/openstack/common/rpc/dispatcher.py b/manila/openstack/common/rpc/dispatcher.py index 47eb8b0caa..c6e7bc3753 100644 --- a/manila/openstack/common/rpc/dispatcher.py +++ b/manila/openstack/common/rpc/dispatcher.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -83,7 +81,10 @@ On the client side, the same changes should be made as in example 1. The minimum version that supports the new parameter should be specified. """ +import six + from manila.openstack.common.rpc import common as rpc_common +from manila.openstack.common.rpc import serializer as rpc_serializer class RpcDispatcher(object): @@ -93,16 +94,38 @@ class RpcDispatcher(object): contains a list of underlying managers that have an API_VERSION attribute. """ - def __init__(self, callbacks): + def __init__(self, callbacks, serializer=None): """Initialize the rpc dispatcher. :param callbacks: List of proxy objects that are an instance of a class with rpc methods exposed. Each proxy object should have an RPC_API_VERSION attribute. + :param serializer: The Serializer object that will be used to + deserialize arguments before the method call and + to serialize the result after it returns. 
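
version_is_compatible() below now just delegates to versionutils.is_compatible(); the semantics match the removed inline implementation (sketch):

    # Sketch only -- not part of this patch.
    version_is_compatible('1.3', '1.1')   # True: same major, 1.1 <= 1.3
    version_is_compatible('1.3', '1.5')   # False: requested minor too new
    version_is_compatible('2.0', '1.5')   # False: major version mismatch
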
""" self.callbacks = callbacks + if serializer is None: + serializer = rpc_serializer.NoOpSerializer() + self.serializer = serializer super(RpcDispatcher, self).__init__() + def _deserialize_args(self, context, kwargs): + """Helper method called to deserialize args before dispatch. + + This calls our serializer on each argument, returning a new set of + args that have been deserialized. + + :param context: The request context + :param kwargs: The arguments to be deserialized + :returns: A new set of deserialized args + """ + new_kwargs = dict() + for argname, arg in six.iteritems(kwargs): + new_kwargs[argname] = self.serializer.deserialize_entity(context, + arg) + return new_kwargs + def dispatch(self, ctxt, version, method, namespace, **kwargs): """Dispatch a message based on a requested version. @@ -145,7 +168,9 @@ class RpcDispatcher(object): if not hasattr(proxyobj, method): continue if is_compatible: - return getattr(proxyobj, method)(ctxt, **kwargs) + kwargs = self._deserialize_args(ctxt, kwargs) + result = getattr(proxyobj, method)(ctxt, **kwargs) + return self.serializer.serialize_entity(ctxt, result) if had_compatible: raise AttributeError("No such RPC function '%s'" % method) diff --git a/manila/openstack/common/rpc/impl_fake.py b/manila/openstack/common/rpc/impl_fake.py index de4b541c3a..55135f13fb 100644 --- a/manila/openstack/common/rpc/impl_fake.py +++ b/manila/openstack/common/rpc/impl_fake.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """Fake RPC implementation which calls proxy methods directly with no queues. Casts will block, but this is very useful for tests. 
""" @@ -26,6 +25,7 @@ import json import time import eventlet +import six from manila.openstack.common.rpc import common as rpc_common @@ -69,7 +69,7 @@ class Consumer(object): # Caller might have called ctxt.reply() manually for (reply, failure) in ctxt._response: if failure: - raise failure[0], failure[1], failure[2] + six.reraise(failure[0], failure[1], failure[2]) res.append(reply) # if ending not 'sent'...we might have more data to # return from the function itself @@ -122,7 +122,7 @@ class Connection(object): def create_connection(conf, new=True): - """Create a connection""" + """Create a connection.""" return Connection() @@ -140,13 +140,13 @@ def multicall(conf, context, topic, msg, timeout=None): if not method: return args = msg.get('args', {}) - version = msg.get('version', None) - namespace = msg.get('namespace', None) + version = msg.get('version') + namespace = msg.get('namespace') try: consumer = CONSUMERS[topic][0] except (KeyError, IndexError): - return iter([None]) + raise rpc_common.Timeout("No consumers available") else: return consumer.call(context, version, method, namespace, args, timeout) @@ -179,14 +179,14 @@ def cleanup(): def fanout_cast(conf, context, topic, msg): - """Cast to all consumers of a topic""" + """Cast to all consumers of a topic.""" check_serialize(msg) method = msg.get('method') if not method: return args = msg.get('args', {}) - version = msg.get('version', None) - namespace = msg.get('namespace', None) + version = msg.get('version') + namespace = msg.get('namespace') for consumer in CONSUMERS.get(topic, []): try: diff --git a/manila/openstack/common/rpc/impl_kombu.py b/manila/openstack/common/rpc/impl_kombu.py index 360403d3d1..ac7d5c3311 100644 --- a/manila/openstack/common/rpc/impl_kombu.py +++ b/manila/openstack/common/rpc/impl_kombu.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -18,7 +16,6 @@ import functools import itertools import socket import ssl -import sys import time import uuid @@ -29,16 +26,22 @@ import kombu.connection import kombu.entity import kombu.messaging from oslo.config import cfg +import six -from manila.openstack.common.gettextutils import _ +from manila.openstack.common import excutils +from manila.openstack.common.gettextutils import _, _LE, _LI from manila.openstack.common import network_utils from manila.openstack.common.rpc import amqp as rpc_amqp from manila.openstack.common.rpc import common as rpc_common +from manila.openstack.common import sslutils kombu_opts = [ cfg.StrOpt('kombu_ssl_version', default='', - help='SSL version to use (valid only if SSL enabled)'), + help='If SSL is enabled, the SSL version to use. Valid ' + 'values are TLSv1, SSLv23 and SSLv3. SSLv2 might ' + 'be available on some distributions.' 
+ ), cfg.StrOpt('kombu_ssl_keyfile', default='', help='SSL key file (valid only if SSL enabled)'), @@ -47,8 +50,8 @@ kombu_opts = [ help='SSL cert file (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_ca_certs', default='', - help=('SSL certification authority file ' - '(valid only if SSL enabled)')), + help='SSL certification authority file ' + '(valid only if SSL enabled)'), cfg.StrOpt('rabbit_host', default='localhost', help='The RabbitMQ broker address where a single node is used'), @@ -60,36 +63,33 @@ kombu_opts = [ help='RabbitMQ HA cluster host:port pairs'), cfg.BoolOpt('rabbit_use_ssl', default=False, - help='connect over SSL for RabbitMQ'), + help='Connect over SSL for RabbitMQ'), cfg.StrOpt('rabbit_userid', default='guest', - help='the RabbitMQ userid'), + help='The RabbitMQ userid'), cfg.StrOpt('rabbit_password', default='guest', - help='the RabbitMQ password', + help='The RabbitMQ password', secret=True), cfg.StrOpt('rabbit_virtual_host', default='/', - help='the RabbitMQ virtual host'), + help='The RabbitMQ virtual host'), cfg.IntOpt('rabbit_retry_interval', default=1, - help='how frequently to retry connecting with RabbitMQ'), + help='How frequently to retry connecting with RabbitMQ'), cfg.IntOpt('rabbit_retry_backoff', default=2, - help='how long to backoff for between retries when connecting ' + help='How long to backoff for between retries when connecting ' 'to RabbitMQ'), cfg.IntOpt('rabbit_max_retries', default=0, - help='maximum retries with trying to connect to RabbitMQ ' - '(the default of 0 implies an infinite retry count)'), - cfg.BoolOpt('rabbit_durable_queues', - default=False, - help='use durable queues in RabbitMQ'), + help='Maximum number of RabbitMQ connection retries. ' + 'Default is 0 (infinite retry count)'), cfg.BoolOpt('rabbit_ha_queues', default=False, - help='use H/A queues in RabbitMQ (x-ha-policy: all).' - 'You need to wipe RabbitMQ database when ' - 'changing this option.'), + help='Use HA queues in RabbitMQ (x-ha-policy: all). ' + 'If you change this option, you must wipe the ' + 'RabbitMQ database.'), ] @@ -129,15 +129,40 @@ class ConsumerBase(object): self.tag = str(tag) self.kwargs = kwargs self.queue = None + self.ack_on_error = kwargs.get('ack_on_error', True) self.reconnect(channel) def reconnect(self, channel): - """Re-declare the queue after a rabbit reconnect""" + """Re-declare the queue after a rabbit reconnect.""" self.channel = channel self.kwargs['channel'] = channel self.queue = kombu.entity.Queue(**self.kwargs) self.queue.declare() + def _callback_handler(self, message, callback): + """Call callback with deserialized message. + + Messages that are processed without exception are ack'ed. + + If the message processing generates an exception, it will be + ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed. + """ + + try: + msg = rpc_common.deserialize_msg(message.payload) + callback(msg) + except Exception: + if self.ack_on_error: + LOG.exception(_LE("Failed to process message" + " ... skipping it.")) + message.ack() + else: + LOG.exception(_LE("Failed to process message" + " ... will requeue.")) + message.requeue() + else: + message.ack() + def consume(self, *args, **kwargs): """Actually declare the consumer on the amqp channel. This will start the flow of messages from the queue. Using the @@ -150,8 +175,6 @@ class ConsumerBase(object): If kwargs['nowait'] is True, then this call will block until a message is read. 
- Messages will automatically be acked if the callback doesn't - raise an exception """ options = {'consumer_tag': self.tag} @@ -162,21 +185,15 @@ class ConsumerBase(object): def _callback(raw_message): message = self.channel.message_to_python(raw_message) - try: - msg = rpc_common.deserialize_msg(message.payload) - callback(msg) - except Exception: - LOG.exception(_("Failed to process message... skipping it.")) - finally: - message.ack() + self._callback_handler(message, callback) self.queue.consume(*args, callback=_callback, **options) def cancel(self): - """Cancel the consuming from the queue, if it has started""" + """Cancel the consuming from the queue, if it has started.""" try: self.queue.cancel(self.tag) - except KeyError, e: + except KeyError as e: # NOTE(comstud): Kludge to get around a amqplib bug if str(e) != "u'%s'" % self.tag: raise @@ -184,7 +201,7 @@ class ConsumerBase(object): class DirectConsumer(ConsumerBase): - """Queue/consumer class for 'direct'""" + """Queue/consumer class for 'direct'.""" def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): """Init a 'direct' queue. @@ -216,7 +233,7 @@ class DirectConsumer(ConsumerBase): class TopicConsumer(ConsumerBase): - """Consumer class for 'topic'""" + """Consumer class for 'topic'.""" def __init__(self, conf, channel, topic, callback, tag, name=None, exchange_name=None, **kwargs): @@ -233,9 +250,9 @@ class TopicConsumer(ConsumerBase): Other kombu options may be passed as keyword arguments """ # Default options - options = {'durable': conf.rabbit_durable_queues, + options = {'durable': conf.amqp_durable_queues, 'queue_arguments': _get_queue_arguments(conf), - 'auto_delete': False, + 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) @@ -253,7 +270,7 @@ class TopicConsumer(ConsumerBase): class FanoutConsumer(ConsumerBase): - """Consumer class for 'fanout'""" + """Consumer class for 'fanout'.""" def __init__(self, conf, channel, topic, callback, tag, **kwargs): """Init a 'fanout' queue. @@ -286,7 +303,7 @@ class FanoutConsumer(ConsumerBase): class Publisher(object): - """Base Publisher class""" + """Base Publisher class.""" def __init__(self, channel, exchange_name, routing_key, **kwargs): """Init the Publisher class with the exchange_name, routing_key, @@ -298,7 +315,7 @@ class Publisher(object): self.reconnect(channel) def reconnect(self, channel): - """Re-establish the Producer after a rabbit reconnection""" + """Re-establish the Producer after a rabbit reconnection.""" self.exchange = kombu.entity.Exchange(name=self.exchange_name, **self.kwargs) self.producer = kombu.messaging.Producer(exchange=self.exchange, @@ -306,7 +323,7 @@ class Publisher(object): routing_key=self.routing_key) def send(self, msg, timeout=None): - """Send a message""" + """Send a message.""" if timeout: # # AMQP TTL is in milliseconds when set in the header. @@ -317,7 +334,7 @@ class Publisher(object): class DirectPublisher(Publisher): - """Publisher class for 'direct'""" + """Publisher class for 'direct'.""" def __init__(self, conf, channel, msg_id, **kwargs): """init a 'direct' publisher. @@ -333,14 +350,14 @@ class DirectPublisher(Publisher): class TopicPublisher(Publisher): - """Publisher class for 'topic'""" + """Publisher class for 'topic'.""" def __init__(self, conf, channel, topic, **kwargs): """init a 'topic' publisher. 
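
The new _callback_handler() above centralizes acking: with ack_on_error=False, a callback that raises leads to requeue and redelivery instead of a dropped message (sketch; the handler and names are illustrative, and conn is a connection from create_connection()):

    # Sketch only -- not part of this patch.
    def cb(msg):
        process(msg)                     # hypothetical handler; may raise

    # ack_on_error=True (default): exception -> logged, message ack'ed.
    # ack_on_error=False: exception -> logged, message.requeue()'d.
    conn.join_consumer_pool(cb, 'workers', 'manila-share',
                            'openstack', ack_on_error=False)
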
Kombu options may be passed as keyword args to override defaults """ - options = {'durable': conf.rabbit_durable_queues, - 'auto_delete': False, + options = {'durable': conf.amqp_durable_queues, + 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = rpc_amqp.get_control_exchange(conf) @@ -352,7 +369,7 @@ class TopicPublisher(Publisher): class FanoutPublisher(Publisher): - """Publisher class for 'fanout'""" + """Publisher class for 'fanout'.""" def __init__(self, conf, channel, topic, **kwargs): """init a 'fanout' publisher. @@ -367,10 +384,10 @@ class FanoutPublisher(Publisher): class NotifyPublisher(TopicPublisher): - """Publisher class for 'notify'""" + """Publisher class for 'notify'.""" def __init__(self, conf, channel, topic, **kwargs): - self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + self.durable = kwargs.pop('durable', conf.amqp_durable_queues) self.queue_arguments = _get_queue_arguments(conf) super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) @@ -428,7 +445,7 @@ class Connection(object): 'virtual_host': self.conf.rabbit_virtual_host, } - for sp_key, value in server_params.iteritems(): + for sp_key, value in six.iteritems(server_params): p_key = server_params_to_kombu_params.get(sp_key, sp_key) params[p_key] = value @@ -441,19 +458,24 @@ class Connection(object): self.params_list = params_list + brokers_count = len(self.params_list) + self.next_broker_indices = itertools.cycle(range(brokers_count)) + self.memory_transport = self.conf.fake_rabbit self.connection = None self.reconnect() def _fetch_ssl_params(self): - """Handles fetching what ssl params - should be used for the connection (if any)""" + """Handles fetching what ssl params should be used for the connection + (if any). + """ ssl_params = dict() # http://docs.python.org/library/ssl.html - ssl.wrap_socket if self.conf.kombu_ssl_version: - ssl_params['ssl_version'] = self.conf.kombu_ssl_version + ssl_params['ssl_version'] = sslutils.validate_ssl_version( + self.conf.kombu_ssl_version) if self.conf.kombu_ssl_keyfile: ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile if self.conf.kombu_ssl_certfile: @@ -464,12 +486,8 @@ class Connection(object): # future with this? ssl_params['cert_reqs'] = ssl.CERT_REQUIRED - if not ssl_params: - # Just have the default behavior - return True - else: - # Return the extended behavior - return ssl_params + # Return the extended behavior or just have the default behavior + return ssl_params or True def _connect(self, params): """Connect to rabbit. Re-establish any queues that may have @@ -477,7 +495,7 @@ class Connection(object): be handled by the caller. 
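The collapsed return in _fetch_ssl_params leans on kombu's convention that the `ssl` connection argument may be either True (default SSL behavior) or a dict of ssl.wrap_socket() keyword arguments. A self-contained sketch of the idiom:

    import ssl

    def fetch_ssl_params(keyfile=None, certfile=None, ca_certs=None):
        # An empty dict is falsy, so unconfigured deployments fall
        # back to True (kombu's default SSL handling) with no if/else.
        ssl_params = {}
        if keyfile:
            ssl_params['keyfile'] = keyfile
        if certfile:
            ssl_params['certfile'] = certfile
        if ca_certs:
            ssl_params['ca_certs'] = ca_certs
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
        return ssl_params or True

    assert fetch_ssl_params() is True
    assert fetch_ssl_params(keyfile='k.pem') == {'keyfile': 'k.pem'}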
""" if self.connection: - LOG.info(_("Reconnecting to AMQP server on " + LOG.info(_LI("Reconnecting to AMQP server on " "%(hostname)s:%(port)d") % params) try: self.connection.release() @@ -499,7 +517,7 @@ class Connection(object): self.channel._new_queue('ae.undeliver') for consumer in self.consumers: consumer.reconnect(self.channel) - LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % + LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') % params) def reconnect(self): @@ -513,14 +531,14 @@ class Connection(object): attempt = 0 while True: - params = self.params_list[attempt % len(self.params_list)] + params = self.params_list[next(self.next_broker_indices)] attempt += 1 try: self._connect(params) return except (IOError, self.connection_errors) as e: pass - except Exception, e: + except Exception as e: # NOTE(comstud): Unfortunately it's possible for amqplib # to return an error not covered by its transport # connection_errors in the case of a timeout waiting for @@ -531,18 +549,16 @@ class Connection(object): raise log_info = {} - log_info['err_str'] = str(e) + log_info['err_str'] = e log_info['max_retries'] = self.max_retries log_info.update(params) if self.max_retries and attempt == self.max_retries: - LOG.error(_('Unable to connect to AMQP server on ' - '%(hostname)s:%(port)d after %(max_retries)d ' - 'tries: %(err_str)s') % log_info) - # NOTE(comstud): Copied from original code. There's - # really no better recourse because if this was a queue we - # need to consume on, we have no way to consume anymore. - sys.exit(1) + msg = _('Unable to connect to AMQP server on ' + '%(hostname)s:%(port)d after %(max_retries)d ' + 'tries: %(err_str)s') % log_info + LOG.error(msg) + raise rpc_common.RPCException(msg) if attempt == 1: sleep_time = self.interval_start or 1 @@ -552,19 +568,19 @@ class Connection(object): sleep_time = min(sleep_time, self.interval_max) log_info['sleep_time'] = sleep_time - LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' - 'unreachable: %(err_str)s. Trying again in ' - '%(sleep_time)d seconds.') % log_info) + LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is ' + 'unreachable: %(err_str)s. 
Trying again in ' + '%(sleep_time)d seconds.') % log_info) time.sleep(sleep_time) def ensure(self, error_callback, method, *args, **kwargs): while True: try: return method(*args, **kwargs) - except (self.connection_errors, socket.timeout, IOError), e: + except (self.connection_errors, socket.timeout, IOError) as e: if error_callback: error_callback(e) - except Exception, e: + except Exception as e: # NOTE(comstud): Unfortunately it's possible for amqplib # to return an error not covered by its transport # connection_errors in the case of a timeout waiting for @@ -578,18 +594,18 @@ class Connection(object): self.reconnect() def get_channel(self): - """Convenience call for bin/clear_rabbit_queues""" + """Convenience call for bin/clear_rabbit_queues.""" return self.channel def close(self): - """Close/release this connection""" + """Close/release this connection.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.connection.release() self.connection = None def reset(self): - """Reset a connection so it can be used again""" + """Reset a connection so it can be used again.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.channel.close() @@ -605,37 +621,37 @@ class Connection(object): """ def _connect_error(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + log_info = {'topic': topic, 'err_str': exc} + LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s") % log_info) def _declare_consumer(): consumer = consumer_cls(self.conf, self.channel, topic, callback, - self.consumer_num.next()) + six.next(self.consumer_num)) self.consumers.append(consumer) return consumer return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): - """Return an iterator that will consume from all queues/consumers""" + """Return an iterator that will consume from all queues/consumers.""" info = {'do_consume': True} def _error_callback(exc): if isinstance(exc, socket.timeout): - LOG.debug(_('Timed out waiting for RPC response: %s') % - str(exc)) + LOG.debug('Timed out waiting for RPC response: %s' % + exc) raise rpc_common.Timeout() else: - LOG.exception(_('Failed to consume message from queue: %s') % - str(exc)) + LOG.exception(_LE('Failed to consume message from queue: %s') % + exc) info['do_consume'] = True def _consume(): if info['do_consume']: - queues_head = self.consumers[:-1] - queues_tail = self.consumers[-1] + queues_head = self.consumers[:-1] # not fanout. 
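The itertools.cycle iterator created in __init__ is what changes the retry behavior here: it outlives any single reconnect() call, so each attempt advances to the next broker, unlike the old `attempt % len(self.params_list)` arithmetic that restarted at the first host every time the method was entered. A quick illustration:

    import itertools

    params_list = [{'hostname': 'rabbit1'}, {'hostname': 'rabbit2'}]
    next_broker_indices = itertools.cycle(range(len(params_list)))

    # Successive picks rotate through all brokers and wrap around.
    picks = [params_list[next(next_broker_indices)]['hostname']
             for _ in range(3)]
    assert picks == ['rabbit1', 'rabbit2', 'rabbit1']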
+ queues_tail = self.consumers[-1] # fanout for queue in queues_head: queue.consume(nowait=True) queues_tail.consume(nowait=False) @@ -648,7 +664,7 @@ class Connection(object): yield self.ensure(_error_callback, _consume) def cancel_consumer_thread(self): - """Cancel a consumer thread""" + """Cancel a consumer thread.""" if self.consumer_thread is not None: self.consumer_thread.kill() try: @@ -663,11 +679,11 @@ class Connection(object): proxy_cb.wait() def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): - """Send to a publisher based on the publisher class""" + """Send to a publisher based on the publisher class.""" def _error_callback(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.exception(_("Failed to publish message to topic " + log_info = {'topic': topic, 'err_str': exc} + LOG.exception(_LE("Failed to publish message to topic " "'%(topic)s': %(err_str)s") % log_info) def _publish(): @@ -684,45 +700,47 @@ class Connection(object): self.declare_consumer(DirectConsumer, topic, callback) def declare_topic_consumer(self, topic, callback=None, queue_name=None, - exchange_name=None): + exchange_name=None, ack_on_error=True): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, exchange_name=exchange_name, + ack_on_error=ack_on_error, ), topic, callback) def declare_fanout_consumer(self, topic, callback): - """Create a 'fanout' consumer""" + """Create a 'fanout' consumer.""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): - """Send a 'direct' message""" + """Send a 'direct' message.""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): - """Send a 'topic' message""" + """Send a 'topic' message.""" self.publisher_send(TopicPublisher, topic, msg, timeout) def fanout_send(self, topic, msg): - """Send a 'fanout' message""" + """Send a 'fanout' message.""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): - """Send a notify message on a topic""" + """Send a notify message on a topic.""" self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) def consume(self, limit=None): - """Consume from all queues/consumers""" + """Consume from all queues/consumers.""" it = self.iterconsume(limit=limit) while True: try: - it.next() + six.next(it) except StopIteration: return def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread""" + """Consumer from all queues/consumers in a greenthread.""" + @excutils.forever_retry_uncaught_exceptions def _consumer_thread(): try: self.consume() @@ -733,7 +751,7 @@ class Connection(object): return self.consumer_thread def create_consumer(self, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object""" + """Create a consumer that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -745,7 +763,7 @@ class Connection(object): self.declare_topic_consumer(topic, proxy_cb) def create_worker(self, topic, proxy, pool_name): - """Create a worker that calls a method in a proxy object""" + """Create a worker that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -753,7 +771,7 @@ class Connection(object): self.declare_topic_consumer(topic, proxy_cb, pool_name) def join_consumer_pool(self, callback, pool_name, topic, - 
exchange_name=None): + exchange_name=None, ack_on_error=True): """Register as a member of a group of consumers for a given topic from the specified exchange. @@ -767,6 +785,7 @@ class Connection(object): callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), + wait_for_consumers=not ack_on_error ) self.proxy_callbacks.append(callback_wrapper) self.declare_topic_consumer( @@ -774,11 +793,12 @@ class Connection(object): topic=topic, exchange_name=exchange_name, callback=callback_wrapper, + ack_on_error=ack_on_error, ) def create_connection(conf, new=True): - """Create a connection""" + """Create a connection.""" return rpc_amqp.create_connection( conf, new, rpc_amqp.get_connection_pool(conf, Connection)) diff --git a/manila/openstack/common/rpc/impl_qpid.py b/manila/openstack/common/rpc/impl_qpid.py index 900adc0b7c..da3a355656 100644 --- a/manila/openstack/common/rpc/impl_qpid.py +++ b/manila/openstack/common/rpc/impl_qpid.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation # Copyright 2011 - 2012, Red Hat, Inc. # @@ -18,19 +16,21 @@ import functools import itertools import time -import uuid import eventlet import greenlet from oslo.config import cfg +import six -from manila.openstack.common.gettextutils import _ +from manila.openstack.common import excutils +from manila.openstack.common.gettextutils import _, _LE, _LI from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila.openstack.common import log as logging from manila.openstack.common.rpc import amqp as rpc_amqp from manila.openstack.common.rpc import common as rpc_common +qpid_codec = importutils.try_import("qpid.codec010") qpid_messaging = importutils.try_import("qpid.messaging") qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") @@ -65,15 +65,35 @@ qpid_opts = [ cfg.BoolOpt('qpid_tcp_nodelay', default=True, help='Disable Nagle algorithm'), + # NOTE(russellb) If any additional versions are added (beyond 1 and 2), + # this file could probably use some additional refactoring so that the + # differences between each version are split into different classes. + cfg.IntOpt('qpid_topology_version', + default=1, + help="The qpid topology version to use. Version 1 is what " + "was originally used by impl_qpid. Version 2 includes " + "some backwards-incompatible changes that allow broker " + "federation to work. Users should update to version 2 " + "when they are able to take everything down, as it " + "requires a clean break."), ] cfg.CONF.register_opts(qpid_opts) +JSON_CONTENT_TYPE = 'application/json; charset=utf8' + + +def raise_invalid_topology_version(conf): + msg = (_("Invalid value for qpid_topology_version: %d") % + conf.qpid_topology_version) + LOG.error(msg) + raise Exception(msg) + class ConsumerBase(object): """Consumer base class.""" - def __init__(self, session, callback, node_name, node_opts, + def __init__(self, conf, session, callback, node_name, node_opts, link_name, link_opts): """Declare a queue on an amqp session. 
@@ -91,55 +111,97 @@ class ConsumerBase(object): self.receiver = None self.session = None - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { + if conf.qpid_topology_version == 1: + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True, + }, + }, + "link": { "durable": True, - "auto-delete": True, + "x-declare": { + "durable": False, + "auto-delete": True, + "exclusive": False, + }, }, - }, - "link": { - "name": link_name, - "durable": True, - "x-declare": { - "durable": False, - "auto-delete": True, - "exclusive": False, + } + addr_opts["node"]["x-declare"].update(node_opts) + elif conf.qpid_topology_version == 2: + addr_opts = { + "link": { + "x-declare": { + "auto-delete": True, + "exclusive": False, + }, }, - }, - } - addr_opts["node"]["x-declare"].update(node_opts) + } + else: + raise_invalid_topology_version() + addr_opts["link"]["x-declare"].update(link_opts) + if link_name: + addr_opts["link"]["name"] = link_name self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) - self.reconnect(session) + self.connect(session) + + def connect(self, session): + """Declare the receiver on connect.""" + self._declare_receiver(session) def reconnect(self, session): - """Re-declare the receiver after a qpid reconnect""" + """Re-declare the receiver after a qpid reconnect.""" + self._declare_receiver(session) + + def _declare_receiver(self, session): self.session = session self.receiver = session.receiver(self.address) self.receiver.capacity = 1 + def _unpack_json_msg(self, msg): + """Load the JSON data in msg if msg.content_type indicates that it + is necessary. Put the loaded data back into msg.content and + update msg.content_type appropriately. + + A Qpid Message containing a dict will have a content_type of + 'amqp/map', whereas one containing a string that needs to be converted + back from JSON will have a content_type of JSON_CONTENT_TYPE. + + :param msg: a Qpid Message object + :returns: None + """ + if msg.content_type == JSON_CONTENT_TYPE: + msg.content = jsonutils.loads(msg.content) + msg.content_type = 'amqp/map' + def consume(self): - """Fetch the message and pass it to the callback object""" + """Fetch the message and pass it to the callback object.""" message = self.receiver.fetch() try: + self._unpack_json_msg(message) msg = rpc_common.deserialize_msg(message.content) self.callback(msg) except Exception: - LOG.exception(_("Failed to process message... skipping it.")) + LOG.exception(_LE("Failed to process message... skipping it.")) finally: + # TODO(sandy): Need support for optional ack_on_error. self.session.acknowledge(message) def get_receiver(self): return self.receiver + def get_node_name(self): + return self.address.split(';')[0] + class DirectConsumer(ConsumerBase): - """Queue/consumer class for 'direct'""" + """Queue/consumer class for 'direct'.""" def __init__(self, conf, session, msg_id, callback): """Init a 'direct' queue. 
@@ -149,15 +211,30 @@ class DirectConsumer(ConsumerBase): 'callback' is the callback to call when messages are received """ - super(DirectConsumer, self).__init__(session, callback, - "%s/%s" % (msg_id, msg_id), - {"type": "direct"}, - msg_id, - {"exclusive": True}) + link_opts = { + "auto-delete": conf.amqp_auto_delete, + "exclusive": True, + "durable": conf.amqp_durable_queues, + } + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (msg_id, msg_id) + node_opts = {"type": "direct"} + link_name = msg_id + elif conf.qpid_topology_version == 2: + node_name = "amq.direct/%s" % msg_id + node_opts = {} + link_name = None + else: + raise_invalid_topology_version() + + super(DirectConsumer, self).__init__(conf, session, callback, + node_name, node_opts, link_name, + link_opts) class TopicConsumer(ConsumerBase): - """Consumer class for 'topic'""" + """Consumer class for 'topic'.""" def __init__(self, conf, session, topic, callback, name=None, exchange_name=None): @@ -171,13 +248,24 @@ class TopicConsumer(ConsumerBase): """ exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) - super(TopicConsumer, self).__init__(session, callback, - "%s/%s" % (exchange_name, topic), - {}, name or topic, {}) + link_opts = { + "auto-delete": conf.amqp_auto_delete, + "durable": conf.amqp_durable_queues, + } + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(TopicConsumer, self).__init__(conf, session, callback, node_name, + {}, name or topic, link_opts) class FanoutConsumer(ConsumerBase): - """Consumer class for 'fanout'""" + """Consumer class for 'fanout'.""" def __init__(self, conf, session, topic, callback): """Init a 'fanout' queue. 
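The topology split is easiest to see in the node names it produces. A condensed, self-contained sketch for the 'direct' case (the in-tree helper raise_invalid_topology_version(conf) takes the conf object; this standalone variant raises directly):

    def direct_node_name(msg_id, topology_version):
        # Version 1 declares a private exchange per reply queue;
        # version 2 reuses the broker's standard amq.direct exchange,
        # which is what makes broker federation possible.
        if topology_version == 1:
            return "%s/%s" % (msg_id, msg_id)    # 'reply_ab/reply_ab'
        elif topology_version == 2:
            return "amq.direct/%s" % msg_id      # 'amq.direct/reply_ab'
        raise ValueError("Invalid qpid_topology_version: %s"
                         % topology_version)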
@@ -186,90 +274,165 @@ class FanoutConsumer(ConsumerBase): 'topic' is the topic to listen on 'callback' is the callback to call when messages are received """ + self.conf = conf - super(FanoutConsumer, self).__init__( - session, callback, - "%s_fanout" % topic, - {"durable": False, "type": "fanout"}, - "%s_fanout_%s" % (topic, uuid.uuid4().hex), - {"exclusive": True}) + link_opts = {"exclusive": True} + + if conf.qpid_topology_version == 1: + node_name = "%s_fanout" % topic + node_opts = {"durable": False, "type": "fanout"} + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/fanout/%s" % topic + node_opts = {} + else: + raise_invalid_topology_version() + + super(FanoutConsumer, self).__init__(conf, session, callback, + node_name, node_opts, None, + link_opts) class Publisher(object): - """Base Publisher class""" + """Base Publisher class.""" - def __init__(self, session, node_name, node_opts=None): + def __init__(self, conf, session, node_name, node_opts=None): """Init the Publisher class with the exchange_name, routing_key, and other options """ self.sender = None self.session = session - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { - "durable": False, - # auto-delete isn't implemented for exchanges in qpid, - # but put in here anyway - "auto-delete": True, + if conf.qpid_topology_version == 1: + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": False, + # auto-delete isn't implemented for exchanges in qpid, + # but put in here anyway + "auto-delete": True, + }, }, - }, - } - if node_opts: - addr_opts["node"]["x-declare"].update(node_opts) + } + if node_opts: + addr_opts["node"]["x-declare"].update(node_opts) - self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + elif conf.qpid_topology_version == 2: + self.address = node_name + else: + raise_invalid_topology_version() self.reconnect(session) def reconnect(self, session): - """Re-establish the Sender after a reconnection""" + """Re-establish the Sender after a reconnection.""" self.sender = session.sender(self.address) + def _pack_json_msg(self, msg): + """Qpid cannot serialize dicts containing strings longer than 65535 + characters. This function dumps the message content to a JSON + string, which Qpid is able to handle. + + :param msg: May be either a Qpid Message object or a bare dict. + :returns: A Qpid Message with its content field JSON encoded. + """ + try: + msg.content = jsonutils.dumps(msg.content) + except AttributeError: + # Need to have a Qpid message so we can set the content_type. + msg = qpid_messaging.Message(jsonutils.dumps(msg)) + msg.content_type = JSON_CONTENT_TYPE + return msg + def send(self, msg): - """Send a message""" + """Send a message.""" + try: + # Check if Qpid can encode the message + check_msg = msg + if not hasattr(check_msg, 'content_type'): + check_msg = qpid_messaging.Message(msg) + content_type = check_msg.content_type + enc, dec = qpid_messaging.message.get_codec(content_type) + enc(check_msg.content) + except qpid_codec.CodecException: + # This means the message couldn't be serialized as a dict. 
+ msg = self._pack_json_msg(msg) self.sender.send(msg) class DirectPublisher(Publisher): - """Publisher class for 'direct'""" + """Publisher class for 'direct'.""" def __init__(self, conf, session, msg_id): """Init a 'direct' publisher.""" - super(DirectPublisher, self).__init__(session, msg_id, - {"type": "Direct"}) + + if conf.qpid_topology_version == 1: + node_name = msg_id + node_opts = {"type": "direct"} + elif conf.qpid_topology_version == 2: + node_name = "amq.direct/%s" % msg_id + node_opts = {} + else: + raise_invalid_topology_version() + + super(DirectPublisher, self).__init__(conf, session, node_name, + node_opts) class TopicPublisher(Publisher): - """Publisher class for 'topic'""" + """Publisher class for 'topic'.""" def __init__(self, conf, session, topic): - """init a 'topic' publisher. + """Init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) - super(TopicPublisher, self).__init__(session, - "%s/%s" % (exchange_name, topic)) + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(TopicPublisher, self).__init__(conf, session, node_name) class FanoutPublisher(Publisher): - """Publisher class for 'fanout'""" + """Publisher class for 'fanout'.""" def __init__(self, conf, session, topic): - """init a 'fanout' publisher. + """Init a 'fanout' publisher. """ - super(FanoutPublisher, self).__init__( - session, - "%s_fanout" % topic, {"type": "fanout"}) + + if conf.qpid_topology_version == 1: + node_name = "%s_fanout" % topic + node_opts = {"type": "fanout"} + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/fanout/%s" % topic + node_opts = {} + else: + raise_invalid_topology_version() + + super(FanoutPublisher, self).__init__(conf, session, node_name, + node_opts) class NotifyPublisher(Publisher): - """Publisher class for notifications""" + """Publisher class for notifications.""" def __init__(self, conf, session, topic): - """init a 'topic' publisher. + """Init a 'topic' publisher. 
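Taken together, _pack_json_msg on the publish side and _unpack_json_msg on the consume side form a round trip that sidesteps qpid's inability to encode dicts containing strings longer than 65535 characters. A minimal sketch of that round trip, with plain dicts standing in for qpid.messaging.Message objects:

    import json

    JSON_CONTENT_TYPE = 'application/json; charset=utf8'

    def pack(content):
        # Oversized payloads travel as one JSON string with an
        # explicit content type instead of an amqp/map.
        return {'content': json.dumps(content),
                'content_type': JSON_CONTENT_TYPE}

    def unpack(msg):
        # The consumer restores the dict before the normal
        # deserialize/callback path runs.
        if msg['content_type'] == JSON_CONTENT_TYPE:
            msg['content'] = json.loads(msg['content'])
            msg['content_type'] = 'amqp/map'
        return msg

    original = {'method': 'ping', 'blob': 'x' * 70000}
    assert unpack(pack(original))['content'] == original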
""" exchange_name = rpc_amqp.get_control_exchange(conf) - super(NotifyPublisher, self).__init__(session, - "%s/%s" % (exchange_name, topic), - {"durable": True}) + node_opts = {"durable": True} + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(NotifyPublisher, self).__init__(conf, session, node_name, + node_opts) class Connection(object): @@ -304,6 +467,10 @@ class Connection(object): self.brokers = params['qpid_hosts'] self.username = params['username'] self.password = params['password'] + + brokers_count = len(self.brokers) + self.next_broker_indices = itertools.cycle(range(brokers_count)) + self.connection_create(self.brokers[0]) self.reconnect() @@ -330,31 +497,30 @@ class Connection(object): return self.consumers[str(receiver)] def reconnect(self): - """Handles reconnecting and re-establishing sessions and queues""" - if self.connection.opened(): - try: - self.connection.close() - except qpid_exceptions.ConnectionError: - pass - - attempt = 0 + """Handles reconnecting and re-establishing sessions and queues.""" delay = 1 while True: - broker = self.brokers[attempt % len(self.brokers)] - attempt += 1 + # Close the session if necessary + if self.connection.opened(): + try: + self.connection.close() + except qpid_exceptions.MessagingError: + pass + + broker = self.brokers[next(self.next_broker_indices)] try: self.connection_create(broker) self.connection.open() - except qpid_exceptions.ConnectionError, e: + except qpid_exceptions.MessagingError as e: msg_dict = dict(e=e, delay=delay) - msg = _("Unable to connect to AMQP server: %(e)s. " - "Sleeping %(delay)s seconds") % msg_dict + msg = _LE("Unable to connect to AMQP server: %(e)s. " + "Sleeping %(delay)s seconds") % msg_dict LOG.error(msg) time.sleep(delay) - delay = min(2 * delay, 60) + delay = min(delay + 1, 5) else: - LOG.info(_('Connected to AMQP server on %s'), broker) + LOG.info(_LI('Connected to AMQP server on %s'), broker) break self.session = self.connection.session() @@ -363,31 +529,37 @@ class Connection(object): consumers = self.consumers self.consumers = {} - for consumer in consumers.itervalues(): + for consumer in six.itervalues(consumers): consumer.reconnect(self.session) self._register_consumer(consumer) - LOG.debug(_("Re-established AMQP queues")) + LOG.debug("Re-established AMQP queues") def ensure(self, error_callback, method, *args, **kwargs): while True: try: return method(*args, **kwargs) except (qpid_exceptions.Empty, - qpid_exceptions.ConnectionError), e: + qpid_exceptions.MessagingError) as e: if error_callback: error_callback(e) self.reconnect() def close(self): - """Close/release this connection""" + """Close/release this connection.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() - self.connection.close() + try: + self.connection.close() + except Exception: + # NOTE(dripton) Logging exceptions that happen during cleanup just + # causes confusion; there's really nothing useful we can do with + # them. 
+ pass self.connection = None def reset(self): - """Reset a connection so it can be used again""" + """Reset a connection so it can be used again.""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.session.close() @@ -399,8 +571,8 @@ class Connection(object): add it to our list of consumers """ def _connect_error(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + log_info = {'topic': topic, 'err_str': exc} + LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s") % log_info) def _declare_consumer(): @@ -411,23 +583,23 @@ class Connection(object): return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): - """Return an iterator that will consume from all queues/consumers""" + """Return an iterator that will consume from all queues/consumers.""" def _error_callback(exc): if isinstance(exc, qpid_exceptions.Empty): - LOG.debug(_('Timed out waiting for RPC response: %s') % - str(exc)) + LOG.debug('Timed out waiting for RPC response: %s' % + exc) raise rpc_common.Timeout() else: - LOG.exception(_('Failed to consume message from queue: %s') % - str(exc)) + LOG.exception(_LE('Failed to consume message from queue: %s') % + exc) def _consume(): nxt_receiver = self.session.next_receiver(timeout=timeout) try: self._lookup_consumer(nxt_receiver).consume() except Exception: - LOG.exception(_("Error processing message. Skipping it.")) + LOG.exception(_LE("Error processing message. Skipping it.")) for iteration in itertools.count(0): if limit and iteration >= limit: @@ -435,7 +607,7 @@ class Connection(object): yield self.ensure(_error_callback, _consume) def cancel_consumer_thread(self): - """Cancel a consumer thread""" + """Cancel a consumer thread.""" if self.consumer_thread is not None: self.consumer_thread.kill() try: @@ -450,11 +622,11 @@ class Connection(object): proxy_cb.wait() def publisher_send(self, cls, topic, msg): - """Send to a publisher based on the publisher class""" + """Send to a publisher based on the publisher class.""" def _connect_error(exc): - log_info = {'topic': topic, 'err_str': str(exc)} - LOG.exception(_("Failed to publish message to topic " + log_info = {'topic': topic, 'err_str': exc} + LOG.exception(_LE("Failed to publish message to topic " "'%(topic)s': %(err_str)s") % log_info) def _publisher_send(): @@ -480,15 +652,15 @@ class Connection(object): topic, callback) def declare_fanout_consumer(self, topic, callback): - """Create a 'fanout' consumer""" + """Create a 'fanout' consumer.""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): - """Send a 'direct' message""" + """Send a 'direct' message.""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): - """Send a 'topic' message""" + """Send a 'topic' message.""" # # We want to create a message with attributes, e.g. a TTL. 
We # don't really need to keep 'msg' in its JSON format any longer @@ -503,24 +675,25 @@ class Connection(object): self.publisher_send(TopicPublisher, topic, qpid_message) def fanout_send(self, topic, msg): - """Send a 'fanout' message""" + """Send a 'fanout' message.""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): - """Send a notify message on a topic""" + """Send a notify message on a topic.""" self.publisher_send(NotifyPublisher, topic, msg) def consume(self, limit=None): - """Consume from all queues/consumers""" + """Consume from all queues/consumers.""" it = self.iterconsume(limit=limit) while True: try: - it.next() + six.next(it) except StopIteration: return def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread""" + """Consumer from all queues/consumers in a greenthread.""" + @excutils.forever_retry_uncaught_exceptions def _consumer_thread(): try: self.consume() @@ -531,7 +704,7 @@ class Connection(object): return self.consumer_thread def create_consumer(self, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object""" + """Create a consumer that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -547,7 +720,7 @@ class Connection(object): return consumer def create_worker(self, topic, proxy, pool_name): - """Create a worker that calls a method in a proxy object""" + """Create a worker that calls a method in a proxy object.""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) @@ -561,7 +734,7 @@ class Connection(object): return consumer def join_consumer_pool(self, callback, pool_name, topic, - exchange_name=None): + exchange_name=None, ack_on_error=True): """Register as a member of a group of consumers for a given topic from the specified exchange. 
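Both drivers now decorate their consumer greenthread with excutils.forever_retry_uncaught_exceptions; without it, one uncaught exception kills the only thread pulling messages and the service silently stops consuming. A simplified stand-in for the decorator (the real helper also rate-limits repeated log messages):

    import logging
    import time

    LOG = logging.getLogger(__name__)

    def forever_retry_uncaught_exceptions(func):
        def inner(*args, **kwargs):
            while True:
                try:
                    return func(*args, **kwargs)
                except Exception:
                    LOG.exception("Unexpected exception, retrying")
                    time.sleep(1)
        return inner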
@@ -575,6 +748,7 @@ class Connection(object): callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), + wait_for_consumers=not ack_on_error ) self.proxy_callbacks.append(callback_wrapper) @@ -590,7 +764,7 @@ class Connection(object): def create_connection(conf, new=True): - """Create a connection""" + """Create a connection.""" return rpc_amqp.create_connection( conf, new, rpc_amqp.get_connection_pool(conf, Connection)) diff --git a/manila/openstack/common/rpc/impl_zmq.py b/manila/openstack/common/rpc/impl_zmq.py index d387764cc5..6146796d48 100644 --- a/manila/openstack/common/rpc/impl_zmq.py +++ b/manila/openstack/common/rpc/impl_zmq.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -25,12 +23,13 @@ import uuid import eventlet import greenlet from oslo.config import cfg +import six +from six import moves from manila.openstack.common import excutils -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _, _LE, _LI from manila.openstack.common import importutils from manila.openstack.common import jsonutils -from manila.openstack.common import processutils as utils from manila.openstack.common.rpc import common as rpc_common zmq = importutils.try_import('eventlet.green.zmq') @@ -64,7 +63,7 @@ zmq_opts = [ cfg.IntOpt('rpc_zmq_contexts', default=1, help='Number of ZeroMQ contexts, defaults to 1'), - cfg.IntOpt('rpc_zmq_topic_backlog', default=None, + cfg.IntOpt('rpc_zmq_topic_backlog', help='Maximum number of ingress messages to locally buffer ' 'per topic. Default is unlimited.'), @@ -81,12 +80,12 @@ CONF = cfg.CONF CONF.register_opts(zmq_opts) ZMQ_CTX = None # ZeroMQ Context, must be global. -matchmaker = None # memoized matchmaker object +matchmaker = None # memorized matchmaker object def _serialize(data): - """ - Serialization wrapper + """Serialization wrapper. + We prefer using JSON, but it cannot encode all types. Error if a developer passes us bad data. """ @@ -94,22 +93,19 @@ def _serialize(data): return jsonutils.dumps(data, ensure_ascii=True) except TypeError: with excutils.save_and_reraise_exception(): - LOG.error(_("JSON serialization failed.")) + LOG.error(_LE("JSON serialization failed.")) def _deserialize(data): - """ - Deserialization wrapper - """ - LOG.debug(_("Deserializing: %s"), data) + """Deserialization wrapper.""" + LOG.debug("Deserializing: %s", data) return jsonutils.loads(data) class ZmqSocket(object): - """ - A tiny wrapper around ZeroMQ to simplify the send/recv protocol - and connection management. + """A tiny wrapper around ZeroMQ. + Simplifies the send/recv protocol and connection management. Can be used as a Context (supports the 'with' statement). 
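The _serialize wrapper above is small but load-bearing: JSON keeps the wire format language-neutral, and save_and_reraise_exception turns a non-serializable payload into a logged, visible failure rather than a swallowed one. A standard-library-only sketch of the failure mode it guards:

    import datetime
    import json

    json.dumps({'method': 'ping'}, ensure_ascii=True)   # fine
    try:
        # datetime is not JSON-encodable; _serialize logs this via
        # LOG.error inside save_and_reraise_exception, then re-raises.
        json.dumps({'when': datetime.datetime.utcnow()})
    except TypeError:
        pass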
""" @@ -137,9 +133,9 @@ class ZmqSocket(object): str_data = {'addr': addr, 'type': self.socket_s(), 'subscribe': subscribe, 'bind': bind} - LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) - LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) - LOG.debug(_("-> bind: %(bind)s"), str_data) + LOG.debug("Connecting to %(addr)s with %(type)s", str_data) + LOG.debug("-> Subscribed to %(subscribe)s", str_data) + LOG.debug("-> bind: %(bind)s", str_data) try: if bind: @@ -159,7 +155,7 @@ class ZmqSocket(object): """Subscribe.""" if not self.can_sub: raise RPCException("Cannot subscribe on this socket.") - LOG.debug(_("Subscribing to %s"), msg_filter) + LOG.debug("Subscribing to %s", msg_filter) try: self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) @@ -180,7 +176,7 @@ class ZmqSocket(object): return # We must unsubscribe, or we'll leak descriptors. - if len(self.subscriptions) > 0: + if self.subscriptions: for f in self.subscriptions: try: self.sock.setsockopt(zmq.UNSUBSCRIBE, f) @@ -196,29 +192,27 @@ class ZmqSocket(object): # it would be much worse if some of the code calling this # were to fail. For now, lets log, and later evaluate # if we can safely raise here. - LOG.error("ZeroMQ socket could not be closed.") + LOG.error(_LE("ZeroMQ socket could not be closed.")) self.sock = None - def recv(self): + def recv(self, **kwargs): if not self.can_recv: raise RPCException(_("You cannot recv on this socket.")) - return self.sock.recv_multipart() + return self.sock.recv_multipart(**kwargs) - def send(self, data): + def send(self, data, **kwargs): if not self.can_send: raise RPCException(_("You cannot send on this socket.")) - self.sock.send_multipart(data) + self.sock.send_multipart(data, **kwargs) class ZmqClient(object): """Client for ZMQ sockets.""" - def __init__(self, addr, socket_type=None, bind=False): - if socket_type is None: - socket_type = zmq.PUSH - self.outq = ZmqSocket(addr, socket_type, bind=bind) + def __init__(self, addr): + self.outq = ZmqSocket(addr, zmq.PUSH, bind=False) - def cast(self, msg_id, topic, data, envelope=False): + def cast(self, msg_id, topic, data, envelope): msg_id = msg_id or 0 if not envelope: @@ -227,7 +221,7 @@ class ZmqClient(object): return rpc_envelope = rpc_common.serialize_msg(data[1], envelope) - zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) + zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items()) self.outq.send(map(bytes, (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) @@ -270,7 +264,7 @@ class InternalContext(object): def _get_response(self, ctx, proxy, topic, data): """Process a curried message and cast the result to topic.""" - LOG.debug(_("Running func with context: %s"), ctx.to_dict()) + LOG.debug("Running func with context: %s", ctx.to_dict()) data.setdefault('version', None) data.setdefault('args', {}) @@ -282,14 +276,14 @@ class InternalContext(object): except greenlet.GreenletExit: # ignore these since they are just from shutdowns pass - except rpc_common.ClientException, e: - LOG.debug(_("Expected exception during message handling (%s)") % + except rpc_common.ClientException as e: + LOG.debug("Expected exception during message handling (%s)" % e._exc_info[1]) return {'exc': rpc_common.serialize_remote_exception(e._exc_info, log_failure=False)} except Exception: - LOG.error(_("Exception during message handling")) + LOG.error(_LE("Exception during message handling")) return {'exc': rpc_common.serialize_remote_exception(sys.exc_info())} @@ -308,7 +302,7 @@ class InternalContext(object): 
self._get_response(ctx, proxy, topic, payload), ctx.replies) - LOG.debug(_("Sending reply")) + LOG.debug("Sending reply") _multi_send(_cast, ctx, topic, { 'method': '-process_reply', 'args': { @@ -342,7 +336,7 @@ class ConsumerBase(object): # processed internally. (non-valid method name) method = data.get('method') if not method: - LOG.error(_("RPC message did not include method.")) + LOG.error(_LE("RPC message did not include method.")) return # Internal method @@ -356,16 +350,14 @@ class ConsumerBase(object): class ZmqBaseReactor(ConsumerBase): - """ - A consumer class implementing a - centralized casting broker (PULL-PUSH) - for RoundRobin requests. + """A consumer class implementing a centralized casting broker (PULL-PUSH). + + Used for RoundRobin requests. """ def __init__(self, conf): super(ZmqBaseReactor, self).__init__() - self.mapping = {} self.proxies = {} self.threads = [] self.sockets = [] @@ -373,11 +365,10 @@ class ZmqBaseReactor(ConsumerBase): self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) - def register(self, proxy, in_addr, zmq_type_in, out_addr=None, - zmq_type_out=None, in_bind=True, out_bind=True, - subscribe=None): + def register(self, proxy, in_addr, zmq_type_in, + in_bind=True, subscribe=None): - LOG.info(_("Registering reactor")) + LOG.info(_LI("Registering reactor")) if zmq_type_in not in (zmq.PULL, zmq.SUB): raise RPCException("Bad input socktype") @@ -389,26 +380,12 @@ class ZmqBaseReactor(ConsumerBase): self.proxies[inq] = proxy self.sockets.append(inq) - LOG.info(_("In reactor registered")) - - if not out_addr: - return - - if zmq_type_out not in (zmq.PUSH, zmq.PUB): - raise RPCException("Bad output socktype") - - # Items push out. - outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) - - self.mapping[inq] = outq - self.mapping[outq] = inq - self.sockets.append(outq) - - LOG.info(_("Out reactor registered")) + LOG.info(_LI("In reactor registered")) def consume_in_thread(self): + @excutils.forever_retry_uncaught_exceptions def _consume(sock): - LOG.info(_("Consuming socket")) + LOG.info(_LI("Consuming socket")) while True: self.consume(sock) @@ -430,10 +407,9 @@ class ZmqBaseReactor(ConsumerBase): class ZmqProxy(ZmqBaseReactor): - """ - A consumer class implementing a - topic-based proxy, forwarding to - IPC sockets. + """A consumer class implementing a topic-based proxy. + + Forwards to IPC sockets. """ def __init__(self, conf): @@ -446,11 +422,8 @@ class ZmqProxy(ZmqBaseReactor): def consume(self, sock): ipc_dir = CONF.rpc_zmq_ipc_dir - #TODO(ewindisch): use zero-copy (i.e. 
references, not copying) - data = sock.recv() - topic = data[1] - - LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) + data = sock.recv(copy=False) + topic = data[1].bytes if topic.startswith('fanout~'): sock_type = zmq.PUB @@ -462,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor): if topic not in self.topic_proxy: def publisher(waiter): - LOG.info(_("Creating proxy for topic: %s"), topic) + LOG.info(_LI("Creating proxy for topic: %s"), topic) try: # The topic is received over the network, @@ -492,9 +465,7 @@ class ZmqProxy(ZmqBaseReactor): while(True): data = self.topic_proxy[topic].get() - out_sock.send(data) - LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % - {'data': data}) + out_sock.send(data, copy=False) wait_sock_creation = eventlet.event.Event() eventlet.spawn(publisher, wait_sock_creation) @@ -502,69 +473,66 @@ class ZmqProxy(ZmqBaseReactor): try: wait_sock_creation.wait() except RPCException: - LOG.error(_("Topic socket file creation failed.")) + LOG.error(_LE("Topic socket file creation failed.")) return try: self.topic_proxy[topic].put_nowait(data) - LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") % - {'data': data}) except eventlet.queue.Full: - LOG.error(_("Local per-topic backlog buffer full for topic " - "%(topic)s. Dropping message.") % {'topic': topic}) + LOG.error(_LE("Local per-topic backlog buffer full for topic " + "%(topic)s. Dropping message.") % {'topic': topic}) def consume_in_thread(self): - """Runs the ZmqProxy service""" + """Runs the ZmqProxy service.""" ipc_dir = CONF.rpc_zmq_ipc_dir consume_in = "tcp://%s:%s" % \ (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port) consumption_proxy = InternalContext(None) - if not os.path.isdir(ipc_dir): - try: - utils.execute('mkdir', '-p', ipc_dir, run_as_root=True) - utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()), - ipc_dir, run_as_root=True) - utils.execute('chmod', '750', ipc_dir, run_as_root=True) - except utils.ProcessExecutionError: + try: + os.makedirs(ipc_dir) + except os.error: + if not os.path.isdir(ipc_dir): with excutils.save_and_reraise_exception(): - LOG.error(_("Could not create IPC directory %s") % - (ipc_dir, )) - + LOG.error(_LE("Required IPC directory does not exist at" + " %s") % (ipc_dir, )) try: self.register(consumption_proxy, consume_in, - zmq.PULL, - out_bind=True) + zmq.PULL) except zmq.ZMQError: + if os.access(ipc_dir, os.X_OK): + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Permission denied to IPC directory at" + " %s") % (ipc_dir, )) with excutils.save_and_reraise_exception(): - LOG.error(_("Could not create ZeroMQ receiver daemon. " - "Socket may already be in use.")) + LOG.error(_LE("Could not create ZeroMQ receiver daemon. " + "Socket may already be in use.")) super(ZmqProxy, self).consume_in_thread() def unflatten_envelope(packenv): """Unflattens the RPC envelope. - Takes a list and returns a dictionary. - i.e. [1,2,3,4] => {1: 2, 3: 4} + + Takes a list and returns a dictionary. + i.e. [1,2,3,4] => {1: 2, 3: 4} """ i = iter(packenv) h = {} try: while True: - k = i.next() - h[k] = i.next() + k = six.next(i) + h[k] = six.next(i) except StopIteration: return h class ZmqReactor(ZmqBaseReactor): - """ - A consumer class implementing a - consumer for messages. Can also be - used as a 1:1 proxy + """A consumer class implementing a consumer for messages. + + Can also be used as a 1:1 proxy """ def __init__(self, conf): @@ -573,12 +541,7 @@ class ZmqReactor(ZmqBaseReactor): def consume(self, sock): #TODO(ewindisch): use zero-copy (i.e. 
references, not copying) data = sock.recv() - LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) - if sock in self.mapping: - LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { - 'data': data}) - self.mapping[sock].send(data) - return + LOG.debug("CONSUMER RECEIVED DATA: %s", data) proxy = self.proxies[sock] @@ -597,7 +560,7 @@ class ZmqReactor(ZmqBaseReactor): # Unmarshal only after verifying the message. ctx = RpcContext.unmarshal(data[3]) else: - LOG.error(_("ZMQ Envelope version unsupported or unknown.")) + LOG.error(_LE("ZMQ Envelope version unsupported or unknown.")) return self.pool.spawn_n(self.process, proxy, ctx, request) @@ -625,14 +588,14 @@ class Connection(rpc_common.Connection): topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host)) if topic in self.topics: - LOG.info(_("Skipping topic registration. Already registered.")) + LOG.info(_LI("Skipping topic registration. Already registered.")) return # Receive messages from (local) proxy inaddr = "ipc://%s/zmq_topic_%s" % \ (CONF.rpc_zmq_ipc_dir, topic) - LOG.debug(_("Consumer is a zmq.%s"), + LOG.debug("Consumer is a zmq.%s", ['PULL', 'SUB'][sock_type == zmq.SUB]) self.reactor.register(proxy, inaddr, sock_type, @@ -684,7 +647,7 @@ def _call(addr, context, topic, msg, timeout=None, # Replies always come into the reply service. reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host - LOG.debug(_("Creating payload")) + LOG.debug("Creating payload") # Curry the original request into a reply method. mcontext = RpcContext.marshal(context) payload = { @@ -697,7 +660,7 @@ def _call(addr, context, topic, msg, timeout=None, } } - LOG.debug(_("Creating queue socket for reply waiter")) + LOG.debug("Creating queue socket for reply waiter") # Messages arriving async. # TODO(ewindisch): have reply consumer with dynamic subscription mgmt @@ -710,14 +673,14 @@ def _call(addr, context, topic, msg, timeout=None, zmq.SUB, subscribe=msg_id, bind=False ) - LOG.debug(_("Sending cast")) + LOG.debug("Sending cast") _cast(addr, context, topic, payload, envelope) - LOG.debug(_("Cast sent; Waiting reply")) + LOG.debug("Cast sent; Waiting reply") # Blocks until receives reply msg = msg_waiter.recv() - LOG.debug(_("Received message: %s"), msg) - LOG.debug(_("Unpacking response")) + LOG.debug("Received message: %s", msg) + LOG.debug("Unpacking response") if msg[2] == 'cast': # Legacy version raw_msg = _deserialize(msg[-1])[-1] @@ -751,19 +714,18 @@ def _call(addr, context, topic, msg, timeout=None, def _multi_send(method, context, topic, msg, timeout=None, envelope=False, _msg_id=None): - """ - Wraps the sending of messages, - dispatches to the matchmaker and sends - message to all relevant hosts. + """Wraps the sending of messages. + + Dispatches to the matchmaker and sends message to all relevant hosts. """ conf = CONF - LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) + LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))}) queues = _get_matchmaker().queues(topic) - LOG.debug(_("Sending message(s) to: %s"), queues) + LOG.debug("Sending message(s) to: %s", queues) # Don't stack if we have no matchmaker results - if len(queues) == 0: + if not queues: LOG.warn(_("No matchmaker results. Not casting.")) # While not strictly a timeout, callers know how to handle # this exception and a timeout isn't too big a lie. @@ -811,8 +773,8 @@ def fanout_cast(conf, context, topic, msg, **kwargs): def notify(conf, context, topic, msg, envelope): - """ - Send notification event. + """Send notification event. 
+ Notifications are sent to topic-priority. This differs from the AMQP drivers which send to topic.priority. """ @@ -846,6 +808,11 @@ def _get_ctxt(): def _get_matchmaker(*args, **kwargs): global matchmaker if not matchmaker: - matchmaker = importutils.import_object( - CONF.rpc_zmq_matchmaker, *args, **kwargs) + mm = CONF.rpc_zmq_matchmaker + if mm.endswith('matchmaker.MatchMakerRing'): + mm.replace('matchmaker', 'matchmaker_ring') + LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use' + ' %(new)s instead') % dict( + orig=CONF.rpc_zmq_matchmaker, new=mm)) + matchmaker = importutils.import_object(mm, *args, **kwargs) return matchmaker diff --git a/manila/openstack/common/rpc/matchmaker.py b/manila/openstack/common/rpc/matchmaker.py index 67f2635be8..47caced22b 100644 --- a/manila/openstack/common/rpc/matchmaker.py +++ b/manila/openstack/common/rpc/matchmaker.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,27 +11,22 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """ The MatchMaker classes should except a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. """ import contextlib -import itertools -import json import eventlet from oslo.config import cfg -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _, _LI from manila.openstack.common import log as logging matchmaker_opts = [ - # Matchmaker ring file - cfg.StrOpt('matchmaker_ringfile', - default='/etc/nova/matchmaker_ring.json', - help='Matchmaker ring file (JSON)'), cfg.IntOpt('matchmaker_heartbeat_freq', default=300, help='Heartbeat frequency'), @@ -54,8 +47,8 @@ class MatchMakerException(Exception): class Exchange(object): - """ - Implements lookups. + """Implements lookups. + Subclass this to support hashtables, dns, etc. """ def __init__(self): @@ -66,9 +59,7 @@ class Exchange(object): class Binding(object): - """ - A binding on which to perform a lookup. - """ + """A binding on which to perform a lookup.""" def __init__(self): pass @@ -77,10 +68,10 @@ class Binding(object): class MatchMakerBase(object): - """ - Match Maker Base Class. - Build off HeartbeatMatchMakerBase if building a - heartbeat-capable MatchMaker. + """Match Maker Base Class. + + Build off HeartbeatMatchMakerBase if building a heartbeat-capable + MatchMaker. """ def __init__(self): # Array of tuples. Index [2] toggles negation, [3] is last-if-true @@ -90,58 +81,47 @@ class MatchMakerBase(object): 'registration or heartbeat.') def register(self, key, host): - """ - Register a host on a backend. + """Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. """ pass def ack_alive(self, key, host): - """ - Acknowledge that a key.host is alive. - Used internally for updating heartbeats, - but may also be used publically to acknowledge - a system is alive (i.e. rpc message successfully - sent to host) + """Acknowledge that a key.host is alive. + + Used internally for updating heartbeats, but may also be used + publicly to acknowledge a system is alive (i.e. rpc message + successfully sent to host) """ pass def is_alive(self, topic, host): - """ - Checks if a host is alive. 
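A caution on the _get_matchmaker deprecation shim earlier in this hunk: str.replace returns a new string, so unless the result is bound back to `mm`, the deprecated matchmaker.MatchMakerRing path is still what gets imported. A corrected standalone sketch of the intended rename:

    def resolve_matchmaker_path(mm):
        # Note the assignment back to mm -- str.replace does not
        # mutate the string in place.
        if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker.MatchMakerRing',
                            'matchmaker_ring.MatchMakerRing')
        return mm

    assert (resolve_matchmaker_path('x.rpc.matchmaker.MatchMakerRing')
            == 'x.rpc.matchmaker_ring.MatchMakerRing')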
- """ + """Checks if a host is alive.""" pass def expire(self, topic, host): - """ - Explicitly expire a host's registration. - """ + """Explicitly expire a host's registration.""" pass def send_heartbeats(self): - """ - Send all heartbeats. + """Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ pass def unregister(self, key, host): - """ - Unregister a topic. - """ + """Unregister a topic.""" pass def start_heartbeat(self): - """ - Spawn heartbeat greenthread. - """ + """Spawn heartbeat greenthread.""" pass def stop_heartbeat(self): - """ - Destroys the heartbeat greenthread. - """ + """Destroys the heartbeat greenthread.""" pass def add_binding(self, binding, rule, last=True): @@ -168,10 +148,10 @@ class MatchMakerBase(object): class HeartbeatMatchMakerBase(MatchMakerBase): - """ - Base for a heart-beat capable MatchMaker. - Provides common methods for registering, - unregistering, and maintaining heartbeats. + """Base for a heart-beat capable MatchMaker. + + Provides common methods for registering, unregistering, and maintaining + heartbeats. """ def __init__(self): self.hosts = set() @@ -181,8 +161,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase): super(HeartbeatMatchMakerBase, self).__init__() def send_heartbeats(self): - """ - Send all heartbeats. + """Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ @@ -190,32 +170,31 @@ class HeartbeatMatchMakerBase(MatchMakerBase): self.ack_alive(key, host) def ack_alive(self, key, host): - """ - Acknowledge that a host.topic is alive. - Used internally for updating heartbeats, - but may also be used publically to acknowledge - a system is alive (i.e. rpc message successfully - sent to host) + """Acknowledge that a host.topic is alive. + + Used internally for updating heartbeats, but may also be used + publicly to acknowledge a system is alive (i.e. rpc message + successfully sent to host) """ raise NotImplementedError("Must implement ack_alive") def backend_register(self, key, host): - """ - Implements registration logic. + """Implements registration logic. + Called by register(self,key,host) """ raise NotImplementedError("Must implement backend_register") def backend_unregister(self, key, key_host): - """ - Implements de-registration logic. + """Implements de-registration logic. + Called by unregister(self,key,host) """ raise NotImplementedError("Must implement backend_unregister") def register(self, key, host): - """ - Register a host on a backend. + """Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. """ self.hosts.add(host) @@ -227,25 +206,24 @@ class HeartbeatMatchMakerBase(MatchMakerBase): self.ack_alive(key, host) def unregister(self, key, host): - """ - Unregister a topic. - """ + """Unregister a topic.""" if (key, host) in self.host_topic: del self.host_topic[(key, host)] self.hosts.discard(host) self.backend_unregister(key, '.'.join((key, host))) - LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) + LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"), + {'key': key, 'host': host}) def start_heartbeat(self): - """ - Implementation of MatchMakerBase.start_heartbeat + """Implementation of MatchMakerBase.start_heartbeat. + Launches greenthread looping send_heartbeats(), yielding for CONF.matchmaker_heartbeat_freq seconds between iterations. 
""" - if len(self.hosts) == 0: + if not self.hosts: raise MatchMakerException( _("Register before starting heartbeat.")) @@ -257,45 +235,37 @@ class HeartbeatMatchMakerBase(MatchMakerBase): self._heart = eventlet.spawn(do_heartbeat) def stop_heartbeat(self): - """ - Destroys the heartbeat greenthread. - """ + """Destroys the heartbeat greenthread.""" if self._heart: self._heart.kill() class DirectBinding(Binding): - """ - Specifies a host in the key via a '.' character + """Specifies a host in the key via a '.' character. + Although dots are used in the key, the behavior here is that it maps directly to a host, thus direct. """ def test(self, key): - if '.' in key: - return True - return False + return '.' in key class TopicBinding(Binding): - """ - Where a 'bare' key without dots. + """Where a 'bare' key without dots. + AMQP generally considers topic exchanges to be those *with* dots, but we deviate here in terminology as the behavior here matches that of a topic exchange (whereas where there are dots, behavior matches that of a direct exchange. """ def test(self, key): - if '.' not in key: - return True - return False + return '.' not in key class FanoutBinding(Binding): """Match on fanout keys, where key starts with 'fanout.' string.""" def test(self, key): - if key.startswith('fanout~'): - return True - return False + return key.startswith('fanout~') class StubExchange(Exchange): @@ -304,67 +274,6 @@ class StubExchange(Exchange): return [(key, None)] -class RingExchange(Exchange): - """ - Match Maker where hosts are loaded from a static file containing - a hashmap (JSON formatted). - - __init__ takes optional ring dictionary argument, otherwise - loads the ringfile from CONF.mathcmaker_ringfile. - """ - def __init__(self, ring=None): - super(RingExchange, self).__init__() - - if ring: - self.ring = ring - else: - fh = open(CONF.matchmaker_ringfile, 'r') - self.ring = json.load(fh) - fh.close() - - self.ring0 = {} - for k in self.ring.keys(): - self.ring0[k] = itertools.cycle(self.ring[k]) - - def _ring_has(self, key): - if key in self.ring0: - return True - return False - - -class RoundRobinRingExchange(RingExchange): - """A Topic Exchange based on a hashmap.""" - def __init__(self, ring=None): - super(RoundRobinRingExchange, self).__init__(ring) - - def run(self, key): - if not self._ring_has(key): - LOG.warn( - _("No key defining hosts for topic '%s', " - "see ringfile") % (key, ) - ) - return [] - host = next(self.ring0[key]) - return [(key + '.' + host, host)] - - -class FanoutRingExchange(RingExchange): - """Fanout Exchange based on a hashmap.""" - def __init__(self, ring=None): - super(FanoutRingExchange, self).__init__(ring) - - def run(self, key): - # Assume starts with "fanout~", strip it for lookup. - nkey = key.split('fanout~')[1:][0] - if not self._ring_has(nkey): - LOG.warn( - _("No key defining hosts for topic '%s', " - "see ringfile") % (nkey, ) - ) - return [] - return map(lambda x: (key + '.' + x, x), self.ring[nkey]) - - class LocalhostExchange(Exchange): """Exchange where all direct topics are local.""" def __init__(self, host='localhost'): @@ -376,8 +285,8 @@ class LocalhostExchange(Exchange): class DirectExchange(Exchange): - """ - Exchange where all topic keys are split, sending to second half. + """Exchange where all topic keys are split, sending to second half. + i.e. 
"compute.host" sends a message to "compute.host" running on "host" """ def __init__(self): @@ -388,20 +297,9 @@ class DirectExchange(Exchange): return [(key, e)] -class MatchMakerRing(MatchMakerBase): - """ - Match Maker where hosts are loaded from a static hashmap. - """ - def __init__(self, ring=None): - super(MatchMakerRing, self).__init__() - self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) - self.add_binding(DirectBinding(), DirectExchange()) - self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) - - class MatchMakerLocalhost(MatchMakerBase): - """ - Match Maker where all bare topics resolve to localhost. + """Match Maker where all bare topics resolve to localhost. + Useful for testing. """ def __init__(self, host='localhost'): @@ -412,13 +310,13 @@ class MatchMakerLocalhost(MatchMakerBase): class MatchMakerStub(MatchMakerBase): - """ - Match Maker where topics are untouched. + """Match Maker where topics are untouched. + Useful for testing, or for AMQP/brokered queues. Will not work where knowledge of hosts is known (i.e. zeromq) """ def __init__(self): - super(MatchMakerLocalhost, self).__init__() + super(MatchMakerStub, self).__init__() self.add_binding(FanoutBinding(), StubExchange()) self.add_binding(DirectBinding(), StubExchange()) diff --git a/manila/openstack/common/rpc/matchmaker_redis.py b/manila/openstack/common/rpc/matchmaker_redis.py index eb2c9934eb..99f03289d9 100644 --- a/manila/openstack/common/rpc/matchmaker_redis.py +++ b/manila/openstack/common/rpc/matchmaker_redis.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """ The MatchMaker classes should accept a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. @@ -35,7 +34,6 @@ matchmaker_redis_opts = [ default=6379, help='Use this port to connect to redis host.'), cfg.StrOpt('password', - default=None, help='Password for Redis server. (optional)'), ] @@ -55,8 +53,8 @@ class RedisExchange(mm_common.Exchange): class RedisTopicExchange(RedisExchange): - """ - Exchange where all topic keys are split, sending to second half. + """Exchange where all topic keys are split, sending to second half. + i.e. "compute.host" sends a message to "compute" running on "host" """ def run(self, topic): @@ -77,9 +75,7 @@ class RedisTopicExchange(RedisExchange): class RedisFanoutExchange(RedisExchange): - """ - Return a list of all hosts. - """ + """Return a list of all hosts.""" def run(self, topic): topic = topic.split('~', 1)[1] hosts = self.redis.smembers(topic) @@ -90,16 +86,14 @@ class RedisFanoutExchange(RedisExchange): class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): - """ - MatchMaker registering and looking-up hosts with a Redis server. 
-    """
+    """MatchMaker registering and looking-up hosts with a Redis server."""
     def __init__(self):
         super(MatchMakerRedis, self).__init__()
 
         if not redis:
             raise ImportError("Failed to import module redis.")
 
-        self.redis = redis.StrictRedis(
+        self.redis = redis.Redis(
             host=CONF.matchmaker_redis.host,
             port=CONF.matchmaker_redis.port,
             password=CONF.matchmaker_redis.password)
diff --git a/manila/openstack/common/rpc/matchmaker_ring.py b/manila/openstack/common/rpc/matchmaker_ring.py
new file mode 100644
index 0000000000..95e55bf40a
--- /dev/null
+++ b/manila/openstack/common/rpc/matchmaker_ring.py
@@ -0,0 +1,106 @@
+# Copyright 2011-2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import itertools
+import json
+
+from oslo.config import cfg
+
+from manila.openstack.common.gettextutils import _LW
+from manila.openstack.common import log as logging
+from manila.openstack.common.rpc import matchmaker as mm
+
+
+matchmaker_opts = [
+    # Matchmaker ring file
+    cfg.StrOpt('ringfile',
+               deprecated_name='matchmaker_ringfile',
+               deprecated_group='DEFAULT',
+               default='/etc/oslo/matchmaker_ring.json',
+               help='Matchmaker ring file (JSON)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
+LOG = logging.getLogger(__name__)
+
+
+class RingExchange(mm.Exchange):
+    """Match Maker where hosts are loaded from a static JSON formatted file.
+
+    __init__ takes optional ring dictionary argument, otherwise
+    loads the ringfile from CONF.matchmaker_ring.ringfile.
+    """
+    def __init__(self, ring=None):
+        super(RingExchange, self).__init__()
+
+        if ring:
+            self.ring = ring
+        else:
+            with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
+                self.ring = json.load(fh)
+
+        self.ring0 = {}
+        for k in self.ring.keys():
+            self.ring0[k] = itertools.cycle(self.ring[k])
+
+    def _ring_has(self, key):
+        return key in self.ring0
+
+
+class RoundRobinRingExchange(RingExchange):
+    """A Topic Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(RoundRobinRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        if not self._ring_has(key):
+            LOG.warn(
+                _LW("No key defining hosts for topic '%s', "
+                    "see ringfile") % (key, )
+            )
+            return []
+        host = next(self.ring0[key])
+        return [(key + '.' + host, host)]
+
+
+class FanoutRingExchange(RingExchange):
+    """Fanout Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(FanoutRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        # Assume starts with "fanout~", strip it for lookup.
+        nkey = key.split('fanout~')[1:][0]
+        if not self._ring_has(nkey):
+            LOG.warn(
+                _LW("No key defining hosts for topic '%s', "
+                    "see ringfile") % (nkey, )
+            )
+            return []
+        return map(lambda x: (key + '.'
+ x, x), self.ring[nkey]) + + +class MatchMakerRing(mm.MatchMakerBase): + """Match Maker where hosts are loaded from a static hashmap.""" + def __init__(self, ring=None): + super(MatchMakerRing, self).__init__() + self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring)) + self.add_binding(mm.DirectBinding(), mm.DirectExchange()) + self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring)) diff --git a/manila/openstack/common/rpc/proxy.py b/manila/openstack/common/rpc/proxy.py index 62e45bf5bc..8a0b7aff84 100644 --- a/manila/openstack/common/rpc/proxy.py +++ b/manila/openstack/common/rpc/proxy.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. +# Copyright 2012-2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -21,8 +19,11 @@ For more information about rpc API version numbers, see: rpc/dispatcher.py """ +import six from manila.openstack.common import rpc +from manila.openstack.common.rpc import common as rpc_common +from manila.openstack.common.rpc import serializer as rpc_serializer class RpcProxy(object): @@ -34,16 +35,28 @@ class RpcProxy(object): rpc API. """ - def __init__(self, topic, default_version): + # The default namespace, which can be overridden in a subclass. + RPC_API_NAMESPACE = None + + def __init__(self, topic, default_version, version_cap=None, + serializer=None): """Initialize an RpcProxy. :param topic: The topic to use for all messages. :param default_version: The default API version to request in all outgoing messages. This can be overridden on a per-message basis. + :param version_cap: Optionally cap the maximum version used for sent + messages. + :param serializer: Optionally (de-)serialize entities with a + provided helper. """ self.topic = topic self.default_version = default_version + self.version_cap = version_cap + if serializer is None: + serializer = rpc_serializer.NoOpSerializer() + self.serializer = serializer super(RpcProxy, self).__init__() def _set_version(self, msg, vers): @@ -52,19 +65,44 @@ class RpcProxy(object): :param msg: The message having a version added to it. :param vers: The version number to add to the message. """ - msg['version'] = vers if vers else self.default_version + v = vers if vers else self.default_version + if (self.version_cap and not + rpc_common.version_is_compatible(self.version_cap, v)): + raise rpc_common.RpcVersionCapError(version_cap=self.version_cap) + msg['version'] = v def _get_topic(self, topic): """Return the topic to use for a message.""" return topic if topic else self.topic + def can_send_version(self, version): + """Check to see if a version is compatible with the version cap.""" + return (not self.version_cap or + rpc_common.version_is_compatible(self.version_cap, version)) + @staticmethod def make_namespaced_msg(method, namespace, **kwargs): return {'method': method, 'namespace': namespace, 'args': kwargs} - @staticmethod - def make_msg(method, **kwargs): - return RpcProxy.make_namespaced_msg(method, None, **kwargs) + def make_msg(self, method, **kwargs): + return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE, + **kwargs) + + def _serialize_msg_args(self, context, kwargs): + """Helper method called to serialize message arguments. + + This calls our serializer on each argument, returning a new + set of args that have been serialized. 
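The ring consumed by the new matchmaker_ring module above is just a JSON map of topic to host list. A sketch of feeding one in through the optional ring argument, with invented host names, using the queues() lookup inherited from MatchMakerBase (unchanged by this sync):

    from manila.openstack.common.rpc import matchmaker_ring

    # Same shape as /etc/oslo/matchmaker_ring.json:
    ring = {'scheduler': ['host1', 'host2'],
            'share': ['host1']}

    mm = matchmaker_ring.MatchMakerRing(ring=ring)

    # Bare topics round-robin through the ring entry:
    mm.queues('scheduler')         # [('scheduler.host1', 'host1')], host2 next
    # "fanout~" topics expand to one (key.host, host) pair per ring host:
    mm.queues('fanout~scheduler')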
+ + :param context: The request context + :param kwargs: The arguments to serialize + :returns: A new set of serialized arguments + """ + new_kwargs = dict() + for argname, arg in six.iteritems(kwargs): + new_kwargs[argname] = self.serializer.serialize_entity(context, + arg) + return new_kwargs def call(self, context, msg, topic=None, version=None, timeout=None): """rpc.call() a remote method. @@ -81,9 +119,11 @@ class RpcProxy(object): :returns: The return value from the remote method. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) real_topic = self._get_topic(topic) try: - return rpc.call(context, real_topic, msg, timeout) + result = rpc.call(context, real_topic, msg, timeout) + return self.serializer.deserialize_entity(context, result) except rpc.common.Timeout as exc: raise rpc.common.Timeout( exc.info, real_topic, msg.get('method')) @@ -104,9 +144,11 @@ class RpcProxy(object): from the remote method as they arrive. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) real_topic = self._get_topic(topic) try: - return rpc.multicall(context, real_topic, msg, timeout) + result = rpc.multicall(context, real_topic, msg, timeout) + return self.serializer.deserialize_entity(context, result) except rpc.common.Timeout as exc: raise rpc.common.Timeout( exc.info, real_topic, msg.get('method')) @@ -124,6 +166,7 @@ class RpcProxy(object): remote method. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.cast(context, self._get_topic(topic), msg) def fanout_cast(self, context, msg, topic=None, version=None): @@ -139,6 +182,7 @@ class RpcProxy(object): from the remote method. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.fanout_cast(context, self._get_topic(topic), msg) def cast_to_server(self, context, server_params, msg, topic=None, @@ -157,6 +201,7 @@ class RpcProxy(object): return values. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) def fanout_cast_to_server(self, context, server_params, msg, topic=None, @@ -175,5 +220,6 @@ class RpcProxy(object): return values. """ self._set_version(msg, version) + msg['args'] = self._serialize_msg_args(context, msg['args']) rpc.fanout_cast_to_server(context, server_params, self._get_topic(topic), msg) diff --git a/manila/openstack/common/rpc/serializer.py b/manila/openstack/common/rpc/serializer.py new file mode 100644 index 0000000000..9bc6e2a3a0 --- /dev/null +++ b/manila/openstack/common/rpc/serializer.py @@ -0,0 +1,54 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
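The interface those proxy hooks drive is the Serializer ABC defined in the new module below. A sketch of a concrete implementation and of handing it to a proxy; the entity class and its to_primitive()/from_primitive() helpers are assumptions, not part of this change:

    from manila.openstack.common.rpc import serializer


    class EntitySerializer(serializer.Serializer):
        """Hypothetical serializer for a single entity class."""

        def __init__(self, entity_class):
            self.entity_class = entity_class

        def serialize_entity(self, context, entity):
            # Reduce the object to primitives before it lands in msg['args'].
            if isinstance(entity, self.entity_class):
                return entity.to_primitive()
            return entity

        def deserialize_entity(self, context, entity):
            # Rebuild the object from the primitive returned by call().
            return self.entity_class.from_primitive(entity)


    # Wiring it into a proxy (topic, version and Share are illustrative):
    # api = proxy.RpcProxy('manila-share', '1.0',
    #                      serializer=EntitySerializer(Share))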
+ +"""Provides the definition of an RPC serialization handler""" + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class Serializer(object): + """Generic (de-)serialization definition base class.""" + + @abc.abstractmethod + def serialize_entity(self, context, entity): + """Serialize something to primitive form. + + :param context: Security context + :param entity: Entity to be serialized + :returns: Serialized form of entity + """ + pass + + @abc.abstractmethod + def deserialize_entity(self, context, entity): + """Deserialize something from primitive form. + + :param context: Security context + :param entity: Primitive to be deserialized + :returns: Deserialized form of entity + """ + pass + + +class NoOpSerializer(Serializer): + """A serializer that does nothing.""" + + def serialize_entity(self, context, entity): + return entity + + def deserialize_entity(self, context, entity): + return entity diff --git a/manila/openstack/common/rpc/service.py b/manila/openstack/common/rpc/service.py index 57429a02e1..a0be6aee6f 100644 --- a/manila/openstack/common/rpc/service.py +++ b/manila/openstack/common/rpc/service.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -17,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common import rpc from manila.openstack.common.rpc import dispatcher as rpc_dispatcher @@ -30,11 +27,13 @@ LOG = logging.getLogger(__name__) class Service(service.Service): """Service object for binaries running on hosts. - A service enables rpc by listening to queues based on topic and host.""" - def __init__(self, host, topic, manager=None): + A service enables rpc by listening to queues based on topic and host. + """ + def __init__(self, host, topic, manager=None, serializer=None): super(Service, self).__init__() self.host = host self.topic = topic + self.serializer = serializer if manager is None: self.manager = self else: @@ -44,10 +43,11 @@ class Service(service.Service): super(Service, self).start() self.conn = rpc.create_connection(new=True) - LOG.debug(_("Creating Consumer connection for Service %s") % + LOG.debug("Creating Consumer connection for Service %s" % self.topic) - dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) + dispatcher = rpc_dispatcher.RpcDispatcher([self.manager], + self.serializer) # Share this same connection for these Consumers self.conn.create_consumer(self.topic, dispatcher, fanout=False) diff --git a/manila/openstack/common/rpc/zmq_receiver.py b/manila/openstack/common/rpc/zmq_receiver.py old mode 100755 new mode 100644 index 44d6760781..cad5bba6cc --- a/manila/openstack/common/rpc/zmq_receiver.py +++ b/manila/openstack/common/rpc/zmq_receiver.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/manila/openstack/common/scheduler/base_filter.py b/manila/openstack/common/scheduler/base_filter.py new file mode 100644 index 0000000000..98641f6de4 --- /dev/null +++ b/manila/openstack/common/scheduler/base_filter.py @@ -0,0 +1,95 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Filter support +""" + +from manila.openstack.common.gettextutils import _LI +from manila.openstack.common import log as logging +from manila.openstack.common.scheduler import base_handler + +LOG = logging.getLogger(__name__) + + +class BaseFilter(object): + """Base class for all filter classes.""" + def _filter_one(self, obj, filter_properties): + """Return True if it passes the filter, False otherwise. + Override this in a subclass. + """ + return True + + def filter_all(self, filter_obj_list, filter_properties): + """Yield objects that pass the filter. + + Can be overridden in a subclass, if you need to base filtering + decisions on all objects. Otherwise, one can just override + _filter_one() to filter a single object. + """ + for obj in filter_obj_list: + if self._filter_one(obj, filter_properties): + yield obj + + # Set to true in a subclass if a filter only needs to be run once + # for each request rather than for each instance + run_filter_once_per_request = False + + def run_filter_for_index(self, index): + """Return True if the filter needs to be run for the "index-th" + instance in a request. Only need to override this if a filter + needs anything other than "first only" or "all" behaviour. + """ + return not (self.run_filter_once_per_request and index > 0) + + +class BaseFilterHandler(base_handler.BaseHandler): + """Base class to handle loading filter classes. + + This class should be subclassed where one needs to use filters. + """ + + def get_filtered_objects(self, filter_classes, objs, + filter_properties, index=0): + """Get objects after filter + + :param filter_classes: filters that will be used to filter the + objects + :param objs: objects that will be filtered + :param filter_properties: client filter properties + :param index: This value needs to be increased in the caller + function of get_filtered_objects when handling + each resource. + """ + list_objs = list(objs) + LOG.debug("Starting with %d host(s)", len(list_objs)) + for filter_cls in filter_classes: + cls_name = filter_cls.__name__ + filter_class = filter_cls() + + if filter_class.run_filter_for_index(index): + objs = filter_class.filter_all(list_objs, filter_properties) + if objs is None: + LOG.debug("Filter %(cls_name)s says to stop filtering", + {'cls_name': cls_name}) + return + list_objs = list(objs) + msg = (_LI("Filter %(cls_name)s returned %(obj_len)d host(s)") + % {'cls_name': cls_name, 'obj_len': len(list_objs)}) + if not list_objs: + LOG.info(msg) + break + LOG.debug(msg) + return list_objs diff --git a/manila/openstack/common/scheduler/base_handler.py b/manila/openstack/common/scheduler/base_handler.py new file mode 100644 index 0000000000..44c8eca5b8 --- /dev/null +++ b/manila/openstack/common/scheduler/base_handler.py @@ -0,0 +1,46 @@ +# Copyright (c) 2011-2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A common base for handling extension classes.
+
+Used by BaseFilterHandler and BaseWeightHandler
+"""
+
+import inspect
+
+from stevedore import extension
+
+
+class BaseHandler(object):
+    """Base class to handle loading filter and weight classes."""
+    def __init__(self, modifier_class_type, modifier_namespace):
+        self.namespace = modifier_namespace
+        self.modifier_class_type = modifier_class_type
+        self.extension_manager = extension.ExtensionManager(modifier_namespace)
+
+    def _is_correct_class(self, cls):
+        """Return whether an object is a class of the correct type and
+        is not prefixed with an underscore.
+        """
+        return (inspect.isclass(cls) and
+                not cls.__name__.startswith('_') and
+                issubclass(cls, self.modifier_class_type))
+
+    def get_all_classes(self):
+        # We use a set, as some classes may have an entrypoint of their own,
+        # and also be returned by a function such as 'all_filters' for example
+        return [ext.plugin for ext in self.extension_manager if
+                self._is_correct_class(ext.plugin)]
diff --git a/manila/openstack/common/scheduler/base_weight.py b/manila/openstack/common/scheduler/base_weight.py
new file mode 100644
index 0000000000..ecf794f623
--- /dev/null
+++ b/manila/openstack/common/scheduler/base_weight.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2011-2012 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Pluggable Weighing support
+"""
+
+import abc
+
+import six
+
+from manila.openstack.common.scheduler import base_handler
+
+
+def normalize(weight_list, minval=None, maxval=None):
+    """Normalize the values in a list between 0 and 1.0.
+
+    The normalization is made regarding the lower and upper values present in
+    weight_list. If the minval and/or maxval parameters are set, these values
+    will be used instead of the minimum and maximum from the list.
+
+    If all the values are equal, they are normalized to 0.
+    """
+
+    if not weight_list:
+        return ()
+
+    if maxval is None:
+        maxval = max(weight_list)
+
+    if minval is None:
+        minval = min(weight_list)
+
+    maxval = float(maxval)
+    minval = float(minval)
+
+    if minval == maxval:
+        return [0] * len(weight_list)
+
+    range_ = maxval - minval
+    return ((i - minval) / range_ for i in weight_list)
+
+
+class WeighedObject(object):
+    """Object with weight information."""
+    def __init__(self, obj, weight):
+        self.obj = obj
+        self.weight = weight
+
+    def __repr__(self):
+        return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
+
+
+class BaseWeigher(six.with_metaclass(abc.ABCMeta, object)):
+    """Base class for pluggable weighers.
+
+    The attributes maxval and minval can be specified to set up the maximum
+    and minimum values for the weighed objects. These values will then be
+    taken into account in the normalization step, instead of taking the values
+    from the calculated weights.
+    """
+
+    minval = None
+    maxval = None
+
+    def weight_multiplier(self):
+        """How weighted this weigher should be.
+
+        Override this method in a subclass, so that the returned value is
+        read from a configuration option to permit operators to specify a
+        multiplier for the weigher.
+        """
+        return 1.0
+
+    @abc.abstractmethod
+    def _weigh_object(self, obj, weight_properties):
+        """Override in a subclass to specify a weight for a specific
+        object.
+        """
+
+    def weigh_objects(self, weighed_obj_list, weight_properties):
+        """Weigh multiple objects.
+
+        Override in a subclass if you need access to all objects in order
+        to calculate weights. Do not modify the weight of an object here,
+        just return a list of weights.
+        """
+        # Calculate the weights
+        weights = []
+        for obj in weighed_obj_list:
+            weight = self._weigh_object(obj.obj, weight_properties)
+
+            # Record the min and max values if they are None. If they are
+            # anything but None, we assume the weigher has set them.
+            if self.minval is None:
+                self.minval = weight
+            if self.maxval is None:
+                self.maxval = weight
+
+            if weight < self.minval:
+                self.minval = weight
+            elif weight > self.maxval:
+                self.maxval = weight
+
+            weights.append(weight)
+
+        return weights
+
+
+class BaseWeightHandler(base_handler.BaseHandler):
+    object_class = WeighedObject
+
+    def get_weighed_objects(self, weigher_classes, obj_list,
+                            weighing_properties):
+        """Return a sorted (descending), normalized list of WeighedObjects."""
+
+        if not obj_list:
+            return []
+
+        weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
+        for weigher_cls in weigher_classes:
+            weigher = weigher_cls()
+            weights = weigher.weigh_objects(weighed_objs, weighing_properties)
+
+            # Normalize the weights
+            weights = normalize(weights,
+                                minval=weigher.minval,
+                                maxval=weigher.maxval)
+
+            for i, weight in enumerate(weights):
+                obj = weighed_objs[i]
+                obj.weight += weigher.weight_multiplier() * weight
+
+        return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
diff --git a/manila/openstack/common/scheduler/filters/__init__.py b/manila/openstack/common/scheduler/filters/__init__.py
index c41ed437d9..11a5eacef6 100644
--- a/manila/openstack/common/scheduler/filters/__init__.py
+++ b/manila/openstack/common/scheduler/filters/__init__.py
@@ -17,13 +17,10 @@
 Scheduler host filters
 """
 
-from manila.openstack.common import log as logging
-from manila.openstack.common.scheduler import filter
-
-LOG = logging.getLogger(__name__)
+from manila.openstack.common.scheduler import base_filter
 
 
-class BaseHostFilter(filter.BaseFilter):
+class BaseHostFilter(base_filter.BaseFilter):
     """Base class for host filters."""
     def _filter_one(self, obj, filter_properties):
         """Return True if the object passes the filter, otherwise False."""
@@ -36,6 +33,6 @@
         raise NotImplementedError()
 
 
-class HostFilterHandler(filter.BaseFilterHandler):
+class HostFilterHandler(base_filter.BaseFilterHandler):
     def __init__(self, namespace):
         super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)
diff --git a/manila/openstack/common/scheduler/filters/availability_zone_filter.py b/manila/openstack/common/scheduler/filters/availability_zone_filter.py
index 8d88090976..b654310890 100644
--- 
a/manila/openstack/common/scheduler/filters/availability_zone_filter.py +++ b/manila/openstack/common/scheduler/filters/availability_zone_filter.py @@ -13,16 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. - from manila.openstack.common.scheduler import filters class AvailabilityZoneFilter(filters.BaseHostFilter): """Filters Hosts by availability zone.""" + # Availability zones do not change within a request + run_filter_once_per_request = True + def host_passes(self, host_state, filter_properties): spec = filter_properties.get('request_spec', {}) - props = spec.get('resource_properties', []) + props = spec.get('resource_properties', {}) availability_zone = props.get('availability_zone') if availability_zone: diff --git a/manila/openstack/common/scheduler/filters/capabilities_filter.py b/manila/openstack/common/scheduler/filters/capabilities_filter.py index 22d4914d9a..3cc3635c4f 100644 --- a/manila/openstack/common/scheduler/filters/capabilities_filter.py +++ b/manila/openstack/common/scheduler/filters/capabilities_filter.py @@ -13,25 +13,27 @@ # License for the specific language governing permissions and limitations # under the License. +import six + from manila.openstack.common import log as logging from manila.openstack.common.scheduler import filters from manila.openstack.common.scheduler.filters import extra_specs_ops - LOG = logging.getLogger(__name__) class CapabilitiesFilter(filters.BaseHostFilter): - """HostFilter to work with resource (instance & share) type records.""" + """HostFilter to work with resource (instance & volume) type records.""" def _satisfies_extra_specs(self, capabilities, resource_type): - """Check that the capabilities provided by the services - satisfy the extra specs associated with the instance type.""" + """Check that the capabilities provided by the services satisfy + the extra specs associated with the resource type. + """ extra_specs = resource_type.get('extra_specs', []) if not extra_specs: return True - for key, req in extra_specs.iteritems(): + for key, req in six.iteritems(extra_specs): # Either not scope format, or in capabilities scope scope = key.split(':') if len(scope) > 1 and scope[0] != "capabilities": @@ -40,24 +42,29 @@ class CapabilitiesFilter(filters.BaseHostFilter): del scope[0] cap = capabilities - for index in range(0, len(scope)): + for index in range(len(scope)): try: - cap = cap.get(scope[index], None) + cap = cap.get(scope[index]) except AttributeError: return False if cap is None: return False if not extra_specs_ops.match(cap, req): + LOG.debug("extra_spec requirement '%(req)s' " + "does not match '%(cap)s'", + {'req': req, 'cap': cap}) return False return True def host_passes(self, host_state, filter_properties): - """Return a list of hosts that can create instance_type.""" - # Note(zhiteng) Currently only Manila and Nova are using + """Return a list of hosts that can create resource_type.""" + # Note(zhiteng) Currently only Cinder and Nova are using # this filter, so the resource type is either instance or # volume. 
resource_type = filter_properties.get('resource_type') if not self._satisfies_extra_specs(host_state.capabilities, resource_type): + LOG.debug("%(host_state)s fails resource_type extra_specs " + "requirements", {'host_state': host_state}) return False return True diff --git a/manila/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py b/manila/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py new file mode 100644 index 0000000000..3953de3535 --- /dev/null +++ b/manila/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py @@ -0,0 +1,55 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from manila.openstack.common import log as logging +from manila.openstack.common.scheduler import filters + +LOG = logging.getLogger(__name__) + + +class IgnoreAttemptedHostsFilter(filters.BaseHostFilter): + """Filter out previously attempted hosts + + A host passes this filter if it has not already been attempted for + scheduling. The scheduler needs to add previously attempted hosts + to the 'retry' key of filter_properties in order for this to work + correctly. For example: + { + 'retry': { + 'hosts': ['host1', 'host2'], + 'num_attempts': 3, + } + } + """ + + def host_passes(self, host_state, filter_properties): + """Skip nodes that have already been attempted.""" + attempted = filter_properties.get('retry') + if not attempted: + # Re-scheduling is disabled + LOG.debug("Re-scheduling is disabled.") + return True + + hosts = attempted.get('hosts', []) + host = host_state.host + + passes = host not in hosts + pass_msg = "passes" if passes else "fails" + + LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: " + "%(hosts)s" % {'host': host, + 'pass_msg': pass_msg, + 'hosts': hosts}) + return passes diff --git a/manila/openstack/common/scheduler/filters/json_filter.py b/manila/openstack/common/scheduler/filters/json_filter.py index 9d5d1b2882..5973682999 100644 --- a/manila/openstack/common/scheduler/filters/json_filter.py +++ b/manila/openstack/common/scheduler/filters/json_filter.py @@ -13,9 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. 
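The retry bookkeeping consumed by IgnoreAttemptedHostsFilter is plain data, so its behaviour is easy to pin down. A small sketch, with invented host names and a stand-in for the scheduler's host state object:

    from manila.openstack.common.scheduler.filters import (
        ignore_attempted_hosts_filter as iahf)


    class FakeHostState(object):
        """Stand-in for the scheduler's HostState; only .host is used here."""
        def __init__(self, host):
            self.host = host


    filt = iahf.IgnoreAttemptedHostsFilter()
    props = {'retry': {'hosts': ['host1', 'host2'], 'num_attempts': 3}}

    filt.host_passes(FakeHostState('host1'), props)  # False: already tried
    filt.host_passes(FakeHostState('host3'), props)  # True: not yet attempted
    filt.host_passes(FakeHostState('host1'), {})     # True: retry disabled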
- import operator +import six + from manila.openstack.common import jsonutils from manila.openstack.common.scheduler import filters @@ -51,7 +52,7 @@ class JsonFilter(filters.BaseHostFilter): return self._op_compare(args, operator.gt) def _in(self, args): - """First term is in set of remaining terms""" + """First term is in set of remaining terms.""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): @@ -102,7 +103,7 @@ class JsonFilter(filters.BaseHostFilter): if obj is None: return None for item in path[1:]: - obj = obj.get(item, None) + obj = obj.get(item) if obj is None: return None return obj @@ -117,7 +118,7 @@ class JsonFilter(filters.BaseHostFilter): for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, host_state) - elif isinstance(arg, basestring): + elif isinstance(arg, six.string_types): arg = self._parse_string(arg, host_state) if arg is not None: cooked_args.append(arg) diff --git a/manila/openstack/common/scheduler/weights/__init__.py b/manila/openstack/common/scheduler/weights/__init__.py index ee6aaea3aa..eda99315ef 100644 --- a/manila/openstack/common/scheduler/weights/__init__.py +++ b/manila/openstack/common/scheduler/weights/__init__.py @@ -18,10 +18,10 @@ Scheduler host weights """ -from manila.openstack.common.scheduler import weight +from manila.openstack.common.scheduler import base_weight -class WeighedHost(weight.WeighedObject): +class WeighedHost(base_weight.WeighedObject): def to_dict(self): return { 'weight': self.weight, @@ -33,12 +33,12 @@ class WeighedHost(weight.WeighedObject): (self.obj.host, self.weight)) -class BaseHostWeigher(weight.BaseWeigher): +class BaseHostWeigher(base_weight.BaseWeigher): """Base class for host weights.""" pass -class HostWeightHandler(weight.BaseWeightHandler): +class HostWeightHandler(base_weight.BaseWeightHandler): object_class = WeighedHost def __init__(self, namespace): diff --git a/manila/openstack/common/service.py b/manila/openstack/common/service.py index 4a891f9adb..2c12d1c365 100644 --- a/manila/openstack/common/service.py +++ b/manila/openstack/common/service.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara @@ -20,20 +18,30 @@ """Generic Node base class for all workers that run on hosts.""" import errno +import logging as std_logging import os import random import signal import sys import time +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + import eventlet -import logging as std_logging +from eventlet import event from oslo.config import cfg from manila.openstack.common import eventlet_backdoor -from manila.openstack.common.gettextutils import _ +from manila.openstack.common.gettextutils import _LE, _LI, _LW from manila.openstack.common import importutils from manila.openstack.common import log as logging +from manila.openstack.common import systemd from manila.openstack.common import threadgroup @@ -42,6 +50,53 @@ CONF = cfg.CONF LOG = logging.getLogger(__name__) +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. 
If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. + return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + class Launcher(object): """Launch one or more services and wait for them to complete.""" @@ -51,19 +106,8 @@ class Launcher(object): :returns: None """ - self._services = threadgroup.ThreadGroup() - eventlet_backdoor.initialize_if_enabled() - - @staticmethod - def run_service(service): - """Start and wait for a service to finish. - - :param service: service to run and wait for. - :returns: None - - """ - service.start() - service.wait() + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() def launch_service(self, service): """Load and start the given service. @@ -72,7 +116,8 @@ class Launcher(object): :returns: None """ - self._services.add_thread(self.run_service, service) + service.backdoor_port = self.backdoor_port + self.services.add(service) def stop(self): """Stop all services which are currently running. @@ -80,7 +125,7 @@ class Launcher(object): :returns: None """ - self._services.stop() + self.services.stop() def wait(self): """Waits until all services have been stopped, and then returns. @@ -88,7 +133,16 @@ class Launcher(object): :returns: None """ - self._services.wait() + self.services.wait() + + def restart(self): + """Reload config files and restart service. 
+ + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() class SignalExit(SystemExit): @@ -100,33 +154,49 @@ class SignalExit(SystemExit): class ServiceLauncher(Launcher): def _handle_signal(self, signo, frame): # Allow the process to be killed again and die from natural causes - signal.signal(signal.SIGTERM, signal.SIG_DFL) - signal.signal(signal.SIGINT, signal.SIG_DFL) - + _set_signals_handler(signal.SIG_DFL) raise SignalExit(signo) - def wait(self): - signal.signal(signal.SIGTERM, self._handle_signal) - signal.signal(signal.SIGINT, self._handle_signal) + def handle_signal(self): + _set_signals_handler(self._handle_signal) - LOG.debug(_('Full set of CONF:')) + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug('Full set of CONF:') CONF.log_opt_values(LOG, std_logging.DEBUG) - status = None try: + if ready_callback: + ready_callback() super(ServiceLauncher, self).wait() except SignalExit as exc: - signame = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'}[exc.signo] - LOG.info(_('Caught %s, exiting'), signame) + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Caught %s, exiting'), signame) status = exc.code + signo = exc.signo except SystemExit as exc: status = exc.code finally: - if rpc: - rpc.cleanup() self.stop() - return status + if rpc: + try: + rpc.cleanup() + except Exception: + # We're shutting down, so it doesn't matter at this point. + LOG.exception(_LE('Exception during rpc cleanup.')) + + return status, signo + + def wait(self, ready_callback=None): + systemd.notify_once() + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() class ServiceWrapper(object): @@ -138,43 +208,82 @@ class ServiceWrapper(object): class ProcessLauncher(object): - def __init__(self): + def __init__(self, wait_interval=0.01): + """Constructor. + + :param wait_interval: The interval to sleep for between checks + of child process exit. 
+ """ self.children = {} self.sigcaught = None self.running = True + self.wait_interval = wait_interval rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() - signal.signal(signal.SIGTERM, self._handle_signal) - signal.signal(signal.SIGINT, self._handle_signal) + def handle_signal(self): + _set_signals_handler(self._handle_signal) def _handle_signal(self, signo, frame): self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes - signal.signal(signal.SIGTERM, signal.SIG_DFL) - signal.signal(signal.SIGINT, signal.SIG_DFL) + _set_signals_handler(signal.SIG_DFL) def _pipe_watcher(self): # This will block until the write end is closed when the parent # dies unexpectedly self.readpipe.read() - LOG.info(_('Parent process has died unexpectedly, exiting')) + LOG.info(_LI('Parent process has died unexpectedly, exiting')) sys.exit(1) - def _child_process(self, service): + def _child_process_handle_signal(self): # Setup child signal handlers differently def _sigterm(*args): signal.signal(signal.SIGTERM, signal.SIG_DFL) raise SignalExit(signal.SIGTERM) + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) # Block SIGINT and let the parent send us a SIGTERM signal.signal(signal.SIGINT, signal.SIG_IGN) + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. + try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Child caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_LE('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + # Reopen the eventlet hub to make sure we don't share an epoll # fd with parent and/or siblings, which would be bad eventlet.hubs.use_hub() @@ -188,7 +297,8 @@ class ProcessLauncher(object): random.seed() launcher = Launcher() - launcher.run_service(service) + launcher.launch_service(service) + return launcher def _start_child(self, wrap): if len(wrap.forktimes) > wrap.workers: @@ -197,7 +307,7 @@ class ProcessLauncher(object): # start up quickly but ensure we don't fork off children that # die instantly too quickly. if time.time() - wrap.forktimes[0] < wrap.workers: - LOG.info(_('Forking too fast, sleeping')) + LOG.info(_LI('Forking too fast, sleeping')) time.sleep(1) wrap.forktimes.pop(0) @@ -206,28 +316,17 @@ class ProcessLauncher(object): pid = os.fork() if pid == 0: - # NOTE(johannes): All exceptions are caught to ensure this - # doesn't fallback into the loop spawning children. It would - # be bad for a child to spawn more children. 
- status = 0 - try: - self._child_process(wrap.service) - except SignalExit as exc: - signame = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'}[exc.signo] - LOG.info(_('Caught %s, exiting'), signame) - status = exc.code - except SystemExit as exc: - status = exc.code - except BaseException: - LOG.exception(_('Unhandled exception')) - status = 2 - finally: - wrap.service.stop() + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() os._exit(status) - LOG.info(_('Started child %d'), pid) + LOG.info(_LI('Started child %d'), pid) wrap.children.add(pid) self.children[pid] = wrap @@ -237,7 +336,7 @@ class ProcessLauncher(object): def launch_service(self, service, workers=1): wrap = ServiceWrapper(service, workers) - LOG.info(_('Starting %d workers'), wrap.workers) + LOG.info(_LI('Starting %d workers'), wrap.workers) while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) @@ -254,43 +353,56 @@ class ProcessLauncher(object): if os.WIFSIGNALED(status): sig = os.WTERMSIG(status) - LOG.info(_('Child %(pid)d killed by signal %(sig)d'), + LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), dict(pid=pid, sig=sig)) else: code = os.WEXITSTATUS(status) - LOG.info(_('Child %(pid)s exited with status %(code)d'), + LOG.info(_LI('Child %(pid)s exited with status %(code)d'), dict(pid=pid, code=code)) if pid not in self.children: - LOG.warning(_('pid %d not in child list'), pid) + LOG.warning(_LW('pid %d not in child list'), pid) return None wrap = self.children.pop(pid) wrap.children.remove(pid) return wrap - def wait(self): - """Loop waiting on children to die and respawning as necessary""" - - LOG.debug(_('Full set of CONF:')) - CONF.log_opt_values(LOG, std_logging.DEBUG) - + def _respawn_children(self): while self.running: wrap = self._wait_child() if not wrap: # Yield to other threads if no children have exited # Sleep for a short time to avoid excessive CPU usage # (see bug #1095346) - eventlet.greenthread.sleep(.01) + eventlet.greenthread.sleep(self.wait_interval) continue - while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) - if self.sigcaught: - signame = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'}[self.sigcaught] - LOG.info(_('Caught %s, stopping children'), signame) + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + systemd.notify_once() + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + while True: + self.handle_signal() + self._respawn_children() + if self.sigcaught: + signame = _signo_to_signame(self.sigcaught) + LOG.info(_LI('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + except eventlet.greenlet.GreenletExit: + LOG.info(_LI("Wait called after thread killed. 
Cleaning up.")) for pid in self.children: try: @@ -301,7 +413,7 @@ class ProcessLauncher(object): # Wait for children to die if self.children: - LOG.info(_('Waiting on %d children to exit'), len(self.children)) + LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) while self.children: self._wait_child() @@ -312,21 +424,81 @@ class Service(object): def __init__(self, threads=1000): self.tg = threadgroup.ThreadGroup(threads) + # signal that the service is done shutting itself down: + self._done = event.Event() + + def reset(self): + # NOTE(Fengqian): docs for Event.reset() recommend against using it + self._done = event.Event() + def start(self): pass def stop(self): self.tg.stop() + self.tg.wait() + # Signal that service cleanup is done: + if not self._done.ready(): + self._done.send() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = event.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in self.services: + service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + if not self.done.ready(): + self.done.send() + + # reap threads: + self.tg.stop() def wait(self): self.tg.wait() + def restart(self): + self.stop() + self.done = event.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) -def launch(service, workers=None): - if workers: - launcher = ProcessLauncher() - launcher.launch_service(service, workers=workers) - else: + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=1): + if workers is None or workers == 1: launcher = ServiceLauncher() launcher.launch_service(service) + else: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + return launcher diff --git a/manila/openstack/common/sslutils.py b/manila/openstack/common/sslutils.py new file mode 100644 index 0000000000..a63dbe1252 --- /dev/null +++ b/manila/openstack/common/sslutils.py @@ -0,0 +1,95 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
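With the Services container in place, launch() now returns the launcher, and the workers count selects between the in-process and forking launchers. A usage sketch; the service subclass and its periodic task are hypothetical:

    from manila.openstack.common import service


    class HeartbeatService(service.Service):
        """Hypothetical service running one periodic task in its thread group."""

        def start(self):
            super(HeartbeatService, self).start()
            self.tg.add_timer(30, self._beat)   # every 30 seconds

        def _beat(self):
            pass  # periodic work goes here


    launcher = service.launch(HeartbeatService())     # ServiceLauncher
    # launcher = service.launch(HeartbeatService(), workers=4)  # forking
    launcher.wait()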
+ +import os +import ssl + +from oslo.config import cfg + +from manila.openstack.common.gettextutils import _ + + +ssl_opts = [ + cfg.StrOpt('ca_file', + help="CA certificate file to use to verify " + "connecting clients."), + cfg.StrOpt('cert_file', + help="Certificate file to use when starting " + "the server securely."), + cfg.StrOpt('key_file', + help="Private key file to use when starting " + "the server securely."), +] + + +CONF = cfg.CONF +CONF.register_opts(ssl_opts, "ssl") + + +def is_enabled(): + cert_file = CONF.ssl.cert_file + key_file = CONF.ssl.key_file + ca_file = CONF.ssl.ca_file + use_ssl = cert_file or key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError(_("Unable to find key_file : %s") % key_file) + + if use_ssl and (not cert_file or not key_file): + raise RuntimeError(_("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + + return use_ssl + + +def wrap(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': CONF.ssl.cert_file, + 'keyfile': CONF.ssl.key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl.ca_file: + ssl_kwargs['ca_certs'] = CONF.ssl.ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + +_SSL_PROTOCOLS = { + "tlsv1": ssl.PROTOCOL_TLSv1, + "sslv23": ssl.PROTOCOL_SSLv23, + "sslv3": ssl.PROTOCOL_SSLv3 +} + +try: + _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2 +except AttributeError: + pass + + +def validate_ssl_version(version): + key = version.lower() + try: + return _SSL_PROTOCOLS[key] + except KeyError: + raise RuntimeError(_("Invalid SSL version : %s") % version) diff --git a/manila/openstack/common/strutils.py b/manila/openstack/common/strutils.py index cb5270e48e..9108554612 100644 --- a/manila/openstack/common/strutils.py +++ b/manila/openstack/common/strutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -19,18 +17,41 @@ System-level utilities and helper functions. """ +import math +import re import sys +import unicodedata + +import six from manila.openstack.common.gettextutils import _ +UNIT_PREFIX_EXPONENT = { + 'k': 1, + 'K': 1, + 'Ki': 1, + 'M': 2, + 'Mi': 2, + 'G': 3, + 'Gi': 3, + 'T': 4, + 'Ti': 4, +} +UNIT_SYSTEM_INFO = { + 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), + 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), +} + TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + def int_from_bool_as_string(subject): - """ - Interpret a string as a boolean and return either 1 or 0. + """Interpret a string as a boolean and return either 1 or 0. Any string value in: @@ -43,13 +64,12 @@ def int_from_bool_as_string(subject): return bool_from_string(subject) and 1 or 0 -def bool_from_string(subject, strict=False): - """ - Interpret a string as a boolean. +def bool_from_string(subject, strict=False, default=False): + """Interpret a string as a boolean. 
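For the sslutils module just added, the intended call pattern for a server socket looks roughly like the sketch below, assuming the [ssl] options point at real files (the bind address and port are illustrative):

    import eventlet

    from manila.openstack.common import sslutils

    sock = eventlet.listen(('0.0.0.0', 8786))
    if sslutils.is_enabled():
        # Wraps with cert_file/key_file; client certificates are required
        # only when ca_file is configured.
        sock = sslutils.wrap(sock)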
     A case-insensitive match is performed such that strings matching 't',
     'true', 'on', 'y', 'yes', or '1' are considered True and, when
-    `strict=False`, anything else is considered False.
+    `strict=False`, anything else returns the value specified by 'default'.
 
     Useful for JSON-decoded stuff and config file parsing.
 
@@ -57,8 +77,8 @@
     ValueError which is useful when parsing values passed in from an
     API call. Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
     """
-    if not isinstance(subject, basestring):
-        subject = str(subject)
+    if not isinstance(subject, six.string_types):
+        subject = six.text_type(subject)
 
     lowered = subject.strip().lower()
 
@@ -74,25 +94,24 @@
                                       'acceptable': acceptable}
         raise ValueError(msg)
     else:
-        return False
+        return default
 
 
 def safe_decode(text, incoming=None, errors='strict'):
-    """
-    Decodes incoming str using `incoming` if they're
-    not already unicode.
+    """Decodes incoming text/bytes string using `incoming` if they're not
+    already unicode.
 
     :param incoming: Text's current encoding
     :param errors: Errors handling policy. See here for valid
         values http://docs.python.org/2/library/codecs.html
     :returns: text or a unicode `incoming` encoded
         representation of it.
-    :raises TypeError: If text is not an isntance of basestring
+    :raises TypeError: If text is not an instance of str
     """
-    if not isinstance(text, basestring):
+    if not isinstance(text, (six.string_types, six.binary_type)):
         raise TypeError("%s can't be decoded" % type(text))
 
-    if isinstance(text, unicode):
+    if isinstance(text, six.text_type):
        return text
 
     if not incoming:
@@ -119,11 +138,10 @@
 def safe_encode(text, incoming=None,
                 encoding='utf-8', errors='strict'):
-    """
-    Encodes incoming str/unicode using `encoding`. If
-    incoming is not specified, text is expected to
-    be encoded with current python's default encoding.
-    (`sys.getdefaultencoding`)
+    """Encodes incoming text/bytes string using `encoding`.
+
+    If incoming is not specified, text is expected to be encoded with
+    current python's default encoding. (`sys.getdefaultencoding`)
 
     :param incoming: Text's current encoding
     :param encoding: Expected encoding for text (Default UTF-8)
@@ -131,20 +149,91 @@
         values http://docs.python.org/2/library/codecs.html
     :returns: text or a bytestring `encoding` encoded
         representation of it.
-    :raises TypeError: If text is not an isntance of basestring
+    :raises TypeError: If text is not an instance of str
     """
-    if not isinstance(text, basestring):
+    if not isinstance(text, (six.string_types, six.binary_type)):
         raise TypeError("%s can't be encoded" % type(text))
 
     if not incoming:
         incoming = (sys.stdin.encoding or
                     sys.getdefaultencoding())
 
-    if isinstance(text, unicode):
+    if isinstance(text, six.text_type):
         return text.encode(encoding, errors)
     elif text and encoding != incoming:
         # Decode text before encoding it with `encoding`
         text = safe_decode(text, incoming, errors)
         return text.encode(encoding, errors)
+    else:
+        return text
 
-    return text
+
+def string_to_bytes(text, unit_system='IEC', return_int=False):
+    """Converts a string into a float representation of bytes.
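The effect of the new default parameter on bool_from_string() is easiest to see with concrete values; the expected results below follow from the TRUE_STRINGS/FALSE_STRINGS tables above:

    from manila.openstack.common import strutils

    strutils.bool_from_string('YES')                  # True
    strutils.bool_from_string('maybe')                # False (default=False)
    strutils.bool_from_string('maybe', default=True)  # True: unrecognized
    strutils.bool_from_string('maybe', strict=True)   # raises ValueError
    strutils.int_from_bool_as_string('true')          # 1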
+ + The units supported for IEC :: + + Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) + KB, KiB, MB, MiB, GB, GiB, TB, TiB + + The units supported for SI :: + + kb(it), Mb(it), Gb(it), Tb(it) + kB, MB, GB, TB + + Note that the SI unit system does not support capital letter 'K' + + :param text: String input for bytes size conversion. + :param unit_system: Unit system for byte size conversion. + :param return_int: If True, returns integer representation of text + in bytes. (default: decimal) + :returns: Numerical representation of text in bytes. + :raises ValueError: If text has an invalid value. + + """ + try: + base, reg_ex = UNIT_SYSTEM_INFO[unit_system] + except KeyError: + msg = _('Invalid unit system: "%s"') % unit_system + raise ValueError(msg) + match = reg_ex.match(text) + if match: + magnitude = float(match.group(1)) + unit_prefix = match.group(2) + if match.group(3) in ['b', 'bit']: + magnitude /= 8 + else: + msg = _('Invalid string format: %s') % text + raise ValueError(msg) + if not unit_prefix: + res = magnitude + else: + res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) + if return_int: + return int(math.ceil(res)) + return res + + +def to_slug(value, incoming=None, errors="strict"): + """Normalize string. + + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. + + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of str + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) diff --git a/manila/openstack/common/threadgroup.py b/manila/openstack/common/threadgroup.py index 6528444c0b..f0c05afa7f 100644 --- a/manila/openstack/common/threadgroup.py +++ b/manila/openstack/common/threadgroup.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,10 +11,10 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import threading -from eventlet import greenlet +import eventlet from eventlet import greenpool -from eventlet import greenthread from manila.openstack.common import log as logging from manila.openstack.common import loopingcall @@ -26,7 +24,7 @@ LOG = logging.getLogger(__name__) def _thread_done(gt, *args, **kwargs): - """ Callback function to be passed to GreenThread.link() when we spawn() + """Callback function to be passed to GreenThread.link() when we spawn() Calls the :class:`ThreadGroup` to notify if. """ @@ -34,7 +32,7 @@ def _thread_done(gt, *args, **kwargs): class Thread(object): - """ Wrapper around a greenthread, that holds a reference to the + """Wrapper around a greenthread, that holds a reference to the :class:`ThreadGroup`. 
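# Worked conversions for the strutils additions above (values chosen for
# illustration; results follow from the tables and regexes shown):

from manila.openstack.common import strutils

strutils.string_to_bytes('10KB')                     # 10240.0 (IEC, base 1024)
strutils.string_to_bytes('10Kb')                     # 1280.0  (bits: divide by 8)
strutils.string_to_bytes('10kB', unit_system='SI')   # 10000.0 (SI, base 1000)
strutils.string_to_bytes('1.5MiB', return_int=True)  # 1572864 (math.ceil applied)
strutils.to_slug(u'Manila  Shared FS!')              # u'manila-shared-fs'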
The Thread will notify the :class:`ThreadGroup` when it has done so it can be removed from the threads list. """ @@ -48,9 +46,12 @@ class Thread(object): def wait(self): return self.thread.wait() + def link(self, func, *args, **kwargs): + self.thread.link(func, *args, **kwargs) + class ThreadGroup(object): - """ The point of the ThreadGroup classis to: + """The point of the ThreadGroup class is to: * keep track of timers and greenthreads (making it easier to stop them when need be). @@ -61,6 +62,13 @@ class ThreadGroup(object): self.threads = [] self.timers = [] + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + def add_timer(self, interval, callback, initial_delay=None, *args, **kwargs): pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) @@ -72,13 +80,17 @@ class ThreadGroup(object): gt = self.pool.spawn(callback, *args, **kwargs) th = Thread(gt, self) self.threads.append(th) + return th def thread_done(self, thread): self.threads.remove(thread) - def stop(self): - current = greenthread.getcurrent() - for x in self.threads: + def _stop_threads(self): + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: if x is current: # don't kill the current thread. continue @@ -87,6 +99,7 @@ class ThreadGroup(object): except Exception as ex: LOG.exception(ex) + def stop_timers(self): for x in self.timers: try: x.stop() @@ -94,21 +107,41 @@ class ThreadGroup(object): LOG.exception(ex) self.timers = [] + def stop(self, graceful=False): + """stop function has the option of graceful=True/False. + + * In case of graceful=True, wait for all threads to be finished. + Never kill threads. + * In case of graceful=False, kill threads immediately. + """ + self.stop_timers() + if graceful: + # In case of graceful=True, wait for all threads to be + # finished, never kill threads + self.wait() + else: + # In case of graceful=False(Default), kill threads + # immediately + self._stop_threads() + def wait(self): for x in self.timers: try: x.wait() - except greenlet.GreenletExit: + except eventlet.greenlet.GreenletExit: pass except Exception as ex: LOG.exception(ex) - current = greenthread.getcurrent() - for x in self.threads: + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: if x is current: continue try: x.wait() - except greenlet.GreenletExit: + except eventlet.greenlet.GreenletExit: pass except Exception as ex: LOG.exception(ex) diff --git a/manila/openstack/common/timeutils.py b/manila/openstack/common/timeutils.py index 6094365907..52688a0268 100644 --- a/manila/openstack/common/timeutils.py +++ b/manila/openstack/common/timeutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -21,8 +19,10 @@ Time related utilities and helper functions. 
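# The graceful-stop behaviour added to ThreadGroup above, end to end
# (worker() and the sleep lengths are illustrative; the no-argument
# ThreadGroup() constructor is assumed from the unmodified __init__):

import eventlet
eventlet.monkey_patch()

from manila.openstack.common import threadgroup


def worker(seconds):
    eventlet.sleep(seconds)

tg = threadgroup.ThreadGroup()
for n in range(3):
    tg.add_thread(worker, 0.1 * n)  # add_thread() now returns the Thread

tg.stop(graceful=True)  # stop timers, then wait for threads to finish
# tg.stop() keeps the old semantics: timers stopped, threads killed.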
import calendar
 import datetime
+import time

 import iso8601
+import six


 # ISO 8601 extended time format with microseconds
@@ -32,7 +32,7 @@
 PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND

 def isotime(at=None, subsecond=False):
- """Stringify time in ISO 8601 format"""
+ """Stringify time in ISO 8601 format."""
 if not at:
 at = utcnow()
 st = at.strftime(_ISO8601_TIME_FORMAT
@@ -44,13 +44,13 @@

 def parse_isotime(timestr):
- """Parse time from ISO 8601 format"""
+ """Parse time from ISO 8601 format."""
 try:
 return iso8601.parse_date(timestr)
 except iso8601.ParseError as e:
- raise ValueError(e.message)
+ raise ValueError(six.text_type(e))
 except TypeError as e:
- raise ValueError(e.message)
+ raise ValueError(six.text_type(e))

 def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -66,7 +66,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):

 def normalize_time(timestamp):
- """Normalize time in arbitrary timezone to UTC naive object"""
+ """Normalize time in arbitrary timezone to UTC naive object."""
 offset = timestamp.utcoffset()
 if offset is None:
 return timestamp
@@ -75,20 +75,31 @@

 def is_older_than(before, seconds):
 """Return True if before is older than seconds."""
- if isinstance(before, basestring):
+ if isinstance(before, six.string_types):
 before = parse_strtime(before).replace(tzinfo=None)
+ else:
+ before = before.replace(tzinfo=None)
+
 return utcnow() - before > datetime.timedelta(seconds=seconds)

 def is_newer_than(after, seconds):
 """Return True if after is newer than seconds."""
- if isinstance(after, basestring):
+ if isinstance(after, six.string_types):
 after = parse_strtime(after).replace(tzinfo=None)
+ else:
+ after = after.replace(tzinfo=None)
+
 return after - utcnow() > datetime.timedelta(seconds=seconds)

 def utcnow_ts():
 """Timestamp version of our utcnow function."""
+ if utcnow.override_time is None:
+ # NOTE(kgriffs): This is several times faster
+ # than going through calendar.timegm(...)
+ return int(time.time())
+
 return calendar.timegm(utcnow().timetuple())

@@ -103,19 +114,22 @@ def utcnow():

 def iso8601_from_timestamp(timestamp):
- """Returns a iso8601 formated date from timestamp"""
+ """Returns an iso8601 formatted date from timestamp."""
 return isotime(datetime.datetime.utcfromtimestamp(timestamp))

 utcnow.override_time = None

-def set_time_override(override_time=datetime.datetime.utcnow()):
+def set_time_override(override_time=None):
+ """Overrides utils.utcnow.
+
+ Make it return a constant time or a list thereof, one at a time.
+
+ :param override_time: datetime instance or list thereof. If not
+ given, defaults to the current UTC time.
 """
- Override utils.utcnow to return a constant time or a list thereof,
- one at a time.
- """
- utcnow.override_time = override_time
+ utcnow.override_time = override_time or datetime.datetime.utcnow()

 def advance_time_delta(timedelta):
@@ -141,7 +155,8 @@ def clear_time_override():

 def marshall_now(now=None):
 """Make an rpc-safe datetime with microseconds.

- Note: tzinfo is stripped, but not required for relative times."""
+ Note: tzinfo is stripped, but not required for relative times.
+ """
 if not now:
 now = utcnow()
 return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@@ -161,11 +176,21 @@ def unmarshall_time(tyme):

 def delta_seconds(before, after):
- """
+ """Return the difference between two timing objects.
+
 Compute the difference in seconds between two date, time, or
 datetime objects (as a float, to microsecond resolution).
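# How the reworked set_time_override() above behaves in tests; the frozen
# timestamp is an arbitrary example (utcnow()'s override handling is
# assumed from the unmodified body not shown in this hunk):

import datetime

from manila.openstack.common import timeutils

timeutils.set_time_override(datetime.datetime(2014, 1, 1))
assert timeutils.utcnow() == datetime.datetime(2014, 1, 1)
timeutils.advance_time_delta(datetime.timedelta(minutes=5))
assert timeutils.utcnow() == datetime.datetime(2014, 1, 1, 0, 5)
timeutils.clear_time_override()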
""" delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ try: return delta.total_seconds() except AttributeError: @@ -174,11 +199,10 @@ def delta_seconds(before, after): def is_soon(dt, window): - """ - Determines if time is going to happen in the next window seconds. + """Determines if time is going to happen in the next window seconds. - :params dt: the time - :params window: minimum seconds to remain to consider the time not soon + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon :return: True if expiration is within the given duration """ diff --git a/manila/openstack/common/uuidutils.py b/manila/openstack/common/uuidutils.py index 7608acb942..234b880c99 100644 --- a/manila/openstack/common/uuidutils.py +++ b/manila/openstack/common/uuidutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2012 Intel Corporation. # All Rights Reserved. # diff --git a/manila/openstack/common/versionutils.py b/manila/openstack/common/versionutils.py new file mode 100644 index 0000000000..bb65927b37 --- /dev/null +++ b/manila/openstack/common/versionutils.py @@ -0,0 +1,148 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helpers for comparing version strings. +""" + +import functools +import pkg_resources + +from manila.openstack.common.gettextutils import _ +from manila.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class deprecated(object): + """A decorator to mark callables as deprecated. + + This decorator logs a deprecation message when the callable it decorates is + used. The message will include the release where the callable was + deprecated, the release where it may be removed and possibly an optional + replacement. + + Examples: + + 1. Specifying the required deprecated release + + >>> @deprecated(as_of=deprecated.ICEHOUSE) + ... def a(): pass + + 2. Specifying a replacement: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()') + ... def b(): pass + + 3. Specifying the release where the functionality may be removed: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) + ... def c(): pass + + """ + + FOLSOM = 'F' + GRIZZLY = 'G' + HAVANA = 'H' + ICEHOUSE = 'I' + + _RELEASES = { + 'F': 'Folsom', + 'G': 'Grizzly', + 'H': 'Havana', + 'I': 'Icehouse', + } + + _deprecated_msg_with_alternative = _( + '%(what)s is deprecated as of %(as_of)s in favor of ' + '%(in_favor_of)s and may be removed in %(remove_in)s.') + + _deprecated_msg_no_alternative = _( + '%(what)s is deprecated as of %(as_of)s and may be ' + 'removed in %(remove_in)s. 
It will not be superseded.')
+
+ def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+ """Initialize decorator
+
+ :param as_of: the release deprecating the callable. Constants
+ are defined in this class for convenience.
+ :param in_favor_of: the replacement for the callable (optional)
+ :param remove_in: an integer specifying how many releases to wait
+ before removing (default: 2)
+ :param what: name of the thing being deprecated (default: the
+ callable's name)
+
+ """
+ self.as_of = as_of
+ self.in_favor_of = in_favor_of
+ self.remove_in = remove_in
+ self.what = what
+
+ def __call__(self, func):
+ if not self.what:
+ self.what = func.__name__ + '()'
+
+ @functools.wraps(func)
+ def wrapped(*args, **kwargs):
+ msg, details = self._build_message()
+ LOG.deprecated(msg, details)
+ return func(*args, **kwargs)
+ return wrapped
+
+ def _get_safe_to_remove_release(self, release):
+ # TODO(dstanek): this method will have to be reimplemented once
+ # we get to the X release because once we get to the Y
+ # release, what is Y+2?
+ new_release = chr(ord(release) + self.remove_in)
+ if new_release in self._RELEASES:
+ return self._RELEASES[new_release]
+ else:
+ return new_release
+
+ def _build_message(self):
+ details = dict(what=self.what,
+ as_of=self._RELEASES[self.as_of],
+ remove_in=self._get_safe_to_remove_release(self.as_of))
+
+ if self.in_favor_of:
+ details['in_favor_of'] = self.in_favor_of
+ msg = self._deprecated_msg_with_alternative
+ else:
+ msg = self._deprecated_msg_no_alternative
+ return msg, details
+
+
+def is_compatible(requested_version, current_version, same_major=True):
+ """Determine whether `requested_version` is satisfied by
+ `current_version`; in other words, `current_version` is >=
+ `requested_version`.
+
+ :param requested_version: version to check for compatibility
+ :param current_version: version to check against
+ :param same_major: if True, the major version must be identical between
+ `requested_version` and `current_version`. This is used when a
+ major-version difference indicates incompatibility between the two
+ versions. Since this is the common case in practice, the default is
+ True.
+ :returns: True if compatible, False if not
+ """
+ requested_parts = pkg_resources.parse_version(requested_version)
+ current_parts = pkg_resources.parse_version(current_version)
+
+ if same_major and (requested_parts[0] != current_parts[0]):
+ return False
+
+ return current_parts >= requested_parts
diff --git a/manila/policy.py b/manila/policy.py
index 6c6d317794..13879f1954 100644
--- a/manila/policy.py
+++ b/manila/policy.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 OpenStack, LLC.
 # All Rights Reserved.
 #
@@ -16,26 +14,19 @@
 # under the License.
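# Usage sketch for the new versionutils module above; the decorated name
# and its replacement are invented for the example:

from manila.openstack.common import versionutils


@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
                         in_favor_of='shares_api()')
def volumes_api():
    return []

volumes_api()  # still works, but LOG.deprecated() reports:
               # "volumes_api() is deprecated as of Grizzly in favor of
               #  shares_api() and may be removed in Icehouse."

# is_compatible() leans on pkg_resources version ordering:
assert versionutils.is_compatible('2.0.0', '2.1.0')      # minor bump is fine
assert not versionutils.is_compatible('2.0.0', '3.0.0')  # major differs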
"""Policy Engine For Manila""" + import functools +import os.path from oslo.config import cfg from manila import exception - from manila.openstack.common import policy from manila import utils -policy_opts = [ - cfg.StrOpt('policy_file', - default='policy.json', - help=_('JSON file representing policy')), - cfg.StrOpt('policy_default_rule', - default='default', - help=_('Rule checked when requested rule is not found')), ] - CONF = cfg.CONF -CONF.register_opts(policy_opts) +_ENFORCER = None _POLICY_PATH = None _POLICY_CACHE = {} @@ -43,26 +34,33 @@ _POLICY_CACHE = {} def reset(): global _POLICY_PATH global _POLICY_CACHE + global _ENFORCER _POLICY_PATH = None _POLICY_CACHE = {} - policy.reset() + _ENFORCER = None def init(): global _POLICY_PATH global _POLICY_CACHE + global _ENFORCER if not _POLICY_PATH: - _POLICY_PATH = utils.find_config(CONF.policy_file) - utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, - reload_func=_set_brain) + _POLICY_PATH = CONF.policy_file + if not os.path.exists(_POLICY_PATH): + _POLICY_PATH = utils.find_config(_POLICY_PATH) + if not _ENFORCER: + _ENFORCER = policy.Enforcer(policy_file=_POLICY_PATH) + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, reload_func=_set_rules) -def _set_brain(data): +def _set_rules(data): + global _ENFORCER default_rule = CONF.policy_default_rule - policy.set_brain(policy.HttpBrain.load_json(data, default_rule)) + _ENFORCER.set_rules(policy.Rules.load_json( + data, default_rule)) -def enforce(context, action, target): +def enforce(context, action, target, do_raise=True): """Verifies that the action is valid on the target in this context. :param context: manila context @@ -80,12 +78,15 @@ def enforce(context, action, target): """ init() + if not isinstance(context, dict): + context = context.to_dict() - match_list = ('rule:%s' % action,) - credentials = context.to_dict() - - policy.enforce(match_list, target, credentials, - exception.PolicyNotAuthorized, action=action) + # Add the exception arguments if asked to do a raise + extra = {} + if do_raise: + extra.update(exc=exception.PolicyNotAuthorized, action=action, + do_raise=do_raise) + return _ENFORCER.enforce(action, target, context, **extra) def check_is_admin(roles): @@ -94,16 +95,13 @@ def check_is_admin(roles): """ init() - action = 'context_is_admin' - match_list = ('rule:%s' % action,) # include project_id on target to avoid KeyError if context_is_admin # policy definition is missing, and default admin_or_owner rule # attempts to apply. Since our credentials dict does not include a # project_id, this target can never match as a generic rule. target = {'project_id': ''} credentials = {'roles': roles} - - return policy.enforce(match_list, target, credentials) + return _ENFORCER.enforce("context_is_admin", target, credentials) def wrap_check_policy(resource): diff --git a/manila/tests/conf_fixture.py b/manila/tests/conf_fixture.py index 2c12b26273..bf6ca7c944 100644 --- a/manila/tests/conf_fixture.py +++ b/manila/tests/conf_fixture.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
@@ -20,10 +18,6 @@ from oslo.config import cfg CONF = cfg.CONF -CONF.import_opt('policy_file', 'manila.policy') - -def_vol_type = 'fake_vol_type' - def set_defaults(conf): conf.set_default('connection_type', 'fake') diff --git a/manila/tests/test_policy.py b/manila/tests/test_policy.py index ea55877ec6..0bc39e12c0 100644 --- a/manila/tests/test_policy.py +++ b/manila/tests/test_policy.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. @@ -17,19 +15,19 @@ """Test of Policy Engine For Manila.""" +import mock import os.path -import StringIO -import urllib2 +import six +from six.moves.urllib import request as urlrequest + +from oslo.config import cfg from manila import context from manila import exception - -import manila.openstack.common.policy from manila.openstack.common import policy as common_policy from manila import policy from manila import test from manila import utils -from oslo.config import cfg CONF = cfg.CONF @@ -68,9 +66,8 @@ class PolicyTestCase(test.TestCase): def setUp(self): super(PolicyTestCase, self).setUp() policy.reset() - # NOTE(vish): preload rules to circumvent reloading from file policy.init() - rules = { + self.rules = { "true": [], "example:allowed": [], "example:denied": [["false:false"]], @@ -82,8 +79,7 @@ class PolicyTestCase(test.TestCase): "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], } - # NOTE(vish): then overload underlying brain - common_policy.set_brain(common_policy.HttpBrain(rules)) + self._set_rules() self.context = context.RequestContext('fake', 'fake', roles=['member']) self.target = {} @@ -91,6 +87,12 @@ class PolicyTestCase(test.TestCase): policy.reset() super(PolicyTestCase, self).tearDown() + def _set_rules(self): + these_rules = common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in self.rules.items())) + policy._ENFORCER.set_rules(these_rules) + def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, @@ -108,22 +110,24 @@ class PolicyTestCase(test.TestCase): def test_enforce_http_true(self): def fakeurlopen(url, post_data): - return StringIO.StringIO("True") - self.stubs.Set(urllib2, 'urlopen', fakeurlopen) + return six.StringIO("True") + action = "example:get_http" target = {} - result = policy.enforce(self.context, action, target) - self.assertEqual(result, None) + with mock.patch.object(urlrequest, 'urlopen', fakeurlopen): + result = policy.enforce(self.context, action, target) + self.assertTrue(result) def test_enforce_http_false(self): def fakeurlopen(url, post_data): - return StringIO.StringIO("False") - self.stubs.Set(urllib2, 'urlopen', fakeurlopen) + return six.StringIO("False") + action = "example:get_http" target = {} - self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, action, target) + with mock.patch.object(urlrequest, 'urlopen', fakeurlopen): + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, target) def test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} @@ -165,20 +169,19 @@ class DefaultPolicyTestCase(test.TestCase): "default": [], "example:exist": [["false:false"]] } - - self._set_brain('default') - + self._set_rules('default') self.context = context.RequestContext('fake', 'fake') - def _set_brain(self, default_rule): - brain = 
manila.openstack.common.policy.HttpBrain(self.rules, - default_rule) - manila.openstack.common.policy.set_brain(brain) - def tearDown(self): super(DefaultPolicyTestCase, self).tearDown() policy.reset() + def _set_rules(self, default_rule): + these_rules = common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in self.rules.items()), default_rule) + policy._ENFORCER.set_rules(these_rules) + def test_policy_called(self): self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) @@ -187,7 +190,13 @@ class DefaultPolicyTestCase(test.TestCase): policy.enforce(self.context, "example:noexist", {}) def test_default_not_found(self): - self._set_brain("default_noexist") + new_default_rule = "default_noexist" + # FIXME(gyee): need to overwrite the Enforcer's default_rule first + # as it is recreating the rules with its own default_rule instead + # of the default_rule passed in from set_rules(). I think this is a + # bug in Oslo policy. + policy._ENFORCER.default_rule = new_default_rule + self._set_rules(new_default_rule) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:noexist", {}) @@ -199,6 +208,12 @@ class ContextIsAdminPolicyTestCase(test.TestCase): policy.reset() policy.init() + def _set_rules(self, rules, default_rule): + these_rules = common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in rules.items()), default_rule) + policy._ENFORCER.set_rules(these_rules) + def test_default_admin_role_is_admin(self): ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) self.assertFalse(ctx.is_admin) @@ -210,8 +225,7 @@ class ContextIsAdminPolicyTestCase(test.TestCase): rules = { 'context_is_admin': [["role:administrator"], ["role:johnny-admin"]] } - brain = common_policy.Brain(rules, CONF.policy_default_rule) - common_policy.set_brain(brain) + self._set_rules(rules, CONF.policy_default_rule) ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) self.assertTrue(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['administrator']) @@ -225,8 +239,7 @@ class ContextIsAdminPolicyTestCase(test.TestCase): "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]], "default": [["rule:admin_or_owner"]], } - brain = common_policy.Brain(rules, CONF.policy_default_rule) - common_policy.set_brain(brain) + self._set_rules(rules, CONF.policy_default_rule) ctx = context.RequestContext('fake', 'fake') self.assertFalse(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['admin']) diff --git a/openstack-common.conf b/openstack-common.conf index 773f98c62b..3b649d4675 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -2,7 +2,6 @@ # The list of modules to copy from openstack-common module=context -module=exception module=excutils module=fileutils module=flakes diff --git a/requirements.txt b/requirements.txt index 8c05df8eba..6917050c79 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,10 +12,12 @@ oslo.config>=1.2.0 paramiko>=1.8.0 Paste PasteDeploy>=1.5.0 +posix_ipc python-neutronclient>=2.3.0,<3 python-glanceclient>=0.9.0 python-keystoneclient>=0.3.2 Routes>=1.12.3 +six>=1.6.0 SQLAlchemy>=0.7.8,<=0.7.99 sqlalchemy-migrate>=0.7.2 stevedore>=0.10
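The Rules/parse_rule plumbing that the rewritten tests above keep repeating can be exercised on its own. A minimal sketch, assuming only the synced manila.openstack.common.policy module; the rule names and JSON are illustrative:

from manila.openstack.common import policy as common_policy

# load_json() accepts the same old-style list syntax used in policy.json.
rules = common_policy.Rules.load_json(
    '{"default": [], "example:exist": [["false:false"]]}', 'default')

print(rules['example:exist'])    # the parsed "false:false" check
print(rules['example:noexist'])  # unknown rules fall back to "default"

That fallback is what the FIXME(gyee) note above works around: the Enforcer re-parses rules with its own default_rule, so the test overwrites it before calling set_rules().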