Clean openstack.common import
Add an openstack-common.conf and only import modules that are really
used so far.

Change-Id: I8a2bcb3e7d70ab059cdae0463876363f2c24f1b9
Signed-off-by: Julien Danjou <julien@danjou.info>
parent 594949e64c
commit ff8d249fc5
@@ -1,24 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This ensures the openstack namespace is defined
-try:
-    import pkg_resources
-    pkg_resources.declare_namespace(__name__)
-except ImportError:
-    import pkgutil
-    __path__ = pkgutil.extend_path(__path__, __name__)
@@ -16,4 +16,4 @@
 # under the License.
 
 # TODO(jaypipes) Code in this module is intended to be ported to the eventual
-# openstack-common library
+# kwapi.openstack.common library
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Authentication related utilities and helper functions.
-"""
-
-
-def auth_str_equal(provided, known):
-    """Constant-time string comparison.
-
-    :params provided: the first string
-    :params known: the second string
-
-    :return: True if the strings are equal.
-
-    This function takes two strings and compares them. It is intended to be
-    used when doing a comparison for authentication purposes to help guard
-    against timing attacks. When using the function for this purpose, always
-    provide the user-provided password as the first argument. The time this
-    function will take is always a factor of the length of this string.
-    """
-    result = 0
-    p_len = len(provided)
-    k_len = len(known)
-    for i in xrange(p_len):
-        a = ord(provided[i]) if i < p_len else 0
-        b = ord(known[i]) if i < k_len else 0
-        result |= a ^ b
-    return (p_len == k_len) & (result == 0)
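
Aside: the removed helper predates a stdlib equivalent; on Python 2.7.7+/3.3+
the same constant-time guarantee is available from hmac.compare_digest. A
minimal sketch (inputs invented)::

    import hmac

    # Compares without leaking where the first mismatching byte sits:
    # the work done depends on the inputs' length, not their content.
    print(hmac.compare_digest('correct horse', 'correct horse'))  # True
    print(hmac.compare_digest('correct horse', 'wrong battery'))  # False
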
@@ -107,7 +107,7 @@ and --config-dir::
             self.register_cli_opts(opts)
 
 Option values are parsed from any supplied config files using
-openstack.common.iniparser. If none are specified, a default set is used
+kwapi.openstack.common.iniparser. If none are specified, a default set is used
 e.g. glance-api.conf and glance-common.conf::
 
     glance-api.conf:
@@ -217,7 +217,7 @@ log files::
         ...
     ]
 
-This module also contains a global instance of the CommonConfigOpts class
+This module also contains a global instance of the ConfigOpts class
 in order to support a common usage pattern in OpenStack::
 
     from kwapi.openstack.common import cfg
@@ -236,10 +236,11 @@ in order to support a common usage pattern in OpenStack::
 Positional command line arguments are supported via a 'positional' Opt
 constructor argument::
 
-    >>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True))
+    >>> conf = ConfigOpts()
+    >>> conf.register_cli_opt(MultiStrOpt('bar', positional=True))
     True
-    >>> CONF(['a', 'b'])
-    >>> CONF.bar
+    >>> conf(['a', 'b'])
+    >>> conf.bar
     ['a', 'b']
 
 It is also possible to use argparse "sub-parsers" to parse additional
@@ -249,10 +250,11 @@ command line arguments using the SubCommandOpt class:
     ...     list_action = subparsers.add_parser('list')
     ...     list_action.add_argument('id')
     ...
-    >>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
+    >>> conf = ConfigOpts()
+    >>> conf.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
     True
-    >>> CONF(['list', '10'])
-    >>> CONF.action.name, CONF.action.id
+    >>> conf(args=['list', '10'])
+    >>> conf.action.name, conf.action.id
     ('list', '10')
 
 """
@@ -1726,62 +1728,4 @@ class ConfigOpts(collections.Mapping):
         return value
 
 
-class CommonConfigOpts(ConfigOpts):
-
-    DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
-    DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-    common_cli_opts = [
-        BoolOpt('debug',
-                short='d',
-                default=False,
-                help='Print debugging output'),
-        BoolOpt('verbose',
-                short='v',
-                default=False,
-                help='Print more verbose output'),
-    ]
-
-    logging_cli_opts = [
-        StrOpt('log-config',
-               metavar='PATH',
-               help='If this option is specified, the logging configuration '
-                    'file specified is used and overrides any other logging '
-                    'options specified. Please see the Python logging module '
-                    'documentation for details on logging configuration '
-                    'files.'),
-        StrOpt('log-format',
-               default=DEFAULT_LOG_FORMAT,
-               metavar='FORMAT',
-               help='A logging.Formatter log message format string which may '
-                    'use any of the available logging.LogRecord attributes. '
-                    'Default: %(default)s'),
-        StrOpt('log-date-format',
-               default=DEFAULT_LOG_DATE_FORMAT,
-               metavar='DATE_FORMAT',
-               help='Format string for %%(asctime)s in log records. '
-                    'Default: %(default)s'),
-        StrOpt('log-file',
-               metavar='PATH',
-               deprecated_name='logfile',
-               help='(Optional) Name of log file to output to. '
-                    'If not set, logging will go to stdout.'),
-        StrOpt('log-dir',
-               deprecated_name='logdir',
-               help='(Optional) The directory to keep log files in '
-                    '(will be prepended to --log-file)'),
-        BoolOpt('use-syslog',
-                default=False,
-                help='Use syslog for logging.'),
-        StrOpt('syslog-log-facility',
-               default='LOG_USER',
-               help='syslog facility to receive log lines')
-    ]
-
-    def __init__(self):
-        super(CommonConfigOpts, self).__init__()
-        self.register_cli_opts(self.common_cli_opts)
-        self.register_cli_opts(self.logging_cli_opts)
-
-
-CONF = CommonConfigOpts()
+CONF = ConfigOpts()
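
With CommonConfigOpts gone, callers register the debug/verbose style options
on a plain ConfigOpts themselves; in this commit the real registrations move
into log.py (see the log hunks below). A hedged sketch of the replacement
pattern, with the option list abbreviated::

    from kwapi.openstack.common import cfg

    CONF = cfg.ConfigOpts()
    CONF.register_cli_opts([
        cfg.BoolOpt('debug', short='d', default=False,
                    help='Print debugging output'),
        cfg.BoolOpt('verbose', short='v', default=False,
                    help='Print more verbose output'),
    ])
    CONF(['--debug'])
    print(CONF.debug)    # True
    print(CONF.verbose)  # False
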
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import inspect
-
-
-class MissingArgs(Exception):
-
-    def __init__(self, missing):
-        self.missing = missing
-
-    def __str__(self):
-        if len(self.missing) == 1:
-            return "An argument is missing"
-        else:
-            return ("%(num)d arguments are missing" %
-                    dict(num=len(self.missing)))
-
-
-def validate_args(fn, *args, **kwargs):
-    """Check that the supplied args are sufficient for calling a function.
-
-    >>> validate_args(lambda a: None)
-    Traceback (most recent call last):
-    ...
-    MissingArgs: An argument is missing: a
-    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
-    Traceback (most recent call last):
-    ...
-    MissingArgs: 2 arguments are missing: b, d
-
-    :param fn: the function to check
-    :param arg: the positional arguments supplied
-    :param kwargs: the keyword arguments supplied
-    """
-    argspec = inspect.getargspec(fn)
-
-    num_defaults = len(argspec.defaults or [])
-    required_args = argspec.args[:len(argspec.args) - num_defaults]
-
-    def isbound(method):
-        return getattr(method, 'im_self', None) is not None
-
-    if isbound(fn):
-        required_args.pop(0)
-
-    missing = [arg for arg in required_args if arg not in kwargs]
-    missing = missing[len(args):]
-    if missing:
-        raise MissingArgs(missing)
@@ -1,81 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Simple class that stores security context information in the web request.
-
-Projects should subclass this class if they wish to enhance the request
-context or provide additional information in their specific WSGI pipeline.
-"""
-
-import itertools
-import uuid
-
-
-def generate_request_id():
-    return 'req-' + str(uuid.uuid4())
-
-
-class RequestContext(object):
-
-    """
-    Stores information about the security context under which the user
-    accesses the system, as well as additional request information.
-    """
-
-    def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False,
-                 read_only=False, show_deleted=False, request_id=None):
-        self.auth_tok = auth_tok
-        self.user = user
-        self.tenant = tenant
-        self.is_admin = is_admin
-        self.read_only = read_only
-        self.show_deleted = show_deleted
-        if not request_id:
-            request_id = generate_request_id()
-        self.request_id = request_id
-
-    def to_dict(self):
-        return {'user': self.user,
-                'tenant': self.tenant,
-                'is_admin': self.is_admin,
-                'read_only': self.read_only,
-                'show_deleted': self.show_deleted,
-                'auth_token': self.auth_tok,
-                'request_id': self.request_id}
-
-
-def get_admin_context(show_deleted="no"):
-    context = RequestContext(None,
-                             tenant=None,
-                             is_admin=True,
-                             show_deleted=show_deleted)
-    return context
-
-
-def get_context_from_function_and_args(function, args, kwargs):
-    """Find an arg of type RequestContext and return it.
-
-       This is useful in a couple of decorators where we don't
-       know much about the function we're wrapping.
-    """
-
-    for arg in itertools.chain(kwargs.values(), args):
-        if isinstance(arg, RequestContext):
-            return arg
-
-    return None
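
Usage sketch for the removed RequestContext (values invented; the import path
is the pre-removal one)::

    from kwapi.openstack.common import context

    ctxt = context.RequestContext(auth_tok='tok-123', user='alice',
                                  tenant='acme')
    print(ctxt.request_id)           # e.g. 'req-8f9d...' (random UUID)
    print(ctxt.to_dict()['tenant'])  # 'acme'

    admin = context.get_admin_context()
    print(admin.is_admin)            # True
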
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 Openstack, LLC.
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import gc
-import pprint
-import sys
-import traceback
-
-import eventlet
-import eventlet.backdoor
-import greenlet
-
-from kwapi.openstack.common import cfg
-
-eventlet_backdoor_opts = [
-    cfg.IntOpt('backdoor_port',
-               default=None,
-               help='port for eventlet backdoor to listen')
-]
-
-CONF = cfg.CONF
-CONF.register_opts(eventlet_backdoor_opts)
-
-
-def _dont_use_this():
-    print "Don't use this, just disconnect instead"
-
-
-def _find_objects(t):
-    return filter(lambda o: isinstance(o, t), gc.get_objects())
-
-
-def _print_greenthreads():
-    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
-        print i, gt
-        traceback.print_stack(gt.gr_frame)
-        print
-
-
-def initialize_if_enabled():
-    backdoor_locals = {
-        'exit': _dont_use_this,      # So we don't exit the entire process
-        'quit': _dont_use_this,      # So we don't exit the entire process
-        'fo': _find_objects,
-        'pgt': _print_greenthreads,
-    }
-
-    if CONF.backdoor_port is None:
-        return None
-
-    # NOTE(johannes): The standard sys.displayhook will print the value of
-    # the last expression and set it to __builtin__._, which overwrites
-    # the __builtin__._ that gettext sets. Let's switch to using pprint
-    # since it won't interact poorly with gettext, and it's easier to
-    # read the output too.
-    def displayhook(val):
-        if val is not None:
-            pprint.pprint(val)
-    sys.displayhook = displayhook
-
-    sock = eventlet.listen(('localhost', CONF.backdoor_port))
-    port = sock.getsockname()[1]
-    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
-                     locals=backdoor_locals)
-    return port
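
The backdoor stays off unless backdoor_port is configured. A sketch of
enabling it programmatically (port number invented; set_override usage
assumed from the cfg API of this era)::

    from kwapi.openstack.common import cfg
    from kwapi.openstack.common import eventlet_backdoor

    cfg.CONF.set_override('backdoor_port', 9999)
    port = eventlet_backdoor.initialize_if_enabled()
    # Now `telnet localhost 9999` gives a REPL with the fo()/pgt() helpers.
    print(port)  # 9999
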
@@ -1,137 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Exceptions common to OpenStack projects
-"""
-
-import logging
-
-from kwapi.openstack.common.gettextutils import _
-
-
-class Error(Exception):
-    def __init__(self, message=None):
-        super(Error, self).__init__(message)
-
-
-class ApiError(Error):
-    def __init__(self, message='Unknown', code='Unknown'):
-        self.message = message
-        self.code = code
-        super(ApiError, self).__init__('%s: %s' % (code, message))
-
-
-class NotFound(Error):
-    pass
-
-
-class UnknownScheme(Error):
-
-    msg = "Unknown scheme '%s' found in URI"
-
-    def __init__(self, scheme):
-        msg = self.__class__.msg % scheme
-        super(UnknownScheme, self).__init__(msg)
-
-
-class BadStoreUri(Error):
-
-    msg = "The Store URI %s was malformed. Reason: %s"
-
-    def __init__(self, uri, reason):
-        msg = self.__class__.msg % (uri, reason)
-        super(BadStoreUri, self).__init__(msg)
-
-
-class Duplicate(Error):
-    pass
-
-
-class NotAuthorized(Error):
-    pass
-
-
-class NotEmpty(Error):
-    pass
-
-
-class Invalid(Error):
-    pass
-
-
-class BadInputError(Exception):
-    """Error resulting from a client sending bad input to a server"""
-    pass
-
-
-class MissingArgumentError(Error):
-    pass
-
-
-class DatabaseMigrationError(Error):
-    pass
-
-
-class ClientConnectionError(Exception):
-    """Error resulting from a client connecting to a server"""
-    pass
-
-
-def wrap_exception(f):
-    def _wrap(*args, **kw):
-        try:
-            return f(*args, **kw)
-        except Exception, e:
-            if not isinstance(e, Error):
-                #exc_type, exc_value, exc_traceback = sys.exc_info()
-                logging.exception(_('Uncaught exception'))
-                #logging.error(traceback.extract_stack(exc_traceback))
-                raise Error(str(e))
-            raise
-    _wrap.func_name = f.func_name
-    return _wrap
-
-
-class OpenstackException(Exception):
-    """
-    Base Exception
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
-    """
-    message = "An unknown exception occurred"
-
-    def __init__(self, **kwargs):
-        try:
-            self._error_string = self.message % kwargs
-
-        except Exception:
-            # at least get the core message out if something happened
-            self._error_string = self.message
-
-    def __str__(self):
-        return self._error_string
-
-
-class MalformedRequestBody(OpenstackException):
-    message = "Malformed message body: %(reason)s"
-
-
-class InvalidContentType(OpenstackException):
-    message = "Invalid content type %(content_type)s"
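
The OpenstackException docstring above describes the printf-style pattern; a
minimal sketch (exception class invented for illustration)::

    class VolumeNotFound(OpenstackException):
        message = "Volume %(volume_id)s could not be found"

    try:
        raise VolumeNotFound(volume_id='vol-42')
    except VolumeNotFound as e:
        print(e)  # Volume vol-42 could not be found
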
@@ -1,51 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Exception related utilities.
-"""
-
-import contextlib
-import logging
-import sys
-import traceback
-
-from kwapi.openstack.common.gettextutils import _
-
-
-@contextlib.contextmanager
-def save_and_reraise_exception():
-    """Save current exception, run some code and then re-raise.
-
-    In some cases the exception context can be cleared, resulting in None
-    being attempted to be re-raised after an exception handler is run. This
-    can happen when eventlet switches greenthreads or when running an
-    exception handler, code raises and catches an exception. In both
-    cases the exception context will be cleared.
-
-    To work around this, we save the exception state, run handler code, and
-    then re-raise the original exception. If another exception occurs, the
-    saved exception is logged and the new exception is re-raised.
-    """
-    type_, value, tb = sys.exc_info()
-    try:
-        yield
-    except Exception:
-        logging.error(_('Original exception being dropped: %s'),
-                      traceback.format_exception(type_, value, tb))
-        raise
-    raise type_, value, tb
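
Typical call shape for save_and_reraise_exception (handler body invented);
the context manager must run inside an except block so sys.exc_info() is
populated::

    try:
        do_work()  # hypothetical failing operation
    except Exception:
        with save_and_reraise_exception():
            cleanup()  # runs first; the original error re-raises after,
                       # unless cleanup() itself raised
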
@@ -1,35 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import errno
-import os
-
-
-def ensure_tree(path):
-    """Create a directory (and any ancestor directories required)
-
-    :param path: Directory to create
-    """
-    try:
-        os.makedirs(path)
-    except OSError as exc:
-        if exc.errno == errno.EEXIST:
-            if not os.path.isdir(path):
-                raise
-        else:
-            raise
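
ensure_tree is an EEXIST-tolerant mkdir -p; repeated calls are safe (path
invented)::

    ensure_tree('/tmp/kwapi/locks/a/b')  # creates any missing ancestors
    ensure_tree('/tmp/kwapi/locks/a/b')  # no-op the second time, no error
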
@@ -16,9 +16,9 @@
 # under the License.
 
 """
-gettext for openstack-common modules.
+gettext for kwapi.openstack.common modules.
 
-Usual usage in an openstack.common module:
+Usual usage in an kwapi.openstack.common module:
 
     from kwapi.openstack.common.gettextutils import _
 """
@@ -26,7 +26,7 @@ Usual usage in an openstack.common module:
 import gettext
 
 
-t = gettext.translation('openstack-common', 'locale', fallback=True)
+t = gettext.translation('kwapi.openstack.common', 'locale', fallback=True)
 
 
 def _(msg):
@@ -1,59 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Import related utilities and helper functions.
-"""
-
-import sys
-import traceback
-
-
-def import_class(import_str):
-    """Returns a class from a string including module and class"""
-    mod_str, _sep, class_str = import_str.rpartition('.')
-    try:
-        __import__(mod_str)
-        return getattr(sys.modules[mod_str], class_str)
-    except (ValueError, AttributeError):
-        raise ImportError('Class %s cannot be found (%s)' %
-                          (class_str,
-                           traceback.format_exception(*sys.exc_info())))
-
-
-def import_object(import_str, *args, **kwargs):
-    """Import a class and return an instance of it."""
-    return import_class(import_str)(*args, **kwargs)
-
-
-def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
-    to find the class in a default namespace, then failing back to
-    a full path if not found in the default namespace.
-    """
-    import_value = "%s.%s" % (name_space, import_str)
-    try:
-        return import_class(import_value)(*args, **kwargs)
-    except ImportError:
-        return import_class(import_str)(*args, **kwargs)
-
-
-def import_module(import_str):
-    """Import a module."""
-    __import__(import_str)
-    return sys.modules[import_str]
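
A sketch of the three import helpers using stdlib names, so it runs
anywhere::

    cls = import_class('collections.OrderedDict')
    obj = import_object('collections.OrderedDict', [('a', 1)])
    mod = import_module('json')
    print(cls is type(obj))     # True
    print(mod.dumps({'a': 1}))  # {"a": 1}
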
@@ -1,233 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import errno
-import functools
-import os
-import shutil
-import tempfile
-import time
-import weakref
-
-from eventlet import semaphore
-
-from kwapi.openstack.common import cfg
-from kwapi.openstack.common import fileutils
-from kwapi.openstack.common.gettextutils import _
-from kwapi.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-util_opts = [
-    cfg.BoolOpt('disable_process_locking', default=False,
-                help='Whether to disable inter-process locks'),
-    cfg.StrOpt('lock_path',
-               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                                    '../')),
-               help='Directory to use for lock files')
-]
-
-
-CONF = cfg.CONF
-CONF.register_opts(util_opts)
-
-
-class _InterProcessLock(object):
-    """Lock implementation which allows multiple locks, working around
-    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
-    not require any cleanup. Since the lock is always held on a file
-    descriptor rather than outside of the process, the lock gets dropped
-    automatically if the process crashes, even if __exit__ is not executed.
-
-    There are no guarantees regarding usage by multiple green threads in a
-    single process here. This lock works only between processes. Exclusive
-    access between local threads should be achieved using the semaphores
-    in the @synchronized decorator.
-
-    Note these locks are released when the descriptor is closed, so it's not
-    safe to close the file descriptor while another green thread holds the
-    lock. Just opening and closing the lock file can break synchronisation,
-    so lock files must be accessed only using this abstraction.
-    """
-
-    def __init__(self, name):
-        self.lockfile = None
-        self.fname = name
-
-    def __enter__(self):
-        self.lockfile = open(self.fname, 'w')
-
-        while True:
-            try:
-                # Using non-blocking locks since green threads are not
-                # patched to deal with blocking locking calls.
-                # Also upon reading the MSDN docs for locking(), it seems
-                # to have a laughable 10 attempts "blocking" mechanism.
-                self.trylock()
-                return self
-            except IOError, e:
-                if e.errno in (errno.EACCES, errno.EAGAIN):
-                    # external locks synchronise things like iptables
-                    # updates - give it some time to prevent busy spinning
-                    time.sleep(0.01)
-                else:
-                    raise
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        try:
-            self.unlock()
-            self.lockfile.close()
-        except IOError:
-            LOG.exception(_("Could not release the acquired lock `%s`"),
-                          self.fname)
-
-    def trylock(self):
-        raise NotImplementedError()
-
-    def unlock(self):
-        raise NotImplementedError()
-
-
-class _WindowsLock(_InterProcessLock):
-    def trylock(self):
-        msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
-
-    def unlock(self):
-        msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
-
-
-class _PosixLock(_InterProcessLock):
-    def trylock(self):
-        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
-
-    def unlock(self):
-        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
-
-
-if os.name == 'nt':
-    import msvcrt
-    InterProcessLock = _WindowsLock
-else:
-    import fcntl
-    InterProcessLock = _PosixLock
-
-_semaphores = weakref.WeakValueDictionary()
-
-
-def synchronized(name, lock_file_prefix, external=False, lock_path=None):
-    """Synchronization decorator.
-
-    Decorating a method like so::
-
-        @synchronized('mylock')
-        def foo(self, *args):
-           ...
-
-    ensures that only one thread will execute the bar method at a time.
-
-    Different methods can share the same lock::
-
-        @synchronized('mylock')
-        def foo(self, *args):
-           ...
-
-        @synchronized('mylock')
-        def bar(self, *args):
-           ...
-
-    This way only one of either foo or bar can be executing at a time.
-
-    The lock_file_prefix argument is used to provide lock files on disk with a
-    meaningful prefix. The prefix should end with a hyphen ('-') if specified.
-
-    The external keyword argument denotes whether this lock should work across
-    multiple processes. This means that if two different workers both run a
-    a method decorated with @synchronized('mylock', external=True), only one
-    of them will execute at a time.
-
-    The lock_path keyword argument is used to specify a special location for
-    external lock files to live. If nothing is set, then CONF.lock_path is
-    used as a default.
-    """
-
-    def wrap(f):
-        @functools.wraps(f)
-        def inner(*args, **kwargs):
-            # NOTE(soren): If we ever go natively threaded, this will be racy.
-            #              See http://stackoverflow.com/questions/5390569/dyn
-            #              amically-allocating-and-destroying-mutexes
-            sem = _semaphores.get(name, semaphore.Semaphore())
-            if name not in _semaphores:
-                # this check is not racy - we're already holding ref locally
-                # so GC won't remove the item and there was no IO switch
-                # (only valid in greenthreads)
-                _semaphores[name] = sem
-
-            with sem:
-                LOG.debug(_('Got semaphore "%(lock)s" for method '
-                            '"%(method)s"...'), {'lock': name,
-                                                 'method': f.__name__})
-                if external and not CONF.disable_process_locking:
-                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
-                                'method "%(method)s"...'),
-                              {'lock': name, 'method': f.__name__})
-                    cleanup_dir = False
-
-                    # We need a copy of lock_path because it is non-local
-                    local_lock_path = lock_path
-                    if not local_lock_path:
-                        local_lock_path = CONF.lock_path
-
-                    if not local_lock_path:
-                        cleanup_dir = True
-                        local_lock_path = tempfile.mkdtemp()
-
-                    if not os.path.exists(local_lock_path):
-                        cleanup_dir = True
-                        fileutils.ensure_tree(local_lock_path)
-
-                    # NOTE(mikal): the lock name cannot contain directory
-                    # separators
-                    safe_name = name.replace(os.sep, '_')
-                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
-                    lock_file_path = os.path.join(local_lock_path,
-                                                  lock_file_name)
-
-                    try:
-                        lock = InterProcessLock(lock_file_path)
-                        with lock:
-                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
-                                        'for method "%(method)s"...'),
-                                      {'lock': name,
-                                       'path': lock_file_path,
-                                       'method': f.__name__})
-                            retval = f(*args, **kwargs)
-                    finally:
-                        # NOTE(vish): This removes the tempdir if we needed
-                        #             to create one. This is used to cleanup
-                        #             the locks left behind by unit tests.
-                        if cleanup_dir:
-                            shutil.rmtree(local_lock_path)
-                else:
-                    retval = f(*args, **kwargs)
-
-            return retval
-        return inner
-    return wrap
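
A runnable shape for the decorator described above (lock name, prefix and
directory invented); external=True adds the file lock on top of the
per-process semaphore::

    @synchronized('state-db', 'kwapi-', external=True,
                  lock_path='/tmp/kwapi-locks')
    def update_state():
        pass  # at most one process (and one green thread) runs this at once

    update_state()
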
@@ -47,21 +47,82 @@ from kwapi.openstack.common import local
 from kwapi.openstack.common import notifier
 
 
+_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+common_cli_opts = [
+    cfg.BoolOpt('debug',
+                short='d',
+                default=False,
+                help='Print debugging output (set logging level to '
+                     'DEBUG instead of default WARNING level).'),
+    cfg.BoolOpt('verbose',
+                short='v',
+                default=False,
+                help='Print more verbose output (set logging level to '
+                     'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+    cfg.StrOpt('log-config',
+               metavar='PATH',
+               help='If this option is specified, the logging configuration '
+                    'file specified is used and overrides any other logging '
+                    'options specified. Please see the Python logging module '
+                    'documentation for details on logging configuration '
+                    'files.'),
+    cfg.StrOpt('log-format',
+               default=_DEFAULT_LOG_FORMAT,
+               metavar='FORMAT',
+               help='A logging.Formatter log message format string which may '
+                    'use any of the available logging.LogRecord attributes. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-date-format',
+               default=_DEFAULT_LOG_DATE_FORMAT,
+               metavar='DATE_FORMAT',
+               help='Format string for %%(asctime)s in log records. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-file',
+               metavar='PATH',
+               deprecated_name='logfile',
+               help='(Optional) Name of log file to output to. '
+                    'If not set, logging will go to stdout.'),
+    cfg.StrOpt('log-dir',
+               deprecated_name='logdir',
+               help='(Optional) The directory to keep log files in '
+                    '(will be prepended to --log-file)'),
+    cfg.BoolOpt('use-syslog',
+                default=False,
+                help='Use syslog for logging.'),
+    cfg.StrOpt('syslog-log-facility',
+               default='LOG_USER',
+               help='syslog facility to receive log lines')
+]
+
+generic_log_opts = [
+    cfg.BoolOpt('use_stderr',
+                default=True,
+                help='Log output to standard error'),
+    cfg.StrOpt('logfile_mode',
+               default='0644',
+               help='Default file mode used when creating log files'),
+]
+
 log_opts = [
     cfg.StrOpt('logging_context_format_string',
-               default='%(asctime)s.%(msecs)d %(levelname)s %(name)s '
+               default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
                        '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
                        '%(message)s',
               help='format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
-               default='%(asctime)s.%(msecs)d %(process)d %(levelname)s '
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                        '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
     cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
              help='data to append to log format when level is DEBUG'),
     cfg.StrOpt('logging_exception_prefix',
-               default='%(asctime)s.%(msecs)d %(process)d TRACE %(name)s '
+               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                        '%(instance)s',
              help='prefix each line of exception output with this format'),
     cfg.ListOpt('default_log_levels',
@@ -94,24 +155,9 @@ log_opts = [
                 'format it like this'),
 ]
 
-
-generic_log_opts = [
-    cfg.StrOpt('logdir',
-               default=None,
-               help='Log output to a per-service log file in named directory'),
-    cfg.StrOpt('logfile',
-               default=None,
-               help='Log output to a named file'),
-    cfg.BoolOpt('use_stderr',
-                default=True,
-                help='Log output to standard error'),
-    cfg.StrOpt('logfile_mode',
-               default='0644',
-               help='Default file mode used when creating log files'),
-]
-
 CONF = cfg.CONF
+CONF.register_cli_opts(common_cli_opts)
+CONF.register_cli_opts(logging_cli_opts)
 CONF.register_opts(generic_log_opts)
 CONF.register_opts(log_opts)
 
@@ -149,8 +195,8 @@ def _get_binary_name():
 
 
 def _get_log_file_path(binary=None):
-    logfile = CONF.log_file or CONF.logfile
-    logdir = CONF.log_dir or CONF.logdir
+    logfile = CONF.log_file
+    logdir = CONF.log_dir
 
     if logfile and not logdir:
         return logfile
@@ -258,7 +304,7 @@ class JSONFormatter(logging.Formatter):
 
 class PublishErrorsHandler(logging.Handler):
     def emit(self, record):
-        if ('openstack.common.notifier.log_notifier' in
+        if ('kwapi.openstack.common.notifier.log_notifier' in
                 CONF.notification_driver):
             return
         notifier.api.notify(None, 'error.publisher',
@@ -361,10 +407,12 @@ def _setup_logging_from_conf(product_name):
                                            datefmt=datefmt))
         handler.setFormatter(LegacyFormatter(datefmt=datefmt))
 
-    if CONF.verbose or CONF.debug:
+    if CONF.debug:
         log_root.setLevel(logging.DEBUG)
-    else:
+    elif CONF.verbose:
         log_root.setLevel(logging.INFO)
+    else:
+        log_root.setLevel(logging.WARNING)
 
     level = logging.NOTSET
     for pair in CONF.default_log_levels:
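
The new conditional makes WARNING the default root level instead of INFO, and
lets --debug win over --verbose. The same three-way selection in isolation
(helper name invented)::

    import logging

    def _pick_level(debug, verbose):
        if debug:                  # --debug takes precedence
            return logging.DEBUG
        elif verbose:
            return logging.INFO
        return logging.WARNING     # new default when neither flag is set

    print(_pick_level(False, False) == logging.WARNING)  # True
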
@@ -1,95 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from eventlet import event
-from eventlet import greenthread
-
-from kwapi.openstack.common.gettextutils import _
-from kwapi.openstack.common import log as logging
-from kwapi.openstack.common import timeutils
-
-LOG = logging.getLogger(__name__)
-
-
-class LoopingCallDone(Exception):
-    """Exception to break out and stop a LoopingCall.
-
-    The poll-function passed to LoopingCall can raise this exception to
-    break out of the loop normally. This is somewhat analogous to
-    StopIteration.
-
-    An optional return-value can be included as the argument to the exception;
-    this return-value will be returned by LoopingCall.wait()
-
-    """
-
-    def __init__(self, retvalue=True):
-        """:param retvalue: Value that LoopingCall.wait() should return."""
-        self.retvalue = retvalue
-
-
-class LoopingCall(object):
-    def __init__(self, f=None, *args, **kw):
-        self.args = args
-        self.kw = kw
-        self.f = f
-        self._running = False
-
-    def start(self, interval, initial_delay=None):
-        self._running = True
-        done = event.Event()
-
-        def _inner():
-            if initial_delay:
-                greenthread.sleep(initial_delay)
-
-            try:
-                while self._running:
-                    start = timeutils.utcnow()
-                    self.f(*self.args, **self.kw)
-                    end = timeutils.utcnow()
-                    if not self._running:
-                        break
-                    delay = interval - timeutils.delta_seconds(start, end)
-                    if delay <= 0:
-                        LOG.warn(_('task run outlasted interval by %s sec') %
-                                 -delay)
-                    greenthread.sleep(delay if delay > 0 else 0)
-            except LoopingCallDone, e:
-                self.stop()
-                done.send(e.retvalue)
-            except Exception:
-                LOG.exception(_('in looping call'))
-                done.send_exception(*sys.exc_info())
-                return
-            else:
-                done.send(True)
-
-        self.done = done
-
-        greenthread.spawn_n(_inner)
-        return self.done
-
-    def stop(self):
-        self._running = False
-
-    def wait(self):
-        return self.done.wait()
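
Driving the removed LoopingCall (callback invented; requires eventlet).
Raising LoopingCallDone ends the loop and hands its retvalue to wait()::

    def poll():
        raise LoopingCallDone(retvalue='finished')

    timer = LoopingCall(poll)
    timer.start(interval=1.0)
    print(timer.wait())  # 'finished'
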
@@ -1,64 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Middleware that attaches a context to the WSGI request
-"""
-
-from kwapi.openstack.common import context
-from kwapi.openstack.common import importutils
-from kwapi.openstack.common import wsgi
-
-
-class ContextMiddleware(wsgi.Middleware):
-    def __init__(self, app, options):
-        self.options = options
-        super(ContextMiddleware, self).__init__(app)
-
-    def make_context(self, *args, **kwargs):
-        """
-        Create a context with the given arguments.
-        """
-
-        # Determine the context class to use
-        ctxcls = context.RequestContext
-        if 'context_class' in self.options:
-            ctxcls = importutils.import_class(self.options['context_class'])
-
-        return ctxcls(*args, **kwargs)
-
-    def process_request(self, req):
-        """
-        Extract any authentication information in the request and
-        construct an appropriate context from it.
-        """
-        # Use the default empty context, with admin turned on for
-        # backwards compatibility
-        req.context = self.make_context(is_admin=True)
-
-
-def filter_factory(global_conf, **local_conf):
-    """
-    Factory method for paste.deploy
-    """
-    conf = global_conf.copy()
-    conf.update(local_conf)
-
-    def filter(app):
-        return ContextMiddleware(app, conf)
-
-    return filter
@@ -1,68 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Network-related utilities and helper functions.
-"""
-
-import logging
-
-LOG = logging.getLogger(__name__)
-
-
-def parse_host_port(address, default_port=None):
-    """
-    Interpret a string as a host:port pair.
-    An IPv6 address MUST be escaped if accompanied by a port,
-    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
-    means both [2001:db8:85a3::8a2e:370:7334] and
-    [2001:db8:85a3::8a2e:370]:7334.
-
-    >>> parse_host_port('server01:80')
-    ('server01', 80)
-    >>> parse_host_port('server01')
-    ('server01', None)
-    >>> parse_host_port('server01', default_port=1234)
-    ('server01', 1234)
-    >>> parse_host_port('[::1]:80')
-    ('::1', 80)
-    >>> parse_host_port('[::1]')
-    ('::1', None)
-    >>> parse_host_port('[::1]', default_port=1234)
-    ('::1', 1234)
-    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
-    ('2001:db8:85a3::8a2e:370:7334', 1234)
-
-    """
-    if address[0] == '[':
-        # Escaped ipv6
-        _host, _port = address[1:].split(']')
-        host = _host
-        if ':' in _port:
-            port = _port.split(':')[1]
-        else:
-            port = default_port
-    else:
-        if address.count(':') == 1:
-            host, port = address.split(':')
-        else:
-            # 0 means ipv4, >1 means ipv6.
-            # We prohibit unescaped ipv6 addresses with port.
-            host = address
-            port = default_port
-
-    return (host, None if port is None else int(port))
@@ -30,6 +30,6 @@ def notify(_context, message):
                            CONF.default_notification_level)
     priority = priority.lower()
     logger = logging.getLogger(
-        'openstack.common.notification.%s' %
+        'kwapi.openstack.common.notification.%s' %
         message['event_type'])
     getattr(logger, priority)(jsonutils.dumps(message))
@@ -1,164 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from paste import deploy
-
-from kwapi.openstack.common import local
-
-
-class BasePasteFactory(object):
-
-    """A base class for paste app and filter factories.
-
-    Sub-classes must override the KEY class attribute and provide
-    a __call__ method.
-    """
-
-    KEY = None
-
-    def __init__(self, data):
-        self.data = data
-
-    def _import_factory(self, local_conf):
-        """Import an app/filter class.
-
-        Lookup the KEY from the PasteDeploy local conf and import the
-        class named there. This class can then be used as an app or
-        filter factory.
-
-        Note we support the <module>:<class> format.
-
-        Note also that if you do e.g.
-
-          key =
-              value
-
-        then ConfigParser returns a value with a leading newline, so
-        we strip() the value before using it.
-        """
-        mod_str, _sep, class_str = local_conf[self.KEY].strip().rpartition(':')
-        del local_conf[self.KEY]
-
-        __import__(mod_str)
-        return getattr(sys.modules[mod_str], class_str)
-
-
-class AppFactory(BasePasteFactory):
-
-    """A Generic paste.deploy app factory.
-
-    This requires openstack.app_factory to be set to a callable which returns a
-    WSGI app when invoked. The format of the name is <module>:<callable> e.g.
-
-      [app:myfooapp]
-      paste.app_factory = openstack.common.pastedeploy:app_factory
-      openstack.app_factory = myapp:Foo
-
-    The WSGI app constructor must accept a data object and a local config
-    dict as its two arguments.
-    """
-
-    KEY = 'openstack.app_factory'
-
-    def __call__(self, global_conf, **local_conf):
-        """The actual paste.app_factory protocol method."""
-        factory = self._import_factory(local_conf)
-        return factory(self.data, **local_conf)
-
-
-class FilterFactory(AppFactory):
-
-    """A Generic paste.deploy filter factory.
-
-    This requires openstack.filter_factory to be set to a callable which
-    returns a WSGI filter when invoked. The format is <module>:<callable> e.g.
-
-      [filter:myfoofilter]
-      paste.filter_factory = openstack.common.pastedeploy:filter_factory
-      openstack.filter_factory = myfilter:Foo
-
-    The WSGI filter constructor must accept a WSGI app, a data object and
-    a local config dict as its three arguments.
-    """
-
-    KEY = 'openstack.filter_factory'
-
-    def __call__(self, global_conf, **local_conf):
-        """The actual paste.filter_factory protocol method."""
-        factory = self._import_factory(local_conf)
-
-        def filter(app):
-            return factory(app, self.data, **local_conf)
-
-        return filter
-
-
-def app_factory(global_conf, **local_conf):
-    """A paste app factory used with paste_deploy_app()."""
-    return local.store.app_factory(global_conf, **local_conf)
-
-
-def filter_factory(global_conf, **local_conf):
-    """A paste filter factory used with paste_deploy_app()."""
-    return local.store.filter_factory(global_conf, **local_conf)
-
-
-def paste_deploy_app(paste_config_file, app_name, data):
-    """Load a WSGI app from a PasteDeploy configuration.
-
-    Use deploy.loadapp() to load the app from the PasteDeploy configuration,
-    ensuring that the supplied data object is passed to the app and filter
-    factories defined in this module.
-
-    To use these factories and the data object, the configuration should look
-    like this:
-
-      [app:myapp]
-      paste.app_factory = openstack.common.pastedeploy:app_factory
-      openstack.app_factory = myapp:App
-      ...
-      [filter:myfilter]
-      paste.filter_factory = openstack.common.pastedeploy:filter_factory
-      openstack.filter_factory = myapp:Filter
-
-    and then:
-
-      myapp.py:
-
-        class App(object):
-          def __init__(self, data):
-              ...
-
-        class Filter(object):
-          def __init__(self, app, data):
-              ...
-
-    :param paste_config_file: a PasteDeploy config file
-    :param app_name: the name of the app/pipeline to load from the file
-    :param data: a data object to supply to the app and its filters
-    :returns: the WSGI app
-    """
-    (af, ff) = (AppFactory(data), FilterFactory(data))
-
-    local.store.app_factory = af
-    local.store.filter_factory = ff
-    try:
-        return deploy.loadapp("config:%s" % paste_config_file, name=app_name)
-    finally:
-        del local.store.app_factory
-        del local.store.filter_factory
@@ -1,115 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from kwapi.openstack.common.gettextutils import _
-from kwapi.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def periodic_task(*args, **kwargs):
-    """Decorator to indicate that a method is a periodic task.
-
-    This decorator can be used in two ways:
-
-        1. Without arguments '@periodic_task', this will be run on every tick
-           of the periodic scheduler.
-
-        2. With arguments, @periodic_task(ticks_between_runs=N), this will be
-           run on every N ticks of the periodic scheduler.
-    """
-    def decorator(f):
-        f._periodic_task = True
-        f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
-        return f
-
-    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
-    # and without parens.
-    #
-    # In the 'with-parens' case (with kwargs present), this function needs to
-    # return a decorator function since the interpreter will invoke it like:
-    #
-    #   periodic_task(*args, **kwargs)(f)
-    #
-    # In the 'without-parens' case, the original function will be passed
-    # in as the first argument, like:
-    #
-    #   periodic_task(f)
-    if kwargs:
-        return decorator
-    else:
-        return decorator(args[0])
-
-
-class _PeriodicTasksMeta(type):
-    def __init__(cls, names, bases, dict_):
-        """Metaclass that allows us to collect decorated periodic tasks."""
-        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
-
-        # NOTE(sirp): if the attribute is not present then we must be the base
-        # class, so, go ahead and initialize it. If the attribute is present,
-        # then we're a subclass so make a copy of it so we don't step on our
-        # parent's toes.
-        try:
-            cls._periodic_tasks = cls._periodic_tasks[:]
-        except AttributeError:
-            cls._periodic_tasks = []
-
-        try:
-            cls._ticks_to_skip = cls._ticks_to_skip.copy()
-        except AttributeError:
-            cls._ticks_to_skip = {}
-
-        # This uses __dict__ instead of
-        # inspect.getmembers(cls, inspect.ismethod) so only the methods of the
-        # current class are added when this class is scanned, and base classes
-        # are not added redundantly.
-        for value in cls.__dict__.values():
-            if getattr(value, '_periodic_task', False):
-                task = value
-                name = task.__name__
-                cls._periodic_tasks.append((name, task))
-                cls._ticks_to_skip[name] = task._ticks_between_runs
-
-
-class PeriodicTasks(object):
-    __metaclass__ = _PeriodicTasksMeta
-
-    def run_periodic_tasks(self, context, raise_on_error=False):
-        """Tasks to be run at a periodic interval."""
-        for task_name, task in self._periodic_tasks:
-            full_task_name = '.'.join([self.__class__.__name__, task_name])
-
-            ticks_to_skip = self._ticks_to_skip[task_name]
-            if ticks_to_skip > 0:
-                LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
-                            " ticks left until next run"),
-                          dict(full_task_name=full_task_name,
-                               ticks_to_skip=ticks_to_skip))
-                self._ticks_to_skip[task_name] -= 1
-                continue
-
-            self._ticks_to_skip[task_name] = task._ticks_between_runs
-            LOG.debug(_("Running periodic task %(full_task_name)s"),
-                      dict(full_task_name=full_task_name))
-
-            try:
-                task(self, context)
-            except Exception as e:
-                if raise_on_error:
-                    raise
-                LOG.exception(_("Error during %(full_task_name)s:"
-                                " %(e)s"),
-                              dict(e=e, full_task_name=full_task_name))
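
A sketch of a manager exercising both decorator forms described above
(Python 2 metaclass style to match the module; task bodies invented)::

    class Manager(PeriodicTasks):

        @periodic_task
        def report_state(self, context):
            pass  # runs on every tick

        @periodic_task(ticks_between_runs=10)
        def prune_stale(self, context):
            pass  # runs on every 10th tick

    Manager().run_periodic_tasks(context=None)
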
@@ -1,14 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@@ -1,93 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kwapi.openstack.common import log as logging
from kwapi.openstack.common.plugin import plugin


LOG = logging.getLogger(__name__)


class _CallbackNotifier(object):
    """Manages plugin-defined notification callbacks.

    For each Plugin, a CallbackNotifier will be added to the
    notification driver list. Calls to notify() with appropriate
    messages will be hooked and prompt callbacks.

    A callback should look like this:
      def callback(context, message, user_data)
    """

    def __init__(self):
        self._callback_dict = {}

    def _add_callback(self, event_type, callback, user_data):
        callback_list = self._callback_dict.get(event_type, [])
        callback_list.append({'function': callback,
                              'user_data': user_data})
        self._callback_dict[event_type] = callback_list

    def _remove_callback(self, callback):
        for callback_list in self._callback_dict.values():
            for entry in callback_list:
                if entry['function'] == callback:
                    callback_list.remove(entry)

    def notify(self, context, message):
        if message.get('event_type') not in self._callback_dict:
            return

        for entry in self._callback_dict[message.get('event_type')]:
            entry['function'](context, message, entry.get('user_data'))

    def callbacks(self):
        return self._callback_dict


class CallbackPlugin(plugin.Plugin):
    """Plugin with a simple callback interface.

    This class is provided as a convenience for producing a simple
    plugin that only watches a couple of events. For example, here's
    a subclass which prints a line the first time an instance is created.

    class HookInstanceCreation(CallbackPlugin):

        def __init__(self, _service_name):
            super(HookInstanceCreation, self).__init__()
            self._add_callback(self.magic, 'compute.instance.create.start')

        def magic(self):
            print "An instance was created!"
            self._remove_callback(self, self.magic)
    """

    def __init__(self, service_name):
        super(CallbackPlugin, self).__init__(service_name)
        self._callback_notifier = _CallbackNotifier()
        self._add_notifier(self._callback_notifier)

    def _add_callback(self, callback, event_type, user_data=None):
        """Add callback for a given event notification.

        Subclasses can call this as an alternative to implementing
        a full-blown notify notifier.
        """
        self._callback_notifier._add_callback(event_type, callback, user_data)

    def _remove_callback(self, callback):
        """Remove all notification callbacks to specified function."""
        self._callback_notifier._remove_callback(callback)
@@ -1,86 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kwapi.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class Plugin(object):
    """Defines an interface for adding functionality to an OpenStack service.

    A plugin interacts with a service via the following pathways:

    - An optional set of notifiers, managed by calling add_notifier()
      or by overriding _notifiers()

    - A set of api extensions, managed via add_api_extension_descriptor()

    - Direct calls to service functions.

    - Whatever else the plugin wants to do on its own.

    This is the reference implementation.
    """

    # The following functions are provided as convenience methods
    # for subclasses. Subclasses should call them but probably not
    # override them.
    def _add_api_extension_descriptor(self, descriptor):
        """Subclass convenience method which adds an extension descriptor.

           Subclass constructors should call this method when
           extending a project's REST interface.

           Note that once the api service has loaded, the
           API extension set is more-or-less fixed, so
           this should mainly be called by subclass constructors.
        """
        self._api_extension_descriptors.append(descriptor)

    def _add_notifier(self, notifier):
        """Subclass convenience method which adds a notifier.

           Notifier objects should implement the function notify(message).
           Each notifier receives a notify() call whenever an openstack
           service broadcasts a notification.

           Best to call this during construction. Notifiers are enumerated
           and registered by the pluginmanager at plugin load time.
        """
        self._notifiers.append(notifier)

    # The following methods are called by OpenStack services to query
    # plugin features. Subclasses should probably not override these.
    def _notifiers(self):
        """Returns list of notifiers for this plugin."""
        return self._notifiers

    notifiers = property(_notifiers)

    def _api_extension_descriptors(self):
        """Return a list of API extension descriptors.

           Called by a project API during its load sequence.
        """
        return self._api_extension_descriptors

    api_extension_descriptors = property(_api_extension_descriptors)

    # Most plugins will override this:
    def __init__(self, service_name):
        self._notifiers = []
        self._api_extension_descriptors = []
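A minimal sketch of the subclassing pattern this interface implies; the plugin and notifier classes are illustrative placeholders, not part of kwapi (note the notifier here follows the notify(context, message) call pattern used by _CallbackNotifier above):

    from kwapi.openstack.common.plugin import plugin


    class PrintingNotifier(object):
        """Illustrative notifier object registered via _add_notifier()."""
        def notify(self, context, message):
            print message


    class ExamplePlugin(plugin.Plugin):
        def __init__(self, service_name):
            super(ExamplePlugin, self).__init__(service_name)
            # Notifiers are enumerated by the pluginmanager at load time.
            self._add_notifier(PrintingNotifier())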
@@ -1,77 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pkg_resources

from kwapi.openstack.common import cfg
from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import log as logging
from kwapi.openstack.common.notifier import api as notifier_api


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class PluginManager(object):
    """Manages plugin entrypoints and loading.

    For a service to implement this plugin interface for callback purposes:

      - Make use of the openstack-common notifier system
      - Instantiate this manager in each process (passing in
        project and service name)

    For an API service to extend itself using this plugin interface,
    it needs to query the plugin_extension_factory provided by
    the already-instantiated PluginManager.
    """

    def __init__(self, project_name, service_name):
        """Construct Plugin Manager; load and initialize plugins.

        project_name (e.g. 'nova' or 'glance') is used
        to construct the entry point that identifies plugins.

        The service_name (e.g. 'compute') is passed on to
        each plugin as a raw string for it to do what it will.
        """
        self._project_name = project_name
        self._service_name = service_name
        self.plugins = []

    def load_plugins(self):
        self.plugins = []

        for entrypoint in pkg_resources.iter_entry_points('%s.plugin' %
                                                          self._project_name):
            try:
                pluginclass = entrypoint.load()
                plugin = pluginclass(self._service_name)
                self.plugins.append(plugin)
            except Exception, exc:
                LOG.error(_("Failed to load plugin %(plug)s: %(exc)s") %
                          {'plug': entrypoint, 'exc': exc})

        # Register individual notifiers.
        for plugin in self.plugins:
            for notifier in plugin.notifiers:
                notifier_api.add_driver(notifier)

    def plugin_extension_factory(self, ext_mgr):
        for plugin in self.plugins:
            descriptors = plugin.api_extension_descriptors
            for descriptor in descriptors:
                ext_mgr.load_extension(descriptor)
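The manager discovers plugins through pkg_resources entry points in the group '<project_name>.plugin'. A hedged sketch of the wiring, assuming a setuptools-packaged plugin; the package, class, and service names are illustrative:

    # setup.py of the plugin package (hypothetical):
    #     entry_points={
    #         'kwapi.plugin': [
    #             'example = example_pkg.example:ExamplePlugin',
    #         ],
    #     }

    from kwapi.openstack.common.plugin import pluginmanager

    manager = pluginmanager.PluginManager('kwapi', 'collector')
    manager.load_plugins()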
@@ -1,779 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Common Policy Engine Implementation

Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.

In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.

In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.

As an example, take the following rule, expressed in the list-of-lists
representation::

    [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]

In the policy language, this becomes::

    role:admin or (project_id:%(project_id)s and role:projectadmin)

The policy language also has the "not" operator, allowing a richer
policy rule::

    project_id:%(project_id)s and not role:dunce

Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""

import abc
import logging
import re
import urllib

import urllib2

from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import jsonutils


LOG = logging.getLogger(__name__)


_rules = None
_checks = {}


class Rules(dict):
    """
    A store for rules. Handles the default_rule setting directly.
    """

    @classmethod
    def load_json(cls, data, default_rule=None):
        """
        Allow loading of JSON rule data.
        """

        # Suck in the JSON data and parse the rules
        rules = dict((k, parse_rule(v)) for k, v in
                     jsonutils.loads(data).items())

        return cls(rules, default_rule)

    def __init__(self, rules=None, default_rule=None):
        """Initialize the Rules store."""

        super(Rules, self).__init__(rules or {})
        self.default_rule = default_rule

    def __missing__(self, key):
        """Implements the default rule handling."""

        # If the default rule isn't actually defined, do something
        # reasonably intelligent
        if not self.default_rule or self.default_rule not in self:
            raise KeyError(key)

        return self[self.default_rule]

    def __str__(self):
        """Dumps a string representation of the rules."""

        # Start by building the canonical strings for the rules
        out_rules = {}
        for key, value in self.items():
            # Use empty string for singleton TrueCheck instances
            if isinstance(value, TrueCheck):
                out_rules[key] = ''
            else:
                out_rules[key] = str(value)

        # Dump a pretty-printed JSON representation
        return jsonutils.dumps(out_rules, indent=4)


# Really have to figure out a way to deprecate this
def set_rules(rules):
    """Set the rules in use for policy checks."""

    global _rules

    _rules = rules


# Ditto
def reset():
    """Clear the rules used for policy checks."""

    global _rules

    _rules = None


def check(rule, target, creds, exc=None, *args, **kwargs):
    """
    Checks authorization of a rule against the target and credentials.

    :param rule: The rule to evaluate.
    :param target: As much information about the object being operated
                   on as possible, as a dictionary.
    :param creds: As much information about the user performing the
                  action as possible, as a dictionary.
    :param exc: Class of the exception to raise if the check fails.
                Any remaining arguments passed to check() (both
                positional and keyword arguments) will be passed to
                the exception class. If exc is not provided, returns
                False.

    :return: Returns False if the policy does not allow the action and
             exc is not provided; otherwise, returns a value that
             evaluates to True. Note: for rules using the "case"
             expression, this True value will be the specified string
             from the expression.
    """

    # Allow the rule to be a Check tree
    if isinstance(rule, BaseCheck):
        result = rule(target, creds)
    elif not _rules:
        # No rules to reference means we're going to fail closed
        result = False
    else:
        try:
            # Evaluate the rule
            result = _rules[rule](target, creds)
        except KeyError:
            # If the rule doesn't exist, fail closed
            result = False

    # If it is False, raise the exception if requested
    if exc and result is False:
        raise exc(*args, **kwargs)

    return result


class BaseCheck(object):
    """
    Abstract base class for Check classes.
    """

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __str__(self):
        """
        Retrieve a string representation of the Check tree rooted at
        this node.
        """

        pass

    @abc.abstractmethod
    def __call__(self, target, cred):
        """
        Perform the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
        """

        pass


class FalseCheck(BaseCheck):
    """
    A policy check that always returns False (disallow).
    """

    def __str__(self):
        """Return a string representation of this check."""

        return "!"

    def __call__(self, target, cred):
        """Check the policy."""

        return False


class TrueCheck(BaseCheck):
    """
    A policy check that always returns True (allow).
    """

    def __str__(self):
        """Return a string representation of this check."""

        return "@"

    def __call__(self, target, cred):
        """Check the policy."""

        return True


class Check(BaseCheck):
    """
    A base class to allow for user-defined policy checks.
    """

    def __init__(self, kind, match):
        """
        :param kind: The kind of the check, i.e., the field before the
                     ':'.
        :param match: The match of the check, i.e., the field after
                      the ':'.
        """

        self.kind = kind
        self.match = match

    def __str__(self):
        """Return a string representation of this check."""

        return "%s:%s" % (self.kind, self.match)


class NotCheck(BaseCheck):
    """
    A policy check that inverts the result of another policy check.
    Implements the "not" operator.
    """

    def __init__(self, rule):
        """
        Initialize the 'not' check.

        :param rule: The rule to negate. Must be a Check.
        """

        self.rule = rule

    def __str__(self):
        """Return a string representation of this check."""

        return "not %s" % self.rule

    def __call__(self, target, cred):
        """
        Check the policy. Returns the logical inverse of the wrapped
        check.
        """

        return not self.rule(target, cred)


class AndCheck(BaseCheck):
    """
    A policy check that requires that a list of other checks all
    return True. Implements the "and" operator.
    """

    def __init__(self, rules):
        """
        Initialize the 'and' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' and '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """
        Check the policy. Requires that all rules accept in order to
        return True.
        """

        for rule in self.rules:
            if not rule(target, cred):
                return False

        return True

    def add_check(self, rule):
        """
        Allows addition of another rule to the list of rules that will
        be tested. Returns the AndCheck object for convenience.
        """

        self.rules.append(rule)
        return self


class OrCheck(BaseCheck):
    """
    A policy check that requires that at least one of a list of other
    checks returns True. Implements the "or" operator.
    """

    def __init__(self, rules):
        """
        Initialize the 'or' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' or '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """
        Check the policy. Requires that at least one rule accept in
        order to return True.
        """

        for rule in self.rules:
            if rule(target, cred):
                return True

        return False

    def add_check(self, rule):
        """
        Allows addition of another rule to the list of rules that will
        be tested. Returns the OrCheck object for convenience.
        """

        self.rules.append(rule)
        return self


def _parse_check(rule):
    """
    Parse a single base check rule into an appropriate Check object.
    """

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(rule)s") % locals())
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()


def _parse_list_rule(rule):
    """
    Provided for backwards compatibility. Translates the old
    list-of-lists syntax into a tree of Check objects.
    """

    # Empty rule defaults to True
    if not rule:
        return TrueCheck()

    # Outer list is joined by "or"; inner list by "and"
    or_list = []
    for inner_rule in rule:
        # Elide empty inner lists
        if not inner_rule:
            continue

        # Handle bare strings
        if isinstance(inner_rule, basestring):
            inner_rule = [inner_rule]

        # Parse the inner rules into Check objects
        and_list = [_parse_check(r) for r in inner_rule]

        # Append the appropriate check to the or_list
        if len(and_list) == 1:
            or_list.append(and_list[0])
        else:
            or_list.append(AndCheck(and_list))

    # If we have only one check, omit the "or"
    if len(or_list) == 0:
        return FalseCheck()
    elif len(or_list) == 1:
        return or_list[0]

    return OrCheck(or_list)


# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')


def _parse_tokenize(rule):
    """
    Tokenizer for the policy language.

    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string. Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).
    """

    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue

        # Handle leading parens on the token
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean

        # Handle trailing parens on the token
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', tok[1:-1]
            else:
                yield 'check', _parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'


class ParseStateMeta(type):
    """
    Metaclass for the ParseState class. Facilitates identifying
    reduction methods.
    """

    def __new__(mcs, name, bases, cls_dict):
        """
        Create the class. Injects the 'reducers' list, a list of
        tuples matching token sequences to the names of the
        corresponding reduction methods.
        """

        reducers = []

        for key, value in cls_dict.items():
            if not hasattr(value, 'reducers'):
                continue
            for reduction in value.reducers:
                reducers.append((reduction, key))

        cls_dict['reducers'] = reducers

        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)


def reducer(*tokens):
    """
    Decorator for reduction methods. Arguments are a sequence of
    tokens, in order, which should trigger running this reduction
    method.
    """

    def decorator(func):
        # Make sure we have a list of reducer sequences
        if not hasattr(func, 'reducers'):
            func.reducers = []

        # Add the tokens to the list of reducer sequences
        func.reducers.append(list(tokens))

        return func

    return decorator


class ParseState(object):
    """
    Implement the core of parsing the policy language. Uses a greedy
    reduction algorithm to reduce a sequence of tokens into a single
    terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking. The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    __metaclass__ = ParseStateMeta

    def __init__(self):
        """Initialize the ParseState."""

        self.tokens = []
        self.values = []

    def reduce(self):
        """
        Perform a greedy reduction of the token stream. If a reducer
        method matches, it will be executed, then the reduce() method
        will be called recursively to search for any more possible
        reductions.
        """

        for reduction, methname in self.reducers:
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])

                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state. Calls reduce()."""

        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """
        Obtain the final result of the parse. Raises ValueError if
        the parse failed to reduce to a single result.
        """

        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""

        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """
        Create an 'and_expr' from two checks joined by the 'and'
        operator.
        """

        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """
        Extend an 'and_expr' by adding one more check.
        """

        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """
        Create an 'or_expr' from two checks joined by the 'or'
        operator.
        """

        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """
        Extend an 'or_expr' by adding one more check.
        """

        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""

        return [('check', NotCheck(check))]


def _parse_text_rule(rule):
    """
    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()


def parse_rule(rule):
    """
    Parses a policy rule into a tree of Check objects.
    """

    # If the rule is a string, it's in the policy language
    if isinstance(rule, basestring):
        return _parse_text_rule(rule)
    return _parse_list_rule(rule)


def register(name, func=None):
    """
    Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc. If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    # Perform the actual decoration by registering the function or
    # class. Returns the function or class for compliance with the
    # decorator interface.
    def decorator(func):
        _checks[name] = func
        return func

    # If the function or class is given, do the registration
    if func:
        return decorator(func)

    return decorator


@register("rule")
class RuleCheck(Check):
    def __call__(self, target, creds):
        """
        Recursively checks credentials based on the defined rules.
        """

        try:
            return _rules[self.match](target, creds)
        except KeyError:
            # We don't have any matching rule; fail closed
            return False


@register("role")
class RoleCheck(Check):
    def __call__(self, target, creds):
        """Check that there is a matching role in the cred dict."""

        return self.match.lower() in [x.lower() for x in creds['roles']]


@register('http')
class HttpCheck(Check):
    def __call__(self, target, creds):
        """
        Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """

        url = ('http:' + self.match) % target
        data = {'target': jsonutils.dumps(target),
                'credentials': jsonutils.dumps(creds)}
        post_data = urllib.urlencode(data)
        f = urllib2.urlopen(url, post_data)
        return f.read() == "True"


@register(None)
class GenericCheck(Check):
    def __call__(self, target, creds):
        """
        Check an individual match.

        Matches look like:

            tenant:%(tenant_id)s
            role:compute:admin
        """

        # TODO(termie): do dict inspection via dot syntax
        match = self.match % target
        if self.kind in creds:
            return match == unicode(creds[self.kind])
        return False
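For reference, a short sketch of the two rule forms parse_rule() accepts and how check() consumes them; the rule names, target, and credentials are illustrative:

    from kwapi.openstack.common import policy

    rules = policy.Rules({
        # Old list-of-lists form: inner lists are AND, outer list is OR.
        'admin_or_owner': policy.parse_rule(
            [["role:admin"], ["project_id:%(project_id)s"]]),
        # Equivalent policy-language form.
        'admin_or_owner_v2': policy.parse_rule(
            "role:admin or project_id:%(project_id)s"),
    })
    policy.set_rules(rules)

    target = {'project_id': 'p1'}
    creds = {'roles': ['member'], 'project_id': 'p1'}
    assert policy.check('admin_or_owner', target, creds)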
@@ -1,135 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
System-level utilities and helper functions.
"""

import logging
import random
import shlex

from eventlet.green import subprocess
from eventlet import greenthread

from kwapi.openstack.common.gettextutils import _


LOG = logging.getLogger(__name__)


class UnknownArgumentError(Exception):
    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)


class ProcessExecutionError(Exception):
    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        if description is None:
            description = "Unexpected error while running command."
        if exit_code is None:
            exit_code = '-'
        message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
                   % (description, cmd, exit_code, stdout, stderr))
        super(ProcessExecutionError, self).__init__(message)


def execute(*cmd, **kwargs):
    """
    Helper method to shell out and execute a command through subprocess with
    optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param check_exit_code: Defaults to 0. Will raise
                            :class:`ProcessExecutionError`
                            if the command exits without returning this value
                            as a returncode
    :type check_exit_code: int
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix all cmd's with
    :type root_helper: string
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """

    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', 0)
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    if len(kwargs):
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)
    if run_as_root:
        cmd = shlex.split(root_helper) + list(cmd)
    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
                if (isinstance(check_exit_code, int) and
                        not isinstance(check_exit_code, bool) and
                        _returncode != check_exit_code):
                    (stdout, stderr) = result
                    raise ProcessExecutionError(exit_code=_returncode,
                                                stdout=stdout,
                                                stderr=stderr,
                                                cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
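A short sketch of typical use of the removed execute() helper, assuming the module was importable as kwapi.openstack.common.utils; the command shown is arbitrary:

    from kwapi.openstack.common import utils

    # Returns (stdout, stderr); raises ProcessExecutionError when the
    # return code differs from check_exit_code, retrying up to three
    # times with a short random delay between attempts.
    stdout, stderr = utils.execute('df', '-h', check_exit_code=0, attempts=3)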
@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,180 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import re


class CommandFilter(object):
    """Command filter only checking that the 1st argument matches exec_path"""

    def __init__(self, exec_path, run_as, *args):
        self.name = ''
        self.exec_path = exec_path
        self.run_as = run_as
        self.args = args
        self.real_exec = None

    def get_exec(self, exec_dirs=[]):
        """Returns existing executable, or empty string if none found"""
        if self.real_exec is not None:
            return self.real_exec
        self.real_exec = ""
        if self.exec_path.startswith('/'):
            if os.access(self.exec_path, os.X_OK):
                self.real_exec = self.exec_path
        else:
            for binary_path in exec_dirs:
                expanded_path = os.path.join(binary_path, self.exec_path)
                if os.access(expanded_path, os.X_OK):
                    self.real_exec = expanded_path
                    break
        return self.real_exec

    def match(self, userargs):
        """Only check that the first argument (command) matches exec_path"""
        if (os.path.basename(self.exec_path) == userargs[0]):
            return True
        return False

    def get_command(self, userargs, exec_dirs=[]):
        """Returns command to execute (with sudo -u if run_as != root)."""
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        if (self.run_as != 'root'):
            # Used to run commands at lesser privileges
            return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
        return [to_exec] + userargs[1:]

    def get_environment(self, userargs):
        """Returns specific environment to set, None if none"""
        return None


class RegExpFilter(CommandFilter):
    """Command filter doing regexp matching for every argument"""

    def match(self, userargs):
        # Early skip if command or number of args don't match
        if (len(self.args) != len(userargs)):
            # DENY: argument numbers don't match
            return False
        # Compare each arg (anchoring pattern explicitly at end of string)
        for (pattern, arg) in zip(self.args, userargs):
            try:
                if not re.match(pattern + '$', arg):
                    break
            except re.error:
                # DENY: Badly-formed filter
                return False
        else:
            # ALLOW: All arguments matched
            return True

        # DENY: Some arguments did not match
        return False


class DnsmasqFilter(CommandFilter):
    """Specific filter for the dnsmasq call (which includes env)"""

    CONFIG_FILE_ARG = 'CONFIG_FILE'

    def match(self, userargs):
        if (userargs[0] == 'env' and
                userargs[1].startswith(self.CONFIG_FILE_ARG) and
                userargs[2].startswith('NETWORK_ID=') and
                userargs[3] == 'dnsmasq'):
            return True
        return False

    def get_command(self, userargs, exec_dirs=[]):
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        dnsmasq_pos = userargs.index('dnsmasq')
        return [to_exec] + userargs[dnsmasq_pos + 1:]

    def get_environment(self, userargs):
        env = os.environ.copy()
        env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
        env['NETWORK_ID'] = userargs[2].split('=')[-1]
        return env


class DeprecatedDnsmasqFilter(DnsmasqFilter):
    """Variant of dnsmasq filter to support old-style FLAGFILE"""
    CONFIG_FILE_ARG = 'FLAGFILE'


class KillFilter(CommandFilter):
    """Specific filter for the kill calls.
       1st argument is the user to run /bin/kill under
       2nd argument is the location of the affected executable
       Subsequent arguments list the accepted signals (if any)

       This filter relies on /proc to accurately determine affected
       executable, so it will only work on procfs-capable systems (not OSX).
    """

    def __init__(self, *args):
        super(KillFilter, self).__init__("/bin/kill", *args)

    def match(self, userargs):
        if userargs[0] != "kill":
            return False
        args = list(userargs)
        if len(args) == 3:
            # A specific signal is requested
            signal = args.pop(1)
            if signal not in self.args[1:]:
                # Requested signal not in accepted list
                return False
        else:
            if len(args) != 2:
                # Incorrect number of arguments
                return False
            if len(self.args) > 1:
                # No signal requested, but filter requires specific signal
                return False
        try:
            command = os.readlink("/proc/%d/exe" % int(args[1]))
            # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
            # the end if an executable is updated or deleted
            if command.endswith(" (deleted)"):
                command = command[:command.rindex(" ")]
            if command != self.args[0]:
                # Affected executable does not match
                return False
        except (ValueError, OSError):
            # Incorrect PID
            return False
        return True


class ReadFileFilter(CommandFilter):
    """Specific filter for the utils.read_file_as_root call"""

    def __init__(self, file_path, *args):
        self.file_path = file_path
        super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)

    def match(self, userargs):
        if userargs[0] != 'cat':
            return False
        if userargs[1] != self.file_path:
            return False
        if len(userargs) != 2:
            return False
        return True
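These classes were instantiated from INI filter definitions read by the rootwrap loader (see load_filters() in the next file): each value is a filter class name followed by its constructor arguments. A hedged example of the format; the filter names, paths, and signals are illustrative:

    [Filters]
    # name: FilterClass, constructor arguments...
    kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
    cat_shadow: ReadFileFilter, /etc/shadow
    ls: RegExpFilter, /bin/ls, root, ls, /[a-z]+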
@@ -1,149 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import ConfigParser
import logging
import logging.handlers
import os
import string

from kwapi.openstack.common.rootwrap import filters


class NoFilterMatched(Exception):
    """This exception is raised when no filter matched."""
    pass


class FilterMatchNotExecutable(Exception):
    """
    This exception is raised when a filter matched but no executable was
    found.
    """
    def __init__(self, match=None, **kwargs):
        self.match = match


class RootwrapConfig(object):

    def __init__(self, config):
        # filters_path
        self.filters_path = config.get("DEFAULT", "filters_path").split(",")

        # exec_dirs
        if config.has_option("DEFAULT", "exec_dirs"):
            self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
        else:
            # Use system PATH if exec_dirs is not specified
            self.exec_dirs = os.environ["PATH"].split(':')

        # syslog_log_facility
        if config.has_option("DEFAULT", "syslog_log_facility"):
            v = config.get("DEFAULT", "syslog_log_facility")
            facility_names = logging.handlers.SysLogHandler.facility_names
            self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
                                               v, None)
            if self.syslog_log_facility is None and v in facility_names:
                self.syslog_log_facility = facility_names.get(v)
            if self.syslog_log_facility is None:
                raise ValueError('Unexpected syslog_log_facility: %s' % v)
        else:
            default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
            self.syslog_log_facility = default_facility

        # syslog_log_level
        if config.has_option("DEFAULT", "syslog_log_level"):
            v = config.get("DEFAULT", "syslog_log_level")
            self.syslog_log_level = logging.getLevelName(v.upper())
            if (self.syslog_log_level == "Level %s" % v.upper()):
                raise ValueError('Unexpected syslog_log_level: %s' % v)
        else:
            self.syslog_log_level = logging.ERROR

        # use_syslog
        if config.has_option("DEFAULT", "use_syslog"):
            self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
        else:
            self.use_syslog = False


def setup_syslog(execname, facility, level):
    rootwrap_logger = logging.getLogger()
    rootwrap_logger.setLevel(level)
    handler = logging.handlers.SysLogHandler(address='/dev/log',
                                             facility=facility)
    handler.setFormatter(logging.Formatter(
        os.path.basename(execname) + ': %(message)s'))
    rootwrap_logger.addHandler(handler)


def build_filter(class_name, *args):
    """Returns a filter object of class class_name"""
    if not hasattr(filters, class_name):
        logging.warning("Skipping unknown filter class (%s) specified "
                        "in filter definitions" % class_name)
        return None
    filterclass = getattr(filters, class_name)
    return filterclass(*args)


def load_filters(filters_path):
    """Load filters from a list of directories"""
    filterlist = []
    for filterdir in filters_path:
        if not os.path.isdir(filterdir):
            continue
        for filterfile in os.listdir(filterdir):
            filterconfig = ConfigParser.RawConfigParser()
            filterconfig.read(os.path.join(filterdir, filterfile))
            for (name, value) in filterconfig.items("Filters"):
                filterdefinition = [string.strip(s) for s in value.split(',')]
                newfilter = build_filter(*filterdefinition)
                if newfilter is None:
                    continue
                newfilter.name = name
                filterlist.append(newfilter)
    return filterlist


def match_filter(filters, userargs, exec_dirs=[]):
    """
    Checks user command and arguments through command filters and
    returns the first matching filter.
    Raises NoFilterMatched if no filter matched.
    Raises FilterMatchNotExecutable if no executable was found for the
    best filter match.
    """
    first_not_executable_filter = None

    for f in filters:
        if f.match(userargs):
            # Try other filters if executable is absent
            if not f.get_exec(exec_dirs=exec_dirs):
                if not first_not_executable_filter:
                    first_not_executable_filter = f
                continue
            # Otherwise return matching filter for execution
            return f

    if first_not_executable_filter:
        # A filter matched, but no executable was found for it
        raise FilterMatchNotExecutable(match=first_not_executable_filter)

    # No filter matched
    raise NoFilterMatched()
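A sketch of how these helpers fit together in a rootwrap-style executable; the filters directory and the requested command are illustrative:

    import subprocess

    from kwapi.openstack.common.rootwrap import wrapper

    # Command requested by the unprivileged caller.
    userargs = ['kill', '-9', '1234']

    filter_list = wrapper.load_filters(['/etc/kwapi/rootwrap.d'])
    try:
        # May also raise FilterMatchNotExecutable if a filter matches
        # but its executable cannot be found.
        filtermatch = wrapper.match_filter(filter_list, userargs)
    except wrapper.NoFilterMatched:
        raise SystemExit('Unauthorized command: %s' % ' '.join(userargs))

    command = filtermatch.get_command(userargs)
    env = filtermatch.get_environment(userargs)  # None means inherit env
    subprocess.call(command, env=env)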
@@ -1,272 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2011 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
A remote procedure call (rpc) abstraction.
|
||||
|
||||
For some wrappers that add message versioning to rpc, see:
|
||||
rpc.dispatcher
|
||||
rpc.proxy
|
||||
"""
|
||||
|
||||
from kwapi.openstack.common import cfg
|
||||
from kwapi.openstack.common import importutils
|
||||
|
||||
|
||||
rpc_opts = [
|
||||
cfg.StrOpt('rpc_backend',
|
||||
default='%s.impl_kombu' % __package__,
|
||||
help="The messaging module to use, defaults to kombu."),
|
||||
cfg.IntOpt('rpc_thread_pool_size',
|
||||
default=64,
|
||||
help='Size of RPC thread pool'),
|
||||
cfg.IntOpt('rpc_conn_pool_size',
|
||||
default=30,
|
||||
help='Size of RPC connection pool'),
|
||||
cfg.IntOpt('rpc_response_timeout',
|
||||
default=60,
|
||||
help='Seconds to wait for a response from call or multicall'),
|
||||
cfg.IntOpt('rpc_cast_timeout',
|
||||
default=30,
|
||||
help='Seconds to wait before a cast expires (TTL). '
|
||||
'Only supported by impl_zmq.'),
|
||||
cfg.ListOpt('allowed_rpc_exception_modules',
|
||||
default=['openstack.common.exception',
|
||||
'nova.exception',
|
||||
'cinder.exception',
|
||||
'exceptions',
|
||||
],
|
||||
help='Modules of exceptions that are permitted to be recreated'
|
||||
'upon receiving exception data from an rpc call.'),
|
||||
cfg.BoolOpt('fake_rabbit',
|
||||
default=False,
|
||||
help='If passed, use a fake RabbitMQ provider'),
|
||||
cfg.StrOpt('control_exchange',
|
||||
default='openstack',
|
||||
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(rpc_opts)
|
||||
|
||||
|
||||
def set_defaults(control_exchange):
|
||||
cfg.set_defaults(rpc_opts,
|
||||
control_exchange=control_exchange)
|
||||
|
||||
|
||||
def create_connection(new=True):
|
||||
"""Create a connection to the message bus used for rpc.
|
||||
|
||||
For some example usage of creating a connection and some consumers on that
|
||||
connection, see nova.service.
|
||||
|
||||
:param new: Whether or not to create a new connection. A new connection
|
||||
will be created by default. If new is False, the
|
||||
implementation is free to return an existing connection from a
|
||||
pool.
|
||||
|
||||
:returns: An instance of openstack.common.rpc.common.Connection
|
||||
"""
|
||||
return _get_impl().create_connection(cfg.CONF, new=new)
|
||||
|
||||
|
||||
def call(context, topic, msg, timeout=None):
|
||||
"""Invoke a remote method that returns something.
|
||||
|
||||
:param context: Information that identifies the user that has made this
|
||||
request.
|
||||
:param topic: The topic to send the rpc message to. This correlates to the
|
||||
topic argument of
|
||||
openstack.common.rpc.common.Connection.create_consumer()
|
||||
and only applies when the consumer was created with
|
||||
fanout=False.
|
||||
:param msg: This is a dict in the form { "method" : "method_to_invoke",
|
||||
"args" : dict_of_kwargs }
|
||||
:param timeout: int, number of seconds to use for a response timeout.
|
||||
If set, this overrides the rpc_response_timeout option.
|
||||
|
||||
:returns: A dict from the remote method.
|
||||
|
||||
:raises: openstack.common.rpc.common.Timeout if a complete response
|
||||
is not received before the timeout is reached.
|
||||
"""
|
||||
return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
|
||||
|
||||
|
||||
def cast(context, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to. This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast(cfg.CONF, context, topic, msg)


def fanout_cast(context, topic, msg):
    """Broadcast a remote method invocation with no return.

    This method will get invoked on all consumers that were set up with this
    topic name and fanout=True.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to. This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=True.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)


def multicall(context, topic, msg, timeout=None):
    """Invoke a remote method and get back an iterator.

    In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in
    via an iterator.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to. This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.

    :returns: An iterator. The iterator will yield a tuple (N, X) where N is
              an index that starts at 0 and increases by one for each value
              returned and X is the Nth value that was returned by the remote
              method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)

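# Example (hypothetical sketch; the 'worker' topic and list_items() method
# are placeholder names):
#
#     msg = {'method': 'list_items', 'args': {}}
#     for reply in multicall(ctxt, 'worker', msg, timeout=60):
#         handle(reply)  # one iteration per reply message from the remote side
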
def notify(context, topic, msg, envelope=False):
    """Send notification event.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the notification to.
    :param msg: A dict containing the content of the event.
    :param envelope: Set to True to enable message envelope for notifications.

    :returns: None
    """
    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)


def cleanup():
    """Clean up resources in use by implementation.

    Clean up any resources that have been allocated by the RPC implementation.
    This is typically open connections to a messaging service. This function
    would get called before an application using this API exits to allow
    connections to get torn down cleanly.

    :returns: None
    """
    return _get_impl().cleanup()


def cast_to_server(context, server_params, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the rpc message to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
                                      msg)


def fanout_cast_to_server(context, server_params, topic, msg):
    """Broadcast a remote method invocation with no return.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the rpc message to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
                                             topic, msg)


def queue_get_for(context, topic, host):
    """Get a queue name for a given topic + host.

    This function only works if this naming convention is followed on the
    consumer side, as well. For example, in nova, every instance of the
    nova-foo service calls create_consumer() for two topics:

        foo
        foo.<host>

    Messages sent to the 'foo' topic are distributed to exactly one instance
    of the nova-foo service. The services are chosen in a round-robin fashion.
    Messages sent to the 'foo.<host>' topic are sent to the nova-foo service
    on <host>.
    """
    return '%s.%s' % (topic, host) if host else topic

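# Example ('node-1' is a placeholder host name):
#
#     queue_get_for(ctxt, 'compute', None)      # -> 'compute'
#     queue_get_for(ctxt, 'compute', 'node-1')  # -> 'compute.node-1'
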
_RPCIMPL = None


def _get_impl():
    """Delay import of rpc_backend until configuration is loaded."""
    global _RPCIMPL
    if _RPCIMPL is None:
        try:
            _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
        except ImportError:
            # For backwards compatibility with older nova config.
            impl = cfg.CONF.rpc_backend.replace('nova.rpc',
                                                'nova.openstack.common.rpc')
            _RPCIMPL = importutils.import_module(impl)
    return _RPCIMPL

@ -1,436 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Shared code between AMQP based openstack.common.rpc implementations.

The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""

import inspect
import sys
import uuid

from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore

from kwapi.openstack.common import excutils
from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import local
from kwapi.openstack.common import log as logging
from kwapi.openstack.common.rpc import common as rpc_common


LOG = logging.getLogger(__name__)


class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""
    def __init__(self, conf, connection_cls, *args, **kwargs):
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)

    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        LOG.debug(_('Pool creating new connection'))
        return self.connection_cls(self.conf)

    def empty(self):
        while self.free_items:
            self.get().close()


_pool_create_sem = semaphore.Semaphore()


def get_connection_pool(conf, connection_cls):
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool


class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection().  This is essentially a wrapper around
    Connection that supports 'with'.  It can also return a new
    Connection, or one from a pool.  The function will also catch
    when an instance of this class is to be deleted.  With that
    we can return Connections to the pool on exceptions and so
    forth without making the caller be responsible for catching
    them.  If possible the function makes sure to return a
    connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    pass
            self.connection = None

    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement.  We're done here."""
        self._done()

    def __del__(self):
        """Caller is done with this connection.  Make sure we cleaned up."""
        self._done()

    def close(self):
        """Caller is done with this connection."""
        self._done()

    def create_consumer(self, topic, proxy, fanout=False):
        self.connection.create_consumer(topic, proxy, fanout)

    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def consume_in_thread(self):
        self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance"""
        if self.connection:
            return getattr(self.connection, key)
        else:
            raise rpc_common.InvalidRPCConnectionReuse()


def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
              ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)

        try:
            msg = {'result': reply, 'failure': failure}
        except TypeError:
            msg = {'result': dict((k, repr(v))
                   for k, v in reply.__dict__.iteritems()),
                   'failure': failure}
        if ending:
            msg['ending'] = True
        conn.direct_send(msg_id, rpc_common.serialize_msg(msg))


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call"""
    def __init__(self, **kwargs):
        self.msg_id = kwargs.pop('msg_id', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['conf'] = self.conf
        values['msg_id'] = self.msg_id
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
                      ending, log_failure)
            if ending:
                self.msg_id = None


def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['conf'] = conf
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx


def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.

    """
    context_d = dict([('_context_%s' % key, value)
                      for (key, value) in context.to_dict().iteritems()])
    msg.update(context_d)

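# Example round trip (hypothetical; the 'user' value is a placeholder and
# 'conf' is assumed to be a loaded config object):
#
#     ctxt = RpcContext(conf=conf, user='demo')
#     msg = {'method': 'echo', 'args': {'value': 42}}
#     pack_context(msg, ctxt)
#     # msg now also carries {'_context_user': 'demo'}
#     restored = unpack_context(conf, msg)  # pops the '_context_*' keys again
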
class ProxyCallback(object):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        self.proxy = proxy
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
        self.connection_pool = connection_pool
        self.conf = conf

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version', None)
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method, args)

    def _process_data(self, ctxt, version, method, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            LOG.exception(_('Exception during message handling'))
            ctxt.reply(None, sys.exc_info(),
                       connection_pool=self.connection_pool)

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()


class MulticallWaiter(object):
    def __init__(self, conf, connection, timeout):
        self._connection = connection
        self._iterator = connection.iterconsume(timeout=timeout or
                                                conf.rpc_response_timeout)
        self._result = None
        self._done = False
        self._got_ending = False
        self._conf = conf

    def done(self):
        if self._done:
            return
        self._done = True
        self._iterator.close()
        self._iterator = None
        self._connection.close()

    def __call__(self, data):
        """The consume() callback will call this.  Store the result."""
        if data['failure']:
            failure = data['failure']
            self._result = rpc_common.deserialize_remote_exception(self._conf,
                                                                   failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            self._result = data['result']

    def __iter__(self):
        """Return a result until we get a 'None' response from consumer"""
        if self._done:
            raise StopIteration
        while True:
            try:
                self._iterator.next()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                raise StopIteration
            result = self._result
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result


def create_connection(conf, new, connection_pool):
    """Create a connection"""
    return ConnectionContext(conf, connection_pool, pooled=not new)


def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection.  When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    pack_context(msg, context)

    conn = ConnectionContext(conf, connection_pool)
    wait_msg = MulticallWaiter(conf, conn, timeout)
    conn.declare_direct_consumer(msg_id, wait_msg)
    conn.topic_send(topic, rpc_common.serialize_msg(msg))
    return wait_msg


def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    rv = multicall(conf, context, topic, msg, timeout, connection_pool)
    # NOTE(vish): return the last result from the multicall
    rv = list(rv)
    if not rv:
        return
    return rv[-1]


def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def notify(conf, context, topic, msg, connection_pool, envelope):
    """Sends a notification event on a topic."""
    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
              dict(event_type=msg.get('event_type'),
                   topic=topic))
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        if envelope:
            msg = rpc_common.serialize_msg(msg, force_envelope=True)
        conn.notify_send(topic, msg)


def cleanup(connection_pool):
    if connection_pool:
        connection_pool.empty()


def get_control_exchange(conf):
    return conf.control_exchange

@ -1,470 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import sys
import traceback

from kwapi.openstack.common import cfg
from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import importutils
from kwapi.openstack.common import jsonutils
from kwapi.openstack.common import local
from kwapi.openstack.common import log as logging


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


'''RPC Envelope Version.

This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently.  For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc.  This version number is handled
in the rpc proxy and dispatcher modules.

This version number applies to the message envelope that is used in the
serialization done inside the rpc layer.  See serialize_msg() and
deserialize_msg().

The current message format (version 2.0) is very simple.  It is:

    {
        'oslo.version': <RPC Envelope Version as a String>,
        'oslo.message': <Application Message Payload, JSON encoded>
    }

Message format version '1.0' is just considered to be the messages we sent
without a message envelope.

So, the current message envelope just includes the envelope version.  It may
eventually contain additional information, such as a signature for the message
payload.

We will JSON encode the application message payload.  The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'

_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'


# TODO(russellb) Turn this on after Grizzly.
_SEND_RPC_ENVELOPE = False

class RPCException(Exception):
    message = _("An unknown RPC related exception occurred.")

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.message % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message

        super(RPCException, self).__init__(message)


class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback.  These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.

    """
    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")

    def __init__(self, exc_type=None, value=None, traceback=None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type=exc_type,
                                          value=value,
                                          traceback=traceback)


class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    message = _("Timeout while waiting on RPC response.")


class InvalidRPCConnectionReuse(RPCException):
    message = _("Invalid reuse of an RPC connection.")


class UnsupportedRpcVersion(RPCException):
    message = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")


class UnsupportedRpcEnvelopeVersion(RPCException):
    message = _("Specified RPC envelope version, %(version)s, "
                "not supported by this endpoint.")

class Connection(object):
    """A connection, returned by rpc.create_connection().

    This class represents a connection to the message bus used for rpc.
    An instance of this class should never be created by users of the rpc API.
    Use rpc.create_connection() instead.
    """
    def close(self):
        """Close the connection.

        This method must be called when the connection will no longer be used.
        It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
        """
        raise NotImplementedError()

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer on this connection.

        A consumer is associated with a message queue on the backend message
        bus.  The consumer will read messages from the queue, unpack them, and
        dispatch them to the proxy object.  The contents of the message pulled
        off of the queue will determine which method gets called on the proxy
        object.

        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the
                      same topic. For example, all instances of nova-compute
                      consume from a queue called "compute".  In that case,
                      the messages will get distributed amongst the consumers
                      in a round-robin fashion if fanout=False.  If
                      fanout=True, every consumer associated with this topic
                      will get a copy of every message.
        :param proxy: The object that will handle all incoming messages.
        :param fanout: Whether or not this is a fanout topic.  See the
                       documentation for the topic parameter for some
                       additional comments on this.
        """
        raise NotImplementedError()

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker on this connection.

        A worker is like a regular consumer of messages directed to a
        topic, except that it is part of a set of such consumers (the
        "pool") which may run in parallel.  Every pool of workers will
        receive a given message, but only one worker in the pool will
        be asked to process it.  Load is distributed across the members
        of the pool in round-robin fashion.

        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the
                      same topic.
        :param proxy: The object that will handle all incoming messages.
        :param pool_name: String containing the name of the pool of workers
        """
        raise NotImplementedError()

    def consume_in_thread(self):
        """Spawn a thread to handle incoming messages.

        Spawn a thread that will be responsible for handling all incoming
        messages for consumers that were set up on this connection.

        Message dispatching inside of this is expected to be implemented in a
        non-blocking manner.  An example implementation would be having this
        thread pull messages in for all of the consumers, but utilize a thread
        pool for dispatching the messages to the proxy objects.
        """
        raise NotImplementedError()

def _safe_log(log_func, msg, msg_data):
    """Sanitizes the msg_data field before logging."""
    SANITIZE = {'set_admin_password': [('args', 'new_pass')],
                'run_instance': [('args', 'admin_password')],
                'route_message': [('args', 'message', 'args', 'method_info',
                                   'method_kwargs', 'password'),
                                  ('args', 'message', 'args', 'method_info',
                                   'method_kwargs', 'admin_password')]}

    has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
    has_context_token = '_context_auth_token' in msg_data
    has_token = 'auth_token' in msg_data

    if not any([has_method, has_context_token, has_token]):
        return log_func(msg, msg_data)

    msg_data = copy.deepcopy(msg_data)

    if has_method:
        for arg in SANITIZE.get(msg_data['method'], []):
            try:
                d = msg_data
                for elem in arg[:-1]:
                    d = d[elem]
                d[arg[-1]] = '<SANITIZED>'
            except KeyError, e:
                LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
                         {'item': arg,
                          'err': e})

    if has_context_token:
        msg_data['_context_auth_token'] = '<SANITIZED>'

    if has_token:
        msg_data['auth_token'] = '<SANITIZED>'

    return log_func(msg, msg_data)

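# Example (hypothetical; the secret values are placeholders):
#
#     msg_data = {'method': 'run_instance',
#                 'args': {'admin_password': 'secret'},
#                 '_context_auth_token': 'token123'}
#     _safe_log(LOG.debug, _('received: %s'), msg_data)
#     # The logged copy shows the password and token as '<SANITIZED>'; the
#     # original msg_data is untouched because it is deep-copied first.
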
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"), unicode(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    data = {
        'class': str(failure.__class__.__name__),
        'module': str(failure.__class__.__module__),
        'message': unicode(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data


def deserialize_remote_exception(conf, data):
    failure = jsonutils.loads(str(data))

    trace = failure.get('tb', [])
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')

    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
    # order to prevent arbitrary code execution.
    if module not in conf.allowed_rpc_exception_modules:
        return RemoteError(name, failure.get('message'), trace)

    try:
        mod = importutils.import_module(module)
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")

        failure = klass(**failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        return RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
                       {'__str__': str_override, '__unicode__': str_override})
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception.  This only works on user defined
        # Exceptions and not core python exceptions.  This is important
        # because we cannot necessarily change an exception message so we must
        # override the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError:
        # NOTE(ameade): If a core exception then just add the traceback to the
        # first exception argument.
        failure.args = (message,) + failure.args[1:]
    return failure

class CommonRpcContext(object):
    def __init__(self, **kwargs):
        self.values = kwargs

    def __getattr__(self, key):
        try:
            return self.values[key]
        except KeyError:
            raise AttributeError(key)

    def to_dict(self):
        return copy.deepcopy(self.values)

    @classmethod
    def from_dict(cls, values):
        return cls(**values)

    def deepcopy(self):
        return self.from_dict(self.to_dict())

    def update_store(self):
        local.store.context = self

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        # TODO(russellb) This method is a bit of a nova-ism.  It makes
        # some assumptions about the data in the request context sent
        # across rpc, while the rest of this class does not.  We could get
        # rid of this if we changed the nova code that uses this to
        # convert the RpcContext back to its native RequestContext doing
        # something like
        # nova.context.RequestContext.from_dict(ctxt.to_dict())

        context = self.deepcopy()
        context.values['is_admin'] = True

        context.values.setdefault('roles', [])

        if 'admin' not in context.values['roles']:
            context.values['roles'].append('admin')

        if read_deleted is not None:
            context.values['read_deleted'] = read_deleted

        return context


class ClientException(Exception):
    """This encapsulates some actual exception that is expected to be
    hit by an RPC proxy object.  Merely instantiating it records the
    current exception information, which will be passed back to the
    RPC client without exceptional logging."""
    def __init__(self):
        self._exc_info = sys.exc_info()

def catch_client_exception(exceptions, func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except Exception, e:
        if type(e) in exceptions:
            raise ClientException()
        else:
            raise


def client_exceptions(*exceptions):
    """Decorator for manager methods that raise expected exceptions.

    Marking a Manager method with this decorator allows the declaration
    of expected exceptions that the RPC layer should not consider fatal,
    and not log as if they were generated in a real error scenario.  Note
    that this will cause listed exceptions to be wrapped in a
    ClientException, which is used internally by the RPC layer."""
    def outer(func):
        def inner(*args, **kwargs):
            return catch_client_exception(exceptions, func, *args, **kwargs)
        return inner
    return outer

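# Example usage (hypothetical; WidgetNotFound and the manager class are
# placeholder names):
#
#     class WidgetManager(object):
#         @client_exceptions(WidgetNotFound)
#         def get_widget(self, context, widget_id):
#             return self.db.widget_get(context, widget_id)
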
def version_is_compatible(imp_version, version):
    """Determine whether versions are compatible.

    :param imp_version: The version implemented
    :param version: The version requested by an incoming message.
    """
    version_parts = version.split('.')
    imp_version_parts = imp_version.split('.')
    if int(version_parts[0]) != int(imp_version_parts[0]):  # Major
        return False
    if int(version_parts[1]) > int(imp_version_parts[1]):  # Minor
        return False
    return True

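# Examples:
#
#     version_is_compatible('1.3', '1.1')  # True: same major, older minor
#     version_is_compatible('1.3', '1.4')  # False: requested minor too new
#     version_is_compatible('2.0', '1.3')  # False: major versions differ
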
def serialize_msg(raw_msg, force_envelope=False):
    if not _SEND_RPC_ENVELOPE and not force_envelope:
        return raw_msg

    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}

    return msg


def deserialize_msg(msg):
    # NOTE(russellb): Hang on to your hats, this road is about to
    # get a little bumpy.
    #
    # Robustness Principle:
    #    "Be strict in what you send, liberal in what you accept."
    #
    # At this point we have to do a bit of guessing about what it
    # is we just received.  Here is the set of possibilities:
    #
    # 1) We received a dict.  This could be 2 things:
    #
    #   a) Inspect it to see if it looks like a standard message envelope.
    #      If so, great!
    #
    #   b) If it doesn't look like a standard message envelope, it could
    #      either be a notification, or a message from before we added a
    #      message envelope (referred to as version 1.0).
    #      Just return the message as-is.
    #
    # 2) It's any other non-dict type.  Just return it and hope for the best.
    #    This case covers return values from rpc.call() from before message
    #    envelopes were used.  (messages to call a method were always a dict)

    if not isinstance(msg, dict):
        # See #2 above.
        return msg

    base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
    if not all(map(lambda key: key in msg, base_envelope_keys)):
        # See #1.b above.
        return msg

    # At this point we think we have the message envelope
    # format we were expecting. (#1.a above)

    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

    raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])

    return raw_msg
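# Example round trip with the envelope forced on (it stays off by default
# until _SEND_RPC_ENVELOPE is flipped):
#
#     raw = {'method': 'echo', 'args': {'value': 42}}
#     enveloped = serialize_msg(raw, force_envelope=True)
#     # enveloped == {'oslo.version': '2.0',
#     #               'oslo.message': '{"method": ...}'}
#     assert deserialize_msg(enveloped) == raw
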
@ -1,138 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Code for rpc message dispatching.

Messages that come in have a version number associated with them.  RPC API
version numbers are in the form:

    Major.Minor

For a given message with version X.Y, the receiver must be marked as able to
handle messages of version A.B, where:

    A = X

    B >= Y

The Major version number would be incremented for an almost completely new
API.  The Minor version number would be incremented for backwards compatible
changes to an existing API.  A backwards compatible change could be something
like adding a new method, adding an argument to an existing method (but not
requiring it), or changing the type for an existing argument (but still
handling the old type as well).

The conversion over to a versioned API must be done on both the client side
and server side of the API at the same time.  However, as the code stands
today, there can be both versioned and unversioned APIs implemented in the
same code base.

EXAMPLES
========

Nova was the first project to use versioned rpc APIs.  Consider the compute
rpc API as an example.  The client side is in nova/compute/rpcapi.py and the
server side is in nova/compute/manager.py.


Example 1) Adding a new method.
-------------------------------

Adding a new method is a backwards compatible change.  It should be added to
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
X.Y+1.  On the client side, the new method in nova/compute/rpcapi.py should
have a specific version specified to indicate the minimum API version that
must be implemented for the method to be supported.  For example::

    def get_host_uptime(self, ctxt, host):
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
                         version='1.1')

In this case, version '1.1' is the first version that supported the
get_host_uptime() method.


Example 2) Adding a new parameter.
----------------------------------

Adding a new parameter to an rpc method can be made backwards compatible.
The RPC_API_VERSION on the server side (nova/compute/manager.py) should be
bumped.  The implementation of the method must not expect the parameter to
be present.::

    def some_remote_method(self, arg1, arg2, newarg=None):
        # The code needs to deal with newarg=None for cases
        # where an older client sends a message without it.
        pass

On the client side, the same changes should be made as in example 1.  The
minimum version that supports the new parameter should be specified.
"""

from kwapi.openstack.common.rpc import common as rpc_common


class RpcDispatcher(object):
    """Dispatch rpc messages according to the requested API version.

    This class can be used as the top level 'manager' for a service.  It
    contains a list of underlying managers that have an API_VERSION attribute.
    """

    def __init__(self, callbacks):
        """Initialize the rpc dispatcher.

        :param callbacks: List of proxy objects that are an instance
                          of a class with rpc methods exposed.  Each proxy
                          object should have an RPC_API_VERSION attribute.
        """
        self.callbacks = callbacks
        super(RpcDispatcher, self).__init__()

    def dispatch(self, ctxt, version, method, **kwargs):
        """Dispatch a message based on a requested version.

        :param ctxt: The request context
        :param version: The requested API version from the incoming message
        :param method: The method requested to be called by the incoming
                       message.
        :param kwargs: A dict of keyword arguments to be passed to the method.

        :returns: Whatever is returned by the underlying method that gets
                  called.
        """
        if not version:
            version = '1.0'

        had_compatible = False
        for proxyobj in self.callbacks:
            if hasattr(proxyobj, 'RPC_API_VERSION'):
                rpc_api_version = proxyobj.RPC_API_VERSION
            else:
                rpc_api_version = '1.0'
            is_compatible = rpc_common.version_is_compatible(rpc_api_version,
                                                             version)
            had_compatible = had_compatible or is_compatible
            if not hasattr(proxyobj, method):
                continue
            if is_compatible:
                return getattr(proxyobj, method)(ctxt, **kwargs)

        if had_compatible:
            raise AttributeError("No such RPC function '%s'" % method)
        else:
            raise rpc_common.UnsupportedRpcVersion(version=version)
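# Example (hypothetical; ComputeAPI, get_host_uptime() and uptime_for() are
# placeholder names):
#
#     class ComputeAPI(object):
#         RPC_API_VERSION = '1.1'
#
#         def get_host_uptime(self, ctxt, host):
#             return uptime_for(host)
#
#     dispatcher = RpcDispatcher([ComputeAPI()])
#     # a version '1.0' message is compatible with the '1.1' implementation:
#     dispatcher.dispatch(ctxt, '1.0', 'get_host_uptime', host='node-1')
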
@ -1,191 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake RPC implementation which calls proxy methods directly with no
queues.  Casts will block, but this is very useful for tests.
"""

import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized.  We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time

import eventlet

from kwapi.openstack.common.rpc import common as rpc_common

CONSUMERS = {}


class RpcContext(rpc_common.CommonRpcContext):
    def __init__(self, **kwargs):
        super(RpcContext, self).__init__(**kwargs)
        self._response = []
        self._done = False

    def deepcopy(self):
        values = self.to_dict()
        new_inst = self.__class__(**values)
        new_inst._response = self._response
        new_inst._done = self._done
        return new_inst

    def reply(self, reply=None, failure=None, ending=False):
        if ending:
            self._done = True
        if not self._done:
            self._response.append((reply, failure))


class Consumer(object):
    def __init__(self, topic, proxy):
        self.topic = topic
        self.proxy = proxy

    def call(self, context, version, method, args, timeout):
        done = eventlet.event.Event()

        def _inner():
            ctxt = RpcContext.from_dict(context.to_dict())
            try:
                rval = self.proxy.dispatch(context, version, method, **args)
                res = []
                # Caller might have called ctxt.reply() manually
                for (reply, failure) in ctxt._response:
                    if failure:
                        raise failure[0], failure[1], failure[2]
                    res.append(reply)
                # if ending not 'sent'...we might have more data to
                # return from the function itself
                if not ctxt._done:
                    if inspect.isgenerator(rval):
                        for val in rval:
                            res.append(val)
                    else:
                        res.append(rval)
                done.send(res)
            except rpc_common.ClientException as e:
                done.send_exception(e._exc_info[1])
            except Exception as e:
                done.send_exception(e)

        thread = eventlet.greenthread.spawn(_inner)

        if timeout:
            start_time = time.time()
            while not done.ready():
                eventlet.greenthread.sleep(1)
                cur_time = time.time()
                if (cur_time - start_time) > timeout:
                    thread.kill()
                    raise rpc_common.Timeout()

        return done.wait()


class Connection(object):
    """Connection object."""

    def __init__(self):
        self.consumers = []

    def create_consumer(self, topic, proxy, fanout=False):
        consumer = Consumer(topic, proxy)
        self.consumers.append(consumer)
        if topic not in CONSUMERS:
            CONSUMERS[topic] = []
        CONSUMERS[topic].append(consumer)

    def close(self):
        for consumer in self.consumers:
            CONSUMERS[consumer.topic].remove(consumer)
        self.consumers = []

    def consume_in_thread(self):
        pass


def create_connection(conf, new=True):
    """Create a connection"""
    return Connection()


def check_serialize(msg):
    """Make sure a message intended for rpc can be serialized."""
    json.dumps(msg)


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""

    check_serialize(msg)

    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)

    try:
        consumer = CONSUMERS[topic][0]
    except (KeyError, IndexError):
        return iter([None])
    else:
        return consumer.call(context, version, method, args, timeout)


def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    rv = multicall(conf, context, topic, msg, timeout)
    # NOTE(vish): return the last result from the multicall
    rv = list(rv)
    if not rv:
        return
    return rv[-1]


def cast(conf, context, topic, msg):
    check_serialize(msg)
    try:
        call(conf, context, topic, msg)
    except Exception:
        pass


def notify(conf, context, topic, msg):
    check_serialize(msg)


def cleanup():
    pass


def fanout_cast(conf, context, topic, msg):
    """Cast to all consumers of a topic"""
    check_serialize(msg)
    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)

    for consumer in CONSUMERS.get(topic, []):
        try:
            consumer.call(context, version, method, args, None)
        except Exception:
            pass
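# Example test-style usage (hypothetical; no broker is needed because the
# fake driver calls the consumer's proxy directly, and 'conf' is assumed to
# be a loaded config object):
#
#     from kwapi.openstack.common.rpc import dispatcher as rpc_dispatcher
#
#     class Echo(object):
#         def echo(self, ctxt, value):
#             return value
#
#     conn = create_connection(conf)
#     conn.create_consumer('test', rpc_dispatcher.RpcDispatcher([Echo()]))
#     msg = {'method': 'echo', 'args': {'value': 42}}
#     assert call(conf, RpcContext(), 'test', msg) == 42
#     conn.close()
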
@ -1,805 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import itertools
import socket
import ssl
import sys
import time
import uuid

import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging

from kwapi.openstack.common import cfg
from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import network_utils
from kwapi.openstack.common.rpc import amqp as rpc_amqp
from kwapi.openstack.common.rpc import common as rpc_common

kombu_opts = [
    cfg.StrOpt('kombu_ssl_version',
               default='',
               help='SSL version to use (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_keyfile',
               default='',
               help='SSL key file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_certfile',
               default='',
               help='SSL cert file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_ca_certs',
               default='',
               help=('SSL certification authority file '
                     '(valid only if SSL enabled)')),
    cfg.StrOpt('rabbit_host',
               default='localhost',
               help='The RabbitMQ broker address where a single node is '
                    'used'),
    cfg.IntOpt('rabbit_port',
               default=5672,
               help='The RabbitMQ broker port where a single node is used'),
    cfg.ListOpt('rabbit_hosts',
                default=['$rabbit_host:$rabbit_port'],
                help='RabbitMQ HA cluster host:port pairs'),
    cfg.BoolOpt('rabbit_use_ssl',
                default=False,
                help='connect over SSL for RabbitMQ'),
    cfg.StrOpt('rabbit_userid',
               default='guest',
               help='the RabbitMQ userid'),
    cfg.StrOpt('rabbit_password',
               default='guest',
               help='the RabbitMQ password'),
    cfg.StrOpt('rabbit_virtual_host',
               default='/',
               help='the RabbitMQ virtual host'),
    cfg.IntOpt('rabbit_retry_interval',
               default=1,
               help='how frequently to retry connecting with RabbitMQ'),
    cfg.IntOpt('rabbit_retry_backoff',
               default=2,
               help='how long to backoff for between retries when connecting '
                    'to RabbitMQ'),
    cfg.IntOpt('rabbit_max_retries',
               default=0,
               help='maximum retries with trying to connect to RabbitMQ '
                    '(the default of 0 implies an infinite retry count)'),
    cfg.BoolOpt('rabbit_durable_queues',
                default=False,
                help='use durable queues in RabbitMQ'),
    cfg.BoolOpt('rabbit_ha_queues',
                default=False,
                help='use H/A queues in RabbitMQ (x-ha-policy: all). '
                     'You need to wipe RabbitMQ database when '
                     'changing this option.'),

]

cfg.CONF.register_opts(kombu_opts)

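# Example (hypothetical values; deployments would normally set these in the
# service configuration file.  This assumes the cfg module exposes
# set_override(), as the openstack-common cfg of this era did):
#
#     cfg.CONF.set_override('rabbit_host', 'rabbit.example.org')
#     cfg.CONF.set_override('rabbit_userid', 'kwapi')
#     cfg.CONF.set_override('rabbit_password', 'secret')
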
LOG = rpc_common.LOG
|
||||
|
||||
|
||||
def _get_queue_arguments(conf):
|
||||
"""Construct the arguments for declaring a queue.
|
||||
|
||||
If the rabbit_ha_queues option is set, we declare a mirrored queue
|
||||
as described here:
|
||||
|
||||
http://www.rabbitmq.com/ha.html
|
||||
|
||||
Setting x-ha-policy to all means that the queue will be mirrored
|
||||
to all nodes in the cluster.
|
||||
"""
|
||||
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
|
||||
|
||||
|
||||
class ConsumerBase(object):
|
||||
"""Consumer base class."""
|
||||
|
||||
def __init__(self, channel, callback, tag, **kwargs):
|
||||
"""Declare a queue on an amqp channel.
|
||||
|
||||
'channel' is the amqp channel to use
|
||||
'callback' is the callback to call when messages are received
|
||||
'tag' is a unique ID for the consumer on the channel
|
||||
|
||||
queue name, exchange name, and other kombu options are
|
||||
passed in here as a dictionary.
|
||||
"""
|
||||
self.callback = callback
|
||||
self.tag = str(tag)
|
||||
self.kwargs = kwargs
|
||||
self.queue = None
|
||||
self.reconnect(channel)
|
||||
|
||||
def reconnect(self, channel):
|
||||
"""Re-declare the queue after a rabbit reconnect"""
|
||||
self.channel = channel
|
||||
self.kwargs['channel'] = channel
|
||||
self.queue = kombu.entity.Queue(**self.kwargs)
|
||||
self.queue.declare()
|
||||
|
||||
def consume(self, *args, **kwargs):
|
||||
"""Actually declare the consumer on the amqp channel. This will
|
||||
start the flow of messages from the queue. Using the
|
||||
Connection.iterconsume() iterator will process the messages,
|
||||
calling the appropriate callback.
|
||||
|
||||
If a callback is specified in kwargs, use that. Otherwise,
|
||||
use the callback passed during __init__()
|
||||
|
||||
If kwargs['nowait'] is True, then this call will block until
|
||||
a message is read.
|
||||
|
||||
Messages will automatically be acked if the callback doesn't
|
||||
raise an exception
|
||||
"""
|
||||
|
||||
options = {'consumer_tag': self.tag}
|
||||
options['nowait'] = kwargs.get('nowait', False)
|
||||
callback = kwargs.get('callback', self.callback)
|
||||
if not callback:
|
||||
raise ValueError("No callback defined")
|
||||
|
||||
def _callback(raw_message):
|
||||
message = self.channel.message_to_python(raw_message)
|
||||
try:
|
||||
msg = rpc_common.deserialize_msg(message.payload)
|
||||
callback(msg)
|
||||
message.ack()
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to process message... skipping it."))
|
||||
|
||||
self.queue.consume(*args, callback=_callback, **options)
|
||||
|
||||
def cancel(self):
|
||||
"""Cancel the consuming from the queue, if it has started"""
|
||||
try:
|
||||
self.queue.cancel(self.tag)
|
||||
except KeyError, e:
|
||||
# NOTE(comstud): Kludge to get around a amqplib bug
|
||||
if str(e) != "u'%s'" % self.tag:
|
||||
raise
|
||||
self.queue = None
|
||||
|
||||
|
||||


class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Init a 'direct' queue.

        'channel' is the amqp channel to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Default options
        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        exchange = kombu.entity.Exchange(name=msg_id,
                                         type='direct',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(DirectConsumer, self).__init__(channel,
                                             callback,
                                             tag,
                                             name=msg_id,
                                             exchange=exchange,
                                             routing_key=msg_id,
                                             **options)


class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 exchange_name=None, **kwargs):
        """Init a 'topic' queue.

        :param channel: the amqp channel to use
        :param topic: the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param tag: a unique ID for the consumer on the channel
        :param name: optional queue name, defaults to topic
        :paramtype name: str

        Other kombu options may be passed as keyword arguments
        """
        # Default options
        options = {'durable': conf.rabbit_durable_queues,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': False,
                   'exclusive': False}
        options.update(kwargs)
        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        exchange = kombu.entity.Exchange(name=exchange_name,
                                         type='topic',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(TopicConsumer, self).__init__(channel,
                                            callback,
                                            tag,
                                            name=name or topic,
                                            exchange=exchange,
                                            routing_key=topic,
                                            **options)


class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Init a 'fanout' queue.

        'channel' is the amqp channel to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        unique = uuid.uuid4().hex
        exchange_name = '%s_fanout' % topic
        queue_name = '%s_fanout_%s' % (topic, unique)

        # Default options
        options = {'durable': False,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(FanoutConsumer, self).__init__(channel, callback, tag,
                                             name=queue_name,
                                             exchange=exchange,
                                             routing_key=topic,
                                             **options)


class Publisher(object):
    """Base Publisher class."""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection."""
        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                              **self.kwargs)
        self.producer = kombu.messaging.Producer(exchange=self.exchange,
                                                 channel=channel,
                                                 routing_key=self.routing_key)

    def send(self, msg):
        """Send a message."""
        self.producer.publish(msg)


class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""

    def __init__(self, conf, channel, msg_id, **kwargs):
        """Init a 'direct' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
                                              type='direct', **options)


class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""

    def __init__(self, conf, channel, topic, **kwargs):
        """Init a 'topic' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = {'durable': conf.rabbit_durable_queues,
                   'auto_delete': False,
                   'exclusive': False}
        options.update(kwargs)
        exchange_name = rpc_amqp.get_control_exchange(conf)
        super(TopicPublisher, self).__init__(channel,
                                             exchange_name,
                                             topic,
                                             type='topic',
                                             **options)


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""

    def __init__(self, conf, channel, topic, **kwargs):
        """Init a 'fanout' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
                                              None, type='fanout', **options)


class NotifyPublisher(TopicPublisher):
    """Publisher class for 'notify'."""

    def __init__(self, conf, channel, topic, **kwargs):
        self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
        self.queue_arguments = _get_queue_arguments(conf)
        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)

    def reconnect(self, channel):
        super(NotifyPublisher, self).reconnect(channel)

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(channel=channel,
                                   exchange=self.exchange,
                                   durable=self.durable,
                                   name=self.routing_key,
                                   routing_key=self.routing_key,
                                   queue_arguments=self.queue_arguments)
        queue.declare()


class Connection(object):
    """Connection object."""

    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}
        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        ssl_params = self._fetch_ssl_params()
        params_list = []
        for adr in self.conf.rabbit_hosts:
            hostname, port = network_utils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'virtual_host': self.conf.rabbit_virtual_host,
            }

            for sp_key, value in server_params.iteritems():
                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
                params[p_key] = value

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            params_list.append(params)

        self.params_list = params_list

        self.memory_transport = self.conf.fake_rabbit

        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Fetch the ssl params that should be used for the
        connection (if any).
        """
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = self.conf.kombu_ssl_version
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        if not ssl_params:
            # Just have the default behavior
            return True
        else:
            # Return the extended behavior
            return ssl_params
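
    # Illustrative only (hypothetical option values): with kombu_ssl_keyfile,
    # kombu_ssl_certfile and kombu_ssl_ca_certs all set, _fetch_ssl_params()
    # would return something like
    #     {'keyfile': '/etc/kwapi/key.pem',
    #      'certfile': '/etc/kwapi/cert.pem',
    #      'ca_certs': '/etc/kwapi/cacert.pem',
    #      'cert_reqs': ssl.CERT_REQUIRED}
    # which kombu passes through to ssl.wrap_socket() when connecting.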

    def _connect(self, params):
        """Connect to rabbit. Re-establish any queues that may have
        been declared before if we are reconnecting. Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_("Reconnecting to AMQP server on "
                       "%(hostname)s:%(port)d") % params)
            try:
                self.connection.release()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
                 params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                LOG.error(_('Unable to connect to AMQP server on '
                            '%(hostname)s:%(port)d after %(max_retries)d '
                            'tries: %(err_str)s') % log_info)
                # NOTE(comstud): Copied from original code.  There's
                # really no better recourse because if this was a queue we
                # need to consume on, we have no way to consume anymore.
                sys.exit(1)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
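
    # Linear backoff sketch (illustrative values, not the config defaults):
    # with interval_start=1, interval_stepping=2 and interval_max=30,
    # successive failed attempts sleep 1, 3, 5, 7, ... seconds, capped at
    # 30 seconds per retry, while cycling through params_list hosts.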

    def ensure(self, error_callback, method, *args, **kwargs):
        while True:
            try:
                return method(*args, **kwargs)
            except (self.connection_errors, socket.timeout, IOError), e:
                if error_callback:
                    error_callback(e)
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                if error_callback:
                    error_callback(e)
            self.reconnect()
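
    # Usage pattern (a minimal sketch): wrap any operation that touches the
    # broker so it survives a dropped connection, e.g.
    #
    #     def _publish():
    #         publisher = TopicPublisher(self.conf, self.channel, topic)
    #         publisher.send(msg)
    #     self.ensure(None, _publish)
    #
    # ensure() keeps retrying the callable after reconnect() until it
    # succeeds or raises a non-connection error.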

    def get_channel(self):
        """Convenience call for bin/clear_rabbit_queues."""
        return self.channel

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.release()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.channel.close()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        self.consumers = []

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                        "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.channel, topic, callback,
                                    self.consumer_num.next())
            self.consumers.append(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        info = {'do_consume': True}

        def _error_callback(exc):
            if isinstance(exc, socket.timeout):
                LOG.exception(_('Timed out waiting for RPC response: %s') %
                              str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))
                info['do_consume'] = True

        def _consume():
            if info['do_consume']:
                queues_head = self.consumers[:-1]
                queues_tail = self.consumers[-1]
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                info['do_consume'] = False
            return self.connection.drain_events(timeout=timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)
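
    # Note (illustrative): Connection.consume() below drives this generator;
    # e.g. consume(limit=1) processes a single message, which is roughly how
    # a caller waits for one reply, while limit=None loops until the
    # consumer thread is killed.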

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg, **kwargs):
        """Send to a publisher based on the publisher class."""

        def _error_callback(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s") % log_info)

        def _publish():
            publisher = cls(self.conf, self.channel, topic, **kwargs)
            publisher.send(msg)

        self.ensure(_error_callback, _publish)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg):
        """Send a 'topic' message."""
        self.publisher_send(TopicPublisher, topic, msg)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg, **kwargs)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                it.next()
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consume from all queues/consumers in a greenthread."""
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            self.declare_fanout_consumer(topic, proxy_cb)
        else:
            self.declare_topic_consumer(topic, proxy_cb)

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)
        self.declare_topic_consumer(topic, proxy_cb, pool_name)


def create_connection(conf, new=True):
    """Create a connection."""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    return rpc_amqp.multicall(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def call(conf, context, topic, msg, timeout=None):
    """Send a message on a topic and wait for a response."""
    return rpc_amqp.call(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast(conf, context, topic, msg):
    """Send a message on a topic without waiting for a response."""
    return rpc_amqp.cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast(conf, context, topic, msg):
    """Send a message on a fanout exchange without waiting for a response."""
    return rpc_amqp.fanout_cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast_to_server(conf, context, server_params, topic, msg):
    """Send a message on a topic to a specific server."""
    return rpc_amqp.cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Send a message on a fanout exchange to a specific server."""
    return rpc_amqp.fanout_cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def notify(conf, context, topic, msg, envelope):
    """Send a notification event on a topic."""
    return rpc_amqp.notify(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection),
        envelope)


def cleanup():
    return rpc_amqp.cleanup(Connection.pool)
@ -1,604 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import itertools
import time
import uuid

import eventlet
import greenlet
import qpid.messaging
import qpid.messaging.exceptions

from kwapi.openstack.common import cfg
from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import jsonutils
from kwapi.openstack.common import log as logging
from kwapi.openstack.common.rpc import amqp as rpc_amqp
from kwapi.openstack.common.rpc import common as rpc_common

LOG = logging.getLogger(__name__)

qpid_opts = [
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.StrOpt('qpid_port',
               default='5672',
               help='Qpid broker port'),
    cfg.ListOpt('qpid_hosts',
                default=['$qpid_hostname:$qpid_port'],
                help='Qpid HA cluster host:port pairs'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
               help='Password for qpid connection'),
    cfg.StrOpt('qpid_sasl_mechanisms',
               default='',
               help='Space separated list of SASL mechanisms to use for '
                    'auth'),
    cfg.IntOpt('qpid_heartbeat',
               default=60,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
]

cfg.CONF.register_opts(qpid_opts)


class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.

        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
                    in the address string.
        'link_name' goes into the "name" field of the "link" in the address
                    string
        'link_opts' will be applied to the "x-declare" section of "link"
                    in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None

        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": True,
                    "auto-delete": True,
                },
            },
            "link": {
                "name": link_name,
                "durable": True,
                "x-declare": {
                    "durable": False,
                    "auto-delete": True,
                    "exclusive": False,
                },
            },
        }
        addr_opts["node"]["x-declare"].update(node_opts)
        addr_opts["link"]["x-declare"].update(link_opts)

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
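
        # Illustrative only: for a DirectConsumer listening on msg_id 'abc',
        # the rendered Qpid address string looks roughly like
        #     abc/abc ; {"create": "always", "node": {...}, "link": {...}}
        # i.e. the node name, a semicolon, then the JSON-encoded options.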

        self.reconnect(session)

    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect."""
        self.session = session
        self.receiver = session.receiver(self.address)
        self.receiver.capacity = 1

    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            LOG.exception(_("Failed to process message... skipping it."))
        finally:
            self.session.acknowledge(message)

    def get_receiver(self):
        return self.receiver


class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.

        'session' is the amqp session to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        """

        super(DirectConsumer, self).__init__(session, callback,
                                             "%s/%s" % (msg_id, msg_id),
                                             {"type": "direct"},
                                             msg_id,
                                             {"exclusive": True})


class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
        """Init a 'topic' queue.

        :param session: the amqp session to use
        :param topic: is the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param name: optional queue name, defaults to topic
        """

        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        super(TopicConsumer, self).__init__(session, callback,
                                            "%s/%s" % (exchange_name, topic),
                                            {}, name or topic, {})


class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.

        'session' is the amqp session to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """

        super(FanoutConsumer, self).__init__(
            session, callback,
            "%s_fanout" % topic,
            {"durable": False, "type": "fanout"},
            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
            {"exclusive": True})


class Publisher(object):
    """Base Publisher class."""

    def __init__(self, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session

        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": False,
                    # auto-delete isn't implemented for exchanges in qpid,
                    # but put in here anyway
                    "auto-delete": True,
                },
            },
        }
        if node_opts:
            addr_opts["node"]["x-declare"].update(node_opts)

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

        self.reconnect(session)

    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)

    def send(self, msg):
        """Send a message."""
        self.sender.send(msg)


class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""

    def __init__(self, conf, session, msg_id):
        """Init a 'direct' publisher."""
        super(DirectPublisher, self).__init__(session, msg_id,
                                              {"type": "Direct"})


class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""

    def __init__(self, conf, session, topic):
        """Init a 'topic' publisher."""
        exchange_name = rpc_amqp.get_control_exchange(conf)
        super(TopicPublisher, self).__init__(session,
                                             "%s/%s" % (exchange_name, topic))


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""

    def __init__(self, conf, session, topic):
        """Init a 'fanout' publisher."""
        super(FanoutPublisher, self).__init__(
            session,
            "%s_fanout" % topic, {"type": "fanout"})


class NotifyPublisher(Publisher):
    """Publisher class for notifications."""

    def __init__(self, conf, session, topic):
        """Init a 'topic' publisher."""
        exchange_name = rpc_amqp.get_control_exchange(conf)
        super(NotifyPublisher, self).__init__(session,
                                              "%s/%s" % (exchange_name,
                                                         topic),
                                              {"durable": True})


class Connection(object):
    """Connection object."""

    pool = None

    def __init__(self, conf, server_params=None):
        self.session = None
        self.consumers = {}
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf

        if server_params and 'hostname' in server_params:
            # NOTE(russellb) This enables support for cast_to_server.
            server_params['qpid_hosts'] = [
                '%s:%d' % (server_params['hostname'],
                           server_params.get('port', 5672))
            ]

        params = {
            'qpid_hosts': self.conf.qpid_hosts,
            'username': self.conf.qpid_username,
            'password': self.conf.qpid_password,
        }
        params.update(server_params or {})

        self.brokers = params['qpid_hosts']
        self.username = params['username']
        self.password = params['password']
        self.connection_create(self.brokers[0])
        self.reconnect()

    def connection_create(self, broker):
        # Create the connection - this does not open the connection
        self.connection = qpid.messaging.Connection(broker)

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = self.username
        self.connection.password = self.password

        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.protocol = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

    def _register_consumer(self, consumer):
        self.consumers[str(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        if self.connection.opened():
            try:
                self.connection.close()
            except qpid.messaging.exceptions.ConnectionError:
                pass

        attempt = 0
        delay = 1
        while True:
            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid.messaging.exceptions.ConnectionError, e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                delay = min(2 * delay, 60)
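                # Illustrative note: unlike the kombu driver's linear
                # backoff, this loop doubles the delay on each failure
                # (1, 2, 4, 8, ... seconds, capped at 60) while cycling
                # through the configured qpid_hosts.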
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in consumers.itervalues():
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug(_("Re-established AMQP queues"))

    def ensure(self, error_callback, method, *args, **kwargs):
        while True:
            try:
                return method(*args, **kwargs)
            except (qpid.messaging.exceptions.Empty,
                    qpid.messaging.exceptions.ConnectionError), e:
                if error_callback:
                    error_callback(e)
                self.reconnect()

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.close()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                        "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        def _error_callback(exc):
            if isinstance(exc, qpid.messaging.exceptions.Empty):
                LOG.exception(_('Timed out waiting for RPC response: %s') %
                              str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))

        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message.  Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class."""

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s") % log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg):
        """Send a 'topic' message."""
        self.publisher_send(TopicPublisher, topic, msg)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                it.next()
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consume from all queues/consumers in a greenthread."""
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic,
                                      proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic,
                                     proxy_cb)

        self._register_consumer(consumer)

        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)

        self._register_consumer(consumer)

        return consumer


def create_connection(conf, new=True):
    """Create a connection."""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    return rpc_amqp.multicall(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def call(conf, context, topic, msg, timeout=None):
    """Send a message on a topic and wait for a response."""
    return rpc_amqp.call(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast(conf, context, topic, msg):
    """Send a message on a topic without waiting for a response."""
    return rpc_amqp.cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast(conf, context, topic, msg):
    """Send a message on a fanout exchange without waiting for a response."""
    return rpc_amqp.fanout_cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast_to_server(conf, context, server_params, topic, msg):
    """Send a message on a topic to a specific server."""
    return rpc_amqp.cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Send a message on a fanout exchange to a specific server."""
    return rpc_amqp.fanout_cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def notify(conf, context, topic, msg, envelope):
    """Send a notification event on a topic."""
    return rpc_amqp.notify(conf, context, topic, msg,
                           rpc_amqp.get_connection_pool(conf, Connection),
                           envelope)


def cleanup():
    return rpc_amqp.cleanup(Connection.pool)
@ -1,732 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pprint
import socket
import string
import sys
import types
import uuid

import eventlet
from eventlet.green import zmq
import greenlet

from kwapi.openstack.common import cfg
from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import importutils
from kwapi.openstack.common import jsonutils
from kwapi.openstack.common.rpc import common as rpc_common


# Re-exported here for convenience; these are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException

zmq_opts = [
    cfg.StrOpt('rpc_zmq_bind_address', default='*',
               help='ZeroMQ bind address. Should be a wildcard (*), '
                    'an ethernet interface, or IP. '
                    'The "host" option should point or resolve to this '
                    'address.'),

    # The module.Class to use for matchmaking.
    cfg.StrOpt(
        'rpc_zmq_matchmaker',
        default=('openstack.common.rpc.'
                 'matchmaker.MatchMakerLocalhost'),
        help='MatchMaker driver',
    ),

    # The following port is unassigned by IANA as of 2012-05-21
    cfg.IntOpt('rpc_zmq_port', default=9501,
               help='ZeroMQ receiver listening port'),

    cfg.IntOpt('rpc_zmq_contexts', default=1,
               help='Number of ZeroMQ contexts, defaults to 1'),

    cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
               help='Directory for holding IPC sockets'),

    cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
               help='Name of this node. Must be a valid hostname, FQDN, or '
                    'IP address. Must match "host" option, if running Nova.')
]


# These globals are defined in register_opts(conf),
# a mandatory initialization call
CONF = None
ZMQ_CTX = None  # ZeroMQ Context, must be global.
matchmaker = None  # memoized matchmaker object


def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return str(jsonutils.dumps(data, ensure_ascii=True))
    except TypeError:
        LOG.error(_("JSON serialization failed."))
        raise


def _deserialize(data):
    """Deserialization wrapper."""
    LOG.debug(_("Deserializing: %s"), data)
    return jsonutils.loads(data)
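

# Illustrative round trip (a sketch; jsonutils wraps the stdlib json module
# here, so the exact key order in the encoded string may vary):
#
#     >>> s = _serialize({'method': 'ping', 'args': {}})   # a JSON str
#     >>> _deserialize(s)['method']
#     u'ping'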


class ZmqSocket(object):
    """A tiny wrapper around ZeroMQ to simplify the send/recv protocol
    and connection management.

    Can be used as a Context (supports the 'with' statement).
    """

    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        self.sock = ZMQ_CTX.socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]
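
        # Illustrative note: the dict above dispatches on the argument's
        # type to normalize 'subscribe' into a list of filters, e.g.
        #     None        -> []
        #     'topic'     -> ['topic']
        #     ['a', 'b']  -> ['a', 'b']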

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))

    def socket_s(self):
        """Get socket type as string."""
        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
                  'DEALER')
        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
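
    # Illustrative note: socket_s() builds a reverse map from the zmq
    # integer constants back to their names, so e.g. a socket created
    # with zmq.PUB reports 'PUB' in the debug logs above.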

    def subscribe(self, msg_filter):
        """Subscribe."""
        if not self.can_sub:
            raise RPCException("Cannot subscribe on this socket.")
        LOG.debug(_("Subscribing to %s"), msg_filter)

        try:
            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
        except Exception:
            return

        self.subscriptions.append(msg_filter)

    def unsubscribe(self, msg_filter):
        """Unsubscribe."""
        if msg_filter not in self.subscriptions:
            return
        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
        self.subscriptions.remove(msg_filter)

    def close(self):
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if len(self.subscriptions) > 0:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        # Linger -1 prevents lost/dropped messages
        try:
            self.sock.close(linger=-1)
        except Exception:
            pass
        self.sock = None

    def recv(self):
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart()

    def send(self, data):
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data)


class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
        self.outq = ZmqSocket(addr, socket_type, bind=bind)

    def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
        if serialize:
            data = rpc_common.serialize_msg(data, force_envelope)
        self.outq.send([str(msg_id), str(topic), str('cast'),
                        _serialize(data)])

    def close(self):
        self.outq.close()


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""

    def __init__(self, **kwargs):
        self.replies = []
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['replies'] = self.replies
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False):
        if ending:
            return
        self.replies.append(reply)

    @classmethod
    def marshal(self, ctx):
        ctx_data = ctx.to_dict()
        return _serialize(ctx_data)

    @classmethod
    def unmarshal(self, data):
        return RpcContext.from_dict(_deserialize(data))


class InternalContext(object):
    """Used by ConsumerBase as a private context for '-' methods."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.msg_waiter = None

    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'], **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException, e:
            LOG.debug(_("Expected exception during message handling (%s)") %
                      e._exc_info[1])
            return {'exc':
                    rpc_common.serialize_remote_exception(e._exc_info,
                                                          log_failure=False)}
        except Exception:
            LOG.error(_("Exception during message handling"))
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}

    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # Our real method is curried into msg['args']

        child_ctx = RpcContext.unmarshal(msg[0])
        response = ConsumerBase.normalize_reply(
            self._get_response(child_ctx, proxy, topic, msg[1]),
            ctx.replies)

        LOG.debug(_("Sending reply"))
        cast(CONF, ctx, topic, {
            'method': '-process_reply',
            'args': {
                'msg_id': msg_id,
                'response': response
            }
        })


class ConsumerBase(object):
    """Base Consumer."""

    def __init__(self):
        self.private_ctx = InternalContext(None)

    @classmethod
    def normalize_reply(self, result, replies):
        # TODO(ewindisch): re-evaluate and document this method.
        if isinstance(result, types.GeneratorType):
            return list(result)
        elif replies:
            return replies
        else:
            return [result]

    def process(self, style, target, proxy, ctx, data):
        # Methods starting with '-' are processed internally
        # (not a valid public method name).
        method = data['method']

        # Internal method
        # uses internal context for safety.
        if data['method'][0] == '-':
            # For reply / process_reply
            method = method[1:]
            if method == 'reply':
                self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        data.setdefault('version', None)
        data.setdefault('args', {})
        proxy.dispatch(ctx, data['version'],
                       data['method'], **data['args'])


class ZmqBaseReactor(ConsumerBase):
    """A consumer class implementing a
    centralized casting broker (PULL-PUSH)
    for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        self.mapping = {}
        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}

        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
                 zmq_type_out=None, in_bind=True, out_bind=True,
                 subscribe=None):

        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))

        if not out_addr:
            return

        if zmq_type_out not in (zmq.PUSH, zmq.PUB):
            raise RPCException("Bad output socktype")

        # Items push out.
        outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

        self.mapping[inq] = outq
        self.mapping[outq] = inq
        self.sockets.append(outq)

        LOG.info(_("Out reactor registered"))

    def consume_in_thread(self):
        def _consume(sock):
            LOG.info(_("Consuming socket"))
            while True:
                self.consume(sock)

        for k in self.proxies.keys():
            self.threads.append(
                self.pool.spawn(_consume, k)
            )

    def wait(self):
        for t in self.threads:
            t.wait()

    def close(self):
        for s in self.sockets:
            s.close()

        for t in self.threads:
            t.kill()


class ZmqProxy(ZmqBaseReactor):
    """A consumer class implementing a
    topic-based proxy, forwarding to
    IPC sockets.
    """

    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)

        self.topic_proxy = {}
        ipc_dir = CONF.rpc_zmq_ipc_dir

        self.topic_proxy['zmq_replies'] = \
            ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ),
                      zmq.PUB, bind=True)
        self.sockets.append(self.topic_proxy['zmq_replies'])

    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        # TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        msg_id, topic, style, in_msg = data
        topic = topic.split('.', 1)[0]

        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

        # Handle zmq_replies magic
        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
            inside = rpc_common.deserialize_msg(_deserialize(in_msg))
            msg_id = inside[-1]['args']['msg_id']
            response = inside[-1]['args']['response']
            LOG.debug(_("->response->%s"), response)
            data = [str(msg_id), _serialize(response)]
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
                             sock_type, bind=True)
            self.topic_proxy[topic] = outq
            self.sockets.append(outq)
            LOG.info(_("Created topic proxy: %s"), topic)

            # It takes some time for a pub socket to open,
            # before we can have any faith in doing a send() to it.
            if sock_type == zmq.PUB:
                eventlet.sleep(.5)

        LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data})
        self.topic_proxy[topic].send(data)
        LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data})


class ZmqReactor(ZmqBaseReactor):
    """A consumer class implementing a
    consumer for messages. Can also be
    used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        # TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
                'data': data})
            self.mapping[sock].send(data)
            return

        msg_id, topic, style, in_msg = data

        ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
        ctx = RpcContext.unmarshal(ctx)

        proxy = self.proxies[sock]

        self.pool.spawn_n(self.process, style, topic,
                          proxy, ctx, request)


class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        # Only consume on the base topic name.
        topic = topic.split('.', 1)[0]

        LOG.info(_("Create Consumer for topic (%(topic)s)") %
                 {'topic': topic})

        # Subscription scenarios
        if fanout:
            subscribe = ('', fanout)[type(fanout) == str]
            sock_type = zmq.SUB
            topic = 'fanout~' + topic
        else:
            sock_type = zmq.PULL
            subscribe = None

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)

    def close(self):
        self.reactor.close()

    def wait(self):
        self.reactor.wait()

    def consume_in_thread(self):
        self.reactor.consume_in_thread()


def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
          force_envelope=False):
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(msg_id, topic, payload, serialize, force_envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            if 'conn' in vars():
                conn.close()


def _call(addr, context, msg_id, topic, msg, timeout=None):
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'context': mcontext,
            'topic': reply_topic,
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
|
||||
with Timeout(timeout, exception=rpc_common.Timeout):
|
||||
try:
|
||||
msg_waiter = ZmqSocket(
|
||||
"ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
|
||||
zmq.SUB, subscribe=msg_id, bind=False
|
||||
)
|
||||
|
||||
LOG.debug(_("Sending cast"))
|
||||
_cast(addr, context, msg_id, topic, payload)
|
||||
|
||||
LOG.debug(_("Cast sent; Waiting reply"))
|
||||
# Blocks until receives reply
|
||||
msg = msg_waiter.recv()
|
||||
LOG.debug(_("Received message: %s"), msg)
|
||||
LOG.debug(_("Unpacking response"))
|
||||
responses = _deserialize(msg[-1])
|
||||
# ZMQError trumps the Timeout error.
|
||||
except zmq.ZMQError:
|
||||
raise RPCException("ZMQ Socket Error")
|
||||
finally:
|
||||
if 'msg_waiter' in vars():
|
||||
msg_waiter.close()
|
||||
|
||||
# It seems we don't need to do all of the following,
|
||||
# but perhaps it would be useful for multicall?
|
||||
# One effect of this is that we're checking all
|
||||
# responses for Exceptions.
|
||||
for resp in responses:
|
||||
if isinstance(resp, types.DictType) and 'exc' in resp:
|
||||
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
|
||||
|
||||
return responses[-1]
|
||||
|
||||
|
||||
def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
|
||||
force_envelope=False):
|
||||
"""
|
||||
Wraps the sending of messages,
|
||||
dispatches to the matchmaker and sends
|
||||
message to all relevant hosts.
|
||||
"""
|
||||
conf = CONF
|
||||
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
|
||||
|
||||
queues = matchmaker.queues(topic)
|
||||
LOG.debug(_("Sending message(s) to: %s"), queues)
|
||||
|
||||
# Don't stack if we have no matchmaker results
|
||||
if len(queues) == 0:
|
||||
LOG.warn(_("No matchmaker results. Not casting."))
|
||||
# While not strictly a timeout, callers know how to handle
|
||||
# this exception and a timeout isn't too big a lie.
|
||||
raise rpc_common.Timeout, "No match from matchmaker."
|
||||
|
||||
# This supports brokerless fanout (addresses > 1)
|
||||
for queue in queues:
|
||||
(_topic, ip_addr) = queue
|
||||
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
|
||||
|
||||
if method.__name__ == '_cast':
|
||||
eventlet.spawn_n(method, _addr, context,
|
||||
_topic, _topic, msg, timeout, serialize,
|
||||
force_envelope)
|
||||
return
|
||||
return method(_addr, context, _topic, _topic, msg, timeout)
|
||||
|
||||
|
||||
def create_connection(conf, new=True):
|
||||
return Connection(conf)
|
||||
|
||||
|
||||
def multicall(conf, *args, **kwargs):
|
||||
"""Multiple calls."""
|
||||
return _multi_send(_call, *args, **kwargs)
|
||||
|
||||
|
||||
def call(conf, *args, **kwargs):
|
||||
"""Send a message, expect a response."""
|
||||
data = _multi_send(_call, *args, **kwargs)
|
||||
return data[-1]
|
||||
|
||||
|
||||
def cast(conf, *args, **kwargs):
|
||||
"""Send a message expecting no reply."""
|
||||
_multi_send(_cast, *args, **kwargs)
|
||||
|
||||
|
||||
def fanout_cast(conf, context, topic, msg, **kwargs):
|
||||
"""Send a message to all listening and expect no reply."""
|
||||
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
|
||||
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
|
||||
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
|
||||
|
||||
|
||||
def notify(conf, context, topic, msg, **kwargs):
|
||||
"""
|
||||
Send notification event.
|
||||
Notifications are sent to topic-priority.
|
||||
This differs from the AMQP drivers which send to topic.priority.
|
||||
"""
|
||||
# NOTE(ewindisch): dot-priority in rpc notifier does not
|
||||
# work with our assumptions.
|
||||
topic.replace('.', '-')
|
||||
kwargs['serialize'] = kwargs.pop('envelope')
|
||||
kwargs['force_envelope'] = True
|
||||
cast(conf, context, topic, msg, **kwargs)
|
||||
|
||||
|
||||
def cleanup():
|
||||
"""Clean up resources in use by implementation."""
|
||||
global ZMQ_CTX
|
||||
global matchmaker
|
||||
matchmaker = None
|
||||
ZMQ_CTX.term()
|
||||
ZMQ_CTX = None
|
||||
|
||||
|
||||
def register_opts(conf):
|
||||
"""Registration of options for this driver."""
|
||||
#NOTE(ewindisch): ZMQ_CTX and matchmaker
|
||||
# are initialized here as this is as good
|
||||
# an initialization method as any.
|
||||
|
||||
# We memoize through these globals
|
||||
global ZMQ_CTX
|
||||
global matchmaker
|
||||
global CONF
|
||||
|
||||
if not CONF:
|
||||
conf.register_opts(zmq_opts)
|
||||
CONF = conf
|
||||
# Don't re-set, if this method is called twice.
|
||||
if not ZMQ_CTX:
|
||||
ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
|
||||
if not matchmaker:
|
||||
# rpc_zmq_matchmaker should be set to a 'module.Class'
|
||||
mm_path = conf.rpc_zmq_matchmaker.split('.')
|
||||
mm_module = '.'.join(mm_path[:-1])
|
||||
mm_class = mm_path[-1]
|
||||
|
||||
# Only initialize a class.
|
||||
if mm_path[-1][0] not in string.ascii_uppercase:
|
||||
LOG.error(_("Matchmaker could not be loaded.\n"
|
||||
"rpc_zmq_matchmaker is not a class."))
|
||||
raise RPCException(_("Error loading Matchmaker."))
|
||||
|
||||
mm_impl = importutils.import_module(mm_module)
|
||||
mm_constructor = getattr(mm_impl, mm_class)
|
||||
matchmaker = mm_constructor()
|
||||
|
||||
|
||||
register_opts(cfg.CONF)
|
|
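# Editor's sketch (not part of the original module): how a caller drives
# this driver once it is selected as the rpc backend.  The topic 'sensor',
# the message contents, and the `ctxt` argument (the caller's RpcContext)
# are hypothetical.
def _example_usage(ctxt):
    # Fire-and-forget: _multi_send() asks the matchmaker for hosts and
    # spawns one _cast() greenthread per matching queue.
    cast(CONF, ctxt, 'sensor', {'method': 'update', 'args': {'value': 42}})

    # Round trip: blocks on the local zmq_replies SUB socket until the
    # reply arrives or rpc_response_timeout elapses.
    return call(CONF, ctxt, 'sensor', {'method': 'get_reading', 'args': {}})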
@@ -1,258 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import contextlib
import itertools
import json

from kwapi.openstack.common import cfg
from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import log as logging


matchmaker_opts = [
    # Matchmaker ring file
    cfg.StrOpt('matchmaker_ringfile',
               default='/etc/nova/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager


class MatchMakerException(Exception):
    """Signifies a match could not be found."""
    message = _("Match not found by MatchMaker.")


class Exchange(object):
    """Implements lookups.

    Subclass this to support hashtables, dns, etc.
    """
    def __init__(self):
        pass

    def run(self, key):
        raise NotImplementedError()


class Binding(object):
    """A binding on which to perform a lookup."""
    def __init__(self):
        pass

    def test(self, key):
        raise NotImplementedError()


class MatchMakerBase(object):
    """Match Maker Base Class."""

    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
        self.bindings = []

    def add_binding(self, binding, rule, last=True):
        self.bindings.append((binding, rule, False, last))

    # NOTE(ewindisch): kept the following method in case we implement the
    #                  underlying support.
    # def add_negate_binding(self, binding, rule, last=True):
    #     self.bindings.append((binding, rule, True, last))

    def queues(self, key):
        workers = []

        # bit is for negate bindings - if we choose to implement it.
        # last stops processing rules if this matches.
        for (binding, exchange, bit, last) in self.bindings:
            if binding.test(key):
                workers.extend(exchange.run(key))

                # Support last.
                if last:
                    return workers
        return workers


class DirectBinding(Binding):
    """Specifies a host in the key via a '.' character.

    Although dots are used in the key, the behavior here is
    that it maps directly to a host, thus direct.
    """
    def test(self, key):
        if '.' in key:
            return True
        return False


class TopicBinding(Binding):
    """Matches a 'bare' key without dots.

    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology, as the behavior here matches
    that of a topic exchange (whereas where there are dots, behavior
    matches that of a direct exchange).
    """
    def test(self, key):
        if '.' not in key:
            return True
        return False


class FanoutBinding(Binding):
    """Match on fanout keys, where the key starts with 'fanout~'."""
    def test(self, key):
        if key.startswith('fanout~'):
            return True
        return False


class StubExchange(Exchange):
    """Exchange that does nothing."""
    def run(self, key):
        return [(key, None)]


class RingExchange(Exchange):
    """Match Maker where hosts are loaded from a static file containing
    a hashmap (JSON formatted).

    __init__ takes an optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ringfile.
    """
    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            with open(CONF.matchmaker_ringfile, 'r') as fh:
                self.ring = json.load(fh)

        self.ring0 = {}
        for k in self.ring.keys():
            self.ring0[k] = itertools.cycle(self.ring[k])

    def _ring_has(self, key):
        if key in self.ring0:
            return True
        return False


class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        if not self._ring_has(key):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (key, )
            )
            return []
        host = next(self.ring0[key])
        return [(key + '.' + host, host)]


class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        # Assume starts with "fanout~", strip it for lookup.
        nkey = key.split('fanout~')[1:][0]
        if not self._ring_has(nkey):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (nkey, )
            )
            return []
        return map(lambda x: (key + '.' + x, x), self.ring[nkey])


class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self):
        super(LocalhostExchange, self).__init__()

    def run(self, key):
        return [(key.split('.')[0] + '.localhost', 'localhost')]


class DirectExchange(Exchange):
    """Exchange where all topic keys are split, sending to the second half.

    i.e. "compute.host" sends a message to "compute" running on "host".
    """
    def __init__(self):
        super(DirectExchange, self).__init__()

    def run(self, key):
        b, e = key.split('.', 1)
        return [(b, e)]


class MatchMakerRing(MatchMakerBase):
    """Match Maker where hosts are loaded from a static hashmap."""
    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))


class MatchMakerLocalhost(MatchMakerBase):
    """Match Maker where all bare topics resolve to localhost.

    Useful for testing.
    """
    def __init__(self):
        super(MatchMakerLocalhost, self).__init__()
        self.add_binding(FanoutBinding(), LocalhostExchange())
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), LocalhostExchange())


class MatchMakerStub(MatchMakerBase):
    """Match Maker where topics are untouched.

    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is known (i.e. zeromq).
    """
    def __init__(self):
        super(MatchMakerStub, self).__init__()

        self.add_binding(FanoutBinding(), StubExchange())
        self.add_binding(DirectBinding(), StubExchange())
        self.add_binding(TopicBinding(), StubExchange())
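# Editor's sketch (not part of the original module): how the bindings
# cooperate, using an in-memory ring so no ringfile is needed.  The topic
# and host names are made up.
def _example_ring_lookup():
    mm = MatchMakerRing({'sensor': ['host-a', 'host-b']})

    # Bare topic -> TopicBinding -> RoundRobinRingExchange: one host per
    # call, rotating through the ring.
    assert mm.queues('sensor') == [('sensor.host-a', 'host-a')]
    assert mm.queues('sensor') == [('sensor.host-b', 'host-b')]

    # Dotted key -> DirectBinding -> DirectExchange: straight to a host.
    assert mm.queues('sensor.host-a') == [('sensor', 'host-a')]

    # 'fanout~' prefix -> FanoutBinding -> FanoutRingExchange: all hosts.
    assert mm.queues('fanout~sensor') == [('fanout~sensor.host-a', 'host-a'),
                                          ('fanout~sensor.host-b', 'host-b')]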
@@ -1,165 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A helper class for proxy objects to remote APIs.

For more information about rpc API version numbers, see:
    rpc/dispatcher.py
"""


from kwapi.openstack.common import rpc


class RpcProxy(object):
    """A helper class for rpc clients.

    This class is a wrapper around the RPC client API. It allows you to
    specify the topic and API version in a single place. This is intended to
    be used as a base class for a class that implements the client side of an
    rpc API.
    """

    def __init__(self, topic, default_version):
        """Initialize an RpcProxy.

        :param topic: The topic to use for all messages.
        :param default_version: The default API version to request in all
               outgoing messages.  This can be overridden on a per-message
               basis.
        """
        self.topic = topic
        self.default_version = default_version
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
        """Helper method to set the version in a message.

        :param msg: The message having a version added to it.
        :param vers: The version number to add to the message.
        """
        msg['version'] = vers if vers else self.default_version

    def _get_topic(self, topic):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    @staticmethod
    def make_msg(method, **kwargs):
        return {'method': method, 'args': kwargs}

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will
               be used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        return rpc.call(context, self._get_topic(topic), msg, timeout)

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will
               be used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: An iterator that lets you process each of the returned
                  values from the remote method as they arrive.
        """
        self._set_version(msg, version)
        return rpc.multicall(context, self._get_topic(topic), msg, timeout)

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.cast() does not wait on any return value from
                  the remote method.
        """
        self._set_version(msg, version)
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        """rpc.fanout_cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.fanout_cast() does not wait on any return value
                  from the remote method.
        """
        self._set_version(msg, version)
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
                       version=None):
        """rpc.cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters.  See rpc.cast_to_server()
               for details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
                              version=None):
        """rpc.fanout_cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters.  See rpc.cast_to_server()
               for details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.fanout_cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)
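# Editor's sketch (not part of the original module): the intended pattern
# is one small RpcProxy subclass per rpc API.  The class, topic, and
# method names below are invented.
class ExampleSensorAPI(RpcProxy):
    def __init__(self):
        super(ExampleSensorAPI, self).__init__(topic='sensor',
                                               default_version='1.0')

    def get_reading(self, context, probe_id):
        # make_msg() builds {'method': ..., 'args': ...}; call() stamps
        # the version onto it and blocks for the remote return value.
        return self.call(context, self.make_msg('get_reading',
                                                probe_id=probe_id))

    def update_reading(self, context, probe_id, value):
        # cast() is fire-and-forget; there is no return value.
        self.cast(context, self.make_msg('update_reading',
                                         probe_id=probe_id, value=value))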
@@ -1,75 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kwapi.openstack.common.gettextutils import _
from kwapi.openstack.common import log as logging
from kwapi.openstack.common import rpc
from kwapi.openstack.common.rpc import dispatcher as rpc_dispatcher
from kwapi.openstack.common import service


LOG = logging.getLogger(__name__)


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host.
    """
    def __init__(self, host, topic, manager=None):
        super(Service, self).__init__()
        self.host = host
        self.topic = topic
        if manager is None:
            self.manager = self
        else:
            self.manager = manager

    def start(self):
        super(Service, self).start()

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager])

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, dispatcher, fanout=True)

        # Hook to allow the manager to do other initializations after
        # the rpc connection is created.
        if callable(getattr(self.manager, 'initialize_service_hook', None)):
            self.manager.initialize_service_hook(self)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway.
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()
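# Editor's sketch (not part of the original module): a manager exposes the
# methods the dispatcher may invoke; all names below are illustrative only.
class _ExampleManager(object):
    RPC_API_VERSION = '1.0'

    def get_reading(self, context, probe_id):
        return {'probe': probe_id, 'value': 0}

    def initialize_service_hook(self, service):
        # Invoked by Service.start() once the consumers are wired up.
        pass


def _example_start(host):
    # Consumes 'sensor', 'sensor.<host>', and the fanout queue for
    # 'sensor', dispatching each message to _ExampleManager.
    svc = Service(host, 'sensor', manager=_ExampleManager())
    svc.start()
    return svc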
@@ -1,71 +0,0 @@
# Copyright (c) 2011-2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Filter support
"""

import inspect

from stevedore import extension


class BaseFilter(object):
    """Base class for all filter classes."""
    def _filter_one(self, obj, filter_properties):
        """Return True if it passes the filter, False otherwise.
        Override this in a subclass.
        """
        return True

    def filter_all(self, filter_obj_list, filter_properties):
        """Yield objects that pass the filter.

        Can be overridden in a subclass, if you need to base filtering
        decisions on all objects.  Otherwise, one can just override
        _filter_one() to filter a single object.
        """
        for obj in filter_obj_list:
            if self._filter_one(obj, filter_properties):
                yield obj


class BaseFilterHandler(object):
    """Base class to handle loading filter classes.

    This class should be subclassed where one needs to use filters.
    """
    def __init__(self, filter_class_type, filter_namespace):
        self.namespace = filter_namespace
        self.filter_class_type = filter_class_type
        self.filter_manager = extension.ExtensionManager(filter_namespace)

    def _is_correct_class(self, obj):
        """Return whether an object is a class of the correct type and
        is not prefixed with an underscore.
        """
        return (inspect.isclass(obj) and
                not obj.__name__.startswith('_') and
                issubclass(obj, self.filter_class_type))

    def get_all_classes(self):
        return [x.plugin for x in self.filter_manager
                if self._is_correct_class(x.plugin)]

    def get_filtered_objects(self, filter_classes, objs,
                             filter_properties):
        for filter_cls in filter_classes:
            objs = filter_cls().filter_all(objs, filter_properties)
        return list(objs)
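# Editor's sketch (not part of the original module): _filter_one() is the
# usual extension point; filter_all() then works as a generator over any
# iterable.  The threshold filter below is invented.
class _MinimumValueFilter(BaseFilter):
    def _filter_one(self, obj, filter_properties):
        return obj >= filter_properties.get('minimum', 0)


def _example_filtering():
    f = _MinimumValueFilter()
    return list(f.filter_all([1, 5, 10], {'minimum': 5}))  # [5, 10]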
@@ -1,41 +0,0 @@
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Scheduler host filters
"""

from kwapi.openstack.common import log as logging
from kwapi.openstack.common.scheduler import filter

LOG = logging.getLogger(__name__)


class BaseHostFilter(filter.BaseFilter):
    """Base class for host filters."""
    def _filter_one(self, obj, filter_properties):
        """Return True if the object passes the filter, otherwise False."""
        return self.host_passes(obj, filter_properties)

    def host_passes(self, host_state, filter_properties):
        """Return True if the HostState passes the filter, otherwise False.
        Override this in a subclass.
        """
        raise NotImplementedError()


class HostFilterHandler(filter.BaseFilterHandler):
    def __init__(self, namespace):
        super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)
@@ -1,30 +0,0 @@
# Copyright (c) 2011-2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from kwapi.openstack.common.scheduler import filters


class AvailabilityZoneFilter(filters.BaseHostFilter):
    """Filters Hosts by availability zone."""

    def host_passes(self, host_state, filter_properties):
        spec = filter_properties.get('request_spec', {})
        props = spec.get('resource_properties', {})
        availability_zone = props.get('availability_zone')

        if availability_zone:
            return (availability_zone ==
                    host_state.service['availability_zone'])
        return True
@@ -1,63 +0,0 @@
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kwapi.openstack.common import log as logging
from kwapi.openstack.common.scheduler import filters
from kwapi.openstack.common.scheduler.filters import extra_specs_ops


LOG = logging.getLogger(__name__)


class CapabilitiesFilter(filters.BaseHostFilter):
    """HostFilter to work with resource (instance & volume) type records."""

    def _satisfies_extra_specs(self, capabilities, resource_type):
        """Check that the capabilities provided by the services satisfy
        the extra specs associated with the resource type.
        """
        extra_specs = resource_type.get('extra_specs', [])
        if not extra_specs:
            return True

        for key, req in extra_specs.iteritems():
            # Either not scope format, or in capabilities scope
            scope = key.split(':')
            if len(scope) > 1 and scope[0] != "capabilities":
                continue
            elif scope[0] == "capabilities":
                del scope[0]

            cap = capabilities
            for index in range(0, len(scope)):
                try:
                    cap = cap.get(scope[index], None)
                except AttributeError:
                    return False
                if cap is None:
                    return False
            if not extra_specs_ops.match(cap, req):
                return False
        return True

    def host_passes(self, host_state, filter_properties):
        """Return True if the host's capabilities satisfy the extra specs
        of the requested resource type.
        """
        # Note(zhiteng) Currently only Cinder and Nova are using
        # this filter, so the resource type is either instance or
        # volume.
        resource_type = filter_properties.get('resource_type')
        if not self._satisfies_extra_specs(host_state.capabilities,
                                           resource_type):
            return False
        return True
@@ -1,68 +0,0 @@
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import operator

# 1. The following operations are supported:
#    =, s==, s!=, s>=, s>, s<=, s<, <in>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
#    it is ignored.
_op_methods = {'=': lambda x, y: float(x) >= float(y),
               '<in>': lambda x, y: y in x,
               '==': lambda x, y: float(x) == float(y),
               '!=': lambda x, y: float(x) != float(y),
               '>=': lambda x, y: float(x) >= float(y),
               '<=': lambda x, y: float(x) <= float(y),
               's==': operator.eq,
               's!=': operator.ne,
               's<': operator.lt,
               's<=': operator.le,
               's>': operator.gt,
               's>=': operator.ge}


def match(value, req):
    words = req.split()

    op = method = None
    if words:
        op = words.pop(0)
        method = _op_methods.get(op)

    if op != '<or>' and not method:
        return value == req

    if value is None:
        return False

    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        while True:
            if words.pop(0) == value:
                return True
            if not words:
                break
            op = words.pop(0)  # remove a keyword <or>
            if not words:
                break
        return False

    try:
        if words and method(value, words[0]):
            return True
    except ValueError:
        pass

    return False
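# Editor's sketch (not part of the original module): a few sample
# evaluations.  Note in particular that a bare '=' compares numerically
# and means "at least", while the s-prefixed operators compare as strings.
def _example_matches():
    assert match('512', '>= 256')               # numeric comparison
    assert not match('512', '= 1024')           # '=' means >= here
    assert match('kvm', 's== kvm')              # string equality
    assert match('qemu', '<or> kvm <or> qemu')  # alternation
    assert match('foo', 'foo')                  # no operator: plain ==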
@@ -1,150 +0,0 @@
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import operator

from kwapi.openstack.common import jsonutils
from kwapi.openstack.common.scheduler import filters


class JsonFilter(filters.BaseHostFilter):
    """Host Filter to allow simple JSON-based grammar for
    selecting hosts.
    """
    def _op_compare(self, args, op):
        """Returns True if the specified operator can successfully
        compare the first item in the args with all the rest.  Will
        return False if only one item is in the list.
        """
        if len(args) < 2:
            return False
        if op is operator.contains:
            bad = args[0] not in args[1:]
        else:
            bad = [arg for arg in args[1:]
                   if not op(args[0], arg)]
        return not bool(bad)

    def _equals(self, args):
        """First term is == all the other terms."""
        return self._op_compare(args, operator.eq)

    def _less_than(self, args):
        """First term is < all the other terms."""
        return self._op_compare(args, operator.lt)

    def _greater_than(self, args):
        """First term is > all the other terms."""
        return self._op_compare(args, operator.gt)

    def _in(self, args):
        """First term is in the set of remaining terms."""
        return self._op_compare(args, operator.contains)

    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        return self._op_compare(args, operator.le)

    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        return self._op_compare(args, operator.ge)

    def _not(self, args):
        """Flip each of the arguments."""
        return [not arg for arg in args]

    def _or(self, args):
        """True if any arg is True."""
        return any(args)

    def _and(self, args):
        """True if all args are True."""
        return all(args)

    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }

    def _parse_string(self, string, host_state):
        """Strings prefixed with $ are capability lookups in the
        form '$variable' where 'variable' is an attribute in the
        HostState class.  If $variable is a dictionary, you may
        use: $variable.dictkey
        """
        if not string:
            return None
        if not string.startswith("$"):
            return string

        path = string[1:].split(".")
        obj = getattr(host_state, path[0], None)
        if obj is None:
            return None
        for item in path[1:]:
            obj = obj.get(item, None)
            if obj is None:
                return None
        return obj

    def _process_filter(self, query, host_state):
        """Recursively parse the query structure."""
        if not query:
            return True
        cmd = query[0]
        method = self.commands[cmd]
        cooked_args = []
        for arg in query[1:]:
            if isinstance(arg, list):
                arg = self._process_filter(arg, host_state)
            elif isinstance(arg, basestring):
                arg = self._parse_string(arg, host_state)
            if arg is not None:
                cooked_args.append(arg)
        result = method(self, cooked_args)
        return result

    def host_passes(self, host_state, filter_properties):
        """Return True if the host can fulfill the requirements
        specified in the query.
        """
        # TODO(zhiteng) Add description for filter_properties structure
        # and scheduler_hints.
        try:
            query = filter_properties['scheduler_hints']['query']
        except KeyError:
            query = None
        if not query:
            return True

        # NOTE(comstud): Not checking capabilities or service for
        # enabled/disabled so that a provided json filter can decide

        result = self._process_filter(jsonutils.loads(query), host_state)
        if isinstance(result, list):
            # If any succeeded, include the host
            result = any(result)
        if result:
            # The host passes the query.
            return True
        return False
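# Editor's sketch (not part of the original module): a query is a
# JSON-encoded prefix-notation expression; '$free_ram_mb' and '$host'
# resolve against attributes of the host_state object.  Names are
# illustrative.
def _example_properties():
    query = ('["and",'
             ' [">=", "$free_ram_mb", 1024],'
             ' ["in", "$host", "host-a", "host-b"]]')
    # JsonFilter().host_passes(host_state, props) is then True only for
    # host-a/host-b with at least 1024 MB of free RAM.
    return {'scheduler_hints': {'query': query}}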
@@ -1,91 +0,0 @@
# Copyright (c) 2011-2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Pluggable Weighing support
"""

import inspect

from stevedore import extension


class WeighedObject(object):
    """Object with weight information."""
    def __init__(self, obj, weight):
        self.obj = obj
        self.weight = weight

    def __repr__(self):
        return "<WeighedObject '%s': %s>" % (self.obj, self.weight)


class BaseWeigher(object):
    """Base class for pluggable weighers."""
    def _weight_multiplier(self):
        """How weighted this weigher should be.  Normally this would
        be overridden in a subclass based on a config value.
        """
        return 1.0

    def _weigh_object(self, obj, weight_properties):
        """Override in a subclass to specify a weight for a specific
        object.
        """
        return 0.0

    def weigh_objects(self, weighed_obj_list, weight_properties):
        """Weigh multiple objects.  Override in a subclass if you need
        access to all objects in order to manipulate weights.
        """
        constant = self._weight_multiplier()
        for obj in weighed_obj_list:
            obj.weight += (constant *
                           self._weigh_object(obj.obj, weight_properties))


class BaseWeightHandler(object):
    object_class = WeighedObject

    def __init__(self, weighed_object_type, weight_namespace):
        self.namespace = weight_namespace
        self.weighed_object_type = weighed_object_type
        self.weight_manager = extension.ExtensionManager(weight_namespace)

    def _is_correct_class(self, obj):
        """Return whether an object is a class of the correct type and
        is not prefixed with an underscore.
        """
        return (inspect.isclass(obj) and
                not obj.__name__.startswith('_') and
                issubclass(obj, self.weighed_object_type))

    def get_all_classes(self):
        return [x.plugin for x in self.weight_manager
                if self._is_correct_class(x.plugin)]

    def get_weighed_objects(self, weigher_classes, obj_list,
                            weighing_properties):
        """Return a sorted (highest score first) list of WeighedObjects."""

        if not obj_list:
            return []

        weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
        for weigher_cls in weigher_classes:
            weigher = weigher_cls()
            weigher.weigh_objects(weighed_objs, weighing_properties)

        return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
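# Editor's sketch (not part of the original module): a weigher only has to
# score a single object; weigh_objects() accumulates the multiplied scores
# in place.  The _Host stand-in and its free_ram_mb attribute are invented.
class _Host(object):
    def __init__(self, free_ram_mb):
        self.free_ram_mb = free_ram_mb


class _RamWeigher(BaseWeigher):
    def _weigh_object(self, obj, weight_properties):
        return float(obj.free_ram_mb)


def _example_weighing():
    objs = [WeighedObject(_Host(512), 0.0), WeighedObject(_Host(2048), 0.0)]
    _RamWeigher().weigh_objects(objs, {})
    ranked = sorted(objs, key=lambda x: x.weight, reverse=True)
    return ranked[0].obj.free_ram_mb  # 2048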
@@ -1,45 +0,0 @@
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Scheduler host weights
"""


from kwapi.openstack.common.scheduler import weight


class WeighedHost(weight.WeighedObject):
    def to_dict(self):
        return {
            'weight': self.weight,
            'host': self.obj.host,
        }

    def __repr__(self):
        return ("WeighedHost [host: %s, weight: %s]" %
                (self.obj.host, self.weight))


class BaseHostWeigher(weight.BaseWeigher):
    """Base class for host weights."""
    pass


class HostWeightHandler(weight.BaseWeightHandler):
    object_class = WeighedHost

    def __init__(self, namespace):
        super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace)
@@ -1,332 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Generic Node base class for all workers that run on hosts."""
|
||||
|
||||
import errno
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
import extras
|
||||
import logging as std_logging
|
||||
|
||||
from kwapi.openstack.common import cfg
|
||||
from kwapi.openstack.common import eventlet_backdoor
|
||||
from kwapi.openstack.common.gettextutils import _
|
||||
from kwapi.openstack.common import log as logging
|
||||
from kwapi.openstack.common import threadgroup
|
||||
|
||||
|
||||
rpc = extras.try_import('openstack.common.rpc')
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Launcher(object):
|
||||
"""Launch one or more services and wait for them to complete."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the service launcher.
|
||||
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
self._services = threadgroup.ThreadGroup('launcher')
|
||||
eventlet_backdoor.initialize_if_enabled()
|
||||
|
||||
@staticmethod
|
||||
def run_service(service):
|
||||
"""Start and wait for a service to finish.
|
||||
|
||||
:param service: service to run and wait for.
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
service.start()
|
||||
service.wait()
|
||||
|
||||
def launch_service(self, service):
|
||||
"""Load and start the given service.
|
||||
|
||||
:param service: The service you would like to start.
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
self._services.add_thread(self.run_service, service)
|
||||
|
||||
def stop(self):
|
||||
"""Stop all services which are currently running.
|
||||
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
self._services.stop()
|
||||
|
||||
def wait(self):
|
||||
"""Waits until all services have been stopped, and then returns.
|
||||
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
self._services.wait()
|
||||
|
||||
|
||||
class SignalExit(SystemExit):
|
||||
def __init__(self, signo, exccode=1):
|
||||
super(SignalExit, self).__init__(exccode)
|
||||
self.signo = signo
|
||||
|
||||
|
||||
class ServiceLauncher(Launcher):
|
||||
def _handle_signal(self, signo, frame):
|
||||
# Allow the process to be killed again and die from natural causes
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
||||
|
||||
raise SignalExit(signo)
|
||||
|
||||
def wait(self):
|
||||
signal.signal(signal.SIGTERM, self._handle_signal)
|
||||
signal.signal(signal.SIGINT, self._handle_signal)
|
||||
|
||||
LOG.debug(_('Full set of CONF:'))
|
||||
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
||||
|
||||
status = None
|
||||
try:
|
||||
super(ServiceLauncher, self).wait()
|
||||
except SignalExit as exc:
|
||||
signame = {signal.SIGTERM: 'SIGTERM',
|
||||
signal.SIGINT: 'SIGINT'}[exc.signo]
|
||||
LOG.info(_('Caught %s, exiting'), signame)
|
||||
status = exc.code
|
||||
except SystemExit as exc:
|
||||
status = exc.code
|
||||
finally:
|
||||
if rpc:
|
||||
rpc.cleanup()
|
||||
self.stop()
|
||||
return status
|
||||
|
||||
|
||||
class ServiceWrapper(object):
|
||||
def __init__(self, service, workers):
|
||||
self.service = service
|
||||
self.workers = workers
|
||||
self.children = set()
|
||||
self.forktimes = []
|
||||
|
||||
|
||||
class ProcessLauncher(object):
|
||||
def __init__(self):
|
||||
self.children = {}
|
||||
self.sigcaught = None
|
||||
self.running = True
|
||||
rfd, self.writepipe = os.pipe()
|
||||
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
|
||||
|
||||
signal.signal(signal.SIGTERM, self._handle_signal)
|
||||
signal.signal(signal.SIGINT, self._handle_signal)
|
||||
|
||||
def _handle_signal(self, signo, frame):
|
||||
self.sigcaught = signo
|
||||
self.running = False
|
||||
|
||||
# Allow the process to be killed again and die from natural causes
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
||||
|
||||
def _pipe_watcher(self):
|
||||
# This will block until the write end is closed when the parent
|
||||
# dies unexpectedly
|
||||
self.readpipe.read()
|
||||
|
||||
LOG.info(_('Parent process has died unexpectedly, exiting'))
|
||||
|
||||
sys.exit(1)
|
||||
|
||||
def _child_process(self, service):
|
||||
# Setup child signal handlers differently
|
||||
def _sigterm(*args):
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
raise SignalExit(signal.SIGTERM)
|
||||
|
||||
signal.signal(signal.SIGTERM, _sigterm)
|
||||
# Block SIGINT and let the parent send us a SIGTERM
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
|
||||
# Reopen the eventlet hub to make sure we don't share an epoll
|
||||
# fd with parent and/or siblings, which would be bad
|
||||
eventlet.hubs.use_hub()
|
||||
|
||||
# Close write to ensure only parent has it open
|
||||
os.close(self.writepipe)
|
||||
# Create greenthread to watch for parent to close pipe
|
||||
eventlet.spawn_n(self._pipe_watcher)
|
||||
|
||||
# Reseed random number generator
|
||||
random.seed()
|
||||
|
||||
launcher = Launcher()
|
||||
launcher.run_service(service)
|
||||
|
||||
def _start_child(self, wrap):
|
||||
if len(wrap.forktimes) > wrap.workers:
|
||||
# Limit ourselves to one process a second (over the period of
|
||||
# number of workers * 1 second). This will allow workers to
|
||||
# start up quickly but ensure we don't fork off children that
|
||||
# die instantly too quickly.
|
||||
if time.time() - wrap.forktimes[0] < wrap.workers:
|
||||
LOG.info(_('Forking too fast, sleeping'))
|
||||
time.sleep(1)
|
||||
|
||||
wrap.forktimes.pop(0)
|
||||
|
||||
wrap.forktimes.append(time.time())
|
||||
|
||||
pid = os.fork()
|
||||
if pid == 0:
|
||||
# NOTE(johannes): All exceptions are caught to ensure this
|
||||
# doesn't fallback into the loop spawning children. It would
|
||||
# be bad for a child to spawn more children.
|
||||
status = 0
|
||||
try:
|
||||
self._child_process(wrap.service)
|
||||
except SignalExit as exc:
|
||||
signame = {signal.SIGTERM: 'SIGTERM',
|
||||
signal.SIGINT: 'SIGINT'}[exc.signo]
|
||||
LOG.info(_('Caught %s, exiting'), signame)
|
||||
status = exc.code
|
||||
except SystemExit as exc:
|
||||
status = exc.code
|
||||
except BaseException:
|
||||
LOG.exception(_('Unhandled exception'))
|
||||
status = 2
|
||||
finally:
|
||||
wrap.service.stop()
|
||||
|
||||
os._exit(status)
|
||||
|
||||
LOG.info(_('Started child %d'), pid)
|
||||
|
||||
wrap.children.add(pid)
|
||||
self.children[pid] = wrap
|
||||
|
||||
return pid
|
||||
|
||||
def launch_service(self, service, workers=1):
|
||||
wrap = ServiceWrapper(service, workers)
|
||||
|
||||
LOG.info(_('Starting %d workers'), wrap.workers)
|
||||
while self.running and len(wrap.children) < wrap.workers:
|
||||
self._start_child(wrap)
|
||||
|
||||
def _wait_child(self):
|
||||
try:
|
||||
# Don't block if no child processes have exited
|
||||
pid, status = os.waitpid(0, os.WNOHANG)
|
||||
if not pid:
|
||||
return None
|
||||
except OSError as exc:
|
||||
if exc.errno not in (errno.EINTR, errno.ECHILD):
|
||||
raise
|
||||
return None
|
||||
|
||||
if os.WIFSIGNALED(status):
|
||||
sig = os.WTERMSIG(status)
|
||||
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
|
||||
dict(pid=pid, sig=sig))
|
||||
else:
|
||||
code = os.WEXITSTATUS(status)
|
||||
LOG.info(_('Child %(pid)s exited with status %(code)d'),
|
||||
dict(pid=pid, code=code))
|
||||
|
||||
if pid not in self.children:
|
||||
LOG.warning(_('pid %d not in child list'), pid)
|
||||
return None
|
||||
|
||||
wrap = self.children.pop(pid)
|
||||
wrap.children.remove(pid)
|
||||
return wrap
|
||||
|
||||
def wait(self):
|
||||
"""Loop waiting on children to die and respawning as necessary"""
|
||||
|
||||
LOG.debug(_('Full set of CONF:'))
|
||||
CONF.log_opt_values(LOG, std_logging.DEBUG)
|
||||
|
||||
while self.running:
|
||||
wrap = self._wait_child()
|
||||
if not wrap:
|
||||
# Yield to other threads if no children have exited
|
||||
# Sleep for a short time to avoid excessive CPU usage
|
||||
# (see bug #1095346)
|
||||
eventlet.greenthread.sleep(.01)
|
||||
continue
|
||||
|
||||
while self.running and len(wrap.children) < wrap.workers:
|
||||
self._start_child(wrap)
|
||||
|
||||
if self.sigcaught:
|
||||
signame = {signal.SIGTERM: 'SIGTERM',
|
||||
signal.SIGINT: 'SIGINT'}[self.sigcaught]
|
||||
LOG.info(_('Caught %s, stopping children'), signame)
|
||||
|
||||
for pid in self.children:
|
||||
try:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ESRCH:
|
||||
raise
|
||||
|
||||
# Wait for children to die
|
||||
if self.children:
|
||||
LOG.info(_('Waiting on %d children to exit'), len(self.children))
|
||||
while self.children:
|
||||
self._wait_child()
|
||||
|
||||
|
||||
class Service(object):
|
||||
"""Service object for binaries running on hosts."""
|
||||
|
||||
def __init__(self, threads=1000):
|
||||
self.tg = threadgroup.ThreadGroup('service', threads)
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
self.tg.stop()
|
||||
|
||||
def wait(self):
|
||||
self.tg.wait()
|
||||
|
||||
|
||||
def launch(service, workers=None):
|
||||
if workers:
|
||||
launcher = ProcessLauncher()
|
||||
launcher.launch_service(service, workers=workers)
|
||||
else:
|
||||
launcher = ServiceLauncher()
|
||||
launcher.launch_service(service)
|
||||
return launcher
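For reference, a minimal sketch of how the launcher API above is typically driven; the EchoService subclass and the worker count are illustrative only, not part of this change:

from kwapi.openstack.common import service

class EchoService(service.Service):
    def start(self):
        # hypothetical: register a periodic no-op on the service's thread group
        self.tg.add_timer(60, lambda: None)

launcher = service.launch(EchoService(), workers=2)  # forks 2 children via ProcessLauncher
launcher.wait()                                      # respawns children until signalled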
|
|
@ -1,374 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Utilities with minimum-depends for use in setup.py
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from setuptools.command import sdist
|
||||
|
||||
|
||||
def parse_mailmap(mailmap='.mailmap'):
|
||||
mapping = {}
|
||||
if os.path.exists(mailmap):
|
||||
with open(mailmap, 'r') as fp:
|
||||
for l in fp:
|
||||
l = l.strip()
|
||||
if not l.startswith('#') and ' ' in l:
|
||||
canonical_email, alias = [x for x in l.split(' ')
|
||||
if x.startswith('<')]
|
||||
mapping[alias] = canonical_email
|
||||
return mapping
|
||||
|
||||
|
||||
def canonicalize_emails(changelog, mapping):
|
||||
"""Takes in a string and an email alias mapping and replaces all
|
||||
instances of the aliases in the string with their real email.
|
||||
"""
|
||||
for alias, email in mapping.iteritems():
|
||||
changelog = changelog.replace(alias, email)
|
||||
return changelog
|
||||
|
||||
|
||||
# Get requirements from the first file that exists
|
||||
def get_reqs_from_files(requirements_files):
|
||||
for requirements_file in requirements_files:
|
||||
if os.path.exists(requirements_file):
|
||||
with open(requirements_file, 'r') as fil:
|
||||
return fil.read().split('\n')
|
||||
return []
|
||||
|
||||
|
||||
def parse_requirements(requirements_files=['requirements.txt',
|
||||
'tools/pip-requires']):
|
||||
requirements = []
|
||||
for line in get_reqs_from_files(requirements_files):
|
||||
# For the requirements list, we need to inject only the portion
|
||||
# after egg= so that distutils knows the package it's looking for
|
||||
# such as:
|
||||
# -e git://github.com/openstack/nova/master#egg=nova
|
||||
if re.match(r'\s*-e\s+', line):
|
||||
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
|
||||
line))
|
||||
# such as:
|
||||
# http://github.com/openstack/nova/zipball/master#egg=nova
|
||||
elif re.match(r'\s*https?:', line):
|
||||
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
|
||||
line))
|
||||
# -f lines are for index locations, and don't get used here
|
||||
elif re.match(r'\s*-f\s+', line):
|
||||
pass
|
||||
# argparse is part of the standard library starting with 2.7
|
||||
# adding it to the requirements list screws distro installs
|
||||
elif line == 'argparse' and sys.version_info >= (2, 7):
|
||||
pass
|
||||
else:
|
||||
requirements.append(line)
|
||||
|
||||
return requirements
|
||||
|
||||
|
||||
def parse_dependency_links(requirements_files=['requirements.txt',
|
||||
'tools/pip-requires']):
|
||||
dependency_links = []
|
||||
# dependency_links inject alternate locations to find packages listed
|
||||
# in requirements
|
||||
for line in get_reqs_from_files(requirements_files):
|
||||
# skip comments and blank lines
|
||||
if re.match(r'(\s*#)|(\s*$)', line):
|
||||
continue
|
||||
# lines with -e or -f need the whole line, minus the flag
|
||||
if re.match(r'\s*-[ef]\s+', line):
|
||||
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
|
||||
# lines that are only urls can go in unmolested
|
||||
elif re.match(r'\s*https?:', line):
|
||||
dependency_links.append(line)
|
||||
return dependency_links
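For illustration, given a requirements line in the -e VCS form, the two parsers above split it as follows (the URL and package name are examples only):

# Given a pip-requires line like:
#     -e git://github.com/openstack/nova/master#egg=nova
parse_requirements(['tools/pip-requires'])       # would contain 'nova'
parse_dependency_links(['tools/pip-requires'])   # would contain 'git://github.com/openstack/nova/master#egg=nova'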
|
||||
|
||||
|
||||
def write_requirements():
|
||||
venv = os.environ.get('VIRTUAL_ENV', None)
|
||||
if venv is not None:
|
||||
with open("requirements.txt", "w") as req_file:
|
||||
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
|
||||
stdout=subprocess.PIPE)
|
||||
requirements = output.communicate()[0].strip()
|
||||
req_file.write(requirements)
|
||||
|
||||
|
||||
def _run_shell_command(cmd):
|
||||
if os.name == 'nt':
|
||||
output = subprocess.Popen(["cmd.exe", "/C", cmd],
|
||||
stdout=subprocess.PIPE)
|
||||
else:
|
||||
output = subprocess.Popen(["/bin/sh", "-c", cmd],
|
||||
stdout=subprocess.PIPE)
|
||||
out = output.communicate()
|
||||
if len(out) == 0:
|
||||
return None
|
||||
if len(out[0].strip()) == 0:
|
||||
return None
|
||||
return out[0].strip()
|
||||
|
||||
|
||||
def _get_git_next_version_suffix(branch_name):
|
||||
datestamp = datetime.datetime.now().strftime('%Y%m%d')
|
||||
if branch_name == 'milestone-proposed':
|
||||
revno_prefix = "r"
|
||||
else:
|
||||
revno_prefix = ""
|
||||
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
|
||||
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
|
||||
milestonever = _run_shell_command(milestone_cmd)
|
||||
if milestonever:
|
||||
first_half = "%s~%s" % (milestonever, datestamp)
|
||||
else:
|
||||
first_half = datestamp
|
||||
|
||||
post_version = _get_git_post_version()
|
||||
# post version should look like:
|
||||
# 0.1.1.4.gcc9e28a
|
||||
# where the bit after the last . is the short sha, and the bit between
|
||||
# the last and second to last is the revno count
|
||||
(revno, sha) = post_version.split(".")[-2:]
|
||||
second_half = "%s%s.%s" % (revno_prefix, revno, sha)
|
||||
return ".".join((first_half, second_half))
|
||||
|
||||
|
||||
def _get_git_current_tag():
|
||||
return _run_shell_command("git tag --contains HEAD")
|
||||
|
||||
|
||||
def _get_git_tag_info():
|
||||
return _run_shell_command("git describe --tags")
|
||||
|
||||
|
||||
def _get_git_post_version():
|
||||
current_tag = _get_git_current_tag()
|
||||
if current_tag is not None:
|
||||
return current_tag
|
||||
else:
|
||||
tag_info = _get_git_tag_info()
|
||||
if tag_info is None:
|
||||
base_version = "0.0"
|
||||
cmd = "git --no-pager log --oneline"
|
||||
out = _run_shell_command(cmd)
|
||||
revno = len(out.split("\n"))
|
||||
sha = _run_shell_command("git describe --always")
|
||||
else:
|
||||
tag_infos = tag_info.split("-")
|
||||
base_version = "-".join(tag_infos[:-2])
|
||||
(revno, sha) = tag_infos[-2:]
|
||||
return "%s.%s.%s" % (base_version, revno, sha)
|
||||
|
||||
|
||||
def write_git_changelog():
|
||||
"""Write a changelog based on the git changelog."""
|
||||
new_changelog = 'ChangeLog'
|
||||
if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
|
||||
if os.path.isdir('.git'):
|
||||
git_log_cmd = 'git log --stat'
|
||||
changelog = _run_shell_command(git_log_cmd)
|
||||
mailmap = parse_mailmap()
|
||||
with open(new_changelog, "w") as changelog_file:
|
||||
changelog_file.write(canonicalize_emails(changelog, mailmap))
|
||||
else:
|
||||
open(new_changelog, 'w').close()
|
||||
|
||||
|
||||
def generate_authors():
|
||||
"""Create AUTHORS file using git commits."""
|
||||
jenkins_email = 'jenkins@review.(openstack|stackforge).org'
|
||||
old_authors = 'AUTHORS.in'
|
||||
new_authors = 'AUTHORS'
|
||||
if not os.getenv('SKIP_GENERATE_AUTHORS'):
|
||||
if os.path.isdir('.git'):
|
||||
# don't include jenkins email address in AUTHORS file
|
||||
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
|
||||
"egrep -v '" + jenkins_email + "'")
|
||||
changelog = _run_shell_command(git_log_cmd)
|
||||
mailmap = parse_mailmap()
|
||||
with open(new_authors, 'w') as new_authors_fh:
|
||||
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
|
||||
if os.path.exists(old_authors):
|
||||
with open(old_authors, "r") as old_authors_fh:
|
||||
new_authors_fh.write('\n' + old_authors_fh.read())
|
||||
else:
|
||||
open(new_authors, 'w').close()
|
||||
|
||||
|
||||
_rst_template = """%(heading)s
|
||||
%(underline)s
|
||||
|
||||
.. automodule:: %(module)s
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
"""
|
||||
|
||||
|
||||
def read_versioninfo(project):
|
||||
"""Read the versioninfo file. If it doesn't exist, we're in a github
|
||||
zipball, and there's really no way to know what version we really
|
||||
are, but that should be ok, because the utility of that should be
|
||||
just about nil if this code path is in use in the first place."""
|
||||
versioninfo_path = os.path.join(project, 'versioninfo')
|
||||
if os.path.exists(versioninfo_path):
|
||||
with open(versioninfo_path, 'r') as vinfo:
|
||||
version = vinfo.read().strip()
|
||||
else:
|
||||
version = "0.0.0"
|
||||
return version
|
||||
|
||||
|
||||
def write_versioninfo(project, version):
|
||||
"""Write a simple file containing the version of the package."""
|
||||
with open(os.path.join(project, 'versioninfo'), 'w') as fil:
|
||||
fil.write("%s\n" % version)
|
||||
|
||||
|
||||
def get_cmdclass():
|
||||
"""Return dict of commands to run from setup.py."""
|
||||
|
||||
cmdclass = dict()
|
||||
|
||||
def _find_modules(arg, dirname, files):
|
||||
for filename in files:
|
||||
if filename.endswith('.py') and filename != '__init__.py':
|
||||
arg["%s.%s" % (dirname.replace('/', '.'),
|
||||
filename[:-3])] = True
|
||||
|
||||
class LocalSDist(sdist.sdist):
|
||||
"""Builds the ChangeLog and Authors files from VC first."""
|
||||
|
||||
def run(self):
|
||||
write_git_changelog()
|
||||
generate_authors()
|
||||
# sdist.sdist is an old style class, can't use super()
|
||||
sdist.sdist.run(self)
|
||||
|
||||
cmdclass['sdist'] = LocalSDist
|
||||
|
||||
# If Sphinx is installed on the box running setup.py,
|
||||
# enable setup.py to build the documentation, otherwise,
|
||||
# just ignore it
|
||||
try:
|
||||
from sphinx.setup_command import BuildDoc
|
||||
|
||||
class LocalBuildDoc(BuildDoc):
|
||||
|
||||
builders = ['html', 'man']
|
||||
|
||||
def generate_autoindex(self):
|
||||
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
|
||||
modules = {}
|
||||
option_dict = self.distribution.get_option_dict('build_sphinx')
|
||||
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
|
||||
if not os.path.exists(source_dir):
|
||||
os.makedirs(source_dir)
|
||||
for pkg in self.distribution.packages:
|
||||
if '.' not in pkg:
|
||||
os.path.walk(pkg, _find_modules, modules)
|
||||
module_list = modules.keys()
|
||||
module_list.sort()
|
||||
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
|
||||
with open(autoindex_filename, 'w') as autoindex:
|
||||
autoindex.write(""".. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
""")
|
||||
for module in module_list:
|
||||
output_filename = os.path.join(source_dir,
|
||||
"%s.rst" % module)
|
||||
heading = "The :mod:`%s` Module" % module
|
||||
underline = "=" * len(heading)
|
||||
values = dict(module=module, heading=heading,
|
||||
underline=underline)
|
||||
|
||||
print "Generating %s" % output_filename
|
||||
with open(output_filename, 'w') as output_file:
|
||||
output_file.write(_rst_template % values)
|
||||
autoindex.write(" %s.rst\n" % module)
|
||||
|
||||
def run(self):
|
||||
if not os.getenv('SPHINX_DEBUG'):
|
||||
self.generate_autoindex()
|
||||
|
||||
for builder in self.builders:
|
||||
self.builder = builder
|
||||
self.finalize_options()
|
||||
self.project = self.distribution.get_name()
|
||||
self.version = self.distribution.get_version()
|
||||
self.release = self.distribution.get_version()
|
||||
BuildDoc.run(self)
|
||||
|
||||
class LocalBuildLatex(LocalBuildDoc):
|
||||
builders = ['latex']
|
||||
|
||||
cmdclass['build_sphinx'] = LocalBuildDoc
|
||||
cmdclass['build_sphinx_latex'] = LocalBuildLatex
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
return cmdclass
|
||||
|
||||
|
||||
def get_git_branchname():
|
||||
for branch in _run_shell_command("git branch --color=never").split("\n"):
|
||||
if branch.startswith('*'):
|
||||
_branch_name = branch.split()[1].strip()
|
||||
if _branch_name == "(no":
|
||||
_branch_name = "no-branch"
|
||||
return _branch_name
|
||||
|
||||
|
||||
def get_pre_version(projectname, base_version):
|
||||
"""Return a version which is leading up to a version that will
|
||||
be released in the future."""
|
||||
if os.path.isdir('.git'):
|
||||
current_tag = _get_git_current_tag()
|
||||
if current_tag is not None:
|
||||
version = current_tag
|
||||
else:
|
||||
branch_name = os.getenv('BRANCHNAME',
|
||||
os.getenv('GERRIT_REFNAME',
|
||||
get_git_branchname()))
|
||||
version_suffix = _get_git_next_version_suffix(branch_name)
|
||||
version = "%s~%s" % (base_version, version_suffix)
|
||||
write_versioninfo(projectname, version)
|
||||
return version
|
||||
else:
|
||||
version = read_versioninfo(projectname)
|
||||
return version
|
||||
|
||||
|
||||
def get_post_version(projectname):
|
||||
"""Return a version which is equal to the tag that's on the current
|
||||
revision if there is one, or tag plus number of additional revisions
|
||||
if the current revision has no tag."""
|
||||
|
||||
if os.path.isdir('.git'):
|
||||
version = _get_git_post_version()
|
||||
write_versioninfo(projectname, version)
|
||||
return version
|
||||
return read_versioninfo(projectname)
|
|
@ -1,59 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
System-level utilities and helper functions.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def int_from_bool_as_string(subject):
|
||||
"""
|
||||
Interpret a string as a boolean and return either 1 or 0.
|
||||
|
||||
Any string value in:
|
||||
|
||||
('True', 'true', 'On', 'on', '1')
|
||||
|
||||
is interpreted as a boolean True.
|
||||
|
||||
Useful for JSON-decoded stuff and config file parsing
|
||||
"""
|
||||
return bool_from_string(subject) and 1 or 0
|
||||
|
||||
|
||||
def bool_from_string(subject):
|
||||
"""
|
||||
Interpret a string as a boolean.
|
||||
|
||||
Any string value in:
|
||||
|
||||
('True', 'true', 'On', 'on', 'Yes', 'yes', '1')
|
||||
|
||||
is interpreted as a boolean True.
|
||||
|
||||
Useful for JSON-decoded stuff and config file parsing
|
||||
"""
|
||||
if isinstance(subject, bool):
|
||||
return subject
|
||||
if isinstance(subject, basestring):
|
||||
if subject.strip().lower() in ('true', 'on', 'yes', '1'):
|
||||
return True
|
||||
return False
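Behaviour of the two helpers above, for illustration (inputs are arbitrary):

bool_from_string('Yes')          # True: 'yes' is in the accepted set
bool_from_string('0')            # False: only '1' of the numerals is truthy
int_from_bool_as_string('on')    # 1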
|
|
@ -1,68 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Utilities for unit tests."""
|
||||
|
||||
import functools
|
||||
import nose
|
||||
|
||||
|
||||
class skip_test(object):
|
||||
"""Decorator that skips a test."""
|
||||
# TODO(tr3buchet): remember forever what comstud did here
|
||||
def __init__(self, msg):
|
||||
self.message = msg
|
||||
|
||||
def __call__(self, func):
|
||||
@functools.wraps(func)
|
||||
def _skipper(*args, **kw):
|
||||
"""Wrapped skipper function."""
|
||||
raise nose.SkipTest(self.message)
|
||||
return _skipper
|
||||
|
||||
|
||||
class skip_if(object):
|
||||
"""Decorator that skips a test if condition is true."""
|
||||
def __init__(self, condition, msg):
|
||||
self.condition = condition
|
||||
self.message = msg
|
||||
|
||||
def __call__(self, func):
|
||||
@functools.wraps(func)
|
||||
def _skipper(*args, **kw):
|
||||
"""Wrapped skipper function."""
|
||||
if self.condition:
|
||||
raise nose.SkipTest(self.message)
|
||||
func(*args, **kw)
|
||||
return _skipper
|
||||
|
||||
|
||||
class skip_unless(object):
|
||||
"""Decorator that skips a test if condition is not true."""
|
||||
def __init__(self, condition, msg):
|
||||
self.condition = condition
|
||||
self.message = msg
|
||||
|
||||
def __call__(self, func):
|
||||
@functools.wraps(func)
|
||||
def _skipper(*args, **kw):
|
||||
"""Wrapped skipper function."""
|
||||
if not self.condition:
|
||||
raise nose.SkipTest(self.message)
|
||||
func(*args, **kw)
|
||||
return _skipper
|
|
@ -1,116 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from eventlet import greenlet
|
||||
from eventlet import greenpool
|
||||
from eventlet import greenthread
|
||||
|
||||
from kwapi.openstack.common import log as logging
|
||||
from kwapi.openstack.common import loopingcall
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _thread_done(gt, *args, **kwargs):
|
||||
""" Callback function to be passed to GreenThread.link() when we spawn()
|
||||
Calls the :class:`ThreadGroup` to notify it that the thread is done.
|
||||
|
||||
"""
|
||||
kwargs['group'].thread_done(kwargs['thread'])
|
||||
|
||||
|
||||
class Thread(object):
|
||||
""" Wrapper around a greenthread, that holds a reference to the
|
||||
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
|
||||
it is done, so it can be removed from the threads list.
|
||||
"""
|
||||
def __init__(self, name, thread, group):
|
||||
self.name = name
|
||||
self.thread = thread
|
||||
self.thread.link(_thread_done, group=group, thread=self)
|
||||
|
||||
def stop(self):
|
||||
self.thread.kill()
|
||||
|
||||
def wait(self):
|
||||
return self.thread.wait()
|
||||
|
||||
|
||||
class ThreadGroup(object):
|
||||
""" The point of the ThreadGroup classis to:
|
||||
|
||||
* keep track of timers and greenthreads (making it easier to stop them
|
||||
when need be).
|
||||
* provide an easy API to add timers.
|
||||
"""
|
||||
def __init__(self, name, thread_pool_size=10):
|
||||
self.name = name
|
||||
self.pool = greenpool.GreenPool(thread_pool_size)
|
||||
self.threads = []
|
||||
self.timers = []
|
||||
|
||||
def add_timer(self, interval, callback, initial_delay=None,
|
||||
*args, **kwargs):
|
||||
pulse = loopingcall.LoopingCall(callback, *args, **kwargs)
|
||||
pulse.start(interval=interval,
|
||||
initial_delay=initial_delay)
|
||||
self.timers.append(pulse)
|
||||
|
||||
def add_thread(self, callback, *args, **kwargs):
|
||||
gt = self.pool.spawn(callback, *args, **kwargs)
|
||||
th = Thread(callback.__name__, gt, self)
|
||||
self.threads.append(th)
|
||||
|
||||
def thread_done(self, thread):
|
||||
self.threads.remove(thread)
|
||||
|
||||
def stop(self):
|
||||
current = greenthread.getcurrent()
|
||||
for x in self.threads:
|
||||
if x is current:
|
||||
# don't kill the current thread.
|
||||
continue
|
||||
try:
|
||||
x.stop()
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
|
||||
for x in self.timers:
|
||||
try:
|
||||
x.stop()
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
self.timers = []
|
||||
|
||||
def wait(self):
|
||||
for x in self.timers:
|
||||
try:
|
||||
x.wait()
|
||||
except greenlet.GreenletExit:
|
||||
pass
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
current = greenthread.getcurrent()
|
||||
for x in self.threads:
|
||||
if x is current:
|
||||
continue
|
||||
try:
|
||||
x.wait()
|
||||
except greenlet.GreenletExit:
|
||||
pass
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
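A minimal sketch of how this ThreadGroup API is consumed; the worker function is hypothetical:

from kwapi.openstack.common import threadgroup

def worker():
    pass  # hypothetical unit of work

tg = threadgroup.ThreadGroup('example')
tg.add_thread(worker)     # runs once in the green pool
tg.add_timer(30, worker)  # LoopingCall fired every 30 seconds
tg.stop()                 # kill threads and stop timers
tg.wait()                 # join whatever is left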
|
|
@ -98,6 +98,11 @@ def utcnow():
|
|||
return datetime.datetime.utcnow()
|
||||
|
||||
|
||||
def iso8601_from_timestamp(timestamp):
|
||||
"""Returns a iso8601 formated date from timestamp"""
|
||||
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
|
||||
|
||||
|
||||
utcnow.override_time = None
|
||||
|
||||
|
||||
|
@ -162,3 +167,16 @@ def delta_seconds(before, after):
|
|||
except AttributeError:
|
||||
return ((delta.days * 24 * 3600) + delta.seconds +
|
||||
float(delta.microseconds) / (10 ** 6))
|
||||
|
||||
|
||||
def is_soon(dt, window):
|
||||
"""
|
||||
Determines if time is going to happen in the next window seconds.
|
||||
|
||||
:params dt: the time
|
||||
:params window: minimum seconds to remain to consider the time not soon
|
||||
|
||||
:return: True if expiration is within the given duration
|
||||
"""
|
||||
soon = (utcnow() + datetime.timedelta(seconds=window))
|
||||
return normalize_time(dt) < soon
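For illustration, the two helpers added above behave roughly as follows (values are examples):

iso8601_from_timestamp(0)   # isotime() rendering of the Unix epoch, e.g. '1970-01-01T00:00:00Z'
is_soon(utcnow(), 60)       # True: "now" falls inside the next 60-second window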
|
||||
|
|
|
@ -1,39 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 Intel Corporation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
UUID related utilities and helper functions.
|
||||
"""
|
||||
|
||||
import uuid
|
||||
|
||||
|
||||
def generate_uuid():
|
||||
return str(uuid.uuid4())
|
||||
|
||||
|
||||
def is_uuid_like(val):
|
||||
"""Returns validation of a value as a UUID.
|
||||
|
||||
For our purposes, a UUID is a canonical form string:
|
||||
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
|
||||
|
||||
"""
|
||||
try:
|
||||
return str(uuid.UUID(val)) == val
|
||||
except (TypeError, ValueError, AttributeError):
|
||||
return False
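As a quick illustration, is_uuid_like only accepts the canonical dashed form:

is_uuid_like(generate_uuid())                      # True
is_uuid_like('12345678123456781234567812345678')   # False: parses, but str(uuid.UUID(...)) re-adds dashes
is_uuid_like(42)                                    # False: the TypeError/AttributeError is swallowed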
|
|
@ -1,139 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 OpenStack LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Utilities for consuming the auto-generated versioninfo files.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import pkg_resources
|
||||
|
||||
import setup
|
||||
|
||||
|
||||
class VersionInfo(object):
|
||||
|
||||
def __init__(self, package, python_package=None, pre_version=None):
|
||||
"""Object that understands versioning for a package
|
||||
:param package: name of the top level python namespace. For glance,
|
||||
this would be "glance" for python-glanceclient, it
|
||||
would be "glanceclient"
|
||||
:param python_package: optional name of the python project. For
|
||||
glance this can be left unset. For
|
||||
python-glanceclient, this would be
|
||||
"python-glanceclient"
|
||||
:param pre_version: optional version that the project is working towards
|
||||
"""
|
||||
self.package = package
|
||||
if python_package is None:
|
||||
self.python_package = package
|
||||
else:
|
||||
self.python_package = python_package
|
||||
self.pre_version = pre_version
|
||||
self.version = None
|
||||
self._cached_version = None
|
||||
|
||||
def _generate_version(self):
|
||||
"""Defer to the openstack.common.setup routines for making a
|
||||
version from git."""
|
||||
if self.pre_version is None:
|
||||
return setup.get_post_version(self.package)
|
||||
else:
|
||||
return setup.get_pre_version(self.package, self.pre_version)
|
||||
|
||||
def _newer_version(self, pending_version):
|
||||
"""Check to see if we're working with a stale version or not.
|
||||
We expect a version string that either looks like:
|
||||
2012.2~f3~20120708.10.4426392
|
||||
which is an unreleased version of a pre-version, or:
|
||||
0.1.1.4.gcc9e28a
|
||||
which is an unreleased version of a post-version, or:
|
||||
0.1.1
|
||||
which is a release and should match the tag.
|
||||
For now, if we have a date-embedded version, check to see if it's
|
||||
old, and if so re-generate. Otherwise, just deal with it.
|
||||
"""
|
||||
try:
|
||||
version_date = int(self.version.split("~")[-1].split('.')[0])
|
||||
if version_date < int(datetime.date.today().strftime('%Y%m%d')):
|
||||
return self._generate_version()
|
||||
else:
|
||||
return pending_version
|
||||
except Exception:
|
||||
return pending_version
|
||||
|
||||
def version_string_with_vcs(self, always=False):
|
||||
"""Return the full version of the package including suffixes indicating
|
||||
VCS status.
|
||||
|
||||
For instance, if we are working towards the 2012.2 release,
|
||||
canonical_version_string should return 2012.2 if this is a final
|
||||
release, or else something like 2012.2~f1~20120705.20 if it's not.
|
||||
|
||||
:param always: if true, skip all version caching
|
||||
"""
|
||||
if always:
|
||||
self.version = self._generate_version()
|
||||
|
||||
if self.version is None:
|
||||
|
||||
requirement = pkg_resources.Requirement.parse(self.python_package)
|
||||
versioninfo = "%s/versioninfo" % self.package
|
||||
try:
|
||||
raw_version = pkg_resources.resource_string(requirement,
|
||||
versioninfo)
|
||||
self.version = self._newer_version(raw_version.strip())
|
||||
except (IOError, pkg_resources.DistributionNotFound):
|
||||
self.version = self._generate_version()
|
||||
|
||||
return self.version
|
||||
|
||||
def canonical_version_string(self, always=False):
|
||||
"""Return the simple version of the package excluding any suffixes.
|
||||
|
||||
For instance, if we are working towards the 2012.2 release,
|
||||
canonical_version_string should return 2012.2 in all cases.
|
||||
|
||||
:param always: if true, skip all version caching
|
||||
"""
|
||||
return self.version_string_with_vcs(always).split('~')[0]
|
||||
|
||||
def version_string(self, always=False):
|
||||
"""Return the base version of the package.
|
||||
|
||||
For instance, if we are working towards the 2012.2 release,
|
||||
version_string should return 2012.2 if this is a final release, or
|
||||
2012.2-dev if it is not.
|
||||
|
||||
:param always: if true, skip all version caching
|
||||
"""
|
||||
version_parts = self.version_string_with_vcs(always).split('~')
|
||||
if len(version_parts) == 1:
|
||||
return version_parts[0]
|
||||
else:
|
||||
return '%s-dev' % (version_parts[0],)
|
||||
|
||||
def cached_version_string(self, prefix=""):
|
||||
"""Generate an object which will expand in a string context to
|
||||
the results of version_string(). We do this so that we don't
|
||||
call into pkg_resources every time we start up a program when
|
||||
passing version information into the CONF constructor, but
|
||||
rather only do the calculation when and if a version is requested
|
||||
"""
|
||||
if not self._cached_version:
|
||||
self._cached_version = "%s%s" % (prefix,
|
||||
self.version_string())
|
||||
return self._cached_version
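A sketch of how the VersionInfo helper above is typically instantiated; the package name and the printed values are illustrative:

from kwapi.openstack.common import version

version_info = version.VersionInfo('kwapi')
print version_info.version_string()            # e.g. '2013.1' or '2013.1-dev'
print version_info.canonical_version_string()  # e.g. '2013.1'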
|
|
@ -1,733 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Utility methods for working with WSGI servers."""
|
||||
|
||||
import datetime
|
||||
import eventlet
|
||||
import eventlet.wsgi
|
||||
|
||||
eventlet.patcher.monkey_patch(all=False, socket=True)
|
||||
|
||||
import routes
|
||||
import routes.middleware
|
||||
import sys
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
from xml.dom import minidom
|
||||
from xml.parsers import expat
|
||||
|
||||
from kwapi.openstack.common import exception
|
||||
from kwapi.openstack.common.gettextutils import _
|
||||
from kwapi.openstack.common import jsonutils
|
||||
from kwapi.openstack.common import log as logging
|
||||
from kwapi.openstack.common import service
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def run_server(application, port):
|
||||
"""Run a WSGI server with the given application."""
|
||||
sock = eventlet.listen(('0.0.0.0', port))
|
||||
eventlet.wsgi.server(sock, application)
|
||||
|
||||
|
||||
class Service(service.Service):
|
||||
"""
|
||||
Provides a Service API for wsgi servers.
|
||||
|
||||
This gives us the ability to launch wsgi servers with the
|
||||
Launcher classes in service.py.
|
||||
"""
|
||||
|
||||
def __init__(self, application, port,
|
||||
host='0.0.0.0', backlog=128, threads=1000):
|
||||
self.application = application
|
||||
self._port = port
|
||||
self._host = host
|
||||
self.backlog = backlog
|
||||
super(Service, self).__init__(threads)
|
||||
|
||||
def start(self):
|
||||
"""Start serving this service using the provided server instance.
|
||||
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
super(Service, self).start()
|
||||
self._socket = eventlet.listen((self._host, self._port),
|
||||
backlog=self.backlog)
|
||||
self.tg.add_thread(self._run, self.application, self._socket)
|
||||
|
||||
@property
|
||||
def host(self):
|
||||
return self._socket.getsockname()[0] if self._socket else self._host
|
||||
|
||||
@property
|
||||
def port(self):
|
||||
return self._socket.getsockname()[1] if self._socket else self._port
|
||||
|
||||
def stop(self):
|
||||
"""Stop serving this API.
|
||||
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
super(Service, self).stop()
|
||||
|
||||
def _run(self, application, socket):
|
||||
"""Start a WSGI server in a new green thread."""
|
||||
logger = logging.getLogger('eventlet.wsgi')
|
||||
eventlet.wsgi.server(socket, application, custom_pool=self.tg.pool,
|
||||
log=logging.WritableLogger(logger))
|
||||
|
||||
|
||||
class Middleware(object):
|
||||
"""
|
||||
Base WSGI middleware wrapper. These classes require an application to be
|
||||
initialized that will be called next. By default the middleware will
|
||||
simply call its wrapped app, or you can override __call__ to customize its
|
||||
behavior.
|
||||
"""
|
||||
|
||||
def __init__(self, application):
|
||||
self.application = application
|
||||
|
||||
def process_request(self, req):
|
||||
"""
|
||||
Called on each request.
|
||||
|
||||
If this returns None, the next application down the stack will be
|
||||
executed. If it returns a response then that response will be returned
|
||||
and execution will stop here.
|
||||
"""
|
||||
return None
|
||||
|
||||
def process_response(self, response):
|
||||
"""Do whatever you'd like to the response."""
|
||||
return response
|
||||
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req):
|
||||
response = self.process_request(req)
|
||||
if response:
|
||||
return response
|
||||
response = req.get_response(self.application)
|
||||
return self.process_response(response)
|
||||
|
||||
|
||||
class Debug(Middleware):
|
||||
"""
|
||||
Helper class that can be inserted into any WSGI application chain
|
||||
to get information about the request and response.
|
||||
"""
|
||||
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req):
|
||||
print ("*" * 40) + " REQUEST ENVIRON"
|
||||
for key, value in req.environ.items():
|
||||
print key, "=", value
|
||||
print
|
||||
resp = req.get_response(self.application)
|
||||
|
||||
print ("*" * 40) + " RESPONSE HEADERS"
|
||||
for (key, value) in resp.headers.iteritems():
|
||||
print key, "=", value
|
||||
print
|
||||
|
||||
resp.app_iter = self.print_generator(resp.app_iter)
|
||||
|
||||
return resp
|
||||
|
||||
@staticmethod
|
||||
def print_generator(app_iter):
|
||||
"""
|
||||
Iterator that prints the contents of a wrapped string iterator
|
||||
when iterated.
|
||||
"""
|
||||
print ("*" * 40) + " BODY"
|
||||
for part in app_iter:
|
||||
sys.stdout.write(part)
|
||||
sys.stdout.flush()
|
||||
yield part
|
||||
print
|
||||
|
||||
|
||||
class Router(object):
|
||||
|
||||
"""
|
||||
WSGI middleware that maps incoming requests to WSGI apps.
|
||||
"""
|
||||
|
||||
def __init__(self, mapper):
|
||||
"""
|
||||
Create a router for the given routes.Mapper.
|
||||
|
||||
Each route in `mapper` must specify a 'controller', which is a
|
||||
WSGI app to call. You'll probably want to specify an 'action' as
|
||||
well and have your controller be a wsgi.Controller, which will route
|
||||
the request to the action method.
|
||||
|
||||
Examples:
|
||||
mapper = routes.Mapper()
|
||||
sc = ServerController()
|
||||
|
||||
# Explicit mapping of one route to a controller+action
|
||||
mapper.connect(None, "/svrlist", controller=sc, action="list")
|
||||
|
||||
# Actions are all implicitly defined
|
||||
mapper.resource("server", "servers", controller=sc)
|
||||
|
||||
# Pointing to an arbitrary WSGI app. You can specify the
|
||||
# {path_info:.*} parameter so the target app can be handed just that
|
||||
# section of the URL.
|
||||
mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
|
||||
"""
|
||||
self.map = mapper
|
||||
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
|
||||
self.map)
|
||||
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req):
|
||||
"""
|
||||
Route the incoming request to a controller based on self.map.
|
||||
If no match, return a 404.
|
||||
"""
|
||||
return self._router
|
||||
|
||||
@staticmethod
|
||||
@webob.dec.wsgify
|
||||
def _dispatch(req):
|
||||
"""
|
||||
Called by self._router after matching the incoming request to a route
|
||||
and putting the information into req.environ. Either returns 404
|
||||
or the routed WSGI app's response.
|
||||
"""
|
||||
match = req.environ['wsgiorg.routing_args'][1]
|
||||
if not match:
|
||||
return webob.exc.HTTPNotFound()
|
||||
app = match['controller']
|
||||
return app
|
||||
|
||||
|
||||
class Request(webob.Request):
|
||||
"""Add some Openstack API-specific logic to the base webob.Request."""
|
||||
|
||||
default_request_content_types = ('application/json', 'application/xml')
|
||||
default_accept_types = ('application/json', 'application/xml')
|
||||
default_accept_type = 'application/json'
|
||||
|
||||
def best_match_content_type(self, supported_content_types=None):
|
||||
"""Determine the requested response content-type.
|
||||
|
||||
Based on the query extension then the Accept header.
|
||||
Defaults to default_accept_type if we don't find a preference
|
||||
|
||||
"""
|
||||
supported_content_types = (supported_content_types or
|
||||
self.default_accept_types)
|
||||
|
||||
parts = self.path.rsplit('.', 1)
|
||||
if len(parts) > 1:
|
||||
ctype = 'application/{0}'.format(parts[1])
|
||||
if ctype in supported_content_types:
|
||||
return ctype
|
||||
|
||||
bm = self.accept.best_match(supported_content_types)
|
||||
return bm or self.default_accept_type
|
||||
|
||||
def get_content_type(self, allowed_content_types=None):
|
||||
"""Determine content type of the request body.
|
||||
|
||||
Does not do any body introspection, only checks header
|
||||
|
||||
"""
|
||||
if not "Content-Type" in self.headers:
|
||||
return None
|
||||
|
||||
content_type = self.content_type
|
||||
allowed_content_types = (allowed_content_types or
|
||||
self.default_request_content_types)
|
||||
|
||||
if content_type not in allowed_content_types:
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
return content_type
|
||||
|
||||
|
||||
class Resource(object):
|
||||
"""
|
||||
WSGI app that handles (de)serialization and controller dispatch.
|
||||
|
||||
Reads routing information supplied by RoutesMiddleware and calls
|
||||
the requested action method upon its deserializer, controller,
|
||||
and serializer. Those three objects may implement any of the basic
|
||||
controller action methods (create, update, show, index, delete)
|
||||
along with any that may be specified in the api router. A 'default'
|
||||
method may also be implemented to be used in place of any
|
||||
non-implemented actions. Deserializer methods must accept a request
|
||||
argument and return a dictionary. Controller methods must accept a
|
||||
request argument. Additionally, they must also accept keyword
|
||||
arguments that represent the keys returned by the Deserializer. They
|
||||
may raise a webob.exc exception or return a dict, which will be
|
||||
serialized by requested content type.
|
||||
"""
|
||||
def __init__(self, controller, deserializer=None, serializer=None):
|
||||
"""
|
||||
:param controller: object that implement methods created by routes lib
|
||||
:param deserializer: object that supports webob request deserialization
|
||||
through controller-like actions
|
||||
:param serializer: object that supports webob response serialization
|
||||
through controller-like actions
|
||||
"""
|
||||
self.controller = controller
|
||||
self.serializer = serializer or ResponseSerializer()
|
||||
self.deserializer = deserializer or RequestDeserializer()
|
||||
|
||||
@webob.dec.wsgify(RequestClass=Request)
|
||||
def __call__(self, request):
|
||||
"""WSGI method that controls (de)serialization and method dispatch."""
|
||||
|
||||
try:
|
||||
action, action_args, accept = self.deserialize_request(request)
|
||||
except exception.InvalidContentType:
|
||||
msg = _("Unsupported Content-Type")
|
||||
return webob.exc.HTTPUnsupportedMediaType(explanation=msg)
|
||||
except exception.MalformedRequestBody:
|
||||
msg = _("Malformed request body")
|
||||
return webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
action_result = self.execute_action(action, request, **action_args)
|
||||
try:
|
||||
return self.serialize_response(action, action_result, accept)
|
||||
# return unserializable result (typically a webob exc)
|
||||
except Exception:
|
||||
return action_result
|
||||
|
||||
def deserialize_request(self, request):
|
||||
return self.deserializer.deserialize(request)
|
||||
|
||||
def serialize_response(self, action, action_result, accept):
|
||||
return self.serializer.serialize(action_result, accept, action)
|
||||
|
||||
def execute_action(self, action, request, **action_args):
|
||||
return self.dispatch(self.controller, action, request, **action_args)
|
||||
|
||||
def dispatch(self, obj, action, *args, **kwargs):
|
||||
"""Find action-specific method on self and call it."""
|
||||
try:
|
||||
method = getattr(obj, action)
|
||||
except AttributeError:
|
||||
method = getattr(obj, 'default')
|
||||
|
||||
return method(*args, **kwargs)
|
||||
|
||||
def get_action_args(self, request_environment):
|
||||
"""Parse dictionary created by routes library."""
|
||||
try:
|
||||
args = request_environment['wsgiorg.routing_args'][1].copy()
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
try:
|
||||
del args['controller']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
try:
|
||||
del args['format']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return args
|
||||
|
||||
|
||||
class ActionDispatcher(object):
|
||||
"""Maps method name to local methods through action name."""
|
||||
|
||||
def dispatch(self, *args, **kwargs):
|
||||
"""Find and call local method."""
|
||||
action = kwargs.pop('action', 'default')
|
||||
action_method = getattr(self, str(action), self.default)
|
||||
return action_method(*args, **kwargs)
|
||||
|
||||
def default(self, data):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class DictSerializer(ActionDispatcher):
|
||||
"""Default request body serialization"""
|
||||
|
||||
def serialize(self, data, action='default'):
|
||||
return self.dispatch(data, action=action)
|
||||
|
||||
def default(self, data):
|
||||
return ""
|
||||
|
||||
|
||||
class JSONDictSerializer(DictSerializer):
|
||||
"""Default JSON request body serialization"""
|
||||
|
||||
def default(self, data):
|
||||
def sanitizer(obj):
|
||||
if isinstance(obj, datetime.datetime):
|
||||
_dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
|
||||
return _dtime.isoformat()
|
||||
return unicode(obj)
|
||||
return jsonutils.dumps(data, default=sanitizer)
|
||||
|
||||
|
||||
class XMLDictSerializer(DictSerializer):
|
||||
|
||||
def __init__(self, metadata=None, xmlns=None):
|
||||
"""
|
||||
:param metadata: information needed to deserialize xml into
|
||||
a dictionary.
|
||||
:param xmlns: XML namespace to include with serialized xml
|
||||
"""
|
||||
super(XMLDictSerializer, self).__init__()
|
||||
self.metadata = metadata or {}
|
||||
self.xmlns = xmlns
|
||||
|
||||
def default(self, data):
|
||||
# We expect data to contain a single key which is the XML root.
|
||||
root_key = data.keys()[0]
|
||||
doc = minidom.Document()
|
||||
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
|
||||
|
||||
return self.to_xml_string(node)
|
||||
|
||||
def to_xml_string(self, node, has_atom=False):
|
||||
self._add_xmlns(node, has_atom)
|
||||
return node.toprettyxml(indent=' ', encoding='UTF-8')
|
||||
|
||||
#NOTE (ameade): the has_atom should be removed after all of the
|
||||
# xml serializers and view builders have been updated to the current
|
||||
# spec that requires all responses to include xmlns:atom; the has_atom
|
||||
# flag is to prevent current tests from breaking
|
||||
def _add_xmlns(self, node, has_atom=False):
|
||||
if self.xmlns is not None:
|
||||
node.setAttribute('xmlns', self.xmlns)
|
||||
if has_atom:
|
||||
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
|
||||
|
||||
def _to_xml_node(self, doc, metadata, nodename, data):
|
||||
"""Recursive method to convert data members to XML nodes."""
|
||||
result = doc.createElement(nodename)
|
||||
|
||||
# Set the xml namespace if one is specified
|
||||
# TODO(justinsb): We could also use prefixes on the keys
|
||||
xmlns = metadata.get('xmlns', None)
|
||||
if xmlns:
|
||||
result.setAttribute('xmlns', xmlns)
|
||||
|
||||
#TODO(bcwaldon): accomplish this without a type-check
|
||||
if type(data) is list:
|
||||
collections = metadata.get('list_collections', {})
|
||||
if nodename in collections:
|
||||
metadata = collections[nodename]
|
||||
for item in data:
|
||||
node = doc.createElement(metadata['item_name'])
|
||||
node.setAttribute(metadata['item_key'], str(item))
|
||||
result.appendChild(node)
|
||||
return result
|
||||
singular = metadata.get('plurals', {}).get(nodename, None)
|
||||
if singular is None:
|
||||
if nodename.endswith('s'):
|
||||
singular = nodename[:-1]
|
||||
else:
|
||||
singular = 'item'
|
||||
for item in data:
|
||||
node = self._to_xml_node(doc, metadata, singular, item)
|
||||
result.appendChild(node)
|
||||
#TODO(bcwaldon): accomplish this without a type-check
|
||||
elif type(data) is dict:
|
||||
collections = metadata.get('dict_collections', {})
|
||||
if nodename in collections:
|
||||
metadata = collections[nodename]
|
||||
for k, v in data.items():
|
||||
node = doc.createElement(metadata['item_name'])
|
||||
node.setAttribute(metadata['item_key'], str(k))
|
||||
text = doc.createTextNode(str(v))
|
||||
node.appendChild(text)
|
||||
result.appendChild(node)
|
||||
return result
|
||||
attrs = metadata.get('attributes', {}).get(nodename, {})
|
||||
for k, v in data.items():
|
||||
if k in attrs:
|
||||
result.setAttribute(k, str(v))
|
||||
else:
|
||||
node = self._to_xml_node(doc, metadata, k, v)
|
||||
result.appendChild(node)
|
||||
else:
|
||||
# Type is atom
|
||||
node = doc.createTextNode(str(data))
|
||||
result.appendChild(node)
|
||||
return result
|
||||
|
||||
def _create_link_nodes(self, xml_doc, links):
|
||||
link_nodes = []
|
||||
for link in links:
|
||||
link_node = xml_doc.createElement('atom:link')
|
||||
link_node.setAttribute('rel', link['rel'])
|
||||
link_node.setAttribute('href', link['href'])
|
||||
if 'type' in link:
|
||||
link_node.setAttribute('type', link['type'])
|
||||
link_nodes.append(link_node)
|
||||
return link_nodes
|
||||
|
||||
|
||||
class ResponseHeadersSerializer(ActionDispatcher):
|
||||
"""Default response headers serialization"""
|
||||
|
||||
def serialize(self, response, data, action):
|
||||
self.dispatch(response, data, action=action)
|
||||
|
||||
def default(self, response, data):
|
||||
response.status_int = 200
|
||||
|
||||
|
||||
class ResponseSerializer(object):
|
||||
"""Encode the necessary pieces into a response object"""
|
||||
|
||||
def __init__(self, body_serializers=None, headers_serializer=None):
|
||||
self.body_serializers = {
|
||||
'application/xml': XMLDictSerializer(),
|
||||
'application/json': JSONDictSerializer(),
|
||||
}
|
||||
self.body_serializers.update(body_serializers or {})
|
||||
|
||||
self.headers_serializer = (headers_serializer or
|
||||
ResponseHeadersSerializer())
|
||||
|
||||
def serialize(self, response_data, content_type, action='default'):
|
||||
"""Serialize a dict into a string and wrap in a wsgi.Request object.
|
||||
|
||||
:param response_data: dict produced by the Controller
|
||||
:param content_type: expected mimetype of serialized response body
|
||||
|
||||
"""
|
||||
response = webob.Response()
|
||||
self.serialize_headers(response, response_data, action)
|
||||
self.serialize_body(response, response_data, content_type, action)
|
||||
return response
|
||||
|
||||
def serialize_headers(self, response, data, action):
|
||||
self.headers_serializer.serialize(response, data, action)
|
||||
|
||||
def serialize_body(self, response, data, content_type, action):
|
||||
response.headers['Content-Type'] = content_type
|
||||
if data is not None:
|
||||
serializer = self.get_body_serializer(content_type)
|
||||
response.body = serializer.serialize(data, action)
|
||||
|
||||
def get_body_serializer(self, content_type):
|
||||
try:
|
||||
return self.body_serializers[content_type]
|
||||
except (KeyError, TypeError):
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
|
||||
|
||||
class RequestHeadersDeserializer(ActionDispatcher):
|
||||
"""Default request headers deserializer"""
|
||||
|
||||
def deserialize(self, request, action):
|
||||
return self.dispatch(request, action=action)
|
||||
|
||||
def default(self, request):
|
||||
return {}
|
||||
|
||||
|
||||
class RequestDeserializer(object):
|
||||
"""Break up a Request object into more useful pieces."""
|
||||
|
||||
def __init__(self, body_deserializers=None, headers_deserializer=None,
|
||||
supported_content_types=None):
|
||||
|
||||
self.supported_content_types = supported_content_types
|
||||
|
||||
self.body_deserializers = {
|
||||
'application/xml': XMLDeserializer(),
|
||||
'application/json': JSONDeserializer(),
|
||||
}
|
||||
self.body_deserializers.update(body_deserializers or {})
|
||||
|
||||
self.headers_deserializer = (headers_deserializer or
|
||||
RequestHeadersDeserializer())
|
||||
|
||||
def deserialize(self, request):
|
||||
"""Extract necessary pieces of the request.
|
||||
|
||||
:param request: Request object
|
||||
:returns: tuple of (expected controller action name, dictionary of
|
||||
keyword arguments to pass to the controller, the expected
|
||||
content type of the response)
|
||||
|
||||
"""
|
||||
action_args = self.get_action_args(request.environ)
|
||||
action = action_args.pop('action', None)
|
||||
|
||||
action_args.update(self.deserialize_headers(request, action))
|
||||
action_args.update(self.deserialize_body(request, action))
|
||||
|
||||
accept = self.get_expected_content_type(request)
|
||||
|
||||
return (action, action_args, accept)
|
||||
|
||||
def deserialize_headers(self, request, action):
|
||||
return self.headers_deserializer.deserialize(request, action)
|
||||
|
||||
def deserialize_body(self, request, action):
|
||||
if not len(request.body) > 0:
|
||||
LOG.debug(_("Empty body provided in request"))
|
||||
return {}
|
||||
|
||||
try:
|
||||
content_type = request.get_content_type()
|
||||
except exception.InvalidContentType:
|
||||
LOG.debug(_("Unrecognized Content-Type provided in request"))
|
||||
raise
|
||||
|
||||
if content_type is None:
|
||||
LOG.debug(_("No Content-Type provided in request"))
|
||||
return {}
|
||||
|
||||
try:
|
||||
deserializer = self.get_body_deserializer(content_type)
|
||||
except exception.InvalidContentType:
|
||||
LOG.debug(_("Unable to deserialize body as provided Content-Type"))
|
||||
raise
|
||||
|
||||
return deserializer.deserialize(request.body, action)
|
||||
|
||||
def get_body_deserializer(self, content_type):
|
||||
try:
|
||||
return self.body_deserializers[content_type]
|
||||
except (KeyError, TypeError):
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
|
||||
def get_expected_content_type(self, request):
|
||||
return request.best_match_content_type(self.supported_content_types)
|
||||
|
||||
def get_action_args(self, request_environment):
|
||||
"""Parse dictionary created by routes library."""
|
||||
try:
|
||||
args = request_environment['wsgiorg.routing_args'][1].copy()
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
try:
|
||||
del args['controller']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
try:
|
||||
del args['format']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return args
|
||||
|
||||
|
||||
class TextDeserializer(ActionDispatcher):
|
||||
"""Default request body deserialization"""
|
||||
|
||||
def deserialize(self, datastring, action='default'):
|
||||
return self.dispatch(datastring, action=action)
|
||||
|
||||
def default(self, datastring):
|
||||
return {}
|
||||
|
||||
|
||||
class JSONDeserializer(TextDeserializer):
|
||||
|
||||
def _from_json(self, datastring):
|
||||
try:
|
||||
return jsonutils.loads(datastring)
|
||||
except ValueError:
|
||||
msg = _("cannot understand JSON")
|
||||
raise exception.MalformedRequestBody(reason=msg)
|
||||
|
||||
def default(self, datastring):
|
||||
return {'body': self._from_json(datastring)}
|
||||
|
||||
|
||||
class XMLDeserializer(TextDeserializer):
|
||||
|
||||
def __init__(self, metadata=None):
|
||||
"""
|
||||
:param metadata: information needed to deserialize xml into
|
||||
a dictionary.
|
||||
"""
|
||||
super(XMLDeserializer, self).__init__()
|
||||
self.metadata = metadata or {}
|
||||
|
||||
def _from_xml(self, datastring):
|
||||
plurals = set(self.metadata.get('plurals', {}))
|
||||
|
||||
try:
|
||||
node = minidom.parseString(datastring).childNodes[0]
|
||||
return {node.nodeName: self._from_xml_node(node, plurals)}
|
||||
except expat.ExpatError:
|
||||
msg = _("cannot understand XML")
|
||||
raise exception.MalformedRequestBody(reason=msg)
|
||||
|
||||
def _from_xml_node(self, node, listnames):
|
||||
"""Convert a minidom node to a simple Python type.
|
||||
|
||||
:param listnames: list of XML node names whose subnodes should
|
||||
be considered list items.
|
||||
|
||||
"""
|
||||
|
||||
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
|
||||
return node.childNodes[0].nodeValue
|
||||
elif node.nodeName in listnames:
|
||||
return [self._from_xml_node(n, listnames) for n in node.childNodes]
|
||||
else:
|
||||
result = dict()
|
||||
for attr in node.attributes.keys():
|
||||
result[attr] = node.attributes[attr].nodeValue
|
||||
for child in node.childNodes:
|
||||
if child.nodeType != node.TEXT_NODE:
|
||||
result[child.nodeName] = self._from_xml_node(child,
|
||||
listnames)
|
||||
return result
|
||||
|
||||
def find_first_child_named(self, parent, name):
|
||||
"""Search a nodes children for the first child with a given name"""
|
||||
for node in parent.childNodes:
|
||||
if node.nodeName == name:
|
||||
return node
|
||||
return None
|
||||
|
||||
def find_children_named(self, parent, name):
|
||||
"""Return all of a nodes children who have the given name"""
|
||||
for node in parent.childNodes:
|
||||
if node.nodeName == name:
|
||||
yield node
|
||||
|
||||
def extract_text(self, node):
|
||||
"""Get the text field contained by the given node"""
|
||||
if len(node.childNodes) == 1:
|
||||
child = node.childNodes[0]
|
||||
if child.nodeType == child.TEXT_NODE:
|
||||
return child.nodeValue
|
||||
return ""
|
||||
|
||||
def default(self, datastring):
|
||||
return {'body': self._from_xml(datastring)}
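For context, a compact sketch of wiring these WSGI pieces together; ProbeController, its payload, and the port are hypothetical:

import routes
from kwapi.openstack.common import wsgi

class ProbeController(object):
    def index(self, request, **kwargs):
        return {'probes': []}  # hypothetical payload, serialized by content type

mapper = routes.Mapper()
mapper.connect(None, '/probes', controller=wsgi.Resource(ProbeController()),
               action='index')

server = wsgi.Service(wsgi.Router(mapper), 8080)  # served on the green pool
server.start()
server.wait()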
|
|
@ -0,0 +1,3 @@
|
|||
[DEFAULT]
|
||||
modules=cfg,log,iniparser,gettextutils,jsonutils,timeutils,local,notifier
|
||||
base=kwapi
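With base=kwapi, each listed module is vendored under the kwapi.openstack.common package by openstack-common's update tooling, so callers import it from there, e.g.:

from kwapi.openstack.common import cfg
from kwapi.openstack.common import log as logging
from kwapi.openstack.common import timeutils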